#!/usr/bin/env python # coding: utf-8 # # Intermediate Measurements Tutorial # This tutorial will demonstrate how perform tomography on gate sets which, in addition to normal gates, contain *quantum instruments*. Quantum instruments are maps that act on a qubit state (density matrix) and produce a qubit state along with a classical outcome. That is, instruments are maps from $\mathcal{B}(\mathcal{H})$, the space of density matrices, to $\mathcal{B}(\mathcal{H}) \otimes K(n)$, where $K(n)$ is a classical space of $n$ elements. # # In pyGSTi, instruments are represented as collections of gates, one for each classical "outcome" of the instrument. This tutorial will demonstrate how to add instruments to `GateSet` objects, compute probabilities using such `GateSet`s, and ultimately perform tomography on them. We'll start with a few familiar imports: # In[1]: import pygsti from pygsti.construction import std1Q_XYI as std import numpy as np # ## Instrument construction # Next, we'll add an instrument to our "standard" gate set - a 1-qubit gate set containing $I$, $X(\pi/2)$, and $Y(\pi/2)$ gates. The ideal instrument will be named `"Iz"` (all instrument names must begin with `"I"`), and consist of perfect projectors onto the 0 and 1 states. Instead of labelling the associated outcomes "0" and "1", which might me most logical, we'll name them "p0" and "p1" so it's easier to distinguish them from the final POVM outcomes which *are* labelled "0" and "1". 
# In[2]: #Make a copy so we don't modify the original gs_target = std.gs_target.copy() #Create and add the ideal instrument E0 = gs_target.effects['0'] E1 = gs_target.effects['1'] # Alternate indexing that uses POVM label explicitly # E0 = gs_target['Mdefault']['0'] # 'Mdefault' = POVM label, '0' = effect label # E1 = gs_target['Mdefault']['1'] Gmz_plus = np.dot(E0,E0.T) #note effect vectors are stored as column vectors Gmz_minus = np.dot(E1,E1.T) gs_target['Iz'] = pygsti.obj.Instrument({'p0': Gmz_plus, 'p1': Gmz_minus}) #For later use, record the identity POVM vector povm_ident = gs_target.effects['0'] + gs_target.effects['1'] # In order to generate some simulated data later on, we'll now create a noisy version of `gs_target` by depolarizing the state preparation, gates, and POVM, and also rotating the basis that is measured by the instrument and POVM. # In[3]: gs_noisy = gs_target.depolarize(gate_noise=0.01, spam_noise=0.01) gs_noisy.effects.depolarize(0.01) #because above call only depolarizes the state prep, not the POVMs # add a rotation error to the POVM Uerr = pygsti.rotation_gate_mx([0,0.02,0]) gs_noisy.effects['0'] = np.dot(gs_noisy.effects['0'].T,Uerr).T gs_noisy.effects['1'] = povm_ident - gs_noisy.effects['0'] #Could also do this: #E0 = np.dot(gs_noisy['Mdefault']['0'].T,Uerr).T #E1 = povm_ident - E0 #gs_noisy['Mdefault'] = pygsti.obj.UnconstrainedPOVM({'0': E0, '1': E1}) # Use the same rotated effect vectors to "rotate" the instrument Iz too E0 = gs_noisy.effects['0'] E1 = gs_noisy.effects['1'] Gmz_plus = np.dot(E0,E0.T) Gmz_minus = np.dot(E1,E1.T) gs_noisy['Iz'] = pygsti.obj.Instrument({'p0': Gmz_plus, 'p1': Gmz_minus}) #print(gs_noisy) #print the gate set # ## Generating probabilities # Instrument labels (e.g. `"Iz"`) may be included within `GateString` objects, and `GateSet` objects are able to compute probabilities for them just like normal (non-instrument) gate sequences. 
# The difference is that probabilities are labeled by tuples of instrument and POVM outcomes -
# referred to as **"outcome tuples"** - one for each instrument and one for the final POVM:

# In[4]:

dict(gs_target.probs( pygsti.obj.GateString(('Gx','Iz')) ))

# In[5]:

dict(gs_target.probs( pygsti.obj.GateString(('Iz','Gx','Iz')) ))

# In fact, pyGSTi *always* labels probabilities using outcome tuples, it's just that in the
# non-instrument case they're always 1-tuples and by `OutcomeLabelDict` magic can be treated
# as if they were just strings:

# In[6]:

probs = gs_target.probs( pygsti.obj.GateString(('Gx',)) )
print("probs = ",dict(probs))
print("probs['0'] = ", probs['0']) #This works...
print("probs[('0',)] = ", probs[('0',)]) # and so does this.

# ## Performing tomography
#
# ### Simulated data generation
# Now let's perform tomography on a gate set that includes instruments.  First, we'll generate
# some data using `gs_noisy` in exactly the same way as we would for any other gate set:

# In[7]:

germs = std.germs
fiducials = std.fiducials
max_lengths = [1] # keep it simple & fast

lsgst_list = pygsti.construction.make_lsgst_experiment_list(
    gs_noisy,fiducials,fiducials,germs,max_lengths)

#print("Gate sequences:")
#print(lsgst_list) #note that this contains LGST strings with "Iz"

#Create the DataSet: 1000 multinomial samples per sequence, fixed seed for reproducibility
ds = pygsti.construction.generate_fake_data(gs_noisy,lsgst_list,1000,'multinomial',seed=2018)

#Write it to a text file to demonstrate the format:
pygsti.io.write_dataset("tutorial_files/intermediate_meas_dataset.txt",ds)

# Notice the format of
# [tutorial_files/intermediate_meas_dataset.txt](tutorial_files/intermediate_meas_dataset.txt),
# which includes a column for each distinct outcome tuple.  Since not all experiments contain
# data for all outcome tuples, the `"--"` is used as a placeholder.
# Now that the data is generated, we run LGST or LSGST just like we would for any other gateset:
#
# ### LGST

# In[8]:

#Run LGST
gs_lgst = pygsti.do_lgst(ds, fiducials,fiducials, gs_target)
#print(gs_lgst)

#Gauge optimize the result to the true data-generating gate set (gs_noisy),
# and compare.  Mismatch is due to finite sample noise.
gs_lgst_opt = pygsti.gaugeopt_to_target(gs_lgst,gs_noisy)
print(gs_noisy.strdiff(gs_lgst_opt))
print("Frobdiff after GOpt = ",gs_noisy.frobeniusdist(gs_lgst_opt))

# ### Long-sequence GST
# Instruments just add parameters to a `GateSet` like gates, state preparations, and POVMs do.
# The total number of parameters in our gate set is
#
# $4$ (prep) + $2\times 4$ (2 effects) + $5\times 16$ (3 gates and 2 instrument members) $= 92$.

# In[9]:

gs_target.num_params()

# In[10]:

#Run long sequence GST
results = pygsti.do_long_sequence_gst(ds,gs_target,fiducials,fiducials,germs,max_lengths)

# In[11]:

#Compare estimated gate set (after gauge opt) to data-generating one
gs_est = results.estimates['default'].gatesets['go0']
gs_est_opt = pygsti.gaugeopt_to_target(gs_est,gs_noisy)
print("Frobdiff after GOpt = ", gs_noisy.frobeniusdist(gs_est_opt))

# The same analysis can be done for a trace-preserving gate set, whose instruments are
# constrained to *add* to a perfectly trace-preserving map.
# The number of parameters in the gate set is now:
#
# $3$ (prep) + $1\times 4$ (effect and complement) + $3\times 12$ (3 gates) +
# $(2\times 16 - 3)$ (TP instrument) $= 71$
#
# NOTE(review): the terms as written sum to 72, not 71 (3 + 4 + 36 + 29 = 72).  The printed
# `num_params()` below is the authoritative count - confirm which term is off; presumably the
# TP-instrument contribution should be 28 (one free 16-parameter member plus a 12-parameter
# TP-constrained sum).

# In[12]:

gs_targetTP = gs_target.copy()
gs_targetTP.set_all_parameterizations("TP")
print("POVM type = ",type(gs_targetTP["Mdefault"])," Np=",gs_targetTP["Mdefault"].num_params())
print("Instrument type = ",type(gs_targetTP["Iz"])," Np=",gs_targetTP["Iz"].num_params())
print("Number of gateset parameters = ", gs_targetTP.num_params())

# In[13]:

resultsTP = pygsti.do_long_sequence_gst(ds,gs_targetTP,fiducials,fiducials,germs,max_lengths)

# In[14]:

#Again compare estimated gate set (after gauge opt) to data-generating one
gs_est = resultsTP.estimates['default'].gatesets['go0']
gs_est_opt = pygsti.gaugeopt_to_target(gs_est,gs_noisy)
print("Frobdiff after GOpt = ", gs_noisy.frobeniusdist(gs_est_opt))

# **That's it!**  You've done tomography on a gate set with intermediate measurements
# (instruments).

# In[ ]: