#!/usr/bin/env python
# coding: utf-8

# # HEOM 4: Dynamical decoupling of a non-Markovian environment

# ## Introduction
#
# Following [Lorenza Viola and Seth Lloyd](https://arxiv.org/abs/quant-ph/9803057) we consider an example of dynamical decoupling.
# We choose a drive which performs pi rotations, interspersed with short periods where the bath causes dephasing.
#
# We first show the standard example of equally spaced pulses, and then consider the 'optimal' Uhrig spacing ([Götz S. Uhrig, Phys. Rev. Lett. 98, 100504 (2007)](https://arxiv.org/abs/quant-ph/0609203)).

# ## Setup

# In[1]:


import numpy as np
import matplotlib.pyplot as plt

import qutip
from qutip import (
    QobjEvo,
    basis,
    expect,
    ket2dm,
    sigmax,
    sigmaz,
)
from qutip.solver.heom import (
    HEOMSolver,
    DrudeLorentzPadeBath,
)

from ipywidgets import IntProgress
from IPython.display import display

get_ipython().run_line_magic('matplotlib', 'inline')


# ## Helper functions
#
# Let's define some helper functions for calculating the spectral density:

# In[2]:


def dl_spectrum(w, lam, gamma):
    """ Return the Drude-Lorentz spectral density. """
    J = w * 2 * lam * gamma / (gamma**2 + w**2)
    return J


# In[3]:


# Solver options:
#
# The max_step must be set to a time shorter than the length of the
# shortest pulse, otherwise the solver might skip over a pulse.

options = {
    "nsteps": 1500,
    "store_states": True,
    "rtol": 1e-12,
    "atol": 1e-12,
    "max_step": 1 / 20.0,
    "method": "vern9",
    "progress_bar": "enhanced",
}


# ## System and bath definition
#
# Now we define the system and bath properties and the HEOM parameters. The system is a single stationary qubit with $H = 0$ and the bath is a bosonic bath with a Drude-Lorentz spectrum.

# In[4]:


# Define the system Hamiltonian.
#
# The system isn't evolving by itself, so the Hamiltonian is 0 (with the
# correct dimensions):

H_sys = 0 * sigmaz()


# In[5]:


# Define some operators with which we will measure the system

# 1,1 element of density matrix - corresponding to the ground state
P11p = basis(2, 0) * basis(2, 0).dag()
P22p = basis(2, 1) * basis(2, 1).dag()

# 1,2 element of density matrix - corresponding to the coherence
P12p = basis(2, 0) * basis(2, 1).dag()


# In[6]:


# Properties for the Drude-Lorentz bath

lam = 0.0005
gamma = 0.005
T = 0.05

# bath-system coupling operator:
Q = sigmaz()

# number of terms to keep in the expansion of the bath correlation function:
Nk = 3

bath = DrudeLorentzPadeBath(Q, lam=lam, gamma=gamma, T=T, Nk=Nk)


# In[7]:


# HEOM parameters

# number of layers to keep in the hierarchy:
NC = 6


# To decouple the system from the environment, we will drive it with a time-dependent pulse that couples to the system via the $\sigma_x$ operator. The area under the pulse will usually be set to $\pi / 2$ so that each pulse implements a $\pi$ rotation, i.e. flips the qubit state.
#
# Below we define a function that returns the pulse (which is itself a function):

# In[8]:


def drive(amplitude, delay, integral):
    """ Coefficient of the drive as a function of time.

    The drive consists of a series of constant pulses with a fixed delay
    between them.

    Parameters
    ----------
    amplitude : float
        The amplitude of the drive during the pulse.
    delay : float
        The time delay between successive pulses.
    integral : float
        The integral of the pulse. This determines the duration of each
        pulse, with the duration equal to the integral divided by the
        amplitude.
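    Returns
    -------
    function
        The pulse coefficient ``pulse(t)``, equal to ``amplitude`` during a
        pulse and ``0`` during the delays between pulses.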
""" duration = integral / amplitude period = duration + delay def pulse(t): t = t % period if t < duration: return amplitude return 0 return pulse H_drive = sigmax() # ## Plot the spectral density # # Let's start by plotting the spectral density of our Drude-Lorentz bath: # In[9]: wlist = np.linspace(0, 0.5, 1000) J = dl_spectrum(wlist, lam, gamma) fig, axes = plt.subplots(1, 1, figsize=(8, 8)) axes.plot(wlist, J, 'r', linewidth=2) axes.set_xlabel(r'$\omega$', fontsize=28) axes.set_ylabel(r'J', fontsize=28); # ## Dynamic decoupling with fast and slow pulses # # Now we are ready to explore dynamic decoupling from the environment. # # First we will drive the system with fast, large amplitude pulses. Then we will drive the system with slower, smaller amplitude pulses. The faster pulses decoupling the system more effectively and retain the coherence longer, but the slower pulses help too. # # Let's start by simulating the fast pulses: # In[10]: # Fast driving (quick, large amplitude pulses) tlist = np.linspace(0, 400, 1000) # start with a superposition so there is something to dephase! rho0 = (basis(2, 1) + basis(2, 0)).unit() rho0 = ket2dm(rho0) # without pulses hsolver = HEOMSolver(H_sys, bath, NC, options=options) outputnoDD = hsolver.run(rho0, tlist) # with pulses drive_fast = drive(amplitude=0.5, delay=20, integral=np.pi / 2) H_d = qutip.QobjEvo([H_sys, [H_drive, drive_fast]]) hsolver = HEOMSolver(H_d, bath, NC, options=options) outputDD = hsolver.run(rho0, tlist) # And now the longer slower pulses: # In[11]: # Slow driving (longer, small amplitude pulses) # without pulses hsolver = HEOMSolver(H_sys, bath, NC, options=options) outputnoDDslow = hsolver.run(rho0, tlist) # with pulses drive_slow = drive(amplitude=0.01, delay=20, integral=np.pi/2) H_d = QobjEvo([H_sys, [H_drive, drive_slow]]) hsolver = HEOMSolver(H_d, bath, NC, options=options) outputDDslow = hsolver.run(rho0, tlist) # Now let's plot all of the results and the shapes of the pulses: # In[12]: def plot_dd_results(outputnoDD, outputDD, outputDDslow): fig, axes = plt.subplots(2, 1, sharex=False, figsize=(12, 12)) # Plot the dynamic decoupling results: tlist = outputDD.times P12 = basis(2, 1) * basis(2, 0).dag() P12DD = qutip.expect(outputDD.states, P12) P12noDD = qutip.expect(outputnoDD.states, P12) P12DDslow = qutip.expect(outputDDslow.states, P12) plt.sca(axes[0]) plt.yticks([0, 0.25, 0.5], [0, 0.25, 0.5]) axes[0].plot( tlist, np.real(P12DD), 'green', linestyle='-', linewidth=2, label="HEOM with fast DD", ) axes[0].plot( tlist, np.real(P12DDslow), 'blue', linestyle='-', linewidth=2, label="HEOM with slow DD", ) axes[0].plot( tlist, np.real(P12noDD), 'orange', linestyle='--', linewidth=2, label="HEOM no DD", ) axes[0].locator_params(axis='y', nbins=3) axes[0].locator_params(axis='x', nbins=3) axes[0].set_ylabel(r"$\rho_{01}$", fontsize=30) axes[0].legend(loc=4) axes[0].text(0, 0.4, "(a)", fontsize=28) # Plot the drive pulses: pulse = [drive_fast(t) for t in tlist] pulseslow = [drive_slow(t) for t in tlist] plt.sca(axes[1]) plt.yticks([0., 0.25, 0.5], [0, 0.25, 0.5]) axes[1].plot( tlist, pulse, 'green', linestyle='-', linewidth=2, label="Drive fast", ) axes[1].plot( tlist, pulseslow, 'blue', linestyle='--', linewidth=2, label="Drive slow", ) axes[1].locator_params(axis='y', nbins=3) axes[1].locator_params(axis='x', nbins=3) axes[1].set_xlabel(r'$t\bar{V}_{\mathrm{f}}$', fontsize=30) axes[1].set_ylabel(r'Drive amplitude/$\bar{V}_{\mathrm{f}}$', fontsize=30) axes[1].legend(loc=1) axes[1].text(0, 0.4, "(b)", fontsize=28) 
    fig.tight_layout()


# In[13]:


plot_dd_results(outputnoDD, outputDD, outputDDslow)


# ## Non-equally spaced pulses
#
# Next we consider non-equally spaced pulses.
#
# Rather than plot as a function of time, we just consider the final coherence after time $T$ and 100 pulses. We change the width of the environment to demonstrate that the Uhrig sequence (i.e. the non-evenly spaced pulses) can be sub-optimal when the bath is very broad.
#
# Instead of evenly spaced pulses, we will use pulses where the cumulative delay after $j$ pulses is given by:
#
# $$
# \sin^2\left(\frac{\pi}{2} \frac{j}{N + 1}\right)
# $$
#
# This is just a convenient way to describe the varying delay. We could have chosen another monotonically increasing function to represent the cumulative delay (although it might not be as effective).

# In[14]:


def cumulative_delay_fractions(N):
    """ Return an array of N + 1 cumulative delay fractions.

    The j'th entry in the array is the sum of all delays before the j'th
    pulse, as a fraction of the total delay. The values should be
    monotonically increasing, starting at 0 and ending (very nearly) at 1,
    i.e. essentially the entire cumulative delay should have been used once
    the sequence of pulses is complete.

    This implementation returns:

        sin((pi / 2) * (j / (N + 1)))**2

    as the cumulative delay after the j'th pulse.
    """
    return np.array([
        np.sin((np.pi / 2) * (j / (N + 1)))**2
        for j in range(0, N + 1)
    ])


def drive_opt(amplitude, avg_delay, integral, N):
    """ Return a pulse function with optimally spaced pulses.

    Our previous pulses were evenly spaced. Here we instead use a varying
    delay after the j'th pulse.

    The cumulative delay is described by the function
    ``cumulative_delay_fractions`` above.
    """
    duration = integral / amplitude
    cumulative_delays = N * avg_delay * cumulative_delay_fractions(N)
    t_start = cumulative_delays + duration * np.arange(0, N + 1)
    t_end = cumulative_delays + duration * np.arange(1, N + 2)

    def pulse(t):
        if any((t_start <= t) & (t <= t_end)):
            return amplitude
        return 0.0

    return pulse


# Let's plot the cumulative delays and see what they look like. Note that the cumulative delay starts at $0$, ends very nearly at $1$, and is monotonically increasing, as required.
#
# On the same axes we plot the individual $j^{th}$ delays as a fraction of the average delay.

# In[15]:


def plot_cumulative_delay_fractions(N):
    cumulative = cumulative_delay_fractions(N)
    individual = (cumulative[1:] - cumulative[:-1]) * N
    plt.plot(np.arange(0, N + 1), cumulative, label="Cumulative delay")
    plt.plot(np.arange(0, N), individual, label="j'th delay")
    plt.xlabel("j")
    plt.ylabel("Fraction of delay")
    plt.legend()


plot_cumulative_delay_fractions(100)


# And now let us plot the first ten even and optimally spaced pulses together to compare them:

# In[16]:


def plot_even_and_optimally_spaced_pulses():
    amplitude = 10.0
    integral = np.pi / 2
    duration = integral / amplitude
    delay = 1.0 - duration

    tlist = np.linspace(0, 10, 1000)

    pulse_opt = drive_opt(amplitude, delay, integral, 100)
    pulse_eq = drive(amplitude, delay, integral)

    plt.plot(
        tlist, [pulse_opt(t) for t in tlist], label="opt",
    )
    plt.plot(
        tlist, [pulse_eq(t) for t in tlist], label="eq",
    )
    plt.legend(loc=4)


plot_even_and_optimally_spaced_pulses()


# Now let's simulate the effectiveness of the two sets of delays by comparing how well they maintain coherence after a hundred pulses.
#
# We'll perform the simulation over a range of lambdas and gammas to show how the performance of the non-evenly spaced delays depends on the width of the bath spectral function.
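# Before running the comparison, as an optional sanity check we can verify
# that the delay fractions are monotonically increasing and end very nearly
# at $1$, as required:

# In[ ]:


fractions = cumulative_delay_fractions(100)
assert np.all(np.diff(fractions) > 0)
assert abs(fractions[-1] - 1.0) < 1e-3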
# In[17]:


# Bath parameters to simulate over:

# We use only two lambdas and two gammas so that the notebook executes
# quickly:
lams = [0.005, 0.0005]
gammas = np.linspace(0.005, 0.05, 2)

# But one can also extend the lists to larger ones:
#
# lams = [0.01, 0.005, 0.0005]
# gammas = np.linspace(0.005, 0.05, 10)

# Set up a progress bar:
progress = IntProgress(min=0, max=(2 * len(lams) * len(gammas)))
display(progress)


def simulate_100_pulses(lam, gamma, T, NC, Nk):
    """ Simulate the evolution of 100 evenly and optimally spaced pulses.

    Returns the expectation value of P12p from the final state of
    each evolution.
    """
    rho0 = (basis(2, 1) + basis(2, 0)).unit()
    rho0 = ket2dm(rho0)

    N = 100  # number of pulses to simulate
    avg_cycle_time = 1.0  # average time from one pulse to the next
    t_max = N * avg_cycle_time

    tlist = np.linspace(0, t_max, 100)

    amplitude = 10.0
    integral = np.pi / 2
    duration = integral / amplitude
    delay = avg_cycle_time - duration

    bath = DrudeLorentzPadeBath(Q, lam=lam, gamma=gamma, T=T, Nk=Nk)

    # Equally spaced pulses:

    pulse_eq = drive(amplitude, delay, integral)
    H_d = QobjEvo([H_sys, [H_drive, pulse_eq]])

    hsolver = HEOMSolver(H_d, bath, NC, options=options)
    result = hsolver.run(rho0, tlist)

    P12_eq = expect(result.states[-1], P12p)
    progress.value += 1

    # Non-equally spaced pulses:

    pulse_opt = drive_opt(amplitude, delay, integral, N)
    H_d = QobjEvo([H_sys, [H_drive, pulse_opt]])

    hsolver = HEOMSolver(H_d, bath, NC, options=options)
    result = hsolver.run(rho0, tlist)

    P12_opt = expect(result.states[-1], P12p)
    progress.value += 1

    return P12_opt, P12_eq


# We use NC=2 and Nk=2 to speed up the simulation:

P12_results = [
    list(zip(*(
        simulate_100_pulses(lam=lam_, gamma=gamma_, T=0.5, NC=2, Nk=2)
        for gamma_ in gammas
    )))
    for lam_ in lams
]


# Now that we have the expectation values of $\rho_{01}$, let's plot them as a function of gamma for each lambda. Note how in each case the non-evenly spaced pulses become optimal once gamma is sufficiently small:

# In[18]:


fig, axes = plt.subplots(1, 1, sharex=False, figsize=(10, 7))
colors = ["green", "red", "blue"]

for i in range(len(lams)):
    color = colors[i % len(colors)]
    axes.plot(
        gammas, np.real(P12_results[i][0]), color, linestyle='-',
        linewidth=2, label=f"Optimal DD [$\\lambda={lams[i]}$]",
    )
    axes.plot(
        gammas, np.real(P12_results[i][1]), color, linestyle='-.',
        linewidth=2, label=f"Even DD [$\\lambda={lams[i]}$]",
    )

axes.set_ylabel(r"$\rho_{01}$")
axes.set_xlabel(r"$\gamma$")
axes.legend(fontsize=16)

fig.tight_layout();


# And now you know about dynamically decoupling a qubit from its environment!

# ## About

# In[19]:


qutip.about()


# ## Testing
#
# This section can include some tests to verify that the expected outputs are generated within the notebook. We put this section at the end of the notebook, so that it does not interfere with the user experience. Please define the tests using assert, so that the cell execution fails if a wrong output is generated.

# In[20]:


assert 1 == 1
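# As an additional illustrative check (a minimal sketch, assuming the cells
# above have been run so that ``outputDD`` and ``outputnoDD`` are available),
# we can also assert that the fast dynamical decoupling sequence preserved
# more of the coherence than free evolution did:

# In[ ]:


assert (
    abs(expect(outputDD.states[-1], P12p))
    > abs(expect(outputnoDD.states[-1], P12p))
)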