We are looking at the problem Max $k$XOR, where each variable is involved in $D+1$ clauses and has no overlapping constraints.

This is code comparing one-local algorithms: Hirvonen's threshold algorithm and Farhi's QAOA at depth 1.

In [1]:
from functools import lru_cache
from matplotlib.ticker import StrMethodFormatter
from mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition, mark_inset)
from numpy import cos, sin, tan, arctan as atan
from scipy.optimize import minimize
from scipy.special import binom, erf
from sympy import exp, simplify, expand, expand_trig, trigsimp
import matplotlib.pyplot as plt
import numpy as np
import sympy
import time
plt.rc('axes', axisbelow=True)
/tmp/ipykernel_78203/3810261615.py:3: MatplotlibDeprecationWarning: 
The mpl_toolkits.axes_grid module was deprecated in Matplotlib 2.1 and will be removed two minor releases later. Use mpl_toolkits.axes_grid1 and mpl_toolkits.axisartist, which provide the same functionality instead.
  from mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition, mark_inset)
In [2]:
print("Starting time:", time.ctime())
Starting time: Sat Sep 11 18:13:44 2021

QAOA for Max 3XOR

In [3]:
def qaoa_perf_3xor(s, D):
    return s/ ( 1 + 3/(1 - s*s)**D)**0.5
In [4]:
gamma = sympy.Symbol('gamma')
D_symbol = sympy.Symbol('D')
qaoa_perf_3xor(sympy.sin(gamma), D_symbol)
Out[4]:
$\displaystyle \frac{\sin{\left(\gamma \right)}}{\left(1 + 3 \left(1 - \sin^{2}{\left(\gamma \right)}\right)^{- D}\right)^{0.5}}$
In [5]:
Ds = [5, 10, 20, 50, 100, 500, 1000, 5000, 10000, 50000, 500000]
inps = np.linspace(0, 3, 100000)
In [6]:
for D in Ds:
    outs = [qaoa_perf_3xor(np.sin(i*D**-0.5), D)*D**0.5 for i in inps]
    plt.plot(inps, outs, label="$D$=" + str(D))
plt.grid()
plt.legend()
plt.title("QAOA performance for $k=3$")
plt.xlabel("$\gamma \sqrt{D}$")
plt.ylabel("$C_{3,D}$: performance is $1/2 + C_{3,D}/\sqrt{D}$")
plt.savefig("images/QAOA_max3xor.png",  dpi=300)

We can also look at the derivative of performance with respect to $\gamma$:

In [7]:
qaoa_perf_derivative = sympy.lambdify((gamma, D_symbol), sympy.Derivative(qaoa_perf_3xor(sympy.sin(gamma), D_symbol), gamma, evaluate=True))
In [8]:
for D in Ds:
    outs = [qaoa_perf_derivative(i*D**-0.5, D) for i in inps]
    plt.plot(inps, outs, label="$D$=" + str(D))
    pt = np.argmin(np.abs(outs))
    print(D, inps[pt], outs[pt])
plt.grid()
plt.plot(inps, [0]*inps, color='black')
plt.legend()
plt.title("first derivative")
5 0.9889298892988929 -7.89382122379223e-06
10 1.019440194401944 -5.557421224877679e-07
20 1.035970359703597 9.036765468040109e-06
50 1.0463804638046381 -6.05624229221835e-06
100 1.049920499204992 -3.116431702476774e-06
500 1.052770527705277 7.448490694839105e-06
1000 1.053130531305313 7.55082733017165e-06
5000 1.053430534305343 -1.179938904916078e-07
10000 1.053460534605346 3.909266844126513e-06
50000 1.053490534905349 3.155058544646039e-06
500000 1.053490534905349 7.4604497740105025e-06
Out[8]:
Text(0.5, 1.0, 'first derivative')

Let's check to make sure this works given $\gamma$ and $\beta$. Using the asymptotic values:

In [9]:
def qaoa_perf_2var_3xor(beta, gamma, D):
    p, q = np.cos(2*beta), np.sin(2*beta)
    c, s = np.cos(gamma), np.sin(gamma)
    return 0.5*q*s*(3*p*p*c**D - q*q*c**(3*D))
In [10]:
D = 100
gamma = 1.0535/D**0.5
beta = np.arcsin((1/(np.cos(gamma)**(2*D) + 3))**0.5)/2
print(beta, gamma)
print(qaoa_perf_2var_3xor(beta, gamma, D)*D**0.5)
0.29003649304891665 0.10535000000000001
0.3305431427004284
In [11]:
Ds = range(3, 500, 1)
plt.plot(Ds, [qaoa_perf_2var_3xor(0.29, 1.0535/D**0.5, D)*D**0.5 for D in Ds])
plt.grid()
plt.xlabel("degree $D$")
plt.ylabel("$C_{3,D}$: performance is $1/2 + C_{3,D}/\sqrt{D}$")
plt.title("QAOA for Max 3XOR")
Out[11]:
Text(0.5, 1.0, 'QAOA for Max 3XOR')
In [12]:
Ds = range(3000000, 1000000000, 100000)
plt.plot(Ds, [qaoa_perf_2var_3xor(0.29, 1.0535/D**0.5, D)*D**0.5 for D in Ds])
plt.grid()

plt.xlabel("degree $D$")
plt.ylabel("$C_{3,D}$: performance is $1/2 + C_{3,D}/\sqrt{D}$")
plt.title("QAOA for Max 3XOR")
Out[12]:
Text(0.5, 1.0, 'QAOA for Max 3XOR')

What's the asymptotic value of QAOA on Max 3XOR?

In [13]:
def v(k):
    return k / (1 + 3*exp(k*k))**0.5
In [14]:
k = sympy.Symbol('k')
der = sympy.lambdify(k, sympy.Derivative(v(k), k, evaluate=True))
In [15]:
sympy.Derivative(v(k), k, evaluate=True)
Out[15]:
$\displaystyle - \frac{3.0 k^{2} e^{k^{2}}}{\left(3 e^{k^{2}} + 1\right)^{1.5}} + \left(3 e^{k^{2}} + 1\right)^{-0.5}$
In [16]:
inps = np.linspace(0, 2, 10000000)
plt.plot(inps, der(inps))
plt.plot(inps, [0]*inps, color='black')
plt.grid()
In [17]:
pt = np.argmin(np.abs(der(inps)))
inps[pt], der(inps)[pt]
Out[17]:
(1.0535025053502505, 3.2796303450766118e-09)
In [18]:
v(inps[pt])
Out[18]:
$\displaystyle 0.331462726717571$

What are the optimal values of QAOA for every degree?

In [19]:
fmaker = lambda D: lambda i: -qaoa_perf_3xor(np.sin(i*D**-0.5), D)*D**0.5
In [20]:
Ds = range(2, 10000)
In [21]:
optimized_vals = [-minimize(fmaker(D), 1).fun for D in Ds]
In [22]:
comparison = [qaoa_perf_2var_3xor(0.29, 1.0535/D**0.5, D)*D**0.5 for D in Ds]
assert np.all(np.array([o-c for o,c in zip(optimized_vals, comparison)]) > 0)
In [23]:
plt.plot(Ds, optimized_vals)
plt.grid()

The threshold algorithm for Max 3XOR

In [24]:
@lru_cache(maxsize=int(1e7))
def g(D, t):
    return 2**(-D) * sum([binom(D,i) for i in range(0, t+1)])
In [25]:
@lru_cache(maxsize=int(1e7))
def Delta(D, t):
    return 2**(-D)* binom(D, t)
In [26]:
@lru_cache(maxsize=int(1e7))
def general_improvement(D, t, k):
    val_delta = Delta(D, t)
    val_g = g(D, t)
    return 0.25 * ( (1-2*val_g + 2*val_delta)**k - (1 - 2*val_g)**k )

What's the performance at $k=3$?

In [27]:
for D in [5, 10, 20, 30, 40, 80, 100, 300, 1000]:
    inps = np.array(list(range(-1, D+2)))
    plt.plot((inps/D - 1/2)*D**0.5, [general_improvement(D, t, 3)*D**0.5 for t in inps], label="$D$=" + str(D))

plt.legend()
plt.ylabel("$C_{3,D}$: performance is $1/2 + C_{3,D}/\sqrt{D}$")
plt.xlabel("a: threshold is $D/2 + a/\sqrt{D}$")
plt.xlim(-2, 2)
plt.title("Performance of threshold algorithm for Max 3XOR")
plt.grid()

Finding the optimal threshold:

In [28]:
def get_argmax(D, k, negative_only=False):
    if negative_only:
        inps = range(-1, int(D/2))
    else:
        inps = range(-1, D+2)
    return inps[np.argmax([general_improvement(D, t, k) for t in inps])]
In [29]:
inps = range(1, 500)
plt.scatter(inps, [(get_argmax(D, 3) - D/2)/D**0.5 for D in inps], s=3)
plt.xlabel("degree $D$")
plt.ylabel("a: threshold is $D/2 + a/\sqrt{D}$")
plt.title("Optimal threshold for Max 3XOR")
plt.grid()

If we only look at thresholds below $D/2$, we get a clearer pattern:

In [30]:
inps = range(1, 500)
plt.scatter(inps, [(get_argmax(D, 3, True) - D/2)/D**0.5 for D in inps], s=3)
plt.xlabel("degree $D$")
plt.ylabel("a: threshold is $D/2 + a/\sqrt{D}$")
plt.title("Optimal (negative) threshold for Max 3XOR")
plt.grid()

Comparison for Max 3XOR

In [31]:
def get_max(D, k):
    inps = range(-1, D+2)
    return max([general_improvement(D, t, k) for t in inps])
In [32]:
inps = range(1, 500)
plt.plot(inps, [get_max(D, 3)*D**0.5 for D in inps], label='threshold')
plt.plot(inps, optimized_vals[:len(inps)], label='qaoa')
plt.legend()
plt.grid()
plt.title("Performance on Max 3XOR")
plt.xlabel("degree $D$")
plt.ylabel("$C_{3,D}$: performance is $1/2 + C_{3,D}/\sqrt{D}$")
Out[32]:
Text(0, 0.5, '$C_{3,D}$: performance is $1/2 + C_{3,D}/\\sqrt{D}$')

Zooming in at small degree:

In [33]:
inps = range(1, 100)
plt.plot(inps, [get_max(D, 3)*D**0.5 for D in inps], label='threshold')
plt.plot(inps, optimized_vals[:len(inps)], label='qaoa')
plt.legend()
plt.grid()
plt.title("Performance on Max 3XOR")
plt.xlabel("degree $D$")
plt.ylabel("$C_{3,D}$: performance is $1/2 + C_{3,D}/\sqrt{D}$")
Out[33]:
Text(0, 0.5, '$C_{3,D}$: performance is $1/2 + C_{3,D}/\\sqrt{D}$')

When does QAOA win?

In [34]:
np.array(inps)[[get_max(D, 3)*D**0.5 - o < 0 for D, o in zip(inps, optimized_vals[:len(inps)])]]
Out[34]:
array([ 1,  3,  4,  6,  8, 11, 13, 18, 20, 27])

Threshold algorithm for Max $k$XOR

The performance increases with $k$.

In [35]:
inps = range(1, 200)
for k in range(2, 4):
    plt.plot(inps, [get_max(D, k)*D**0.5 for D in inps],label="threshold $k$=" + str(k))
plt.grid()
plt.legend()
plt.title("Performance on Max $k$XOR")
plt.xlabel("degree $D$")
plt.ylabel("$C_{k,D}$: performance is $1/2 + C_{k,D}/\sqrt{D}$")
Out[35]:
Text(0, 0.5, '$C_{k,D}$: performance is $1/2 + C_{k,D}/\\sqrt{D}$')
In [36]:
inps = range(20, 300)
for k in range(2, 10):
    plt.plot(inps, [get_max(D, k)*D**0.5 for D in inps],label="threshold $k$=" + str(k))
plt.grid()
plt.legend()
plt.title("Performance on Max $k$XOR")
plt.xlabel("degree $D$")
plt.ylabel("$C_{k,D}$: performance is $1/2 + C_{k,D}/\sqrt{D}$")
Out[36]:
Text(0, 0.5, '$C_{k,D}$: performance is $1/2 + C_{k,D}/\\sqrt{D}$')
In [37]:
inps = range(50, 200)
for k in range(100, 1000, 100):
    plt.plot(inps, [get_max(D, k)*D**0.5 for D in inps],label="threshold $k$=" + str(k))
plt.grid()
plt.legend()
plt.title("Performance on Max $k$XOR")
plt.xlabel("degree $D$")
plt.ylabel("$C_{k,D}$: performance is $1/2 + C_{k,D}/\sqrt{D}$")
Out[37]:
Text(0, 0.5, '$C_{k,D}$: performance is $1/2 + C_{k,D}/\\sqrt{D}$')

What about the optimal threshold?

In [38]:
inps = range(50, 500)
for k in range(2,10):
    plt.plot(inps, [abs(get_argmax(D, k) - D/2)/D**0.5 for D in inps], label='$k$=' + str(k))
plt.grid()
plt.legend()
plt.title("Optimal threshold for Max $k$XOR")
plt.xlabel("degree $D$")
plt.ylabel("a: threshold is $D/2 + a/\sqrt{D}$")
Out[38]:
Text(0, 0.5, 'a: threshold is $D/2 + a/\\sqrt{D}$')

Large degree approximation

In [39]:
def f(alpha):
    return -erf(2**0.5 * alpha)

def grtD(alpha):
    return (2/np.pi)**0.5 * np.e**(-2*alpha*alpha)

def large_degree_improvement(alpha, k):
    return f(alpha)**k * (k/2) * (-2*alpha)/(k-1)

inps = np.linspace(-1, -0.1, 100000)
plt.plot(inps, [grtD(i)/f(i) for i in inps])
for k in range(2, 7):
    plt.plot(inps, -2*inps/(k-1), label='$k$=' + str(k))
    alpha = inps[[(grtD(i)/f(i)+2*i/(k-1) > 0) for i in inps]][0]
    print('alpha (k=' + str(k) + '):', alpha)
    print('improvement*rtD:', large_degree_improvement(alpha, k))
plt.legend()
plt.grid()
alpha (k=2): -0.4384483844838448
improvement*rtD: 0.33648925802613555
alpha (k=3): -0.5661056610566105
improvement*rtD: 0.34753464968074316
alpha (k=4): -0.6461164611646116
improvement*rtD: 0.35948123367974816
alpha (k=5): -0.7040770407704077
improvement*rtD: 0.37007481521513996
alpha (k=6): -0.7493114931149312
improvement*rtD: 0.37934716987803424

We can use SciPy to find the optimal threshold and value.

In [40]:
fmaker = lambda k: lambda i: abs(grtD(i)/f(i)+2*i/(k-1))
In [41]:
for k in [2,3,4]:
    plt.plot(inps, fmaker(k)(inps), label='$k$=' + str(k))
plt.xlabel("alpha")
plt.ylabel("difference (optimal at zero)")
plt.grid()
plt.legend()
Out[41]:
<matplotlib.legend.Legend at 0x7fda24d32280>
In [42]:
ks = range(2, 200)
best_alphas = [minimize(fmaker(k), -1, method='Nelder-Mead', options={"ftol":1e-10}).x[0] for k in ks]
best_threshold_perf = [large_degree_improvement(a, k) for a,k in zip(best_alphas, ks)]
In [43]:
for i in range(18):
    print("k:", ks[i], "alpha:", best_alphas[i], "perf:", best_threshold_perf[i])
k: 2 alpha: -0.4384504927322263 perf: 0.33649336434608373
k: 3 alpha: -0.566110652685165 perf: 0.34754360662427763
k: 4 alpha: -0.646119719278067 perf: 0.35948708249696404
k: 5 alpha: -0.7040850546211003 perf: 0.37008946845499674
k: 6 alpha: -0.7493118736892935 perf: 0.37934788180340984
k: 7 alpha: -0.7862569719552992 perf: 0.3874867747324553
k: 8 alpha: -0.817399865761399 perf: 0.39471621046350674
k: 9 alpha: -0.8442601535469293 perf: 0.40120335548614106
k: 10 alpha: -0.8678350865840913 perf: 0.4070774997450675
k: 11 alpha: -0.8888135105371475 perf: 0.4124388540093239
k: 12 alpha: -0.9076905906200408 perf: 0.41736591797938943
k: 13 alpha: -0.924833858758211 perf: 0.4219210415038646
k: 14 alpha: -0.9405233614146706 perf: 0.4261543840769254
k: 15 alpha: -0.9549772322177886 perf: 0.43010682728185895
k: 16 alpha: -0.9683685787022114 perf: 0.43381207871486654
k: 17 alpha: -0.9808370001614093 perf: 0.4372982310985033
k: 18 alpha: -0.9924966532737016 perf: 0.4405889197979946
k: 19 alpha: -1.0034420400857929 perf: 0.44370420750299183
In [44]:
plt.scatter(ks, best_threshold_perf)
plt.grid()
plt.xlabel("$k$")
plt.ylabel("$C_{k}$: performance is $1/2 + C_{k}/\sqrt{D}$")
plt.title("Best asymptotic performance of threshold algorithm for Max $k$XOR")
Out[44]:
Text(0.5, 1.0, 'Best asymptotic performance of threshold algorithm for Max $k$XOR')

Comparing the expression with its large-degree asymptotic form:

In [45]:
@lru_cache(maxsize=int(1e7))
def long_range_improvement(D, t, k):
    alpha = (t - D/2)*D**-0.5
    val_delta = (2/np.pi/D)**0.5 * np.e**(-2*alpha*alpha)
    val_g = 0.5 + 0.5*erf(2**0.5 * alpha) + 0.5 * val_delta
    return 0.25 * ( (1-2*val_g + 2*val_delta)**k - (1 - 2*val_g)**k )
In [46]:
D = 1000
k = 8
inps = np.array(list(range(-1, D+2)))
plt.plot((inps/D - 1/2)*D**0.5, [long_range_improvement(D, t, k)*D**0.5 for t in inps], label='asymptotic')
plt.plot((inps/D - 1/2)*D**0.5, [general_improvement(D, t, k)*D**0.5 for t in inps], label='generic')
plt.xlim(-2, 2)
plt.grid()
plt.xlabel("$a$: threshold is $D/2 + a/\sqrt{D}$")
plt.ylabel("$C_{k}$: performance is $1/2 + C_{k}/\sqrt{D}$")
plt.legend()
plt.title("asymptotic (large-degree) vs generic performance of threshold algorithm; $D$="  +str(D))
Out[46]:
Text(0.5, 1.0, 'asymptotic (large-degree) vs generic performance of threshold algorithm; $D$=1000')

QAOA for Max $k$XOR

We have a relationship from $\gamma$ to $\beta$. So let's plot performance as a single-variable function:

In [47]:
# @lru_cache(maxsize=int(1e7))
def gamma_from_beta(b, k, D):
    return atan( ((tan(2*b)**(-2) + 1)/k/D)**0.5 )
In [48]:
def f(b, k, D):
    p, q = cos(2*b), sin(2*b)
    g = gamma_from_beta(b, k, D)
    c, s = cos(g), sin(g)
    return -0.25*s*1j*( (p + 1j*q*c**D)**k - (p - 1j*q*c**D)**k )
In [49]:
for k in [3, 4, 5, 7, 10, 100]:
    plt.figure()
    Ds = [2, 5, 10, 20, 100, 500, 1000, 10000]
    for D in Ds:
        inps = np.linspace(1e-15, np.pi/4, 10000)
        outs = [complex(f(i, k, D)).real*D**0.5 for i in inps]
        plt.plot(inps/np.pi, outs, label="$D$=" + str(D))
        print('k:', k, "D:", D, 'beta/pi:', inps[np.argmax(outs)]/np.pi, 'gamma:', gamma_from_beta(inps[np.argmax(outs)], k, D), 'perf:', np.max(outs))
    plt.title("QAOA for Max $k$XOR, $k$=" + str(k))
    plt.grid()
    plt.ylabel("$C_{" + str(k) + ",D}$: performance is $1/2 + C_{" + str(k) + ",D}/\sqrt{D}$")
    plt.xlabel("$ beta / \pi$")
    plt.legend()
k: 3 D: 2 beta/pi: 0.09113411341134134 gamma: 0.6457133550969802 perf: 0.29410412861806556
k: 3 D: 5 beta/pi: 0.09178417841784199 gamma: 0.4422524799948612 perf: 0.3146569554910147
k: 3 D: 10 beta/pi: 0.09203420342034223 gamma: 0.3223806563933743 perf: 0.32268419004512405
k: 3 D: 20 beta/pi: 0.09215921592159236 gamma: 0.23167755729751552 perf: 0.32697175492210245
k: 3 D: 100 beta/pi: 0.09228422842284248 gamma: 0.10498972786802262 perf: 0.33054742740112547
k: 3 D: 500 beta/pi: 0.09230923092309251 gamma: 0.04707999959190555 perf: 0.33127896219847697
k: 3 D: 1000 beta/pi: 0.09230923092309251 gamma: 0.033302887938561315 perf: 0.33137080088961723
k: 3 D: 10000 beta/pi: 0.09230923092309251 gamma: 0.010534803199471842 perf: 0.3314535299965894
k: 4 D: 2 beta/pi: 0.07350735073507372 gamma: 0.6707077469306936 perf: 0.31273335595182417
k: 4 D: 5 beta/pi: 0.0744824482448247 gamma: 0.460203752247109 perf: 0.33641563218166026
k: 4 D: 10 beta/pi: 0.07485748574857508 gamma: 0.33568475060518954 perf: 0.34572486409324354
k: 4 D: 20 beta/pi: 0.07505750575057528 gamma: 0.2412987329831272 perf: 0.35071041090831745
k: 4 D: 100 beta/pi: 0.07523252325232545 gamma: 0.10938136825980022 perf: 0.3548750224898438
k: 4 D: 500 beta/pi: 0.07525752575257547 gamma: 0.04905838095594313 perf: 0.35572784016109366
k: 4 D: 1000 beta/pi: 0.07525752575257547 gamma: 0.034703431880728046 perf: 0.35583491989138766
k: 4 D: 10000 beta/pi: 0.07525752575257547 gamma: 0.010978155318363525 perf: 0.3559313779605138
k: 5 D: 2 beta/pi: 0.06238123812381262 gamma: 0.6914843004927927 perf: 0.3281876209826412
k: 5 D: 5 beta/pi: 0.06355635563556379 gamma: 0.4750964091162255 perf: 0.3546963040752536
k: 5 D: 10 beta/pi: 0.06398139813981421 gamma: 0.3468346065582889 perf: 0.36517453657251486
k: 5 D: 20 beta/pi: 0.06423142314231446 gamma: 0.2493408953847929 perf: 0.370799005813208
k: 5 D: 100 beta/pi: 0.06443144314431466 gamma: 0.11306015754691891 perf: 0.3755040521630587
k: 5 D: 500 beta/pi: 0.06445644564456468 gamma: 0.050716447902231246 perf: 0.37646827287224693
k: 5 D: 1000 beta/pi: 0.06445644564456468 gamma: 0.0358773219470221 perf: 0.37658933555084917
k: 5 D: 10000 beta/pi: 0.06448144814481471 gamma: 0.011345631207865314 perf: 0.3766984221260389
k: 7 D: 2 beta/pi: 0.04885488548854911 gamma: 0.7241763672015993 perf: 0.3524320255598121
k: 7 D: 5 beta/pi: 0.05020502050205046 gamma: 0.49886966247200565 perf: 0.38384779513791106
k: 7 D: 10 beta/pi: 0.050705070507050955 gamma: 0.364531438704665 perf: 0.39638119437081776
k: 7 D: 20 beta/pi: 0.05098009800980123 gamma: 0.2622344618087329 perf: 0.4031346410999332
k: 7 D: 100 beta/pi: 0.05120512051205146 gamma: 0.11896535473529471 perf: 0.40879767143215534
k: 7 D: 500 beta/pi: 0.0512551255125515 gamma: 0.05335431842602078 perf: 0.4099596976860411
k: 7 D: 1000 beta/pi: 0.0512551255125515 gamma: 0.0377451050228159 perf: 0.4101056478639212
k: 7 D: 10000 beta/pi: 0.0512551255125515 gamma: 0.011941154273739346 perf: 0.4102371058046652
k: 10 D: 2 beta/pi: 0.037803780378038075 gamma: 0.7599211884000758 perf: 0.3786286364946791
k: 10 D: 5 beta/pi: 0.03922892289228949 gamma: 0.5252912290279241 perf: 0.4160734614929132
k: 10 D: 10 beta/pi: 0.039778977897790044 gamma: 0.38420289214024267 perf: 0.4311762570138485
k: 10 D: 20 beta/pi: 0.040079007900790345 gamma: 0.2765142408952303 perf: 0.4393511679241211
k: 10 D: 100 beta/pi: 0.04030403040304057 gamma: 0.125554693219044 perf: 0.44622581933117295
k: 10 D: 500 beta/pi: 0.040354035403540614 gamma: 0.0563186231943233 perf: 0.4476386226442099
k: 10 D: 1000 beta/pi: 0.040379037903790646 gamma: 0.03982022411775004 perf: 0.447816050716899
k: 10 D: 10000 beta/pi: 0.040379037903790646 gamma: 0.01259825382237099 perf: 0.44797610429101614
k: 100 D: 2 beta/pi: 0.007475747574757784 gamma: 0.9846191191253346 perf: 0.528330274340077
k: 100 D: 5 beta/pi: 0.008450845084508758 gamma: 0.7002022674161685 perf: 0.621413963206732
k: 100 D: 10 beta/pi: 0.008825882588259133 gamma: 0.5184745277920191 perf: 0.6625250178308172
k: 100 D: 20 beta/pi: 0.009050905090509357 gamma: 0.3748139084978714 perf: 0.6856606454079309
k: 100 D: 100 beta/pi: 0.009225922592259532 gamma: 0.17092097273305687 perf: 0.7055912953067912
k: 100 D: 500 beta/pi: 0.009250925092509557 gamma: 0.07683141025610506 perf: 0.7097254589486673
k: 100 D: 1000 beta/pi: 0.009275927592759583 gamma: 0.05423536485283009 perf: 0.710256965197822
k: 100 D: 10000 beta/pi: 0.009275927592759583 gamma: 0.017165877947379102 perf: 0.7107338122175934

Find the maximum performance of QAOA for any k and D:

In [50]:
fmaker = lambda k, D: lambda i: -complex(f(i, k, D)).real*D**0.5
In [51]:
def get_qaoa_max(k,D):
    # do pass to find approximate optimum
    inps = np.linspace(1e-15, np.pi/4, 100)
    outs = [complex(f(i, k, D)).real*D**0.5 for i in inps]
    return -minimize(fmaker(k, D), inps[np.argmax(outs)]).fun
In [52]:
%%time
for k in range(2, 10):
    Ds = range(2, 300)
    qaoa_kxor = [get_qaoa_max(k,D) for D in Ds]
    plt.plot(Ds, qaoa_kxor, label="$k$=" + str(k))
plt.grid()
plt.xlabel('degree $D$')
plt.legend()
plt.ylabel("$C_{k,D}$: performance is $1/2 + C_{k,D}/\sqrt{D}$")
plt.title("Best QAOA performance for Max $k$XOR")
plt.savefig('images/qaoa_best_maxkxor.png',  dpi=300)
CPU times: user 7.26 s, sys: 15.8 ms, total: 7.28 s
Wall time: 7.28 s

Limiting behavior for QAOA on Max $k$XOR

In [53]:
def qaoa_asymptotic_kxor(k,t):
    if t*t*k < 1:
        return 0
    term_1 = (t*t*k - 1)**0.5 + 1j*np.e**(-t*t/2)
    term_2 = (t*t*k - 1)**0.5 - 1j*np.e**(-t*t/2)
    return -0.25*1j * t**(1-k) * k**(-k/2) * (term_1**k - term_2**k)
In [54]:
qaoa_asymptotic_maxes = []
for k in range(2, 200):
    inps = np.linspace(0, 2, 100000)
    outs