#!/usr/bin/env python
# coding: utf-8

# # Problem 4

# What is the global minimum of the function
# 
# $ f(x,y) = e^{\sin(50x)} + \sin(60e^y) + \sin(70\sin{x}) + \sin(\sin{80y}) - \sin(10(x+y)) + (x^2+y^2)/4 $ ?

# Plotting the function, we note that the $(x^2 + y^2)/4$ term dominates for large $x,y$, so the global minimum must lie near the origin; however, the function has many local minima and maxima there, which makes the global minimum difficult to pin down.

# In[1]:


import numpy as np
from numpy import exp, sin
import matplotlib.pyplot as plt
from matplotlib import cm

f = lambda x,y: exp(sin(50*x)) + sin(60*exp(y)) + sin(70*sin(x)) + sin(sin(80*y)) - sin(10*(x+y)) + (x*x+y*y)/4

# coarse grid: the quadratic bowl dominates at this scale
X = np.arange(-10,10,0.5)
Y = np.arange(-10,10,0.5)
X, Y = np.meshgrid(X,Y)
Z = f(X,Y)

fig = plt.figure(figsize=plt.figaspect(0.5))
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.view_init(elev=10)
surf = ax.plot_surface(X,Y,Z,cmap=cm.hsv,linewidth=20)

# fine grid near the origin: the landscape is rugged, with many local minima
X = np.arange(-0.5,0.5,0.001)
Y = np.arange(-0.5,0.5,0.001)
X, Y = np.meshgrid(X,Y)
Z = f(X,Y)

ax = fig.add_subplot(1, 2, 2, projection='3d')
surf = ax.plot_surface(X,Y,Z,cmap=cm.coolwarm,linewidth=20,antialiased=False)


# ### Paper and pencil minimization

# Bounding each term from below, we have
# 
# $ \begin{cases}
# \min(e^{\sin(50x)}) = 1/e \\
# \min(\sin(60 e^y)) = -1 \\
# \min(\sin(70 \sin x)) = -1 \\
# \min(\sin(\sin 80y)) = -\sin 1 \\
# \min(-\sin(10(x+y))) = -1 \\
# \min((x^2+y^2)/4) = 0 \\
# \end{cases} \implies f(x,y) \geq 1/e - 3 - \sin 1 \approx -3.474 $
# 
# (Here $70 \sin x$ sweeps all of $[-70, 70]$, so $\sin(70 \sin x)$ does attain $-1$, whereas $\sin 80y$ stays inside $[-1, 1]$, so $\sin(\sin 80y)$ only reaches $-\sin 1$.) The six terms attain their minima at different points, so this is a lower bound rather than the minimum itself, but it tells us what magnitude to expect.

# ### Using built-in methods

# In[2]:


f = lambda x,y: exp(sin(50*x)) + sin(60*exp(y)) + sin(70*sin(x)) + sin(sin(80*y)) - sin(10*(x+y)) + (x*x+y*y)/4
f_min = lambda x: f(x[0],x[1]) # wrapper for f so that it can be passed to scipy.optimize

from scipy.optimize import minimize
from time import time

methods = ["Nelder-Mead","Powell","CG","BFGS","L-BFGS-B","TNC","COBYLA","SLSQP","trust-constr"]

def time_method(method): # run scipy.optimize.minimize from the origin with the given method, and time it
    start = time()
    out = minimize(f_min,(0,0),method=method,tol=1e-12)
    return (out.fun, out.x, time()-start, method)

for method in methods:
    print(time_method(method))


# As we can see, none of these local methods handles our function well. The Powell method returns the lowest value; however, estimating the gradient at the point it reports:

# In[3]:


from scipy.optimize import approx_fprime
print(approx_fprime([ 0.24280684, -0.09332387], f_min, [1e-12,1e-12]))


# we find that the partial derivative with respect to $x$ is negative there, so the point cannot even be a local minimum, let alone the global one. Indeed, moving slightly in the $+x$ direction lowers the value:

# In[4]:


print(f(0.2428064,-0.09332387)) # value at (approximately) the point returned by Powell
print(f(0.2428066,-0.09332387)) # a nearby point with a lower value of the function
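# Before reaching for specialized global optimizers, a plain multi-start strategy is worth a quick look. The sketch below restarts Nelder-Mead from random points and keeps the best result; the restart budget of 500, the seed, and the $[-1,1]^2$ search box are arbitrary choices for illustration. This typically beats any single run from the origin, but it comes with no guarantee of finding the global minimum.

# In[ ]:


import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0) # arbitrary seed, for reproducibility
best = None
for _ in range(500): # 500 restarts is an arbitrary budget
    x0 = rng.uniform(-1, 1, size=2) # the rugged region sits near the origin
    res = minimize(f_min, x0, method="Nelder-Mead")
    if best is None or res.fun < best.fun:
        best = res # keep the lowest local minimum seen so far
print(best.fun, best.x)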
# ### Bringing out the big guns

# Since this function has many local minima, we have to resort to more specialized algorithms to find the global minimum. We use two below.

# ##### Basin-hopping

# The basin-hopping algorithm, described in a 1997 paper by David J. Wales and Jonathan Doye, is a global optimization algorithm originally developed for finding minimum-energy structures of molecules.
# 
# From the `scipy` documentation of the implementation of this algorithm:
# > Designed to mimic the natural process of energy minimization of clusters of atoms, it works well for similar problems with “funnel-like, but rugged” energy landscapes
# 
# Thus, it should work well for our function:

# In[5]:


from scipy.optimize import basinhopping
import time

start = time.time()
print(basinhopping(f_min, [0,0], niter = 5000))
print("Time elapsed: ", time.time()-start, " seconds")


# ##### Differential Evolution

# Differential evolution is a stochastic, population-based evolutionary algorithm that is often used for global optimization problems. Briefly, an initial population of candidate minimizers is generated; candidates are then combined and mutated and, in a process resembling natural selection, the better candidates survive into the next generation, so that over many generations the population converges toward the minimum of the function.

# In[6]:


from scipy.optimize import differential_evolution
import time

start = time.time()
print(differential_evolution(f_min, [[-1,1],[-1,1]],tol=1e-12))
print("Time elapsed: ", time.time()-start, " seconds")


# ### Wrapping it up: more digits

# Now that we have the approximate location of the global minimum, we can refine it locally to obtain more digits. Starting from the minimizer found above, we repeatedly move to the best of the eight neighboring grid points, halving the step size after each move, until the minimum value stops changing at the working precision.

# In[7]:


from mpmath import mp, mpf, exp, sin # note: shadows numpy's exp and sin with arbitrary-precision versions
from tqdm import tqdm

mp.dps = 1000 # work with 1000 significant digits

f = lambda x,y: exp(sin(50*x)) + sin(60*exp(y)) + sin(70*sin(x)) + sin(sin(80*y)) - sin(10*(x+y)) + (x*x+y*y)/4

x = mpf("-0.02440308") # approximate minimizer found by the global optimizers above
y = mpf("0.21061242")
mins = [None]
step = mpf("1e-7")

for i in tqdm(range(10000)):
    # function values at the eight neighbors of (x, y) at the current step size
    neighborhood = [[x,y+step,f(x,y+step)],
                    [x,y-step,f(x,y-step)],
                    [x+step,y,f(x+step,y)],
                    [x-step,y,f(x-step,y)],
                    [x-step,y-step,f(x-step,y-step)],
                    [x-step,y+step,f(x-step,y+step)],
                    [x+step,y-step,f(x+step,y-step)],
                    [x+step,y+step,f(x+step,y+step)]]
    # move to the neighbor with the smallest function value
    x, y, minimum = min(neighborhood, key=lambda p: p[2])
    mins.append(minimum)
    if mins[-1]==mins[-2]: # value unchanged at this precision: we are done
        print(i); break
    step /= 2

print(mins[-1])
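# As a cross-check on this refinement, one can instead look for the nearby zero of the gradient using mpmath's multidimensional Newton solver, with partial derivatives computed numerically. This is only a sketch: the 60-digit working precision, the `grad` helper, and the starting point are choices made for illustration, and `findroot` is simply asked to solve $\nabla f = 0$ from the approximate minimizer found above.

# In[ ]:


from mpmath import mp, mpf, exp, sin, diff, findroot

mp.dps = 60 # assumed working precision for this sketch

f = lambda x,y: exp(sin(50*x)) + sin(60*exp(y)) + sin(70*sin(x)) + sin(sin(80*y)) - sin(10*(x+y)) + (x*x+y*y)/4

# the gradient of f, via numerical differentiation (hypothetical helper name)
grad = lambda x, y: [diff(f, (x, y), (1, 0)),  # df/dx
                     diff(f, (x, y), (0, 1))]  # df/dy

# Newton's method starting from the approximate minimizer found earlier
root = findroot(grad, (mpf("-0.02440308"), mpf("0.21061242")))
print(f(root[0], root[1])) # should agree with the value from the grid search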