#!/usr/bin/env python
# coding: utf-8

# # OPTaaS: Surrogate Prediction
#
# The surrogate model is what the optimizer *thinks* the scoring function
# looks like. It is part of the mechanism used to choose optimal
# configurations.
#
# You can generate predictions from the surrogate model (effectively asking
# OPTaaS to guess what the scoring function may be at a certain point) at any
# set of arbitrary configuration points.

# ## Connect to OPTaaS using your API Key

# In[1]:

from mindfoundry.optaas.client.client import OPTaaSClient

# NOTE(review): the API key argument is empty — supply a real key before
# running this notebook against the service.
client = OPTaaSClient('https://optaas.mindfoundry.ai', '')

# ## Create a simple task

# In[2]:

from mindfoundry.optaas.client.parameter import FloatParameter
from mindfoundry.optaas.client.client import Goal

# A 2-D minimization task over the box x in [-3, 1], y in [-6, 21].
task = client.create_task(
    title='Basic 2D Example',
    parameters=[
        FloatParameter('x', minimum=-3, maximum=1),
        FloatParameter('y', minimum=-6, maximum=21)
    ],
    goal=Goal.min,
)

# ## Define your scoring function

# In[3]:

def scoring_function(x, y):
    '''A simple well with min at 0, 0'''
    score = x**2 + y**2
    return score

# ## Run your task

# In[4]:

best_result = task.run(scoring_function, max_iterations=20)
print("Best Result:", best_result)

# ## Evaluating the surrogate
#
# ### Ask the surrogate for a prediction at the known best point (x=0, y=0)
#
# The surrogate model should predict a fairly low score with high confidence,
# since it has been exploring the vicinity of this point.

# In[5]:

interesting_configs = [{'x': 0.0, 'y': 0.0}]

# In[6]:

predictions = task.get_surrogate_predictions(interesting_configs)

# In[7]:

# Each prediction exposes a mean (the surrogate's guess at the score) and a
# variance (its uncertainty about that guess).
[(p.mean, p.variance) for p in predictions]

# ### Ask the surrogate about a couple of points far away from the explored
# area (x=1, y=20) and (x=-3, y=-6)
#
# The surrogate model should be significantly less confident, as there were
# no evaluations near these points.
# In[8]:

# Fix: the markdown text names the point (x=-3, y=-6), but the original code
# used x=-1.0. Use -3.0 so the queried point matches the text and sits on the
# domain boundary (FloatParameter 'x' has minimum=-3), i.e. genuinely far
# from the explored region around the optimum.
far_away_points = [{'x': 1.0, 'y': 20.0}, {'x': -3.0, 'y': -6.0}]

# In[9]:

predictions = task.get_surrogate_predictions(far_away_points)

# In[10]:

# Mean/variance pairs for the two far-away points; expect larger variances
# here than at the well-explored optimum.
[(p.mean, p.variance) for p in predictions]

# ### Observation
#
# The predictions are quite accurate, as the function is quite simple, so the
# surrogate is able to learn it fairly quickly. The increased variance
# reflects the lower certainty, as expected.

# ## Want to know more?
#
# Here's an article we wrote on how the surrogate works:
# https://towardsdatascience.com/the-intuitions-behind-bayesian-optimization-with-gaussian-processes-7e00fcc898a0