#!/usr/bin/env python
# coding: utf-8

# # Using the Cost/Likelihood classes
#
# This example introduces the cost-function methods used both for predicting the forward model and for evaluating its output. One cost class (`pybop.SumofPower`) is used as the example, but the methods discussed here transfer to the other cost classes as well as to the likelihood classes.
#
# ### Setting up the Environment
#
# Before we begin, we need to ensure that we have all the necessary tools. We will install PyBOP and upgrade dependencies:

# In[ ]:


get_ipython().run_line_magic('pip', 'install --upgrade pip ipywidgets -q')
get_ipython().run_line_magic('pip', 'install pybop -q')


# ### Importing Libraries
#
# With the environment set up, we can now import PyBOP alongside the other libraries we will need:

# In[ ]:


import numpy as np

import pybop

pybop.plot.PlotlyManager().pio.renderers.default = "notebook_connected"


# First, to construct a `pybop.Cost` class, we need the following objects:
# - Model
# - Dataset
# - Parameters to identify
# - Problem
#
# Given the above, we first construct the model, then the parameters and corresponding dataset. Once those are in place, the problem is created, followed by the cost class. We then showcase the different ways users can interact with the cost class, with small examples of both evaluation and computation.

# In[ ]:


parameter_set = pybop.ParameterSet.pybamm("Chen2020")
model = pybop.lithium_ion.SPM(parameter_set=parameter_set)


# Now that we have the model constructed, let's define the parameters for identification.

# In[ ]:


parameters = pybop.Parameters(
    pybop.Parameter(
        "Negative electrode active material volume fraction",
        initial_value=0.6,
    ),
    pybop.Parameter(
        "Positive electrode active material volume fraction",
        initial_value=0.6,
    ),
)


# Next, we generate some synthetic data from the model using the `model.predict` method. This is then corrupted with Gaussian noise and used to create the Dataset.

# In[ ]:


t_eval = np.linspace(0, 10, 100)
values = model.predict(t_eval=t_eval)

sigma = 0.001  # illustrative noise level [V]
corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))

dataset = pybop.Dataset(
    {
        "Time [s]": t_eval,
        "Current function [A]": values["Current [A]"].data,
        "Voltage [V]": corrupt_values,
    }
)


# Now that we have the model, parameters, and dataset, we can combine them and construct the problem class. This class forms the basis for evaluating the forward model for the defined fitting process (parameters and operating conditions).

# In[ ]:


problem = pybop.FittingProblem(model, parameters, dataset)


# Perfect. Let's now construct the cost class and move on to the main point of this example.

# In[ ]:


cost = pybop.SumofPower(problem)


# The conventional way to use the cost class is through the `cost.__call__` method, as shown below.

# In[ ]:


cost([0.5, 0.5])


# This does two things: it first evaluates the forward model at the given parameter values of `[0.5, 0.5]`, then it computes the cost by comparing the forward model's prediction with the problem's target values, which are provided by the dataset we constructed above.
#
# However, there is an alternative approach that gives the user more flexibility in their assessment of the cost function: the `cost.compute` method, as shown below.

# In[ ]:


out = problem.evaluate([0.5, 0.5])
cost.compute(out)


# This splits the evaluation of the forward model and the computation of the cost function into two separate calls, allowing the model evaluation to be decoupled from the cost computation.
# This decoupling can be helpful when you want to assess the problem across multiple costs (see `pybop.WeightedCost` for a PyBOP implementation of this), or when you want to modify the problem output before assessing a cost. A small sketch of the multi-cost case is given directly below.
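# As a minimal sketch of the multi-cost case (beyond the original example), a single `problem.evaluate` call can be reused across several metrics. `pybop.SumSquaredError` and `pybop.RootMeanSquaredError` are standard PyBOP cost classes and are assumed here to share the same `compute` interface as `pybop.SumofPower`.

# In[ ]:


# Construct two additional cost classes on the same problem (assumed shared interface)
sse = pybop.SumSquaredError(problem)
rmse = pybop.RootMeanSquaredError(problem)

# Evaluate the forward model once, then compute each cost from the same output
y = problem.evaluate([0.5, 0.5])
for metric in (cost, sse, rmse):
    print(type(metric).__name__, metric.compute(y))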
# Next, let's present a few of these use-cases. In the first use-case, the problem is evaluated once, random noise is added to its output, and the cost is then computed.

# In[ ]:


def my_cost(inputs):
    # Evaluate the forward model, perturb the voltage output, then compute the cost
    y = problem.evaluate(inputs)
    y["Voltage [V]"] += np.random.normal(0, 0.003, len(t_eval))
    return cost.compute(y)


# In[ ]:


my_cost([0.5, 0.5])


# The above method showcases how the `cost.__call__` method can be constructed at the user level. Furthermore, the above example can be reimplemented with gradient calculations via the `calculate_grad` argument of the `cost.compute` method.

# In[ ]:


def my_cost_gradient(inputs):
    # Evaluate the forward model with sensitivities, perturb the voltage output,
    # then compute the cost and its gradient
    y, dy = problem.evaluateS1(inputs)
    y["Voltage [V]"] += np.random.normal(0, 0.003, len(t_eval))
    return cost.compute(y, dy=dy, calculate_grad=True)


# In[ ]:


my_cost_gradient([0.5, 0.5])


# This provides the computed cost for the parameter values, alongside the gradient with respect to those parameters. This is the exact structure used within PyBOP's gradient-based optimisers. Finally, the above can easily be reproduced via the `cost.__call__` method with the corresponding `calculate_grad=True` argument.

# In[ ]:


cost([0.5, 0.5], calculate_grad=True)
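# As an additional illustration (not part of the original example), the gradient returned by `cost.__call__` can be sanity-checked against a central finite-difference estimate. The perturbation size `eps` below is an illustrative assumption.

# In[ ]:


x = np.array([0.5, 0.5])
_, gradient = cost(x, calculate_grad=True)

eps = 1e-4  # illustrative finite-difference step
for i in range(len(x)):
    x_up, x_down = x.copy(), x.copy()
    x_up[i] += eps
    x_down[i] -= eps
    fd = (cost(x_up) - cost(x_down)) / (2 * eps)
    print(f"Parameter {i}: gradient {gradient[i]:.6f}, finite difference {fd:.6f}")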