#!/usr/bin/env python
# coding: utf-8

# The adaptive integrator
# =======================
#
# Tutorial for heyoka.py's ``taylor_adaptive`` class, the main entry point
# for adaptive Taylor-method ODE integration. As a worked example we
# integrate the simple pendulum,
#
#   x' = v
#   v' = -9.8 sin(x)
#
# with initial conditions x(0) = 0.05, v(0) = 0.025.

# Construction
# ------------

import heyoka as hy

# Create the symbolic state variables.
x, v = hy.make_vars("x", "v")

# Build the integrator from the ODE system and the initial conditions.
# By default it works in double precision with a tolerance equal to the
# machine epsilon (~2.2e-16), from which heyoka.py deduces a Taylor
# order of 20. The integrator owns its state vector and time variable;
# the time starts at zero.
ta = hy.taylor_adaptive(
    # x' = v, v' = -9.8*sin(x).
    sys=[(x, v), (v, -9.8 * hy.sin(x))],
    # Initial values for x and v.
    state=[0.05, 0.025],
)

ta

# Single timestep
# ---------------
#
# step() advances the system by one adaptive timestep and returns a
# (status flag, step size) pair; the step size is chosen to respect the
# error tolerance. State and time are updated in place.
oc, h = ta.step()

print("Outcome : {}".format(oc))
print("Timestep: {}".format(h))

ta

# step_backward() performs a single step backward in time.
oc, h = ta.step_backward()

print("Outcome : {}".format(oc))
print("Timestep: {}".format(h))

# step() also accepts max_delta_t: when the adaptive timestep would
# exceed it in absolute value, the step is clamped and the outcome
# becomes ``time_limit`` instead of ``success``. A negative value
# clamps a backward step.
oc, h = ta.step(max_delta_t=0.01)

print("Outcome : {}".format(oc))
print("Timestep: {}\n".format(h))

oc, h = ta.step(max_delta_t=-0.02)

print("Outcome : {}".format(oc))
print("Timestep: {}".format(h))

# WARNING: if the exact solution is a polynomial in time (e.g. free
# rectilinear motion, constant-gravity free fall), the automatic
# timestep deduction may return an infinite step. In such cases call
# the step functions with a finite max_delta_t. The propagate_*()
# functions below are not affected by this issue.

# Accessing state and time
# ------------------------
#
# Both the time (a scalar) and the state (a NumPy array) can be read
# and written.
print("Current time : {}".format(ta.time))
print("Current state vector: {}\n".format(ta.state))

# Reset time and state to the initial values. For lifetime-management
# reasons in the underlying C++ bindings, ``ta.state = [...]`` is NOT
# supported: use slice assignment instead.
ta.time = 0.
ta.state[:] = [0.05, 0.025]

print("Current time : {}".format(ta.time))
print("Current state vector: {}".format(ta.state))

# Individual components can be assigned directly as well.
ta.state[0] = 0.05
ta.state[1] = 0.025

# Time-limited propagation
# ------------------------
#
# propagate_for() integrates over a time interval, propagate_until()
# up to a given epoch. Both return a 5-tuple: outcome (``time_limit``
# barring errors), min and max timestep used, total number of steps,
# and the continuous-output object if requested (off by default).
status, min_h, max_h, nsteps, _ = ta.propagate_for(delta_t=5.)

print("Outcome : {}".format(status))
print("Min. timestep: {}".format(min_h))
print("Max. timestep: {}".format(max_h))
print("Num. of steps: {}".format(nsteps))
print("Current time : {}\n".format(ta.time))

status, min_h, max_h, nsteps, _ = ta.propagate_until(t=20.)

print("Outcome : {}".format(status))
print("Min. timestep: {}".format(min_h))
print("Max. timestep: {}".format(max_h))
print("Num. of steps: {}".format(nsteps))
print("Current time : {}".format(ta.time))

# Propagation backward in time works the same way. Note that the
# propagation stops with outcome ``err_nf_state`` if a non-finite
# state value is detected at the end of a step.
status, min_h, max_h, nsteps, _ = ta.propagate_until(t=0.)

print("Outcome : {}".format(status))
print("Min. timestep: {}".format(min_h))
print("Max. timestep: {}".format(max_h))
print("Num. of steps: {}".format(nsteps))
print("Current time : {}\n".format(ta.time))
print(ta)

# Both methods accept optional keyword arguments:
#   - max_delta_t: clamp on the timestep size (absolute value);
#   - callback: invoked after every step with the integrator as its
#     only argument; returning True continues the integration,
#     anything else interrupts it;
#   - c_output: boolean flag enabling continuous output.


def cb(ta):
    # Report the integrator time after each step.
    print("Current time: {}".format(ta.time))
    return True


ta.propagate_until(t=.5, max_delta_t=.1, callback=cb)


# A callback may additionally implement pre_hook(), which is invoked
# once before the first step is taken.
class cb:
    def __call__(self, ta):
        print("Current time: {}".format(ta.time))
        return True

    def pre_hook(self, ta):
        print("pre_hook() invoked!")


ta.propagate_until(t=1.5, max_delta_t=.1, callback=cb())

# Propagation over a time grid
# ----------------------------
#
# propagate_grid() samples the solution on a user-supplied grid of
# epochs via dense output. When the grid is denser than the typical
# timestep, this is noticeably cheaper than repeatedly calling
# propagate_until(): evaluating the dense output is much faster than
# taking a full integration step. Grid values must be finite and
# monotonically ordered, but are otherwise unconstrained.

# Reset to the initial conditions first.
ta.time = 0.
ta.state[:] = [0.05, 0.025]

out = ta.propagate_grid(grid=[.1, .2, .3, .4, .5, .6, .7, .8, .9, 1.])

out

# The first four returned values match the other propagate_*()
# functions; the fifth is a 2D array holding the state at every grid
# point.
print("State at t = 0.4: {}".format(out[4][3]))


# propagate_grid() supports the same optional max_delta_t and callback
# keyword arguments as the other propagate_*() methods.
def cb(ta):
    print("Current time: {}".format(ta.time))
    return True


ta.propagate_grid(grid=[1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.],
                  max_delta_t=.1, callback=cb)


# ...including callbacks exposing a pre_hook() method, invoked once
# before the first step.
class cb:
    def __call__(self, ta):
        print("Current time: {}".format(ta.time))
        return True

    def pre_hook(self, ta):
        print("pre_hook() invoked!")


ta.propagate_grid(grid=[1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.],
                  max_delta_t=.1, callback=cb())