#!/usr/bin/env python
# coding: utf-8

# # PEtab application example

# While the [PEtab yaml2sbml notebook](petab_yaml2sbml.ipynb) illustrates the basics of the pyABC PEtab import on a toy model, this notebook contains an application example based on real data.

# In[ ]:


# install if not done yet
get_ipython().system('pip install pyabc --quiet')


# In[1]:


import os

import amici.petab_import
import numpy as np
import petab

import pyabc
from pyabc.petab import AmiciPetabImporter


# We illustrate the usage of PEtab models using a model taken from the benchmark collection. The following cell clones the corresponding git repository.

# In[2]:


get_ipython().system('git clone --depth 1 https://github.com/LeonardSchmiester/Benchmark-Models.git tmp/benchmark-models || (cd tmp/benchmark-models && git pull)')


# Now we can import a problem, here the "Boehm_JProteomeRes2014" example, via PEtab and compile it to an AMICI ODE model:

# In[2]:


# read the PEtab problem from yaml
petab_problem = petab.Problem.from_yaml(
    "tmp/benchmark-models/hackathon_contributions_new_data_format/"
    "Boehm_JProteomeRes2014/Boehm_JProteomeRes2014.yaml"
)

# compile the PEtab problem to an AMICI ODE model
model = amici.petab_import.import_petab_problem(petab_problem)

# the solver to numerically solve the ODE
solver = model.getSolver()

# import everything to pyABC
importer = AmiciPetabImporter(petab_problem, model, solver)

# extract what we need from the importer
prior = importer.create_prior()
model = importer.create_model()
kernel = importer.create_kernel()


# Once everything has been compiled and imported, we can simply call the model:

# In[3]:


model(petab_problem.x_nominal_free_scaled)


# Now we can run an analysis using pyABC's exact sequential sampler under the assumption of measurement noise. Note that the following cell takes, depending on the available resources, minutes to hours to run. Also, the resulting database is not provided here.

# In[6]:


get_ipython().run_cell_magic('script', 'false --no-raise-error', '# Remove the %%script magic above to run this cell\n\n# this takes some time\n\nsampler = pyabc.MulticoreEvalParallelSampler(n_procs=20)\n\ntemperature = pyabc.Temperature()\nacceptor = pyabc.StochasticAcceptor(\n    pdf_norm_method=pyabc.ScaledPDFNorm())\n\nabc = pyabc.ABCSMC(\n    model, prior, kernel,\n    eps=temperature,\n    acceptor=acceptor,\n    sampler=sampler,\n    population_size=1000)\nabc.new("sqlite:////tmp/petab_amici_boehm.db", {})\nabc.run()\n')


# Now we can use pyABC's standard analysis and visualization routines to analyze the obtained posterior sample. In particular, we can extract parameter bounds and literature values from the PEtab problem:

# In[4]:


get_ipython().run_cell_magic('script', 'false --no-raise-error', '# Remove the %%script magic above to run this cell\n\nh = pyabc.History("sqlite:////tmp/petab_amici_boehm.db", _id=1)\nrefval = {k: v for k, v in zip(petab_problem.x_free_ids, petab_problem.x_nominal_free_scaled)}\nfor i, par in enumerate(petab_problem.x_free_ids):\n    pyabc.visualization.plot_kde_1d_highlevel(\n        h, x=par,\n        xmin=petab_problem.get_lb(scaled=True, fixed=False)[i],\n        xmax=petab_problem.get_ub(scaled=True, fixed=False)[i],\n        refval=refval, refval_color=\'k\')\n')


# Apparently, in this case seven of the nine parameters can be estimated with high confidence, while the remaining two can only be bounded.
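
# As an optional complement (a minimal sketch, not part of the original analysis), one could also compare the weighted posterior means of the last generation against the literature values stored in the PEtab problem. The cell below assumes that the database `/tmp/petab_amici_boehm.db` written by the run above exists and that the posterior parameter names match `petab_problem.x_free_ids`; it uses the `History.get_distribution` call from the pyABC API.

# In[ ]:


# minimal sketch: weighted posterior means vs. literature values
# (assumes the database from the ABC-SMC run above is present)
db_path = "/tmp/petab_amici_boehm.db"
if os.path.exists(db_path):
    h = pyabc.History(f"sqlite:///{db_path}", _id=1)
    # weighted particles of the last generation
    df, w = h.get_distribution(m=0, t=h.max_t)
    # weight each particle (row) and sum over rows to get the posterior mean per parameter
    posterior_mean = df.mul(w, axis=0).sum(axis=0)
    refval = dict(zip(petab_problem.x_free_ids, petab_problem.x_nominal_free_scaled))
    for par in petab_problem.x_free_ids:
        print(f"{par}: posterior mean {posterior_mean[par]:.3f}, literature {refval[par]:.3f}")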