#!/usr/bin/env python
# coding: utf-8

# In[2]:

import xarray as xr
import pandas as pd
import numpy as np

import calliope

calliope.set_log_verbosity('INFO', include_solver_output=False)


# # Model input

# In[3]:

# Initialise the model with the Urban Scale example model
m = calliope.examples.urban_scale()


# In[4]:

# Get information on the model
m.info()


# ## model_run
#
# m.\_model\_run is a Python dictionary. The leading underscore indicates that it is hidden by default
# (i.e. you won't see it via tab auto-completion and it isn't documented)

# In[5]:

# model_run holds all the data from the YAML and CSV files, restructured into one dictionary
m._model_run.keys()


# In[6]:

# Each location now holds all the information about a technology at that location
m._model_run['locations']['X2']['techs']['pv']


# In[7]:

# This includes location-specific overrides, such as energy_cap_max of 50 for the pv technology at location X3
m._model_run['locations']['X3']['techs']['pv']


# In[8]:

# All sets have also been collated.
# Locations and technologies are concatenated into loc::tech sets,
# to create a dense matrix and a smaller overall model size
m._model_run['sets']['loc_techs']


# In[9]:

# For every constraint, a set of loc_techs (or loc_tech_carriers) is prepared,
# so we only build the constraint over that set
m._model_run['constraint_sets']['loc_techs_energy_capacity_constraint']


# In[10]:

m._model_run['constraint_sets']['loc_techs_resource_area_constraint']


# In[11]:

# Timeseries data is stored as pandas DataFrames, having been loaded from CSV
m._model_run['timeseries_data']['pv_resource.csv'].head()


# ## model_data
#
# m.\_model\_data is an xarray Dataset

# In[12]:

# Users would usually access information for the initialised model using m.inputs
m.inputs


# In[13]:

# This is just a filtered view of the model_data Dataset, which includes all the information
# that will be sent to the solver
m._model_data


# In[14]:

# If timeseries aggregation of any kind has taken place, then m._model_data_original can be accessed
# to see the model data prior to aggregation
m._model_data_original  # In this case, it is the same as m._model_data


# In[15]:

# We can find the same PV energy_cap_max data as seen in m._model_run
m._model_data.energy_cap_max.loc[{'loc_techs': 'X2::pv'}]


# In[16]:

m._model_data.energy_cap_max.loc[{'loc_techs': 'X3::pv'}]


# In[17]:

# We can also see the constraint-specific set of loc::techs used to build the energy capacity constraint
m._model_data.loc_techs_energy_capacity_constraint


# In[18]:

# It is these constraint sets that we cannot see in m.inputs
m.inputs.loc_techs_energy_capacity_constraint


# # Run the model

# In[19]:

m.run()


# In[20]:

# Results are processed and merged into m._model_data, and can be viewed in m.results
m.results


# In[21]:

# As with inputs, the results dataset is a filtered view of m._model_data.
# All variables in `m.results` have the attribute `is_result` = 1
m._model_data.energy_cap


# In[22]:

# Data can also be reformatted to be easier to read (removes dimension concatenation).
# Conversion to a pandas DataFrame is a good idea for greater readability.
m.get_formatted_array('energy_cap').to_pandas()
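# The formatted array returned above is a standard xarray DataArray, so the usual xarray/pandas
# operations apply to it. The following cell is an illustrative sketch (not part of the original
# notebook); it assumes the location dimension is named 'locs', as get_formatted_array names it here.

# In[ ]:

# Illustrative sketch: total installed capacity per technology, summed over locations
m.get_formatted_array('energy_cap').sum('locs').to_pandas()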
# In[23]:

# Results with >2 dimensions cannot be easily viewed in a pandas DataFrame, unless a MultiIndex is used.
# To view a 4-dimensional result, we can use `to_series()`
m.get_formatted_array('carrier_prod').to_series().dropna()  # dropna() removes all NaN values


# ## backend_model
#
# m.\_backend\_model is a Pyomo data structure, to which Pyomo objects including Sets, Parameters, Constraints, and Variables are attached

# In[24]:

# A Set
m._backend_model.loc_techs_energy_capacity_constraint.pprint()


# In[25]:

# A Parameter
m._backend_model.energy_cap_max.pprint()


# In[26]:

# A Constraint
m._backend_model.energy_capacity_constraint.pprint()


# In[27]:

# A Variable
m._backend_model.energy_cap.pprint()


# ## Backend interface
# There are a few interface methods available to the standard user, i.e. ways of interacting with the backend without touching m.\_backend\_model directly

# In[28]:

# The inputs as used by Pyomo can be printed. This includes filled-in default data where necessary
pd.concat(
    (m.backend.access_model_inputs()['energy_cap_max'].to_pandas().rename('backend'),  # get the data from Pyomo
     m.inputs['energy_cap_max'].to_pandas().rename('pre-run')),  # get the data from model_data (via inputs)
    axis=1, sort=True
)


# In[29]:

# We can activate and deactivate constraints, such as switching off the energy capacity constraint
m.backend.activate_constraint('energy_capacity_constraint', False)  # set to True to activate
m._backend_model.energy_capacity_constraint.pprint()


# In[30]:

# Rerun the model with this constraint switched off.
# This will dump results to a new model object, *NOT* to m._model_data (or m.results)
new_model = m.backend.rerun()


# In[31]:

# The results are now updated, which we can compare to our old results
pd.concat(
    (new_model.results.energy_cap.to_pandas().rename('new'),
     m.results.energy_cap.to_pandas().rename('old')),
    axis=1, sort=True
)


# In[32]:

# We can also see that the Pyomo backend_model has been updated to the new values
m._backend_model.energy_cap.pprint()


# ## Plot

# In[33]:

# With the original data (i.e. the capacity constraint active), we can plot the capacities
m.plot.capacity()


# # Save

# In[ ]:

# We can save at any point, which will dump the entire m._model_data to file.
# NetCDF is recommended, as it retains most of the data and can be reloaded into a Calliope model at a later date.
m.to_netcdf('path/to/file.nc')  # Saves a single file
m.to_csv('path/to/folder')  # Saves a file for each xarray DataArray
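# As a brief follow-on sketch (not part of the original notebook): a model saved to NetCDF can be
# reloaded later with calliope.read_netcdf(), which returns a Model object with its inputs and, if
# the model has been run, its results. The path below is just the placeholder path used above.

# In[ ]:

# Illustrative sketch: reload the saved model from NetCDF (requires the file saved above to exist)
m_reloaded = calliope.read_netcdf('path/to/file.nc')
m_reloaded.results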