#!/usr/bin/env python # coding: utf-8 # # Time-explicit LCA of an electric vehicle # # This notebook shows how to use `bw_timex` with a cradle-to-grave case study of an electric vehicle. The case study is simplified, not meant to reflect the complexity of electric mobility but to demonstrate hot to use `bw_timex`. # # More information on the inner workings of `bw_timex` can be found [here](https://timex.readthedocs.io/en/latest/content/theory.html). # In[1]: import bw2data as bd bd.projects # bd.projects.set_current("timex_example_electric_vehicle") # In[2]: if 'bw25_premise_background_v2' not in bd.projects: import bw2io as bi bi.backup.restore_project_directory(fp= '/Users/ajakobs/Documents/prospective_dynamic_lca/brightway2-project-bw25_premise_background_v2-backup.26-March-2024-01-40PM.tar.gz', overwrite_existing=True) # In[3]: bd.projects.set_current('bw25_premise_background_v2') # ## Prospective databases # # Using [Ecoinvent v3.9](https://ecoinvent.org/) and [`premise`](https://github.com/polca/premise), we created a set of prospective databases with projections for the future electricity sectors using the SSP2-RCP19 pathway from the IAM IMAGE. We selected this pathway to simply demonstrate some future development in this case study, and many other models and pathways are available. # In the [documentation](https://premise.readthedocs.io/en/latest/) of `premise`, you can find instructions for the creation of prospective background databases. 
#

# In[4]:

# Handles to the prospective background databases (created with premise, see
# the markdown cell above).
db_2020 = bd.Database("db_2020")
db_2030 = bd.Database("db_2030")
db_2040 = bd.Database("db_2040")

# ## Case study setup
#
# Let's create a new foreground database to store the EV process:

# In[5]:

# Re-create the foreground database from scratch on every run. The deletion is
# guarded: on a fresh project no "foreground" database exists yet, and an
# unguarded `del bd.databases["foreground"]` would raise KeyError.
if "foreground" in bd.databases:
    del bd.databases["foreground"]
foreground = bd.Database("foreground")
foreground.write({})  # register an (initially empty) database

# Getting the input processes of the EV from the background database:

# In[6]:

glider_production = bd.get_activity(("db_2020", "133b33cc867081af144475d62179286b"))
powertrain_production = bd.get_activity(
    ("db_2020", "f6d3f0b01e4a38c055e3c5c1356a4bba")
)  # eol included
battery_production = bd.get_activity(("db_2020", "ba87aff6361d99be2636e8c59e55a5b2"))
electricity_production = bd.get_activity(
    ("db_2020", "fec93a95a9a84d7fa0ede9c3082bb79f")
)
glider_eol = bd.get_activity(("db_2020", "f8114e0ff375b3c6d72ccfa49f79e44d"))
battery_eol = bd.get_activity(("db_2020", "82ebcdf42e8512cbe00151dda6210d29"))

# Creating the EV activity:

# In[7]:

fg = bd.Database("foreground")

# In[8]:

fg.new_node("EV_lifecycle", name="Electric vehicle, lifecycle", unit="unit").save()
ev_lifecycle = fg.get("EV_lifecycle")

# Here are some parameter assumptions for the EV:

# In[9]:

ELECTRICITY_CONSUMPTION = 0.2  # kWh/km
MILEAGE = 150_000  # km
LIFETIME = 16  # years

# Overall mass: 1200 kg
MASS_GLIDER = 840  # kg
MASS_POWERTRAIN = 80  # kg
MASS_BATTERY = 280  # kg

# Based on these parameters, we create the following exchanges:

# In[10]:

# production exchange (reference flow of the EV life cycle itself)
ev_lifecycle.new_edge(input=ev_lifecycle, amount=1, type="production").save()

# In[11]:

glider_production_exchange = ev_lifecycle.new_edge(
    input=glider_production, amount=MASS_GLIDER, type="technosphere"
)
powertrain_production_exchange = ev_lifecycle.new_edge(
    input=powertrain_production, amount=MASS_POWERTRAIN, type="technosphere"
)
battery_production_exchange = ev_lifecycle.new_edge(
    input=battery_production, amount=MASS_BATTERY, type="technosphere"
)
electricity_production_exchange = ev_lifecycle.new_edge(
    input=electricity_production,
    amount=ELECTRICITY_CONSUMPTION * MILEAGE,  # total lifetime electricity demand in kWh
    type="technosphere",
)
glider_eol_exchange = ev_lifecycle.new_edge(
    input=glider_eol,
    amount=MASS_GLIDER,  # amount not negative as it's not modeled as a "true" waste process
    type="technosphere",
)
battery_eol_exchange = ev_lifecycle.new_edge(
    input=battery_eol, amount=-MASS_BATTERY, type="technosphere"
)

# We need to add the temporal distributions at the exchanges level.
# See the [bw_temporalis documentation](https://github.com/brightway-lca/bw_temporalis)
# for more information on `TemporalDistribution`. Timedelta, which we use
# below, describes relative temporal relations, e.g. 2 years earlier.

# In[12]:

from bw_temporalis import TemporalDistribution, easy_timedelta_distribution
import numpy as np

# In[13]:

# Production spread over the 5 years leading up to the functional unit.
td_production = easy_timedelta_distribution(
    start=-4,
    end=0,
    resolution="Y",  # "Y": "Years", "M": "Months", "D": "Days", "h": "Hours" etc.
    steps=5,
    kind="triangular",  # available kinds: "triangular", "uniform", "normal"
    param=-1,
)

# Use phase: electricity consumption uniformly spread over the EV lifetime.
td_use_phase = easy_timedelta_distribution(
    start=0,  # (inclusive)
    end=LIFETIME,  # (inclusive)
    resolution="Y",
    steps=(LIFETIME + 1),  # Includes both start and end
    kind="uniform",
)

# End of life: a single point in time, one year after the lifetime ends.
td_eol = TemporalDistribution(
    date=np.array([LIFETIME + 1], dtype="timedelta64[Y]"),
    amount=np.array([1]),
    # if you build a TD manually, make sure that length of date array ==
    # length of amount array, and the sum of elements in the amount array == 1
)

# Let's explore what a `TemporalDistribution` looks like:

# In[14]:

print("td_production.date [s]: ", td_production.date)
print("td_production.amount [-]:", td_production.amount)
type(td_production)

# We now add the temporal information to the inputs of our EV. We add temporal
# distributions to all exchanges, but you don't have to.
#

# In[15]:

# Attach the temporal distributions to the exchanges and persist them; the
# "temporal_distribution" key is what bw_timex/bw_temporalis reads during
# graph traversal.
glider_production_exchange["temporal_distribution"] = td_production
glider_production_exchange.save()
powertrain_production_exchange["temporal_distribution"] = td_production
powertrain_production_exchange.save()
battery_production_exchange["temporal_distribution"] = td_production
battery_production_exchange.save()
electricity_production_exchange["temporal_distribution"] = td_use_phase
electricity_production_exchange.save()
glider_eol_exchange["temporal_distribution"] = td_eol
glider_eol_exchange.save()
battery_eol_exchange["temporal_distribution"] = td_eol
battery_eol_exchange.save()

# ## LCA using `bw_timex`
#
# As usual, we need to select a method:

# In[16]:

method = ("EF v3.1", "climate change", "global warming potential (GWP100)")

# `bw_timex` needs to know the representative time of the databases:

# In[17]:

from datetime import datetime

database_date_dict = {
    "db_2020": datetime.strptime("2020", "%Y"),
    "db_2030": datetime.strptime("2030", "%Y"),
    "db_2040": datetime.strptime("2040", "%Y"),
    "foreground": "dynamic",  # flag databases that should be temporally distributed with "dynamic"
}

# Now, we can instantiate a `TimexLCA`. It has a similar structure to
# `bw2calc.LCA`, but with the additional argument `database_date_dict`.
#
# Not sure about the required inputs? Check the documentation using `?`

# In[18]:

from bw_timex import TimexLCA

# IPython help magic (equivalent of `TimexLCA?`); only works inside
# IPython/Jupyter, not as a plain script.
get_ipython().run_line_magic('pinfo', 'TimexLCA')

# Let's create a `TimexLCA` object for our EV life cycle:

# In[19]:

tlca = TimexLCA({ev_lifecycle.key: 1}, method, database_date_dict)

# Next, we build a timeline of the exchanges. To do this, we can call the
# `build_timeline()` method, which does the graph traversal and creates a
# timeline dataframe from the results. The exchanges (rows of the dataframe)
# are aggregated to the resolution specified in the argument
# `temporal_grouping`. There are also many more options to specify the
# timeline creation and graph traversal process. Here are the most important
# ones:
# - `temporal_grouping`: temporal resolution to which processes will be
#   aggregated, "year" (default), "month", "day" or "hour"
# - `interpolation_type`: How the best fitting background database is
#   selected: "linear" (default), "closest"
# - `edge_filter_function`: Custom filter function specifying when to stop the
#   graph traversal.
# - `cutoff`: stops graph traversal for nodes below this contribution to the
#   static impact score.
# - `max_calc`: stops graph traversal if this number of nodes has been
#   traversed
#
# For all these options, we provide sensible default values. Of course you can
# always just check the docstrings to see all your options and our assumptions
# for default values.
#
# So, let's build the timeline:

# In[20]:

tlca.build_timeline()

# The interpolation weights in the timeline (rightmost column above) specify
# the share of the amount of an exchange to be sourced from the respective
# database. `None` means that the exchange is in the foreground supply chain,
# and not at the intersection with the background system.
# Next, we calculate the time-explicit LCI. Here we have the option to expand
# the technosphere matrix, in case we want to have further access to the
# standard BW matrix based functions, or if we want just the dynamic
# inventory. The `TimexLCA.lci()` function takes care of all the relinking if
# necessary, based on the information from the timeline. For the expanded
# matrix set expand_matrix=True (default).

# In[21]:

# first try without the expanded matrix
tlca.lci(expand_technosphere=False)

# In[24]:

# NOTE(review): calling static_lcia() right after an LCI computed with
# expand_technosphere=False may not be supported by bw_timex — confirm; the
# expanded-matrix variant is computed further below.
tlca.static_lcia()

# In[24]:

tlca.dynamic_biomatrix

# Taking a look at the `dynamic_inventory` that was now created, we can see
# that it has more rows (emissions) than our usual biosphere3 flows. Instead
# of one row for each emission in the biosphere database we now get one row
# for each emission at each point in time.
# In[25]: tlca.dynamic_inventory # In[26]: tlca.dynamic_inventory_df # In[25]: # now let's calculate the lci with matrix expansion tlca.lci(expand_technosphere=True) # In[26]: tlca.static_lcia() # # In[28]: tlca.dynamic_biomatrix # In[29]: tlca.dynamic_inventory # In[30]: tlca.dynamic_inventory_df # The standard, non-dynamic inventory has far less rows because the temporal resolution is missing. Looking at the timeline again, we see that we have processes at 21 different points in time, which should exactly match the ratio of the dimensions of our two inventories: # In[24]: tlca.inventory # In[25]: tlca.dynamic_inventory.shape[0] / tlca.inventory.shape[0] # If we are only interested in the new overall time-explicit scores and don't care about the timing of the emissions, we can set `build_dynamic_biosphere=False` (default is `True`), which saves time and memory. In that case, you only get the `TimexLCA.inventory`, but not the `TimexLCA.dynamic_inventory`. # In case the timing of emissions is not important, one can directly calculate the LCIA the "standard way" using static characterization methods. Per default, the following calculates the static lcia score based on the impact method chosen in the very beginning: # In[ ]: tlca.lci(build_dynamic_biosphere=False) tlca.static_lcia() tlca.score #kg CO2-eq # ## Dynamic Characterization # In addition to the standard static characterization, the time-explicit, dynamic inventory generated by a `TimexLCA` allows for dynamic characterization. Users can provide their own dynamic characterization functions and link them to corresponding biosphere flows (see example on [dynamic characterization](https://github.com/TimoDiepers/timex/blob/main/notebooks/example_simple_dynamic_characterization.ipynb)), or use the ones we provide out of the box. # # We provide two different metrics for dynamic LCIA of Climate Change: Radiative forcing [W/m2] and Global Warming Potential (GWP) [kg CO2-eq]. 
# For both of these metrics, we have parameterized dynamic characterization
# functions for all GHG's that
# [IPCC AR6](https://www.ipcc.ch/report/ar6/wg1/chapter/chapter-7/) provides
# data for.
#
# For the dynamic characterization, users can also choose the length of the
# considered time horizon (`time_horizon`) and whether it is a fixed time
# horizon (`fixed_time_horizon`). Fixed means that the time horizon for all
# emissions (no matter when they occur) starts counting at the time of the
# functional unit, resulting in shorter time horizons for emissions occurring
# later. If the time horizon is not fixed (this is what conventional impact
# assessment factors assume), it starts counting from the timing of the
# emission.
#
# ### Radiative forcing
#
# Let's characterize our dynamic inventory, regarding radiative forcing with a
# fixed time horizon and the default time horizon length of 100 years:

# In[32]:

tlca.dynamic_lcia(metric="radiative_forcing", fixed_time_horizon=True)

# The method call returns a dataframe of all the individual emissions at their
# respective timesteps, but we can also just look at the overall score:

# In[27]:

# inventory from timeline
tlca.dynamic_score  # W/m2 (radiative forcing)

# In[33]:

tlca.dynamic_score  # W/m2 (radiative forcing)

# To visualize the results, we provide a simple plotting function:

# In[34]:

tlca.plot_dynamic_characterized_inventory(sum_emissions_within_activity=True)

# In[ ]:

tlca.plot_dynamic_characterized_inventory(sum_emissions_within_activity=True)

# Without summing up the emissions within the activity, one can see that there
# are also negative emissions in the system, which stem from the
# premise-induced BECCS in the future electricity production:

# In[ ]:

tlca.plot_dynamic_characterized_inventory()

# There is also a flag to plot the cumulative radiative forcing:

# In[ ]:

tlca.plot_dynamic_characterized_inventory(sum_activities=True, cumsum=True)

# ### GWP
#
# Similar options are available for the metric GWP, which compares the
# radiative forcing of a GHG to that of CO2 over a certain time horizon
# (commonly 100 years, but it can be set flexibly in `time_horizon`).

# In[ ]:

tlca.dynamic_lcia(metric="GWP", fixed_time_horizon=False, time_horizon=70)
tlca.dynamic_score  # kg CO2-eq (GWP)

# Plotting the GWP results over time:

# In[ ]:

tlca.plot_dynamic_characterized_inventory()

# Cumulative:

# In[ ]:

tlca.plot_dynamic_characterized_inventory(sum_emissions_within_activity=True, cumsum=True)

# ### Comparison of time-explicit results to static results
#
# It's helpful to understand how the time-explicit results differ from those
# using static assessments.
#
# We compare the time-explicit results with those of an LCA for the year 2020
# and 2040 for the standard GWP100 metric (time horizon=100 and no fixed time
# horizon). This means we neglect the additional differences of the
# time-explicit results that would arise from using dynamic LCIA.
# Time-explicit scores:

# In[ ]:

tlca.dynamic_lcia(metric="GWP", fixed_time_horizon=False, time_horizon=100)
tlca.dynamic_score

# The 2020 (static) score has already been calculated by TimexLCA in the
# beginning, originally to set the priorities for the graph traversal. But we
# can still access the score:

# In[ ]:

tlca.base_lca.score

# However, further down we also want to look at what part of the life cycle
# has what contribution.
# To get this info, we need some more calculations:

# In[ ]:

import bw2calc as bc

# 2020 (static) contribution scores: one separate LCA per input exchange of
# the EV life cycle.
static_scores = {}
for e in ev_lifecycle.exchanges():
    # Skip the production exchange (the EV life cycle itself). Compare keys on
    # both sides: `e.input` is an Activity node, so comparing it against the
    # key *tuple* (`ev_lifecycle.key`) would never match and the production
    # exchange would be double-counted.
    if e.input.key == ev_lifecycle.key:
        continue
    lca = bc.LCA({e.input: e.amount}, method)
    lca.lci()  # one could probably do this more efficiently by using .redo_lcia, but who doesn't like a 15s break :)
    lca.lcia()
    static_scores[e.input["name"]] = lca.score

# Similarly, we calculate the 2040 (prospective) scores by just changing the
# database the exchanges point to:

# In[ ]:

# first create a copy of the system and relink to processes from 2040 database
prospective_ev_lifecycle = ev_lifecycle.copy()
for exc in prospective_ev_lifecycle.exchanges():
    if exc.input.key == prospective_ev_lifecycle.key:  # keep the production exchange untouched
        continue
    # Find the equivalent process in db_2040 by name/product/location.
    exc.input = bd.get_node(
        **{
            "database": "db_2040",
            "name": exc.input["name"],
            "product": exc.input["reference product"],
            "location": exc.input["location"],
        }
    )
    exc.save()

prospective_scores = {}
for e in prospective_ev_lifecycle.exchanges():
    if e.input.key == prospective_ev_lifecycle.key:  # skip production exchange (key comparison, see above)
        continue
    lca = bc.LCA({e.input: e.amount}, method)
    lca.lci()
    lca.lcia()
    prospective_scores[e.input["name"]] = lca.score

# Let's compare the overall scores:

# In[ ]:

print("Static score: ", sum(static_scores.values()))  # should be the same as tlca.base_lca.score
print("Prospective score: ", sum(prospective_scores.values()))
print("Time-explicit score: ", tlca.dynamic_score)

# To better understand what's going on, let's plot the scores as a waterfall
# chart based on timing of emission.
# Also, we can look at the "first-level contributions":

# In[ ]:

from bw_timex.utils import plot_characterized_inventory_as_waterfall

# Stacking order for the waterfall bars: chronological, from production
# through the use phase to end of life.
order_stacked_activities = [
    "market for glider, passenger car",
    "market for powertrain, for electric passenger car",
    "battery production, Li-ion, LiMn2O4, rechargeable, prismatic",
    "market group for electricity, low voltage",
    "market for manual dismantling of used electric passenger car",
    "market for used Li-ion battery",
]

# Waterfall of the characterized time-explicit inventory, flanked by the
# static (2020) and prospective (2040) scores computed above.
plot_characterized_inventory_as_waterfall(
    tlca.characterized_inventory,
    metric=tlca.metric,
    static_scores=static_scores,
    prospective_scores=prospective_scores,
    order_stacked_activities=order_stacked_activities,
)

# One can see that the time-explicit results (in the middle) are somewhere in
# between the static and the prospective results. This makes sense as at each
# timestep, the underlying processes are sourced from progressively "cleaner"
# background databases, reaching a lower impact than if they are only sourced
# from the current database, but not so low as the prospective results, which
# are fully sourced from the most decarbonized database. Notably, the
# electricity consumption in the use-phase, modelled uniformly over the
# lifetime of the EV, contributes less and less to the score in the later
# years, since the electricity becomes cleaner in the future databases.