#!/usr/bin/env python
# coding: utf-8

# # Module 6
#
# ## Video 28: Aggregating Cargo Movements Data
# **Python for the Energy Industry**
#
# The CargoMovements endpoint provides granular data about individual cargoes. By aggregating this data, we can turn it into 'macro' data describing the flows of products. That is the subject of this lesson.
#
# [Cargo Movements documentation](https://vortechsa.github.io/python-sdk/endpoints/cargo_movements/)

# In[1]:

# initial imports
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
import vortexasdk as v

# We start by storing the datetimes and IDs needed to filter for crude flows out of the US in the last 7 weeks.

# In[2]:

# datetimes to access last 7 weeks of data
now = datetime.utcnow()
seven_weeks_ago = now - relativedelta(weeks=7)

# In[3]:

# find the ID for the United States (country layer only)
us = [g.id for g in v.Geographies().search('united states').to_list() if 'country' in g.layer]
assert len(us) == 1

# In[4]:

# find the ID for the Crude product group
crude = [p.id for p in v.Products().search('crude').to_list() if p.name == 'Crude']
assert len(crude) == 1

# In[5]:

# columns to pull out, and shortened names for them
required_columns = [
    # primary vessel info
    "vessels.0.name",
    "vessels.0.vessel_class",
    # product information and quantity
    "product.group.label",
    "product.category.label",
    "quantity",
    "status",
    # loading info
    "events.cargo_port_load_event.0.location.port.label",
    "events.cargo_port_load_event.0.end_timestamp",
    # unloading info
    "events.cargo_port_unload_event.0.location.port.label",
    "events.cargo_port_unload_event.0.location.country.label",
    "events.cargo_port_unload_event.0.end_timestamp",
]

new_labels = [
    "vessel_name",
    "vessel_class",
    "product_group",
    "product_category",
    "quantity",
    "status",
    "loading_port",
    "loading_finish",
    "unloading_port",
    "unloading_country",
    "unloading_finish",
]

relabel = dict(zip(required_columns, new_labels))

# Search for crude flows loading in the US (and not destined for another US location) in the last 7 weeks, and convert the results to a DataFrame.

# In[6]:

cms = v.CargoMovements().search(
    filter_activity='loading_end',
    filter_origins=us,
    exclude_destinations=us,
    filter_products=crude,
    filter_time_min=seven_weeks_ago,
    filter_time_max=now,
    cm_unit='b'
).to_df(columns=required_columns).rename(relabel, axis=1)

# In[7]:

# how many associated movements?
len(cms)

# We add a new column to the DataFrame giving the week of the year in which loading was completed:

# In[8]:

cms['loading_week'] = cms['loading_finish'].dt.isocalendar().week

# We then use `groupby` to aggregate the data by week, summing over the numeric data (`numeric_only=True` keeps the non-numeric columns out of the sum). This shows the total volume (in barrels) exported each week.

# In[9]:

weekly_quantity = cms.groupby('loading_week').sum(numeric_only=True)
weekly_quantity

# We could also break this information down further. Say we wanted to know the weekly exported quantity of each category of crude separately: it is possible to group by multiple columns.

# In[10]:

quantity_by_category = cms.groupby(by=['loading_week', 'product_category']).sum(numeric_only=True).reset_index()
quantity_by_category

# This is not the most intuitive way of displaying the data. We can 'pivot' the table so that each product category gets its own column:

# In[11]:

quantity_by_category = quantity_by_category.pivot(index='loading_week', columns='product_category', values='quantity')
quantity_by_category
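# One further option: pandas' `pivot_table` can combine the `groupby` and `pivot` steps into a single call. A minimal sketch, assuming the `cms` DataFrame built above:

# In[ ]:

# one-step alternative: sum quantity per (week, category) and pivot in one call
cms.pivot_table(index='loading_week',
                columns='product_category',
                values='quantity',
                aggfunc='sum')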
# The pivoted layout is easier to read, but a number of elements are NaN. These represent weeks with no exports of that category, so we replace them with zeroes.

# In[12]:

quantity_by_category = quantity_by_category.fillna(0)
quantity_by_category

# Another way of breaking down the exports data is by receiving country.

# In[13]:

quantity_by_destination = cms.groupby('unloading_country')[['quantity']].sum()
quantity_by_destination.sort_values(by='quantity', ascending=False, inplace=True)

# In[14]:

quantity_by_destination.head()

# We may not want to keep every recipient country separate, so we collect all countries outside the top 10 into an 'Other' category.

# In[15]:

# grab the top 10
top_destination_countries = quantity_by_destination.head(10)

# sum the quantities of the remaining countries and put them into a one-row DataFrame
rest = pd.DataFrame(index=['Other'], columns=['quantity'])
rest.loc['Other'] = quantity_by_destination.iloc[10:].sum().values

# join the two DataFrames
top_destination_countries = pd.concat([top_destination_countries, rest])

# Finally, we can add a new column containing the percentage of total exports represented by exports to that country:

# In[16]:

top_destination_countries['%'] = round(top_destination_countries['quantity'] * 100 / top_destination_countries['quantity'].sum(), 2)
top_destination_countries

# *Note: In the next module, we will see how the Cargo Time Series endpoint offers another approach to this same kind of analysis.*

# ### Exercise
#
# Make a pivoted table showing how exports to each of the top 5 destination countries varied over the 7 weeks.

# In[ ]:
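# A minimal sketch of one possible solution, reusing `cms` and `quantity_by_destination` from above (the names `top5` and `weekly_by_destination` are introduced here purely for illustration):

# In[ ]:

# top 5 destination countries by total exported quantity
top5 = quantity_by_destination.head(5).index

# restrict to those countries, sum quantity per (week, country), then pivot
weekly_by_destination = (
    cms[cms['unloading_country'].isin(top5)]
    .groupby(['loading_week', 'unloading_country'])['quantity']
    .sum()
    .reset_index()
    .pivot(index='loading_week', columns='unloading_country', values='quantity')
    .fillna(0)
)
weekly_by_destination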