%config IPCompleter.greedy=True
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import geopandas as gpd
import math
import numpy as np
import json
import seaborn as sns
import folium
import os
from shapely.geometry import Point, Polygon
from descartes import PolygonPatch
from mpl_toolkits.axes_grid1 import AxesGrid
from matplotlib.offsetbox import AnchoredText
Let's have a look at the model grid data contained in the Area Peril dictionary file. Note that the dictionary is only metadata and is not required for model execution.
# Area peril dictionary: grid-cell metadata (not needed at model run time).
area_peril_dict_path = "../keys_data/PiWind/areaperil_dict.csv"
area_peril_dictionary = pd.read_csv(area_peril_dict_path)
area_peril_dictionary.head()
Let's plot the area peril cells on a map of the UK. For this model, the area perils form a simple uniform square grid.
# Interactive map centred on the model domain.
m = folium.Map(location=[52.737027, -0.914618], zoom_start=11, tiles='cartodbpositron')

# Convenience lower-case lat/lon columns, reused by later cells.
area_peril_dictionary['lat'] = area_peril_dictionary['LAT1']
area_peril_dictionary['lon'] = area_peril_dictionary['LON1']

# The grid is a uniform square, so the cell size follows from the lat/lon
# range and the number of cells per side.
num_cells = area_peril_dictionary.lat.count()
num_cells_per_side = math.sqrt(num_cells)
cell_size_lat = (max(area_peril_dictionary.lat) - min(area_peril_dictionary.lat)) / (num_cells_per_side - 1)
cell_size_lon = (max(area_peril_dictionary.lon) - min(area_peril_dictionary.lon)) / (num_cells_per_side - 1)

# Build one polygon per cell (corner at the cell's lat/lon, extending one
# cell size north and east), then add them all to the map as a SINGLE
# GeoJson layer. The original created a one-row GeoDataFrame and a separate
# folium layer per cell, which is O(n) layers and very slow to render.
geometry = [
    Polygon([
        (row.lon, row.lat),
        (row.lon, row.lat + cell_size_lat),
        (row.lon + cell_size_lon, row.lat + cell_size_lat),
        (row.lon + cell_size_lon, row.lat),
    ])
    for _, row in area_peril_dictionary.iterrows()
]
gdf = gpd.GeoDataFrame(
    {'Description': ['All'] * len(geometry)},
    crs='epsg:4326',
    geometry=geometry,
)
folium.GeoJson(gdf).add_to(m)
m.save("piwind_extent_map.html")
%%HTML
<iframe width="100%" height=350 src="piwind_extent_map.html"></iframe>
Let's have a look at the data contained in the Intensity Bin dictionary file. Note that the dictionary is only metadata and is not required for model execution.
# Intensity bin dictionary: metadata mapping intensity bin indices to
# hazard values.
intensity_bin_dict_path = "../model_data/PiWind/intensity_bin_dict.csv"
intensity_bin_dictionary = pd.read_csv(intensity_bin_dict_path)
intensity_bin_dictionary.head()
Let's have a look at the data contained in the footprint file.
# Event footprints: one row per event / area peril / intensity bin.
footprint_path = "../model_data/PiWind/footprint.csv"
footprints = pd.read_csv(footprint_path)
footprints.head()
Let's visualize the first five event footprints.
# Derive a "row-col" grid-cell label from AREA_PERIL_ID, assuming the IDs
# number a 10x10 grid in row-major order starting at 1
# (ID 1 -> "1-1", ID 10 -> "1-10", ID 11 -> "2-1").
area_peril_dictionary['gridcell'] = area_peril_dictionary['AREA_PERIL_ID'].apply(
    lambda ap: str(int((ap-1)/10)+1)+"-"+str(ap-(int((ap-1)/10))*10))
# Attach the hazard intensity value (from the intensity bin dictionary)
# and the cell location (from the area peril dictionary) to each footprint row.
footprints_with_hazard = footprints.merge(
    intensity_bin_dictionary, how='inner',
    left_on='intensity_bin_id', right_on='bin_index').merge(
    area_peril_dictionary, how='inner',
    left_on='areaperil_id', right_on='AREA_PERIL_ID')
# Keep only peril 'WTC' rows with COVERAGE_TYPE 1 — presumably windstorm /
# buildings coverage; confirm against the OED specification.
footprints_with_hazard = footprints_with_hazard[footprints_with_hazard['PERIL_ID']=='WTC']
footprints_with_hazard = footprints_with_hazard[footprints_with_hazard['COVERAGE_TYPE']==1]
# Five panels side by side (events 1-5) sharing a single colorbar on the right.
fig = plt.figure(figsize=(20,10))
grid = AxesGrid(fig, 111,
                nrows_ncols=(1, 5),
                axes_pad=0.05,
                share_all=True,
                label_mode="L",
                cbar_location="right",
                cbar_mode="single",
                )
# One common color scale across all events so the panels are comparable.
vmin = min(footprints_with_hazard.interpolation)
vmax = max(footprints_with_hazard.interpolation)
for idx, ax in enumerate(grid):
    # Rasterize event idx+1 onto a 10x10 array. The 10-int(i) flip puts
    # grid row 1 at the bottom of the image (imshow draws row 0 at the top).
    a = np.zeros([10, 10])
    for __, row in footprints_with_hazard[footprints_with_hazard.event_id == idx+1].iterrows():
        i, j = row.gridcell.split('-')
        a[10-int(i), int(j)-1] = row.interpolation
    im = ax.imshow(a, cmap=plt.cm.get_cmap('Blues'), vmin=vmin, vmax=vmax,
                   extent=(
                       min(area_peril_dictionary.lon), max(area_peril_dictionary.lon),
                       min(area_peril_dictionary.lat), max(area_peril_dictionary.lat)))
    ax.set_xlabel("longitude")
    ax.set_ylabel("latitude")
    # Annotate each panel with its event ID (loc=2 is the upper-left corner).
    at = AnchoredText(
        "Event ID = {}".format(idx + 1),
        prop=dict(size=8),
        frameon=True,
        loc=2,
    )
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
# Shared colorbar, labelled with the hazard units.
grid[0].cax.colorbar(im)
cax = grid.cbar_axes[0]
axis = cax.axis[cax.orientation]
axis.label.set_text("Intensity - Peak gust (mph)")
plt.show()
Let's have a look at the data contained in the Damage Bin dictionary file. Note that this dictionary is required for model execution.
# Damage bin dictionary — required for model execution.
damage_bin_dict_path = "../model_data/PiWind/damage_bin_dict.csv"
damage_bin_dictionary = pd.read_csv(damage_bin_dict_path)
damage_bin_dictionary.head()
Let's have a look at the data contained in the Vulnerability file.
# Vulnerability curves: damage probability per vulnerability / intensity bin.
vulnerability_path = "../model_data/PiWind/vulnerability.csv"
vulnerabilities = pd.read_csv(vulnerability_path)
vulnerabilities.head()
The model has separate vulnerability curves for Residential, Commercial and Industrial occupancies. Let's visualise these curves.
# Join each vulnerability probability to its hazard intensity and damage
# fraction. Both bin dictionaries share column names ('bin_index',
# 'interpolation'), hence the _i (intensity) / _d (damage) suffixes.
vulnerabilities_with_hazard_and_damage = vulnerabilities.merge(
    intensity_bin_dictionary, how='inner',
    left_on='intensity_bin_id', right_on='bin_index').merge(
    damage_bin_dictionary, how='inner',
    suffixes=["_i", "_d"], left_on='damage_bin_id', right_on='bin_index')
# Three panels (one per occupancy type) sharing a single colorbar.
fig = plt.figure(figsize=(10,20))
grid = AxesGrid(fig, 111,
                nrows_ncols=(1, 3),
                axes_pad=0.05,
                share_all=True,
                label_mode="L",
                cbar_location="right",
                cbar_mode="single",
                )
# Common probability color scale across the three curves.
vmin = 0.0
vmax = max(vulnerabilities_with_hazard_and_damage.probability)
labels = ["Residential", "Commercial", "Industrial"]
for idx, ax in enumerate(grid):
    # 29 intensity bins x 12 damage bins, filled from vulnerability_id idx+1.
    # NOTE(review): imshow maps array rows to the y axis, which the extent
    # labels as damage — but rows here are indexed by intensity bin, so the
    # image may be transposed relative to its axes; confirm intended layout.
    a = np.zeros((29, 12))
    for index, row in vulnerabilities_with_hazard_and_damage[
            vulnerabilities_with_hazard_and_damage.vulnerability_id == idx + 1].iterrows():
        a[int(row.bin_index_i-1), 11-int(row.bin_index_d-1)] = row.probability
    im = ax.imshow(a, cmap=plt.cm.get_cmap('Blues'), vmin=vmin, vmax=vmax,
                   extent=(
                       min(intensity_bin_dictionary.interpolation), max(intensity_bin_dictionary.interpolation),
                       min(damage_bin_dictionary.interpolation) * 100, max(damage_bin_dictionary.interpolation) * 100))
    # Label the panel with its occupancy type (loc=2 is the upper-left corner).
    at = AnchoredText(labels[idx],
                      prop=dict(size=8), frameon=True,
                      loc=2,
                      )
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.set_xlabel("Intensity - Peak gust (mph)")
    ax.set_ylabel("Damage")
# Shared colorbar, labelled with the plotted quantity.
grid[0].cax.colorbar(im)
cax = grid.cbar_axes[0]
axis = cax.axis[cax.orientation]
axis.label.set_text("Probability of damage")
plt.show()
To run the model, we need some test exposure data. Let's have a look at an example Location file and Account file.
# Example OED source Location and Account files used as test exposure data.
source_loc_path = '../tests/inputs/SourceLocOEDPiWind.csv'
source_acc_path = '../tests/inputs/SourceAccOEDPiWind.csv'
test_locations = pd.read_csv(source_loc_path)
test_locations.head()
test_accounts = pd.read_csv(source_acc_path)
test_accounts.head()
To run the model, we also need to define some analysis settings. Let's have a look at an example settings file.
# Load the analysis settings and pretty-print them. json.load reads directly
# from the file handle; the original stripped newlines by hand before calling
# json.loads, which is unnecessary — JSON treats newlines as insignificant
# whitespace (they cannot appear unescaped inside string literals anyway).
with open('../analysis_settings.json', 'r') as myfile:
    analysis_settings = json.load(myfile)
print(json.dumps(analysis_settings, indent=True))
We can now run the model using the Oasis MDK.
! rm -rf /tmp/analysis_test
! oasislmf model run -C ../oasislmf.json -r /tmp/analysis_test
Let's visualize the output of our analysis.
# Load the ground-up loss exceedance-probability curves written by the run:
# AEP (aggregate) and OEP (occurrence), full-uncertainty variants.
analysis_directory = "/tmp/analysis_test"
gul_aep = pd.read_csv(os.path.join(analysis_directory, "output", "gul_S1_leccalc_full_uncertainty_aep.csv"))
# Keep type == 1 rows only — presumably the analytical statistic (vs. the
# sample statistic); confirm against the ktools leccalc output documentation.
gul_aep = gul_aep[gul_aep.type == 1]
gul_oep = pd.read_csv(os.path.join(analysis_directory, "output", "gul_S1_leccalc_full_uncertainty_oep.csv"))
gul_oep = gul_oep[gul_oep.type == 1]
# Pair OEP and AEP losses per summary_id / return period, sorted for plotting.
eps = pd.merge(gul_oep, gul_aep, on=["summary_id", "return_period"], suffixes=["_oep", "_aep"])
eps = eps.sort_values(by="return_period", ascending=True)
# Grouped bar chart of OEP vs AEP loss by return period, with
# thousands-separated tick labels on both axes.
fig, ax = plt.subplots()
eps.plot(ax=ax, kind='bar', x='return_period', y=["loss_oep", "loss_aep"])
ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])
ax.set_xticklabels(['{:,}'.format(int(x)) for x in eps.return_period])
plt.legend(('OEP', 'AEP'))
ax.set_xlabel("Return period (years)")
ax.set_ylabel("Loss")