This notebook compares sea surface height between the new and old configurations for November 2006.
New config: sea level pressure adjusted to sea level, plus corr15 tides. Bottom friction is 5e-3. The SSH anomaly is calculated using 2005 harmonics at Tofino/Port Hardy.
Old config: no pressure correction and old tides. Bottom friction is 1e-3, with M2/K1 adjusted for a good match in the SoG.
%matplotlib inline
from matplotlib import pylab
import matplotlib.pyplot as plt
import netCDF4 as NC
import numpy as np
import arrow
import datetime
import pandas as pd
import pytz
from salishsea_tools import tidetools
from salishsea_tools import stormtools
from salishsea_tools import nc_tools
from salishsea_tools import viz_tools
fB = NC.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc', 'r')
path = '/ocean/nsoontie/MEOPAR/SalishSea/results/storm-surges/final/nov2006/'
runs = {'all_forcing', 'tidesonly'}
fUs = {}; fVs = {}; fTs = {}
for key in runs:
    fUs[key] = NC.Dataset(path + key + '/SalishSea_4h_20061112_20061118_grid_U.nc', 'r')
    fVs[key] = NC.Dataset(path + key + '/SalishSea_4h_20061112_20061118_grid_V.nc', 'r')
    fTs[key] = NC.Dataset(path + key + '/SalishSea_4h_20061112_20061118_grid_T.nc', 'r')
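Before going further, a quick check of what was just loaded is worthwhile. This is a minimal sketch using the inspection helpers from salishsea_tools.nc_tools; the helper names (show_dimensions, show_variables) are assumed from their usage in other Salish Sea notebooks.
# Hedged sketch: inspect one loaded dataset to confirm its dimensions and variables
nc_tools.show_dimensions(fTs['all_forcing'])
nc_tools.show_variables(fTs['all_forcing'])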
# Define plot parameters: time index, depth level, colormap, etc.
# (uses the last `key` from the loop above; grids and output times are the same for both runs)
timestamp = 41  # model output time index
times = fVs[key].variables['time_counter']  # array of output times
time = times[timestamp]  # physical time in seconds
depthlevel = 0  # model level to plot
depths = fVs[key].variables['depthv']  # array of depths
depth = depths[depthlevel]  # physical depth in metres
cmap = plt.get_cmap('jet')
u_lat = fUs[key].variables['nav_lat']
u_lon = fUs[key].variables['nav_lon']
T_lat = fTs[key].variables['nav_lat']
T_lon = fTs[key].variables['nav_lon']
v_lat = fVs[key].variables['nav_lat']
v_lon = fVs[key].variables['nav_lon']
Us = {}; Vs = {}; Es = {}; Ss = {}; Ts = {}
for key in runs:
    [Us[key], Vs[key], Es[key], Ss[key],
     Ts[key]] = stormtools.get_variables(fUs[key], fVs[key], fTs[key], timestamp, depthlevel)
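A quick plot of one of the snapshot fields confirms the variables loaded sensibly. This is a minimal sketch, assuming Ss holds the 2-D surface salinity field on the T grid (masked over land) as returned by stormtools.get_variables:
# Hedged sketch: surface salinity snapshot for the all_forcing run
fig, ax = plt.subplots(1, 1, figsize=(8, 10))
mesh = ax.pcolormesh(T_lon[:], T_lat[:], Ss['all_forcing'], cmap=cmap)
fig.colorbar(mesh, ax=ax)
ax.set_title('Surface salinity, all_forcing, timestamp {}'.format(timestamp))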
# Tide gauge station numbers, and chart datums from t_xtide
stations = {'PointAtkinson': 7795, 'Victoria': 7120, 'PatriciaBay': 7277, 'CampbellRiver': 8074}
datums = {'PointAtkinson': 3.09, 'Victoria': 1.881, 'PatriciaBay': 2.256, 'CampbellRiver': 2.916,
          'CrescentBeach': 2.44, 'WhiteRock': 2.85, 'BoundaryBay': 1}
thalwegs = ['Thalweg1', 'Thalweg2', 'Thalweg3', 'Thalweg4', 'Thalweg5', 'Thalweg6']
# All output locations for which 1-hourly station files were saved
allstations = {'PointAtkinson': 7795, 'Victoria': 7120, 'PatriciaBay': 7277, 'CampbellRiver': 8074,
               'CrescentBeach': 2.44, 'WhiteRock': 2.85, 'BoundaryBay': 1,
               'Thalweg1': [], 'Thalweg2': [], 'Thalweg3': [], 'Thalweg4': [], 'Thalweg5': [], 'Thalweg6': [],
               'Plume': 1}
# Load the 1-hourly station files for each run and combine them
run_stations = {}
us = {}; vs = {}; lats = {}; lons = {}; tmps = {}; sals = {}; sshs = {}; ts = {}
for run in runs:
    for key in allstations:
        string = path + run + '/1h_' + key + '.nc'
        run_stations[key] = NC.Dataset(string, 'r')
    # Build a list of timezone-aware datetimes for this run's output times
    tim = run_stations[key].variables['time_counter']
    t_count = np.arange(0, tim.shape[0])
    t = nc_tools.timestamp(run_stations[key], t_count)
    tlist = []
    for a in t:
        tlist.append(a.datetime)
    ts[run] = tlist
    # combine_data returns dicts keyed by station name, e.g. sshs[run][station]
    [us[run], vs[run], lats[run], lons[run], tmps[run], sals[run],
     sshs[run]] = stormtools.combine_data(run_stations)
    run_stations = {}
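The [:, 0, 0] indexing used below assumes each combined station series is stored as a (time, 1, 1) array; a quick check:
# Expected shape is (n_times, 1, 1) for the station SSH series
print(sshs['all_forcing']['PointAtkinson'].shape)
print(len(ts['all_forcing']))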
start = '31-Dec-2005'; end = '02-Jan-2007'
wlev_meas = {}; ttide = {}; msl = {}
for key in stations:
    location = key
    # filename for the t_tide predictions
    filename = ('/data/nsoontie/MEOPAR/analysis/storm_surges/data/' + location
                + '_t_tide_compare8_' + start + '_' + end + '.csv')
    [ttide[key], msl[key]] = stormtools.load_tidal_predictions(filename)
    wlev_meas[key] = stormtools.load_observations(start, end, location)
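As a sanity check on the loaded observations (and the datum correction applied in the comparison plots below), here is a minimal sketch for one station; it uses only the time/slev columns that compare_ssh relies on:
# Hedged sanity check: datum-corrected observed water level at Point Atkinson
loc = 'PointAtkinson'
fig, ax = plt.subplots(1, 1, figsize=(20, 5))
ax.plot(wlev_meas[loc].time, wlev_meas[loc].slev - datums[loc], 'k', lw=1)
ax.set_title('Observed water level (datum-corrected): ' + loc)
ax.set_ylabel('Elevation [m]')
ax.grid(True)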
def truncate_data(data, time, sdt, edt):
    # Return subarrays of data and time between the given start/end times sdt and edt
    time = np.array(time)
    inds = np.where(time == sdt)[0][0]
    inde = np.where(time == edt)[0][0]
    sub_data = np.array(data[inds:inde + 1])
    sub_time = np.array(time[inds:inde + 1])
    return sub_data, sub_time
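For example, to pull a shorter window out of one run's Point Atkinson series (a hedged sketch; the window endpoints sdt_ex/edt_ex are illustrative and must match model output times exactly, since truncate_data matches by equality):
# Example usage of truncate_data (hypothetical window endpoints)
sdt_ex = datetime.datetime(2006, 11, 13, tzinfo=pytz.utc)
edt_ex = datetime.datetime(2006, 11, 16, tzinfo=pytz.utc)
sub_ssh, sub_t = truncate_data(
    sshs['all_forcing']['PointAtkinson'][:, 0, 0], ts['all_forcing'], sdt_ex, edt_ex)
print(sub_ssh.shape, sub_t[0], sub_t[-1])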
def compare_ssh(ssh, t_model, ttide, wlev_meas, msl, sdt, edt, location):
    # Plot observed water level against tide-corrected model SSH for each run
    fig = plt.figure(figsize=(20, 5))
    ax = pylab.subplot(1, 1, 1)
    import matplotlib.dates as mdates
    ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
    fig.autofmt_xdate()
    pylab.plot(wlev_meas[location].time, wlev_meas[location].slev - datums[location],
               'k', label='Observations', lw=2)
    for key in runs:
        # Apply the tidal correction to the model SSH
        ssh_corr = stormtools.correct_model(ssh[key][location][:, 0, 0], ttide[location], sdt, edt)
        ax.plot(t_model[key], ssh_corr, label=key, lw=2)
    ax.set_xlim([sdt, edt])
    ax.set_ylim([-3, 3])
    pylab.legend(loc=0)
    pylab.grid(True, lw=1)
    pylab.title(location)
s = '12-Nov-2006'; e = '19-Nov-2006'
unaware = datetime.datetime.strptime(s, "%d-%b-%Y")
sdt = unaware.replace(tzinfo=pytz.timezone('utc'))
unaware = datetime.datetime.strptime(e, "%d-%b-%Y")
edt = unaware.replace(tzinfo=pytz.timezone('utc'))
for key in stations:
    location = key
    compare_ssh(sshs, ts, ttide, wlev_meas, msl, sdt, edt, location)
Output some of the statistics
def compare_surge(ssh, t_model, ttide, wlev_meas, msl, sdt, edt, location):
    # Plot the observed anomaly against the modelled surge (all_forcing minus tidesonly)
    fig = plt.figure(figsize=(20, 5))
    ax = pylab.subplot(1, 1, 1)
    import matplotlib.dates as mdates
    ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
    fig.autofmt_xdate()
    anom_obs = stormtools.observed_anomaly(ttide[location], wlev_meas[location], msl[location])
    pylab.plot(wlev_meas[location].time, anom_obs, label='Observations', lw=2)
    for key in {'all_forcing'}:
        anom = ssh[key][location][:, 0, 0] - ssh['tidesonly'][location][:, 0, 0]
        ax.plot(t_model[key], anom, label=key, lw=2)
    ax.set_xlim([sdt, edt])
    ax.set_ylim([-1, 1])
    pylab.legend(loc=0)
    pylab.grid(True, lw=1)
    pylab.title(location)
# sdt and edt are reused from the SSH comparison above
for key in stations:
    location = key
    compare_surge(sshs, ts, ttide, wlev_meas, msl, sdt, edt, location)
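Finally, the statistics promised above. This is a minimal sketch using only arrays already built: the observations are interpolated onto the model output times with np.interp (the two series are sampled differently), and the RMSE and maximum error of the tide-corrected model SSH are printed per station and run.
import matplotlib.dates as mdates
# Hedged sketch of summary statistics; assumes sdt/edt fall exactly on model
# output times (truncate_data matches by equality) and that slev has no gaps.
for location in stations:
    obs = np.array(wlev_meas[location].slev) - datums[location]
    t_obs = mdates.date2num(np.array(wlev_meas[location].time))
    for run in runs:
        ssh_corr = stormtools.correct_model(
            sshs[run][location][:, 0, 0], ttide[location], sdt, edt)
        sub_ssh, sub_t = truncate_data(ssh_corr, ts[run], sdt, edt)
        obs_interp = np.interp(mdates.date2num(sub_t), t_obs, obs)
        error = sub_ssh - obs_interp
        print('{} {}: RMSE = {:.3f} m, max |error| = {:.3f} m'.format(
            location, run, np.sqrt(np.mean(error ** 2)), np.max(np.abs(error))))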