This notebook was developed to provide a python example of sharpening 70 m ECOSTRESS TIR imagery using 30 m Landsat 8 imagery. The sharpening method demonstrated here uses a least squares method to fit a polynomial relating Landsat 8 albedo, Landsat 8 NDVI, and ECOSTRESS TIR (similar to Lillo-Saavedra, et al., 2017).
The following python code was adapted from MATLAB example code and notes from Glynn Hulley (NASA JPL) as part of the Urban Canopy + Earthhacks urban heat island hackathon in October 2020.
Seattle, WA had record-breaking high temperatures on Aug. 16, 2020 (1, 2, 3). ECOSTRESS imagery from that day can show us hot spots that contribute to the urban heat island effect. However, the 70 m spatial resolution of ECOSTRESS limits a researcher or city planner's ability to pinpoint specific lots or buildings that could be chosen for urban heat mitigation efforts.
The higher resolution and wider spectral range of Landsat 8 bands allow us to map properties such as albedo and NDVI at 30 m spatial resolution. Prior research has demonstrated the relationships between land surface albedo and NDVI in urban areas as predictors of surface temperature, and therefore sharpening methods such as the one demonstrated here can leverage the higher spatial resolution provided by Landsat 8, along with the more frequent thermal infrared (TIR) observations by ECOSTRESS to get 30 m land surface temperature maps every 3-4 days.
From AppEEARS, I've downloaded a collection of geotiffs for ECOSTRESS LST and Landsat 8 surface reflectance (bands 1-7) for the more urban counties on the east side of Puget Sound in Washington (Thurston, Pierce, King, and Snohomish counties; covering the Olympia-Tacoma-Seattle-Everett metro area).
In this example, I use an ECOSTRESS image from Aug. 16, 2020 (day of year 229), which was a record-breaking temperature day, and Landsat 8 imagery from Aug. 23, 2020.
Import packages
# for math and data manipulation
import numpy as np
import pandas as pd
# xarray and rioxarray for working with geospatial raster imagery
import xarray as xr
import rioxarray as rioxr
# least squares function for fitting our model
from scipy.linalg import lstsq
# matplotlib for plotting
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
# Landsat 8 surface-reflectance file paths, bands 2-7 (AppEEARS naming scheme)
_l8_sr_template = '../sharpening/Seattle/CU_LC08.001_SRB{0}_doy2020236_aid0001.tif'
l8_b2_filepath = _l8_sr_template.format(2)
l8_b3_filepath = _l8_sr_template.format(3)
l8_b4_filepath = _l8_sr_template.format(4)
l8_b5_filepath = _l8_sr_template.format(5)
l8_b6_filepath = _l8_sr_template.format(6)
l8_b7_filepath = _l8_sr_template.format(7)
# Open each band; xr.open_rasterio keeps nodata/scale metadata in .attrs,
# which cleanup_landsat_band relies on later
l8_b2 = xr.open_rasterio(l8_b2_filepath)  # band 2: blue, 0.45-0.515 micron
l8_b3 = xr.open_rasterio(l8_b3_filepath)  # band 3: green, 0.533-0.590 micron
l8_b4 = xr.open_rasterio(l8_b4_filepath)  # band 4: red, 0.63-0.68 micron
l8_b5 = xr.open_rasterio(l8_b5_filepath)  # band 5: NIR, 0.845-0.885 micron
l8_b6 = xr.open_rasterio(l8_b6_filepath)  # band 6: SWIR, 1.560-1.660 micron
l8_b7 = xr.open_rasterio(l8_b7_filepath)  # band 7: SWIR2, 2.107-2.294 micron
Define a function to "clean up" and format each Landsat 8 band image
def cleanup_landsat_band(l8_band):
    """Mask nodata pixels, apply the scale factor, and drop the band dim.

    Returns a 2-D DataArray of scaled reflectance with NaN where the
    raster's nodata value occurred.
    """
    nodata = float(l8_band.attrs['nodatavals'][0])
    scale = float(l8_band.attrs['scale_factor'])
    # replace the nodata value with NaN
    l8_band = l8_band.where(l8_band.values != nodata, np.nan)
    # scale reflectance values (assign in place so .attrs are preserved)
    l8_band.values = l8_band.values * scale
    # remove the size-1 "band" dimension and its coordinate
    return l8_band.drop('band').squeeze()
Get attributes (needed primarily for CRS information) and merge all Landsat 8 bands we need into a single xarray dataset
# Get attributes from one of the input landsat band images
attrs_dict = l8_b2.attrs.copy()
_ = attrs_dict.pop('scale_factor')  # remove scale factor metadata
attrs_dict['nodatavals'] = np.nan  # set nodataval to NaN
# Clean up every band and assemble them into one Dataset keyed 'b2'..'b7'
_raw_bands = {'b2': l8_b2, 'b3': l8_b3, 'b4': l8_b4,
              'b5': l8_b5, 'b6': l8_b6, 'b7': l8_b7}
l8 = xr.Dataset({name: cleanup_landsat_band(band) for name, band in _raw_bands.items()},
                attrs=attrs_dict)
Compute NDVI, Albedo, and MNDWI (water index for removing large bodies of water)
# Normalized Difference Vegetation Index from NIR (b5) and red (b4)
nir, red = l8.b5, l8.b4
l8['ndvi'] = (nir - red) / (nir + red)
# Broadband shortwave albedo for Landsat 8: weighted combination of
# bands 2, 4, 5, 6, 7 with an offset and normalization
l8['albedo'] = ((0.356*l8.b2) + (0.130*l8.b4) + (0.373*l8.b5) + (0.085*l8.b6) + (0.072*l8.b7) -0.0018) / 1.016
# Modified Normalized Difference Water Index from green (b3) and SWIR (b6)
green, swir = l8.b3, l8.b6
l8['mndwi'] = (green - swir) / (green + swir)
Where the NDVI and Albedo values exceed their normal ranges, set them to "no data" (NaN), then add CRS to the attributes of each new data array
# set invalid NDVI to nan
# BUGFIX: the original conditions used `|` (or), e.g.
# (ndvi >= -1) | (ndvi <= 1), which is true for every non-NaN value, so
# out-of-range pixels were never masked. `&` (and) keeps only values
# inside the valid range, matching the `&` masks used later (see the
# 30 m sharpening step).
l8['ndvi'] = l8.ndvi.where((l8.ndvi >= -1) & (l8.ndvi <= 1), np.nan)
# set invalid albedo to nan
l8['albedo'] = l8.albedo.where((l8.albedo >= 0) & (l8.albedo <= 1), np.nan)
# add CRS attributes so each derived layer carries projection metadata
l8['ndvi'] = l8.ndvi.assign_attrs({'crs': l8.attrs['crs']})
l8['mndwi'] = l8.mndwi.assign_attrs({'crs': l8.attrs['crs']})
l8['albedo'] = l8.albedo.assign_attrs({'crs': l8.attrs['crs']})
# ECOSTRESS LST geotiff for Aug. 16, 2020 (day of year 229)
eco_lst_filepath = '../data/ECO2LSTE.001_SDS_LST_doy2020229172658_aid0001.tif'
eco_lst = xr.open_rasterio(eco_lst_filepath)
Define a function to "clean up" and format the ECOSTRESS LST image
def cleanup_ecostress_lst(eco_lst):
    """Mask nodata pixels, apply the scale factor, and drop the band dim.

    Same recipe as cleanup_landsat_band, but the scaled quantity here is
    land surface temperature (not reflectance, as the original comment said).
    """
    nodata = float(eco_lst.attrs['nodatavals'][0])
    scale = float(eco_lst.attrs['scale_factor'])
    # replace the nodata value with NaN
    eco_lst = eco_lst.where(eco_lst.values != nodata, np.nan)
    # scale the LST values (assign in place so .attrs are preserved)
    eco_lst.values = eco_lst.values * scale
    # remove the size-1 "band" dimension and its coordinate
    return eco_lst.drop('band').squeeze()
# mask nodata, apply the scale factor, and drop the extra "band" dimension
eco_lst = cleanup_ecostress_lst(eco_lst)
Coarsen the Landsat 8 images to the same spatial resolution as ECOSTRESS LST
# Coarsen Landsat 8 to ECOSTRESS resolution
# (reproject_match resamples every variable in the 30 m Landsat dataset
# onto the 70 m ECOSTRESS grid and CRS)
l8_repr_match = l8.rio.reproject_match(eco_lst)
Remove bodies of water from our datasets (using MNDWI)
# remove water areas: keep only pixels where MNDWI < 0.5 (land);
# water and NaN-MNDWI pixels become NaN in LST, NDVI, and albedo
land = l8_repr_match.mndwi < 0.5
eco_lst = eco_lst.where(land, np.nan)
l8_repr_match['ndvi'] = l8_repr_match.ndvi.where(land, np.nan)
l8_repr_match['albedo'] = l8_repr_match.albedo.where(land, np.nan)
Plot the coarse resolution datasets. These will be the training datasets for the sharpening model.
# Side-by-side preview of the three coarse (70 m) training rasters for a
# window around Seattle: NDVI, albedo, and observed LST
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15,5), tight_layout=True)
panels = [(l8_repr_match.ndvi, 'Greens', dict(vmin=0, vmax=1), 'Seattle, WA\nLandsat 8 - NDVI'),
          (l8_repr_match.albedo, 'Greys_r', dict(vmin=0, vmax=1), 'Seattle, WA\nLandsat 8 - Albedo'),
          (eco_lst, 'magma', {}, 'Seattle, WA\nECOSTRESS - LST')]
for ax, (da, cmap, opts, title) in zip(axes, panels):
    da[900:1300,1200:1700].plot(cmap=cmap, ax=ax, **opts)
    ax.set_title(title)
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
Set up and fit the model with the training data
# Flatten the rasters into 1-D training vectors
training_albedo = l8_repr_match.albedo.values.ravel()
training_ndvi = l8_repr_match.ndvi.values.ravel()
training_lst = eco_lst.values.ravel()
# Drop NaN pixels FIRST so the range comparisons below do not emit
# "invalid value encountered in greater_equal/less_equal" RuntimeWarnings
# (the original compared before removing NaNs; NaN fails every comparison
# anyway, so the final selection is unchanged)
notnan = ~np.isnan(training_albedo) & ~np.isnan(training_ndvi) & ~np.isnan(training_lst)
training_albedo = training_albedo[notnan]
training_ndvi = training_ndvi[notnan]
training_lst = training_lst[notnan]
# Keep only physically valid samples: albedo in [0, 1], NDVI in [-1, 1]
inrange = (training_albedo>=0) & (training_albedo<=1) & (training_ndvi>=-1) & (training_ndvi<=1)
training_albedo = training_albedo[inrange]
training_ndvi = training_ndvi[inrange]
training_lst = training_lst[inrange]
C:\Users\steve\Anaconda3\envs\rasterenv\lib\site-packages\ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in greater_equal """ C:\Users\steve\Anaconda3\envs\rasterenv\lib\site-packages\ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in less_equal """
Plot the training data
# select just a subset of the data for plotting, this lets us draw a plot more quickly
indices = np.random.randint(0,len(training_lst), size=1000)
# set up the plot
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
# add the 3d scatterplot
# FIX: vmin/vmax are numeric literals (the original passed the quoted
# strings '250'/'320', which matplotlib's color normalization rejects)
ax.scatter(training_albedo[indices],
           training_ndvi[indices],
           training_lst[indices],
           alpha=0.5, s=5, marker='o', c=training_lst[indices].ravel(),
           cmap='magma', vmin=250, vmax=320)
# set axes limits
ax.set_xlim((0,1))
ax.set_ylim((-1,1))
ax.set_zlim((250,320))
# add labels and title
ax.set_xlabel('L8 Albedo', fontsize=15)
ax.set_ylabel('L8 NDVI', fontsize=15)
ax.set_zlabel('ECO LST [K]', fontsize=15)
ax.set_title('70 m Training Data', fontsize=20)
Text(0.5, 0.92, '70 m Training Data')
Create the input array for our least squares fitting function (note that here is where we can specify the functional form of our model)
# Build the least-squares design matrix: a full 4th-order polynomial in
# albedo and NDVI (intercept, linear, quadratic, cubic, and quartic terms
# including cross terms)
_model_terms = [np.ones_like(training_albedo),
                training_albedo, training_ndvi,
                training_albedo**2, training_ndvi**2,
                training_ndvi*training_albedo,
                training_albedo**3, training_ndvi**3,
                training_ndvi*(training_albedo**2), (training_ndvi**2)*training_albedo,
                training_albedo**4, training_ndvi**4,
                training_ndvi*(training_albedo**3), (training_ndvi**3)*training_albedo]
# column_stack puts samples in rows and model terms in columns, as lstsq expects
multi_input = np.column_stack(_model_terms)
# Confirm both lstsq inputs share the same leading (sample) dimension
print(multi_input.shape)
print(training_lst.shape)
(4511688, 14) (4511688,)
Use the least squares fitting function to get our regression coefficients
# Least-squares fit: c holds the 14 polynomial coefficients, one per
# column of multi_input (residues, rank, singular values are discarded)
c, _, _, _ = lstsq(multi_input, training_lst)
print(c)
[ 291.47759807 33.34005518 5.21075578 -2.46596395 19.35072795 315.07656054 -368.25461156 -13.62631134 470.04293072 -945.52656389 559.26039633 -15.36338604 -1505.19763481 638.92021115]
Demonstrate the model by testing it with randomly sampled albedo and NDVI values spanning their valid ranges
# random albedo in [0, 1) and NDVI in [-1, 1)
albedo = np.random.rand(100,100)
ndvi = np.random.rand(100,100)*2-1
# apply the fitted polynomial: accumulate term by term in the same order
# as the coefficient vector c (intercept, then increasing polynomial order)
_terms = [albedo, ndvi,
          albedo**2, ndvi**2, ndvi*albedo,
          albedo**3, ndvi**3, ndvi*(albedo**2), (ndvi**2)*albedo,
          albedo**4, ndvi**4, ndvi*(albedo**3), (ndvi**3)*albedo]
fit_lst = c[0]
for coef, term in zip(c[1:], _terms):
    fit_lst = fit_lst + coef*term
Plot this model demo
# set up the plot
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
# add the 3d scatterplot of modeled LST over the random albedo/NDVI grid
# FIX: vmin/vmax are numeric literals (the original passed the quoted
# strings '250'/'320', which matplotlib's color normalization rejects)
ax.scatter(albedo.ravel(),
           ndvi.ravel(),
           fit_lst.ravel(),
           alpha=0.5, s=5, marker='o', c=fit_lst.ravel(), cmap='magma', vmin=250, vmax=320)
# set axes limits
ax.set_xlim((0,1))
ax.set_ylim((-1,1))
ax.set_zlim((250,320))
# add labels and title
ax.set_xlabel('L8 Albedo', fontsize=15)
ax.set_ylabel('L8 NDVI', fontsize=15)
ax.set_zlabel('ECO LST [K]', fontsize=15)
ax.set_title('Model Demo', fontsize=20)
Text(0.5, 0.92, 'Model Demo')
This is the same coarse resolution, 70 m, data that we had used to train the model. This will give us an idea of what the model residuals (errors) look like.
# Evaluate the fitted model on the same coarse (70 m) albedo/NDVI rasters
# it was trained on, to inspect residuals
albedo = l8_repr_match.albedo
ndvi = l8_repr_match.ndvi
# accumulate the polynomial term by term, in the same order as c
_terms = [albedo, ndvi,
          albedo**2, ndvi**2, ndvi*albedo,
          albedo**3, ndvi**3, ndvi*(albedo**2), (ndvi**2)*albedo,
          albedo**4, ndvi**4, ndvi*(albedo**3), (ndvi**3)*albedo]
fit_lst = c[0]
for coef, term in zip(c[1:], _terms):
    fit_lst = fit_lst + coef*term
# select just a subset of the data for plotting, this lets us draw a plot more quickly
# NOTE(review): fit_lst is 2-D here, so `indices` selects whole ROWS and
# each selected row contributes all of its pixels after .ravel() -- confirm
# row subsampling (rather than flat pixel indices) is intended.
indices = np.random.randint(0,len(fit_lst), size=100)
# set up the plot
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
# add the 3d scatterplot (these are xarray data arrays, use .values.ravel() to get a 1d array of the values)
# FIX: vmin/vmax are numeric literals (the original passed the quoted
# strings '250'/'320', which matplotlib's color normalization rejects)
ax.scatter(albedo[indices].values.ravel(),
           ndvi[indices].values.ravel(),
           fit_lst[indices].values.ravel(),
           alpha=0.5, s=5, marker='o', c=fit_lst[indices].values.ravel(), cmap='magma', vmin=250, vmax=320)
# set axes limits
ax.set_xlim((0,1))
ax.set_ylim((-1,1))
ax.set_zlim((250,320))
# add labels and title
ax.set_xlabel('L8 Albedo', fontsize=15)
ax.set_ylabel('L8 NDVI', fontsize=15)
ax.set_zlabel('ECO LST [K]', fontsize=15)
ax.set_title('70 m Training Data', fontsize=20)
Text(0.5, 0.92, '70 m Training Data')
Compute the residuals (difference between observed 70 m LST and modeled 70 m LST)
# difference between observed and coarse-resolution fit -- this residual
# field is what the sharpening step redistributes onto the 30 m grid later
lst_diff = eco_lst - fit_lst
Plot a section of the observed LST, modeled LST, and their difference.
# Three-panel comparison for a window around Seattle: observed LST,
# modeled LST, and their residual
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15,5))
panels = [(eco_lst, dict(cmap='magma', vmin=290, vmax=315), 'Observed 70 m LST'),
          (fit_lst, dict(cmap='magma', vmin=290, vmax=315), 'Modeled 70 m LST'),
          (lst_diff, dict(cmap='RdBu', vmin=-10, vmax=10), 'Observed - Modeled 70 m LST')]
for ax, (da, opts, title) in zip(axes, panels):
    ax.imshow(da[900:1300,1200:1700], **opts)
    ax.set_title(title)
NOTE: this is a crucial step and needs to be performed to get accurate results
Compute running means across the difference map (observed - modeled residuals), so that element "i" is replaced by the mean of a (2N_row+1)-by- (2N_col+1) rectangle centered on element "i".
# convert the lst_diff array into an xarray data array object (so that we can use rolling functions across it)
# NOTE(review): lst_diff is already a DataArray (eco_lst - fit_lst), so this
# wrap appears redundant; the commented-out .fillna(0) hints at an earlier
# experiment -- confirm before removing.
lst_diff_da = xr.DataArray(lst_diff)#.fillna(0)
# compute running mean across x and y dimensions of the difference map
# NOTE(review): rolling(x=2)/rolling(y=2) is a trailing 2x2-window mean, not
# the centered (2N_row+1)-by-(2N_col+1) rectangle described in the text
# above -- confirm whether a centered/larger window was intended.
lst_diff_da_smoothed = lst_diff_da.rolling(x=2).mean().rolling(y=2).mean()
# Apply attributes from the original observed ECOSTRESS LST to get spatial dimension and CRS metadata
lst_diff_da_smoothed = lst_diff_da_smoothed.assign_attrs(eco_lst.attrs)
Plot a section of the observed LST, modeled LST, the difference map, and the smoothed difference map.
# Four-panel comparison: observed LST, modeled LST, raw residual, and the
# smoothed residual used in the sharpening step
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(20,5))
panels = [(eco_lst, dict(cmap='magma', vmin=290, vmax=315), 'Observed 70 m LST'),
          (fit_lst, dict(cmap='magma', vmin=290, vmax=315), 'Modeled 70 m LST'),
          (lst_diff, dict(cmap='RdBu', vmin=-10, vmax=10), 'Observed - Modeled 70 m LST'),
          (lst_diff_da_smoothed, dict(cmap='RdBu', vmin=-10, vmax=10), 'Smoothed: Observed - Modeled 70 m LST')]
for ax, (da, opts, title) in zip(axes, panels):
    ax.imshow(da[900:1300,1200:1700], **opts)
    ax.set_title(title)
Here we finally apply the model we developed to sharpen the ECOSTRESS LST image to 30 m spatial resolution.
# Evaluate the model on the FULL-resolution (30 m) Landsat rasters,
# masking out-of-range values first (note: `&` here, correct range logic)
albedo = l8.albedo.where((l8.albedo>=0) & (l8.albedo<=1)).values
ndvi = l8.ndvi.where((l8.ndvi>=-1) & (l8.ndvi<=1)).values
# accumulate the polynomial term by term, in the same order as c
_terms = [albedo, ndvi,
          albedo**2, ndvi**2, ndvi*albedo,
          albedo**3, ndvi**3, ndvi*(albedo**2), (ndvi**2)*albedo,
          albedo**4, ndvi**4, ndvi*(albedo**3), (ndvi**3)*albedo]
fit_lst = c[0]
for coef, term in zip(c[1:], _terms):
    fit_lst = fit_lst + coef*term
Resample our smoothed difference map (70 m) to 30 m spatial resolution.
# Resample the smoothed 70 m residual map onto the 30 m Landsat grid
lst_diff_high_res = lst_diff_da_smoothed.rio.reproject_match(l8.albedo)
Apply the smoothed difference map to the model fit to get our final sharpened ECOSTRESS LST image
# Final 30m sharpened LST:
# model prediction at 30 m plus the upsampled observed-minus-modeled residual
LSTsharp = fit_lst + lst_diff_high_res
Plot to preview the sharpened image for a small region
# Quick-look plot of the sharpened 30 m LST for a small region
LSTsharp[2200:3200,2900:4000].plot(cmap='magma', vmin=290, vmax=315, figsize=(10,10))
<matplotlib.collections.QuadMesh at 0x1f4793a5ba8>
Export the resulting image as a NetCDF file
# Write the sharpened LST (with its spatial coordinates) to NetCDF for reuse
LSTsharp.to_netcdf('LST_sharp.nc')
Import some extra packages we'll need here.
import geopandas as gpd
from shapely.geometry import box, mapping
Open our Seattle neighborhoods shapefiles (downloaded from: Seattle GeoData):
# Read the neighborhoods shapefile directly from the zip archive
seattle = gpd.read_file('zip://../shapefiles/City_Clerk_Neighborhoods-shp.zip')
# Aggregate by the larger neighborhood boundaries:
seattle = seattle.dissolve(by='L_HOOD', aggfunc='sum')
# preview the first few rows
#seattle.head(3)
Clip the observed ECOSTRESS LST and the Sharpened ECOSTRESS LST to the Seattle shapefile boundaries.
# The observed and sharpened rasters get the identical clip/convert
# treatment, so factor it into one helper instead of duplicating the steps.
def _clip_lst_to_celsius(lst_da, boundaries):
    """Clip an LST raster (Kelvin) to shapefile boundaries; return deg C.

    Zero-valued pixels (clip fill, if present) are set to NaN before the
    unit conversion.
    """
    # mask using the neighborhoods shapefile
    clipped = lst_da.rio.clip(boundaries.geometry.apply(mapping), crs=boundaries.crs)
    # switch our zero values to nan values (if present)
    clipped = clipped.where(clipped > 0)
    # convert from K to C
    return clipped - 273.15

# Observed LST
obs_seattle_lst = _clip_lst_to_celsius(eco_lst, seattle)
# Sharpened LST
sharp_seattle_lst = _clip_lst_to_celsius(LSTsharp, seattle)
Plot observed LST, sharpened LST, and their difference
# Three-panel figure: observed 70 m LST, sharpened 30 m LST, and their
# difference, each overlaid with the neighborhood boundaries.
# NOTE(review): the summary_stats strings built below are overwritten each
# panel and never displayed in this figure (titles are static) -- possibly
# they were meant to be printed; kept for parity with the next figure.
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,5), gridspec_kw={'width_ratios': [1, 1, 1]}, tight_layout=True)
# axes named ax1/ax3/ax5 to mirror a larger companion layout
[ax1, ax3, ax5] = ax.ravel()
# Observed LST
values = obs_seattle_lst.values.flatten() # Compute zonal statistics
values = values[~np.isnan(values)] # Remove NaN pixel values
# Build the zonal-statistics summary string
summary_stats = 'Temperature Histogram\nMean: {}, Median: {}\nMax: {}, Min: {}, Standard Deviation: {}' \
.format(np.round(values.mean(),1),
np.round(np.median(values),1),
np.round(values.max(),1),
np.round(values.min(),1),
np.round(values.std(),1))
# image
obs_seattle_lst.plot(x='x', y='y', ax=ax1, vmin=0, vmax=35, cmap='magma')
seattle.plot(ax=ax1, edgecolor='black', linestyle='--', facecolor='none');
ax1.set_ylabel('Latitude')
ax1.set_xlabel('Longitude')
ax1.set_title('Observed ECOSTRESS LST (70m)\nSeattle, WA, Aug. 16, 2020')
ax1.set_aspect(1)
# Sharpened LST
values = sharp_seattle_lst.values.flatten() # Compute zonal statistics
values = values[~np.isnan(values)] # Remove NaN pixel values
# Build the zonal-statistics summary string
summary_stats = 'Temperature Histogram\nMean: {}, Median: {}\nMax: {}, Min: {}, Standard Deviation: {}' \
.format(np.round(values.mean(),1),
np.round(np.median(values),1),
np.round(values.max(),1),
np.round(values.min(),1),
np.round(values.std(),1))
# image
sharp_seattle_lst.plot(x='x', y='y', ax=ax3, vmin=0, vmax=35, cmap='magma')
seattle.plot(ax=ax3, edgecolor='black', linestyle='--', facecolor='none');
ax3.set_ylabel('Latitude')
ax3.set_xlabel('Longitude')
# FIX: title typo 'LLST' -> 'LST'
ax3.set_title('Sharpened ECOSTRESS LST (30m)\nSeattle, WA, Aug. 16, 2020')
ax3.set_aspect(1)
# Difference (reproject observed onto the 30 m sharpened grid first)
diff = obs_seattle_lst.rio.reproject_match(sharp_seattle_lst) - sharp_seattle_lst
values = diff.values.flatten() # Compute zonal statistics
values = values[~np.isnan(values)] # Remove NaN pixel values
# Build the zonal-statistics summary string
summary_stats = 'Temperature Histogram\nMean: {}, Median: {}\nMax: {}, Min: {}, Standard Deviation: {}' \
.format(np.round(values.mean(),1),
np.round(np.median(values),1),
np.round(values.max(),1),
np.round(values.min(),1),
np.round(values.std(),1))
# image
diff.plot(x='x', y='y', ax=ax5, vmin=-2, vmax=2, cmap='RdBu')
seattle.plot(ax=ax5, edgecolor='black', linestyle='--', facecolor='none');
ax5.set_ylabel('Latitude')
ax5.set_xlabel('Longitude')
ax5.set_title('Observed - Sharpened Difference\nSeattle, WA, Aug. 16, 2020')
ax5.set_aspect(1)
plt.savefig('demo.png', dpi=150)
Plot the difference and a histogram of the difference between observed and sharpened LST
# Two-panel figure: difference map plus a histogram of the differences
fig, (ax5, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20,5), gridspec_kw={'width_ratios': [3, 1]})
# Difference (reproject the 70 m observed LST onto the 30 m sharpened grid)
diff = obs_seattle_lst.rio.reproject_match(sharp_seattle_lst) - sharp_seattle_lst
values = diff.values.flatten() # Compute zonal statistics
values = values[~np.isnan(values)] # Remove NaN pixel values
# Build the summary string shown as the histogram title below
summary_stats = 'Observed - Sharpened Difference Histogram\nMean: {}, Median: {}\nMax: {}, Min: {}, Standard Deviation: {}' \
.format(np.round(values.mean(),1),
np.round(np.median(values),1),
np.round(values.max(),1),
np.round(values.min(),1),
np.round(values.std(),1))
# image
diff.plot(x='x', y='y', ax=ax5, vmin=-2, vmax=2, cmap='RdBu')
seattle.plot(ax=ax5, edgecolor='black', linestyle='--', facecolor='none');
ax5.set_ylabel('Latitude')
ax5.set_xlabel('Longitude')
ax5.set_title('Observed - Sharpened Difference\nSeattle, WA, Aug. 16, 2020')
ax5.set_aspect(1)
# histogram of per-pixel differences, with the mean marked in red
ax2.hist(values, bins=300, facecolor='k');
ax2.axvline(x=values.mean(), c='r', linestyle='--')
ax2.set_title(summary_stats)
ax2.set_ylabel('Number of Pixels')
ax2.set_xlabel('Temperature Difference [C]')
ax2.set_xlim((-10,10))
plt.tight_layout()