import folium
import geopandas as gpd
import rasterio
from rasterio import plot
import numpy as np
from rasterio.mask import mask
import os
from datetime import date, datetime, timedelta
import pandas as pd
## NIFC has replaced GeoMAC as the official source of fire perimeters in the US.
## There are 2 files, active and archived. The active file below has the most recent perimeters for the current year.
fires = gpd.read_file(r'https://opendata.arcgis.com/datasets/5da472c6d27b4b67970acc7b5044c862_0.geojson')
# Alternative when NIFC is down: https://services9.arcgis.com/RHVPKKiFTONKtxq3/ArcGIS/rest/services/USA_Wildfires_v1/FeatureServer/1 (manual download)
# Import county data and filter by state
counties = gpd.read_file(r'D:\data\us_counties.geojson')
# Fire and state where it is located
firename = 'Glass'
state = 'CA'
fireyear = '2020'
# Update the basepath based on the most recent data update maintained in the list below
current = '2019'
prev = '2018'
# Where the score tifs are located
baset = r'D:/Geotiff/'
# Use string, not os.path.join, for gdal command later in the script
if os.path.exists(baset + current):
    basetif = baset + current
else:
    basetif = baset + prev
# Where to store the outputs
base = r'D:\data'
# Score tif
score1file = os.path.join(basetif, state, state + '_score1.tif')
def invalidGeom(infile):
    # Fix invalid geometries with a zero-width buffer
    f = infile['geometry'].buffer(0)
    infile['geometry'] = f
    return infile
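## Hedged alternative for newer stacks (GeoPandas >= 0.12 / Shapely 2.0, an assumption about
## your environment, not part of the original workflow): GeoSeries.make_valid() repairs
## invalid geometries without the zero-width buffer trick, e.g.
# fires['geometry'] = fires['geometry'].make_valid()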
# Format the fire names in the NIFC file, clip to get the county/state
county = counties.loc[counties.state == state]
fires.IncidentName = fires.IncidentName.str.title().str.strip()
fires = invalidGeom(fires)
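## gpd.clip expects both layers in the same CRS; a hedged safeguard (not in the original
## workflow) in case the county file is projected differently than the NIFC perimeters:
if county.crs != fires.crs:
    county = county.to_crs(fires.crs)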
firestate = gpd.clip(fires,county)
firestate['state'],firestate['county'] = state,county.fl
# Select the fire by name
fire = firestate.loc[firestate.IncidentName == firename]
# Check that the correct fire is selected
if len(fire) < 1:
    print('Search for a manually created perimeter file')
    try:
        # Manually created perimeters need 'IncidentName' and 'DateCurrent' fields
        fire = gpd.read_file(os.path.join(r'Z:\FirePerimeters', fireyear, state, firename, firename.replace(' ','_')+'.gpkg'))
        print('Selected a manually created file')
    except:
        print('Try complex name')
if len(fire) < 1:
    try:
        # Try selecting by complex name if the fire is part of a complex
        fire = fires.loc[(~fires.ComplexName.isnull()) & (fires.ComplexName.str.contains(firename))]
        fire.IncidentName = fire.ComplexName
        fire['GISAcres'] = fire['GISAcres'].sum()
        fire = fire.dissolve(by='ComplexName')
        print('Selected by Complex Name')
    except:
        print('Try partial string')
if len(fire) < 1:
    try:
        fire = firestate.loc[firestate.IncidentName.str.contains(firename.title().strip())]
    except:
        print('No matches')
elif len(fire) > 1:
    try:
        fire = firestate.loc[firestate.IncidentName.str.contains(firename.title().strip())]
        print('Multiple fires selected')
    except:
        print('No matches')
# Sometimes the NIFC data has invalid geometries; use a 0-width buffer to fix them
if len(fire) == 1:
    fire = invalidGeom(fire)
print(str(len(fire)) + ' fire(s) selected')
# If multiple fires, select the correct fire
#fire = fire.iloc[[1]] #Double bracket to maintain datatype when using iloc with geopandas
fire.iloc[0].DateCurrent
%%capture
def boundsfire(df):
    # Add the bounds of the fire (minx, miny, maxx, maxy) as columns for the folium map
    minmax = ['minx','miny','maxx','maxy']
    for f in minmax:
        df[f] = df.bounds[f]
    return df
# Create the date field for the marketing text
def dateNIFC(datefield):
    # Convert the NIFC DateCurrent value to an MM-DD-YYYY string
    datefield = pd.to_datetime(datefield)
    dt = datefield.strftime("%m-%d-%Y")
    return dt
date = dateNIFC(fire.iloc[0].DateCurrent)
boundsfire(fire)
# Create a gpkg for the fire to clip the score tif data
# Remove any spaces from fire name for output files
x = str(fire.iloc[0].IncidentName).replace(' ','_').replace('-','_')
# fire data output
basepath = os.path.join(base,fireyear,state,fire.iloc[0].IncidentName)
if not os.path.exists(basepath):
    os.makedirs(basepath)
fireshp = os.path.join(basepath,x+'.gpkg')
fireshpshp = os.path.join(basepath,x+'.shp')
fire.to_file(fireshp, driver="GPKG",OVERWRITE='YES')
fire.to_file(fireshpshp,OVERWRITE='YES')
# paths for gdal command
outfirepath = basepath +'\\'+ x +'.tif'
infireshppath = basepath +'\\'+ x +'.gpkg'
outfire = str('"' +outfirepath+'"')
inscore1 = str('"' +score1file+'"')
infireshp = str('"' +infireshppath+'"')
# Run gdal cmd to cut the raster to the fire polygon
# Ensure connection through vpn
cmd = "gdalwarp -cutline "+ infireshp +" -crop_to_cutline -dstalpha -dstnodata 255 -overwrite " + inscore1 + " " + outfire
os.system(cmd)
print(cmd)
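## Hedged in-process alternative to the gdalwarp shell call, using the rasterio.mask import
## above; clipfire_rio is an illustrative helper, not part of the original workflow.
def clipfire_rio(raster_path, gdf, out_path, nodata=255):
    # Clip the raster to the fire polygon(s) and write the result to out_path
    with rasterio.open(raster_path) as src:
        shapes = gdf.to_crs(src.crs).geometry
        clipped, transform = mask(src, shapes, crop=True, nodata=nodata)
        profile = src.profile
        profile.update(height=clipped.shape[1], width=clipped.shape[2],
                       transform=transform, nodata=nodata)
    with rasterio.open(out_path, 'w', **profile) as dst:
        dst.write(clipped)
# Example (uncomment to use instead of gdalwarp):
# clipfire_rio(score1file, fire, outfirepath)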
def reclassify(src):
    # Reclassify the score values for rendering in the map
    global img
    img = src.read(1)
    img[img == 0] = 0  # Negligible
    img[img == 1] = 1  # Low
    img[np.where((img >= 2) & (img <= 3))] = 2  # Moderate
    img[np.where((img >= 4) & (img <= 12))] = 3  # High
    img[np.where((img >= 13) & (img <= 30))] = 4  # Extreme
    return img
def pixelCount(r, vals):
    # Return a list of pixel counts for each score category in vals
    val_list = []
    size = float(r.size)
    for v in vals:
        count = np.sum(r == v)
        val_list.append(count)  # divide by size for pct - can't use with alpha band/nodata
    return val_list
def pixelCountTest(r, vals):
    # Return a list of pixel percentages for each score category in vals
    # (divides by the full array size, which includes alpha band/nodata pixels)
    val_list = []
    size = float(r.size)
    for v in vals:
        count = np.sum(r == v)
        pct = float(count/size)
        val_list.append(pct)
    return val_list
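## Hedged note: np.unique(img, return_counts=True) only reports values actually present,
## so the explicit loops above are kept to preserve a fixed category order.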
# Basemaps
def cartoBasemap(miny, maxx):
    global m
    m = folium.Map([miny, maxx], zoom_start=5, tiles='cartodbpositron')
    return m

def esriBasemap(miny, maxx):
    global m
    tile = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}"
    attribu = 'Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community'
    m = folium.Map([miny, maxx], zoom_start=9, tiles=tile, attr=attribu)
    return m
%%capture
esriBasemap(fire.iloc[0].miny, fire.iloc[0].maxx)
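## Optionally draw the fire perimeter itself on the basemap (a hedged addition, not in the
## original workflow); folium.GeoJson accepts a GeoDataFrame reprojected to WGS84.
# folium.GeoJson(fire.to_crs(epsg=4326)).add_to(m)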
# score1 display colors
# r,g,b,a - change a to 0 for translucent, 1 for opaque
score1_color = {
0: (233,233,238,1),
1: (243,242,6,1),
2: (253,187,15,1),
3: (244,119,33,1),
4: (208,22,38,1),
255: (0, 0, 0, 0)}
# Add fire raster to map and calculate the % in each score category
# Create folium map
with rasterio.open(outfirepath) as src:
    reclassify(src)
    ### Temporary workaround - the lambda colormap assigns colors incorrectly if the raster does not start at 0
    ### Add one '0'-value cell in the west corner
    img[0, 0] = 0
    ## Folium map - optional for viewing the fire
    folium.raster_layers.ImageOverlay(img, colormap=(lambda x: score1_color[x]),
                                      bounds=[[src.bounds.top, src.bounds.left], [src.bounds.bottom, src.bounds.right]]).add_to(m)
# calculate the pixel count for each score category
results = []
val_list = [0,1, 2, 3, 4] # score reclass values
results.append(pixelCount(img, val_list))
print(results)
## Folium map - optional
m
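## Optional rough check: approximate % per category via pixelCountTest (the total includes
## nodata/alpha pixels, so treat the numbers as indicative only).
# print(pixelCountTest(img, val_list))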
# Calculate the % of WFHC for the fire
def roundCalc(rs, x, total):
    # % of pixels in category x, rounded to the nearest whole number
    if rs[0][x] == 0:
        y = 0
    else:
        y = (rs[0][x]/total)*100
    return round(y)
# Round to 1 decimal place
def rCalc(rs, x, total):
    # % of pixels in category x, rounded to 1 decimal place
    if rs[0][x] == 0:
        y = 0
    else:
        y = (rs[0][x]/total)*100
    return round(y, 1)
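# e.g. rCalc([[5, 0, 15, 30, 0, 0, 0]], 3, 50) returns 60.0 (30 of 50 pixels in category 3)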
## Stats for blurb
# Tif file names
append = ['_score1.tif','_score2.tif','_score3.tif','_score4.tif']
firename_string = x+"_"+str(date)
# Create fire tifs
for v in append:
    tif, outtif = os.path.join(basetif, state, state+v), os.path.join(basepath, firename_string+v)
    intif, outfire = str('"' + tif + '"'), str('"' + outtif + '"')
    # Remove -dstalpha alpha band for symbology in ArcMap; add it back for visualizing with folium
    cmd = "gdalwarp -cutline " + infireshp + " -crop_to_cutline -dstnodata 255 -overwrite " + intif + " " + outfire
    os.system(cmd)
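## Quick sanity check (a hedged addition, not in the original workflow) that gdalwarp wrote
## each clipped tif before the stats loop below:
for v in append:
    if not os.path.exists(os.path.join(basepath, firename_string + v)):
        print('Missing gdalwarp output: ' + firename_string + v)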
## Maintain order of the values/variables in the lists below
## Be sure to check for significant non-burnable/negligible areas and remove them from the analysis
# Calculate the pixel counts for the categorical rasters
# The order of each score list below corresponds to raster values 0-6; 'null' marks values not used in that score
score1 = ['score11','score12','score13','score14','score15','null']
score2 = ['score21','score22','null','score23','score24','score25','score26']
score3 = ['null','score31','score32','score33','null','score34','null']
score4 = ['score41','score42','null','score43','null','score44','null']
variables = [score1,score2,score3,score4]
variable_list = ['score1','score2','score3','score4'] # Values are placeholders for calculated stats
# Open each score raster and calculate the score distribution
n = 0
print_list = pd.DataFrame(columns=['Score','Value','pct'])
for v in append:
    varname_list = variables[n]
    outtif = os.path.join(basepath, firename_string+v)
    if os.path.exists(outtif):
        with rasterio.open(outtif) as src:
            if varname_list == score1:  # Reclassify score1 only
                reclassify(src)
            else:
                img = src.read(1)
        # Calculate the pixel count for each score category
        r = []
        val_list = [0, 1, 2, 3, 4, 5, 6]
        r.append(pixelCount(img, val_list))
        # Get the total number of pixels to divide by
        t = r[0][0]+r[0][1]+r[0][2]+r[0][3]+r[0][4]+r[0][5]+r[0][6]
        # Calculate the % of each value and reassign each score category to variable_list
        v0,v1,v2,v3,v4,v5,v6 = rCalc(r,0,t),rCalc(r,1,t),rCalc(r,2,t),rCalc(r,3,t),rCalc(r,4,t),rCalc(r,5,t),rCalc(r,6,t)
        vname = variable_list[n]
        variable_list[n] = [v0,v1,v2,v3,v4,v5,v6]
        # Print the % in each score category formatted for marketing
        y = 0
        for var in varname_list:
            if varname_list[y] == 'null':
                pass  # skip unused 'null' placeholder values
            else:
                print(str(varname_list[y]) + ": " + str(variable_list[n][y]) + "%")
                # Note: DataFrame.append was removed in pandas 2.0; use pd.concat there
                print_list = print_list.append({'Score': vname, 'Value': (str(varname_list[y])), 'pct': (str(variable_list[n][y]))}, ignore_index=True)
            # Get high & extreme pct for the marketing snippet
            if varname_list[y] == 'score14':
                sc1 = variable_list[n][y]
            if varname_list[y] == 'score15':
                sc2 = variable_list[n][y]
            y = y + 1
    else:
        print(state + v + ' does not exist')
    n = n + 1
    print('')
# Combine High & Extreme for marketing snippet
sc1sc2 = round(sc1 + sc2,2)
acres = 'As of ' + date + ', the '+ str(firename)+ ' Fire has burned approximately ' + str(round(fire.iloc[0].GISAcres,0))+ ' acres in ' + state +' according to NIFC perimeter data.'
risk = 'X has determined that approximately ' +str(sc1sc2)+'% of the area is in X risk categories.'
print(acres, risk)
print_list = print_list.append({'Score': acres,'Value': risk}, ignore_index=True)
print_list.to_csv(os.path.join(basepath, firename_string+'_scores.csv'), index=False)
# Reset fire name for command line arguments in MXD creation script
print(firename)
if firename != fire.iloc[0].IncidentName:
    firename = fire.iloc[0].IncidentName
# Create the MXD and pdf map with arcpy scripts
score_1 = r"c:/pythonenv/arcgis10.6/python D:\scripts\score1map.py " + fireyear + " " + state + " " + '"' + firename + '"'
score_2 = r"c:/pythonenv/arcgis10.6/python D:\scripts\score2map.py " + fireyear + " " + state + " " + '"' + firename + '"'
os.system(score_1)
os.system(score_2)
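## Hedged alternative to os.system that surfaces failures from the arcpy scripts
## (subprocess is standard library; not part of the original workflow):
# import subprocess
# subprocess.run(score_1, shell=True, check=True)
# subprocess.run(score_2, shell=True, check=True)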