"""Visualize earthquake locations with HoloViews + Datashader.

Loads the earthquake dataset with Dask, projects lon/lat to Web
Mercator, and renders datashaded / rasterized views over map tiles.
Derived from a tutorial notebook; the embedded "Exercise" comments
are intentional prompts and are kept in place.
"""
import dask.dataframe as dd
import datashader as ds
import datashader.geo  # registers the ds.geo.lnglat_to_meters helper
import holoviews as hv
from holoviews import opts
from holoviews.element.tiles import EsriImagery
from holoviews.operation.datashader import datashade, rasterize

hv.extension('bokeh')

# Load the earthquake data lazily with Dask, then pin it in memory.
df = dd.read_parquet('../data/earthquakes.parq', engine='fastparquet').persist()
print(f'{len(df)} Rows')
print('Columns:', list(df.columns))

# Project lon/lat (degrees) to Web Mercator meters so the points
# line up with standard web map tile sources.
x, y = ds.geo.lnglat_to_meters(df.longitude, df.latitude)
ddf = df.assign(x=x, y=y).persist()

# Warning: Don't try to display hv.Points() directly; it's too big!
# Use datashade() or rasterize() for any display.
points = hv.Points(ddf, ['x', 'y'])
datashade(points).opts(width=700, height=500, bgcolor="lightgray")

# Show which streams datashade responds to (e.g. RangeXY, so the image
# re-renders on zoom/pan).  The original bare expression was a no-op
# outside a notebook, so print it explicitly.
print(datashade.streams)

# Exercise: Plot the earthquake locations ('longitude' and 'latitude' columns)
# Optional: Change the cmap on the datashade operation to inferno
# from datashader.colors import inferno

# Overlay the shaded points on Esri satellite-imagery tiles.
tiles = EsriImagery().opts(xaxis=None, yaxis=None, width=700, height=500)
tiles * datashade(points)

# Exercise: Overlay the earthquake data on top of the Wikipedia tile source

# Keep only events with depth <= 0, then rasterize with the mean
# magnitude per pixel as the aggregate value.
selected = points.select(depth=(None, 0))
selected.data = selected.data.persist()
tiles * rasterize(selected, aggregator=ds.mean('mag')).opts(colorbar=True)

# Exercise: Use the ds.min or ds.max aggregator to visualize other fields
# Optional: Eliminate outliers by using select

# Group the dataset into one Points element per event 'type'; dynamic=True
# builds each group's element lazily, on demand.
dset = hv.Dataset(ddf)
grouped = dset.to(hv.Points, ['x', 'y'], groupby=['type'], dynamic=True)
tiles.opts(alpha=0.4, bgcolor="black") * datashade(grouped).opts(
    opts.RGB(width=600, height=500, xaxis=None, yaxis=None, tools=['hover']))

# Exercise: Facet a subset of the types as an NdLayout
# Hint: You can reuse the existing grouped variable or select a subset before using the .to method