If you've ever poked around in Trove's 'map' zone, you might have noticed the beautiful deep-zoomable images available for many of the NLA's digitised maps. Even better, in many cases the high-resolution TIFF versions of the digitised maps are available for download.
I knew there were lots of great maps you could download from Trove, but how many? And how big were the files? I thought I'd try to quantify this a bit by harvesting and analysing the metadata.
The sizes of the downloadable files (in both bytes and pixels) are embedded within the landing pages for the digitised maps. So harvesting the metadata involves a number of steps: search the Trove API for digitised single maps, check each work record for a link to a digital copy, download the landing page of each digital copy, and extract the embedded metadata recording pixel dimensions, file size, and copyright status.
Here's the downloaded metadata as a CSV formatted file. You can also browse the results using Google Sheets.
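To see what that embedded metadata looks like, here's a minimal sketch that grabs the work data from a single map's landing page. The nla.obj identifier is just a placeholder (swap in the identifier of any digitised map), and the sketch assumes the landing page still embeds the metadata in a 'var work = ...' JavaScript variable, which is what the harvesting code below relies on.

import json
import re
import requests

# Placeholder identifier -- replace with the nla.obj id of any digitised map
landing_page_url = 'https://nla.gov.au/nla.obj-123456789'

response = requests.get(landing_page_url)

# The landing page embeds the work metadata as a JSON string inside a JavaScript variable,
# so we can pull it out with a regular expression and parse it
match = re.search(r'var work = JSON\.parse\(JSON\.stringify\((\{.*\})', response.text)
if match:
    work_data = json.loads(match.group(1))
    # Each entry in 'copies' can include pixel dimensions or a filesize in bytes
    for copy in work_data.get('copies', []):
        print(copy.get('copyrole'), copy.get('filesize'))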
import requests
from tqdm import tqdm_notebook
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from IPython.display import display, FileLink
import re
import json
import time
import pandas as pd
from bs4 import BeautifulSoup
import altair as alt
s = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[ 502, 503, 504 ])
s.mount('https://', HTTPAdapter(max_retries=retries))
s.mount('http://', HTTPAdapter(max_retries=retries))
alt.renderers.enable('notebook')
alt.data_transformers.enable('json')
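# Insert your Trove API key between the quotes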
api_key = ''
def get_total_results(params):
'''
Get the total number of results for a search.
'''
these_params = params.copy()
these_params['n'] = 0
response = s.get('https://api.trove.nla.gov.au/v2/result', params=these_params)
data = response.json()
return int(data['response']['zone'][0]['records']['total'])
def get_fulltext_url(links):
'''
    Loop through the identifiers to find a link to the digital version of the work.
'''
url = None
for link in links:
if link['linktype'] == 'fulltext' and 'nla.obj' in link['value']:
url = link['value']
break
return url
def get_copyright_status(response):
'''
Scrape copyright information from a digital work page.
'''
soup = BeautifulSoup(response.text, 'lxml')
copyright_status = soup.find('div', id='tab-access').strong.string
return copyright_status
def get_work_data(url):
'''
Extract work data in a JSON string from the work's HTML page.
'''
response = s.get(url)
try:
work_data = json.loads(re.search(r'var work = JSON\.parse\(JSON\.stringify\((\{.*\})', response.text).group(1))
except (AttributeError, TypeError):
        work_data = {}
else:
# If there's no copyright info in the work data, then scrape it
if 'copyrightPolicy' not in work_data:
work_data['copyrightPolicy'] = get_copyright_status(response)
return work_data
def format_bytes(size):
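    '''
    Convert a filesize in bytes into a human-readable number and unit.
    '''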
# 2**10 = 1024
power = 2**10
n = 0
power_labels = {0 : '', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}
while size > power:
size /= power
n += 1
return size, power_labels[n]+'B'
def get_map_data(work_data):
'''
Look for file size information in the embedded data
'''
map_data = {}
width = None
height = None
num_bytes = None
try:
# Make sure there's a downloadable version
if work_data.get('accessConditions') == 'Unrestricted' and 'copies' in work_data:
for copy in work_data['copies']:
# Get the pixel dimensions
if 'technicalmetadata' in copy:
width = copy['technicalmetadata'].get('width')
height = copy['technicalmetadata'].get('height')
# Get filesize in bytes
                elif copy.get('copyrole') in ['m', 'o', 'i', 'fd'] and copy.get('access') == 'true':
num_bytes = copy.get('filesize')
if width and height and num_bytes:
size, unit = format_bytes(num_bytes)
# Convert bytes to something human friendly
map_data['filesize_string'] = '{:.2f}{}'.format(size, unit)
map_data['filesize'] = num_bytes
map_data['width'] = width
map_data['height'] = height
map_data['copyright_status'] = work_data.get('copyrightPolicy')
except AttributeError:
pass
return map_data
def get_maps():
'''
Harvest metadata about maps.
'''
    url = 'https://api.trove.nla.gov.au/v2/result'
maps = []
params = {
'q': '"nla.obj-"',
'zone': 'map',
'l-availability': 'y',
'l-format': 'Map/Single map',
'bulkHarvest': 'true', # Needed to maintain a consistent order across requests
'key': api_key,
'n': 100,
'encoding': 'json'
}
start = '*'
total = get_total_results(params)
with tqdm_notebook(total=total) as pbar:
while start:
params['s'] = start
response = s.get(url, params=params)
data = response.json()
            # If there's a nextStart value, we use it to request the next page of results
try:
start = data['response']['zone'][0]['records']['nextStart']
except KeyError:
start = None
for work in tqdm_notebook(data['response']['zone'][0]['records']['work'], leave=False):
# Check to see if there's a link to a digital version
try:
fulltext_url = get_fulltext_url(work['identifier'])
except KeyError:
pass
else:
if fulltext_url:
work_data = get_work_data(fulltext_url)
map_data = get_map_data(work_data)
if 'filesize' in map_data:
trove_id = re.search(r'(nla\.obj\-\d+)', fulltext_url).group(1)
try:
contributors = '|'.join(work.get('contributor'))
except TypeError:
contributors = work.get('contributor')
# Get basic metadata
# You could add more work data here
# Check the Trove API docs for work record structure
map_data['title'] = work['title']
map_data['fulltext_url'] = fulltext_url
map_data['trove_url'] = work.get('troveUrl')
map_data['trove_id'] = trove_id
map_data['date'] = work.get('issued')
map_data['creators'] = contributors
maps.append(map_data)
time.sleep(0.2)
time.sleep(0.2)
pbar.update(100)
return maps
maps = get_maps()
# Convert to dataframe
df = pd.DataFrame(maps)
df.head()
# Save to CSV
df.to_csv('single_maps.csv', index=False)
display(FileLink('single_maps.csv'))
# Reload data from CSV if necessary
df = pd.read_csv('single_maps.csv')
How many single maps have high-resolution downloads?
print('{:,} maps'.format(df.shape[0]))
How much map data is available for download?
size, unit = format_bytes(df['filesize'].sum())
print('{:.2f}{}'.format(size, unit))
What's the copyright status of the maps?
df['copyright_status'].value_counts()
Let's show the copyright status as a chart...
counts = df['copyright_status'].value_counts().to_frame().reset_index()
counts.columns = ['status', 'count']
alt.Chart(counts).mark_bar().encode(
y='status:N',
x='count',
tooltip='count'
)
Let's look at the sizes of the download files.
As the chart below shows, most of the files are less than 500MB, but almost 5,000 are between 0.5 and 1GB!
df['mb'] = df['filesize'] / 2**10 / 2**10
alt.Chart(df).mark_bar().encode(
x=alt.X('mb', bin=True, title='MB'),
y='count()',
tooltip='count()'
)
What's the biggest file available for download?
df.loc[df['filesize'].idxmax()]
All downloads greater than 3GB.
df.loc[(df['filesize'] / 2**10 / 2**10 / 2**10) > 3]
The widest image?
df.loc[df['width'].idxmax()]
The tallest image?
df.loc[df['height'].idxmax()]
Created by Tim Sherratt.
Work on this notebook was supported by the Humanities, Arts and Social Sciences (HASS) Data Enhanced Virtual Lab.