The number of digitised newspapers available through Trove has increased dramatically since 2009. Understanding when newspapers were added is important for historiographical purposes, but there's no data about this available directly from Trove. This notebook uses web archives to extract lists of newspapers in Trove over time, and chart Trove's development.
Trove has always provided a browseable list of digitised newspaper titles. The URL and format of this list have changed over time, but it's possible to find captures of the page in the Internet Archive and extract the full list of titles. The pages are also captured in the Australian Web Archive, but the Wayback Machine has a more detailed record.
The pages that I'm looking for are:

- `http://trove.nla.gov.au/ndp/del/titles` — the list in the original Trove interface
- `https://trove.nla.gov.au/newspaper/about` — the list after the interface changed in 2016

This notebook creates the following data files:

- `trove_newspaper_titles_2009_2021.csv` — the full dataset of titles extracted from captures between 2009 and 2021
- `trove_newspaper_titles_first_appearance_2009_2021.csv` — the first appearance of each title / place / date range combination
I've also created a browseable list of titles, showing when they first appeared in Trove.
import requests
import json
import re
from surt import surt
from bs4 import BeautifulSoup
import arrow
import pandas as pd
import altair as alt
from IPython.display import display, HTML
from pathlib import Path
We're using the Memento protocol to get a list of captures. See the Web Archives section of the GLAM Workbench for more details.
# The code in this cell is copied from notebooks in the Web Archives section of the GLAM Workbench (https://glam-workbench.net/web-archives/)
# In particular see: https://glam-workbench.net/web-archives/#find-all-the-archived-versions-of-a-web-page
# These are the repositories we'll be using
TIMEGATES = {
    'awa': 'https://web.archive.org.au/awa/',
    'nzwa': 'https://ndhadeliver.natlib.govt.nz/webarchive/wayback/',
    'ukwa': 'https://www.webarchive.org.uk/wayback/en/archive/',
    'ia': 'https://web.archive.org/web/'
}
def convert_lists_to_dicts(results):
    '''
    Converts an IA-style Timemap (a JSON array of arrays) to a list of dictionaries.
    Renames keys to standardise IA results with other Timemaps.
    '''
    if results:
        keys = results[0]
        results_as_dicts = [dict(zip(keys, v)) for v in results[1:]]
    else:
        results_as_dicts = results
    for d in results_as_dicts:
        d['status'] = d.pop('statuscode')
        d['mime'] = d.pop('mimetype')
        d['url'] = d.pop('original')
    return results_as_dicts
def get_capture_data_from_memento(url, request_type='head'):
    '''
    For OpenWayback systems this can get some extra capture info to insert into Timemaps.
    '''
    if request_type == 'head':
        response = requests.head(url)
    else:
        response = requests.get(url)
    headers = response.headers
    length = headers.get('x-archive-orig-content-length')
    status = headers.get('x-archive-orig-status')
    status = status.split(' ')[0] if status else None
    mime = headers.get('x-archive-orig-content-type')
    mime = mime.split(';')[0] if mime else None
    return {'length': length, 'status': status, 'mime': mime}
def convert_link_to_json(results, enrich_data=False):
    '''
    Converts a link-formatted Timemap to JSON.
    '''
    data = []
    for line in results.splitlines():
        parts = line.split('; ')
        if len(parts) > 1:
            link_type = re.search(r'rel="(original|self|timegate|first memento|last memento|memento)"', parts[1]).group(1)
            if link_type == 'memento':
                link = parts[0].strip('<>')
                timestamp, original = re.search(r'/(\d{14})/(.*)$', link).groups()
                capture = {'urlkey': surt(original), 'timestamp': timestamp, 'url': original}
                if enrich_data:
                    capture.update(get_capture_data_from_memento(link))
                data.append(capture)
    return data
def get_timemap_as_json(timegate, url, enrich_data=False):
    '''
    Get a Timemap then normalise the results (if necessary) to return a list of dicts.
    '''
    tg_url = f'{TIMEGATES[timegate]}timemap/json/{url}/'
    response = requests.get(tg_url)
    response_type = response.headers['content-type']
    if response_type == 'text/x-ndjson':
        data = [json.loads(line) for line in response.text.splitlines()]
    elif response_type == 'application/json':
        data = convert_lists_to_dicts(response.json())
    elif response_type in ['application/link-format', 'text/html;charset=utf-8']:
        data = convert_link_to_json(response.text, enrich_data=enrich_data)
    else:
        # Fail with a clear message, rather than a NameError, if the
        # repository returns an unexpected content type
        raise ValueError(f'Unexpected content type: {response_type}')
    return data
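As a quick check, we can fetch the Timemap for one of the pages we're interested in and see how many captures there are (a minimal sketch; it assumes the Internet Archive's Timemap service responds as it did when this notebook was written):

```python
# Fetch all captures of the old titles page from the Internet Archive
captures = get_timemap_as_json('ia', 'http://trove.nla.gov.au/ndp/del/titles')
print(f'{len(captures)} captures')
# Each capture is a dict with urlkey, timestamp, url, mime, and status values
print(captures[0])
```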
This gets the web page captures from the Internet Archive, scrapes the list of titles from the page, then does a bit of normalisation of the title data.
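To see what that normalisation involves, here's a minimal illustration (not part of the harvest itself) that applies the same regular expressions used below to a single title string:

```python
# Split a full title string into a brief title, place, and date range
sample = 'Colonial Times (Hobart, Tas. : 1828-1857)'
brief = re.sub(r'\(.+\)\s*$', '', sample).strip()
details = re.search(r'\((.+)\)\s*$', sample).group(1).split(':')
print(brief)               # Colonial Times
print(details[0].strip())  # Hobart, Tas.
print(details[1].strip())  # 1828-1857
```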
titles = []

# These are the pages that listed available titles.
# The url and format of the list changed in 2016.
pages = [
    {'url': 'http://trove.nla.gov.au/ndp/del/titles', 'path': '/ndp/del/title/'},
    {'url': 'https://trove.nla.gov.au/newspaper/about', 'path': '/newspaper/title/'}
]

for page in pages:
    for capture in get_timemap_as_json('ia', page['url']):
        if capture['status'] == '200':
            url = f'https://web.archive.org/web/{capture["timestamp"]}id_/{capture["url"]}'
            capture_date = arrow.get(capture['timestamp'][:8], 'YYYYMMDD').format('YYYY-MM-DD')
            response = requests.get(url)
            # Specify a parser explicitly to avoid BeautifulSoup's default-parser warning
            soup = BeautifulSoup(response.content, 'html.parser')
            title_links = soup.find_all('a', href=re.compile(page['path']))
            for title in title_links:
                # Get the title text
                full_title = title.get_text().strip()
                # Get the title id
                title_id = re.search(r'\/(\d+)\/?$', title['href']).group(1)
                # Most of the code below is aimed at normalising the publication place
                # and dates values to allow for easy grouping & deduplication
                brief_title = re.sub(r'\(.+\)\s*$', '', full_title).strip()
                try:
                    details = re.search(r'\((.+)\)\s*$', full_title).group(1).split(':')
                except AttributeError:
                    # No details in parentheses
                    place = ''
                    dates = ''
                else:
                    try:
                        place = details[0].strip()
                        # Normalise state abbreviations
                        place = re.sub(r'(, )?([A-Za-z]+)[\.\s]*$', lambda match: f'{match.group(1) if match.group(1) else ""}{match.group(2).upper()}', place)
                        # Normalise date ranges
                        dates = ' - '.join([d.strip() for d in details[1].strip().split('-')])
                    except IndexError:
                        # Only dates, no place
                        place = ''
                        dates = ' - '.join([d.strip() for d in details[0].strip().split('-')])
                titles.append({
                    'title_id': title_id,
                    'full_title': full_title,
                    'title': brief_title,
                    'place': place,
                    'dates': dates,
                    'capture_date': capture_date,
                    'capture_timestamp': capture['timestamp']
                })
df = pd.DataFrame(titles)
df
|  | title_id | full_title | title | place | dates | capture_date | capture_timestamp |
|---|---|---|---|---|---|---|---|
| 0 | 34 | Advertiser (Adelaide, SA : 1889-1931) | Advertiser | Adelaide, SA | 1889 - 1931 | 2009-11-12 | 20091112000713 |
| 1 | 13 | Argus (Melbourne, Vic. : 1848-1954) | Argus | Melbourne, VIC | 1848 - 1954 | 2009-11-12 | 20091112000713 |
| 2 | 16 | Brisbane Courier (Qld. : 1864-1933) | Brisbane Courier | QLD | 1864 - 1933 | 2009-11-12 | 20091112000713 |
| 3 | 11 | Canberra Times (ACT : 1926-1954) | Canberra Times | ACT | 1926 - 1954 | 2009-11-12 | 20091112000713 |
| 4 | 24 | Colonial Times (Hobart, Tas. : 1828-1857) | Colonial Times | Hobart, TAS | 1828 - 1857 | 2009-11-12 | 20091112000713 |
| ... | ... | ... | ... | ... | ... | ... | ... |
| 90111 | 1374 | Papuan Times (Port Moresby, Papua New Guinea :... | Papuan Times | Port Moresby, Papua New GUINEA | 1911 - 1916 | 2021-04-15 | 20210415021550 |
| 90112 | 1369 | Territory of Papua Government Gazette (Papua N... | Territory of Papua Government Gazette | Papua New GUINEA | 1906 - 1942 | 2021-04-15 | 20210415021550 |
| 90113 | 1371 | Territory of Papua and New Guinea Government G... | Territory of Papua and New Guinea Government G... |  | 1949 - 1971 | 2021-04-15 | 20210415021550 |
| 90114 | 1370 | Territory of Papua-New Guinea Government Gazet... | Territory of Papua-New Guinea Government Gazette |  | 1945 - 1949 | 2021-04-15 | 20210415021550 |
| 90115 | 1391 | Tribune (Philippines : 1932 - 1945) | Tribune | PHILIPPINES | 1932 - 1945 | 2021-04-15 | 20210415021550 |

90116 rows × 7 columns
# Number of captures
len(df['capture_timestamp'].unique())
120
# Number of days on which the pages were captured
len(df['capture_date'].unique())
111
Save this dataset as a CSV file.
df.to_csv('trove_newspaper_titles_2009_2021.csv', index=False)
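If you come back to this dataset later, you can reload it with the capture dates parsed as datetimes (a small sketch; it assumes the CSV is in the current working directory):

```python
# Reload the saved dataset, parsing capture_date back into datetime objects
titles_df = pd.read_csv('trove_newspaper_titles_2009_2021.csv', parse_dates=['capture_date'])
```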
# Drop duplicates in cases where there were multiple captures on a single day
captures_df = df.drop_duplicates(subset=['capture_date', 'full_title'])
# Calculate totals per capture
capture_totals = captures_df['capture_date'].value_counts().to_frame().reset_index()
capture_totals.columns = ['capture_date', 'total']
capture_totals
|  | capture_date | total |
|---|---|---|
| 0 | 2021-04-15 | 1666 |
| 1 | 2021-03-11 | 1658 |
| 2 | 2021-02-05 | 1649 |
| 3 | 2020-11-12 | 1625 |
| 4 | 2020-05-10 | 1553 |
| ... | ... | ... |
| 106 | 2010-04-28 | 37 |
| 107 | 2009-11-24 | 34 |
| 108 | 2009-12-12 | 34 |
| 109 | 2009-11-22 | 34 |
| 110 | 2009-11-12 | 34 |

111 rows × 2 columns
alt.Chart(capture_totals).mark_line(point=True).encode(
    x=alt.X('capture_date:T', title='Date captured'),
    y=alt.Y('total:Q', title='Number of newspaper titles'),
    tooltip=[alt.Tooltip('capture_date:T', format='%e %b %Y'), 'total:Q'],
).properties(width=700)
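If you'd like to keep a copy of the chart outside the notebook, you can assign it to a variable and save it as a standalone HTML file (a sketch; the filename is just an example):

```python
# Rebuild the chart, assign it to a variable, and save it as HTML
chart = alt.Chart(capture_totals).mark_line(point=True).encode(
    x=alt.X('capture_date:T', title='Date captured'),
    y=alt.Y('total:Q', title='Number of newspaper titles'),
    tooltip=[alt.Tooltip('capture_date:T', format='%e %b %Y'), 'total:Q'],
).properties(width=700)
chart.save('trove_newspaper_titles_growth.html')
```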
For historiographical purposes, it's useful to know when a particular title first appeared in Trove. Here we'll keep only the first appearance of each title (or of any subsequent change to its date range or location).
first_appearance = df.drop_duplicates(subset=['title', 'place', 'dates'])
first_appearance
|  | title_id | full_title | title | place | dates | capture_date | capture_timestamp |
|---|---|---|---|---|---|---|---|
| 0 | 34 | Advertiser (Adelaide, SA : 1889-1931) | Advertiser | Adelaide, SA | 1889 - 1931 | 2009-11-12 | 20091112000713 |
| 1 | 13 | Argus (Melbourne, Vic. : 1848-1954) | Argus | Melbourne, VIC | 1848 - 1954 | 2009-11-12 | 20091112000713 |
| 2 | 16 | Brisbane Courier (Qld. : 1864-1933) | Brisbane Courier | QLD | 1864 - 1933 | 2009-11-12 | 20091112000713 |
| 3 | 11 | Canberra Times (ACT : 1926-1954) | Canberra Times | ACT | 1926 - 1954 | 2009-11-12 | 20091112000713 |
| 4 | 24 | Colonial Times (Hobart, Tas. : 1828-1857) | Colonial Times | Hobart, TAS | 1828 - 1857 | 2009-11-12 | 20091112000713 |
| ... | ... | ... | ... | ... | ... | ... | ... |
| 89211 | 1700 | Port Lincoln, Tumby and West Coast Recorder (S... | Port Lincoln, Tumby and West Coast Recorder | SA | 1904 - 1909 | 2021-04-15 | 20210415021550 |
| 89258 | 1702 | West Coast Recorder (Port Lincoln, SA : 1909 -... | West Coast Recorder | Port Lincoln, SA | 1909 - 1942 | 2021-04-15 | 20210415021550 |
| 89487 | 1703 | Express, Bacchus Marsh (Vic. : 1943 - 1954) | Express, Bacchus Marsh | VIC | 1943 - 1954 | 2021-04-15 | 20210415021550 |
| 89671 | 310 | Richmond Guardian (Vic. : 1885; 1904 - 1922) | Richmond Guardian | VIC | 1885; 1904 - 1922 | 2021-04-15 | 20210415021550 |
| 89944 | 1638 | Miner's Right (Boulder, WA : 1897) | Miner's Right | Boulder, WA | 1897 | 2021-04-15 | 20210415021550 |

2040 rows × 7 columns
Find when a particular newspaper first appeared.
first_appearance.loc[first_appearance['title'] == 'Canberra Times']
|  | title_id | full_title | title | place | dates | capture_date | capture_timestamp |
|---|---|---|---|---|---|---|---|
| 3 | 11 | Canberra Times (ACT : 1926-1954) | Canberra Times | ACT | 1926 - 1954 | 2009-11-12 | 20091112000713 |
| 9395 | 11 | Canberra Times (ACT : 1926 - 1995) | Canberra Times | ACT | 1926 - 1995 | 2012-12-27 | 20121227113753 |
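The same approach works for any title. For example, a quick sketch matching on part of a name:

```python
# Find the first appearances of all titles whose names contain 'Herald'
first_appearance.loc[first_appearance['title'].str.contains('Herald')]
```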
Generate an alphabetical list for easy browsing. View the results as a Gist.
with Path('titles_list.md').open('w') as titles_list:
    for title, group in first_appearance.groupby(['title', 'title_id']):
        places = ' | '.join(group['place'].unique())
        titles_list.write(f'<h4><a href="http://nla.gov.au/nla.news-title{title[1]}">{title[0]} ({places})</a></h4>')
        titles_list.write(group.sort_values(by='capture_date')[['capture_date', 'dates', 'place']].to_html(index=False))
Save this dataset to CSV.
first_appearance.to_csv('trove_newspaper_titles_first_appearance_2009_2021.csv', index=False)
Created by Tim Sherratt for the GLAM Workbench.
Support this project by becoming a GitHub sponsor.