Create large composite images from snipped words

This is a variation of the 'scissors & paste' notebook that extracts words from Trove newspaper images and compiles them into messages. In this notebook, you can harvest multiple versions of a list of words and compile them all into one big image.

Slice of composite image

View high-res version

In [17]:
# Import what we need
import os
import random
import time
from datetime import datetime
from io import BytesIO
from pathlib import Path

import requests
from bs4 import BeautifulSoup
from IPython.display import FileLink, display
from PIL import Image, ImageOps
from rectpack import SORT_NONE, newPacker
In [ ]:
# Load variables from the .env file if it exists
# Use %%capture to suppress messages
%load_ext dotenv
In [ ]:
# Insert your Trove API key
API_KEY = "YOUR API KEY"

# Use api key value from environment variables if it is available
# (overrides the placeholder above)
if os.getenv("TROVE_API_KEY"):
    API_KEY = os.getenv("TROVE_API_KEY")

# List of words you want to harvest
WORD_LIST = ["radiant", "splendid", "mysterious", "electric"]

# Max number of images of each word you want to harvest (sometimes the words can't be found in the article, so the actual number will probably be a little less)
NUM_WORDS = 20

# Where to save the images
IMG_DIR = "words"

# Create the output directory
Path(IMG_DIR).mkdir(parents=True, exist_ok=True)
In [ ]:
def get_word_boxes(article_url):
    Get the boxes around highlighted search terms.
    boxes = []
    # Get the article page
    response = requests.get(article_url)
    # Load in BS4
    soup = BeautifulSoup(response.text, "lxml")
    # Get the id of the newspaper page
    page_id ="")[0]["data-page-id"]
    # Find the highlighted terms
    words ="span.highlightedTerm")
    # Save the box coords
    for word in words:
        box = {
            "page_id": page_id,
            "left": int(word["data-x"]),
            "top": int(word["data-y"]),
            "width": int(word["data-w"]),
            "height": int(word["data-h"]),
    return boxes

def crop_word(box, kw, article_id):
    Crop the box coordinates from the full page image.
    word_path = Path(f"{IMG_DIR}/{kw}-{article_id}.jpg")
    if not word_path.exists():
        # Construct the url we need to download the page image
        page_url = (
                box["page_id"], 7
        # print(page_url)
        # Download the page image
        response = requests.get(page_url)
        # Open download as an image for editing
        img =
        word = img.crop(
                box["left"] - 5,
                box["top"] - 5,
                box["left"] + box["width"] + 5,
                box["top"] + box["height"] + 5,

def get_article_from_search(kw):
    """Use the Trove API to find articles with the supplied keyword.

    Searches the newspaper zone for up to NUM_WORDS articles containing
    `kw` as a phrase, then scrapes each article's web page for word
    boxes and crops out the first highlighted occurrence.
    """
    params = {
        "q": f'text:"{kw}"',
        "zone": "newspaper",
        "encoding": "json",
        "n": NUM_WORDS,
        "key": API_KEY,
    }
    # NOTE(review): endpoint reconstructed — this is the Trove API v2 search url
    response = requests.get(
        "", params=params
    )
    data = response.json()
    articles = data["response"]["zone"][0]["records"]["article"]
    for article in articles:
        boxes = []
        try:
            boxes = get_word_boxes(article["troveUrl"])
        except KeyError:
            # Record has no troveUrl, or the page markup lacks the data attrs
            pass
        if boxes:
            # Only crop the first highlighted occurrence in each article
            crop_word(boxes[0], kw, article["id"])

Get all the words

In [ ]:
# Harvest images for every word in the list, pausing briefly between
# searches to be polite to the Trove servers
for word in WORD_LIST:
    get_article_from_search(word)
    time.sleep(0.2)

Create the composite image

Here we use a packing algorithm to try to fit the little word images (which come in a variety of shapes and sizes) into one big box with as few gaps as possible. Adjust the WIDTH and HEIGHT values below to change the size of the composite.

In [ ]:
# Set width of composite image
WIDTH = 2000

# Set height of composite image
HEIGHT = 1000

# Set background colour of composite image
BG_COLOUR = (0, 0, 0)
In [ ]:
def get_image_data():
    images = []
    for im in [i for i in Path(IMG_DIR).glob("*.jpg")]:
        img =
        h, w = img.size
        images.append((h + 4, w + 4,
    return images

def pack_images():
    """Pack the word images into a single WIDTH x HEIGHT bin.

    Returns:
        A tuple of (total number of images, list of packed rectangles).
        Each rectangle is (bin_id, x, y, w, h, rid) where rid is the
        image filename; images that didn't fit in the bin are omitted.
    """
    images = get_image_data()
    # SORT_NONE keeps the file order; rotation=False stops words being
    # packed sideways
    packer = newPacker(sort_algo=SORT_NONE, rotation=False)
    for i in images:
        packer.add_rect(*i)
    packer.add_bin(WIDTH, HEIGHT)
    packer.pack()
    return len(images), packer.rect_list()

def create_composite(output_file=None):
    num_images, rectangles = pack_images()
    comp ="RGB", (WIDTH, HEIGHT), BG_COLOUR)
    for rect in rectangles:
        b, x, y, w, h, rid = rect
        # print(x,y, w, h, rid)
        word_path = Path(IMG_DIR, rid)
        word =
        word = word.convert("RGB")
        word_with_border = ImageOps.expand(word, border=2, fill=BG_COLOUR)
        comp.paste(word_with_border, (x, y, x + w, y + h))
    if not output_file:
        output_file = (
    print(f"{len(rectangles)} of {num_images} images used")

Run the cell below to create a composite image of the words you've harvested. The function will tell you how many of the harvested words it was able to fit into the composite. You can adjust the width and height of the composite to fit in more, or fill up gaps.

In [ ]:

Created by Tim Sherratt for the GLAM Workbench.
Support this project by becoming a GitHub sponsor.