This notebook investigates the performance of HDF5 and BLZ disk-based formats for writing and reading 2-dimensional arrays of genotype data.
The motivation is using HDF5 or BLZ to store arrays of genotype data, where the first dimension corresponds to genome positions where individuals differ genetically from each other (a.k.a. variants) and the second dimension corresponds to a cohort of individuals (a.k.a. samples). Each cell within the array stores the genotype of the individual at a genetic variant, coded as a 1 byte integer.
We typically have >10 million variants across >1000 samples. Hence the arrays are long and thin. This notebook benchmarks a smaller but similarly shaped dataset of 1,000,000 variants by 100 samples.
Most genetic variants are also rare, hence the arrays are sparse along both dimensions, with most values being zeros.
Different data analysis tasks require different access patterns for these data. Hence this notebook tests performance under a number of different access patterns, to get some idea of which configurations provide reasonable all-round performance, and which configurations might be optimal for some access patterns but slow for others.
Data
The data used in this benchmark are real genotype data from the Anopheles gambiae 1000 genomes project phase 1 preview data release. Genotype data from chromosome arm 3L are used.
Hardware
All tests were performed on a Toshiba Portege Z930-108. Tests were performed using the onboard SSD, and using a WD My Passport 2TB external hard disk connected via USB 3.0.
from __future__ import division, print_function
import numpy as np
import h5py
import blz
import sh
import os
import sys
import itertools
from datetime import datetime
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
import random
random.seed(1)
import petl.interactive as etl
import petlx.all
import pandas
from bisect import bisect_left, bisect_right
%matplotlib inline
def log(*msg):
    """Write a timestamped, space-separated message to stderr and flush."""
    timestamp = str(datetime.now())
    text = ' '.join(str(part) for part in msg)
    print(timestamp + ' :: ' + text, file=sys.stderr)
    sys.stderr.flush()
def random_indices(n, k):
    """Return a sorted list of k distinct indices drawn uniformly from range(n)."""
    sample = random.sample(range(n), k)
    sample.sort()
    return sample
def random_selection(n, k):
    """Choose k distinct random positions out of n.

    Returns a pair (indices, selection) where indices is a sorted list of
    k distinct ints in [0, n) and selection is a boolean numpy array of
    length n that is True exactly at those positions.
    """
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the supported spelling of the same dtype.
    selection = np.zeros((n,), bool)
    indices = random_indices(n, k)
    selection[indices] = True
    return indices, selection
# storage devices to benchmark; these are also the directory names the
# test files are written under
devices = 'ssd', 'hdd'
# preview the candidate HDF5 chunk sizes (total items per chunk)
[2**n for n in range(13, 20)]
[8192, 16384, 32768, 65536, 131072, 262144, 524288]
# HDF5 compression settings to compare: LZF, and gzip at levels 1 and 3,
# all with the byte-shuffle filter disabled
h5_compression_configurations = (
    dict(compression='lzf', shuffle=False),
    dict(compression='gzip', compression_opts=1, shuffle=False),
    dict(compression='gzip', compression_opts=3, shuffle=False),
)
# total chunk sizes (items per chunk) and chunk widths (samples per chunk)
h5_chunk_sizes = [2**n for n in range(13, 20)]
h5_chunk_widths = (1, 10, 100)
# chunk shapes (height, width) chosen so that height * width == chunk size
h5_chunks_configurations = [(s//w, w) for (s, w) in itertools.product(h5_chunk_sizes, h5_chunk_widths)]
# full cross product of chunk shapes and compression settings
h5_configurations = [dict(chunks=chunks, **compression)
                     for (chunks, compression) in itertools.product(h5_chunks_configurations,
                                                                    h5_compression_configurations)]
h5_configurations.append(dict())  # add default config, no chunks
# preview the candidate BLZ chunk lengths
[2**n for n in range(10, 20)]
[1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288]
# BLZ settings to compare: four compressors at two compression levels,
# across a range of chunk lengths, with shuffle disabled throughout
blz_cnames = ('blosclz', 'lz4', 'lz4hc', 'zlib')
blz_clevels = (1, 3)
blz_chunklens = [2**n for n in range(10, 20)]
blz_configurations = [dict(cname=cname, clevel=clevel, shuffle=False, chunklen=chunklen)
                      for (cname, clevel, chunklen) in itertools.product(blz_cnames,
                                                                         blz_clevels,
                                                                         blz_chunklens)]
# benchmark dataset dimensions: 1,000,000 variants x 100 samples,
# with 3 independently sliced replicates
n_variants = 1000000
n_samples = 100
n_replicates = 3
chromosome = '3L'
data_replicates = []
with h5py.File('data/data.h5', mode='r') as f:
    genotypes = f[chromosome]['calldata']['genotype']
    for n in range(n_replicates):
        # each replicate uses a different random slice of the data
        n_variants_total = genotypes.shape[0]
        start_index = random.randint(0, n_variants_total - n_variants)
        stop_index = start_index + n_variants
        d = genotypes[start_index:stop_index, :n_samples, ...]
        # reduce data to 2 dimensions for the purpose of this benchmark
        # by summing over the third axis (presumably the ploidy axis of
        # the genotype calls -- TODO confirm against the source data)
        d = np.sum(d, axis=2, dtype='i1')
        log(n, start_index, stop_index, d.shape, '%.1fMb' % (d.nbytes/1e6), '%.3f%% nonzero' % (np.count_nonzero(d)*100/d.size))
        data_replicates.append(d)
2014-07-22 11:06:06.374455 :: 0 955908 1955908 (1000000, 100) 100.0Mb 19.061% nonzero
2014-07-22 11:06:10.942690 :: 1 6028902 7028902 (1000000, 100) 100.0Mb 15.036% nonzero
2014-07-22 11:06:14.405189 :: 2 5433726 6433726 (1000000, 100) 100.0Mb 14.826% nonzero
The command drop_cache
is just a small script that executes:
#!/bin/bash
echo 1 > /proc/sys/vm/drop_caches
This must be run as root via sudo; to avoid password prompts, the script has been added to the sudoers file with the NOPASSWD option.
def benchmark_h5(op, verbose=False):
    """Benchmark op over every (device, replicate, HDF5 configuration) combination.

    The OS page cache is dropped before each run so timings reflect
    cold-cache disk performance. Returns a pandas DataFrame with one row
    per run, including elapsed time, size on disk and compression ratio.
    """
    records = []
    for device in devices:
        for rep, data in enumerate(data_replicates):
            for idx, config in enumerate(h5_configurations):
                # flush the OS page cache so each run starts cold
                sh.sudo.drop_cache()
                fn = os.path.join(device, '%s.%s.h5' % (rep, idx))
                if verbose:
                    log(rep, idx, fn, config)
                # time the operation under benchmark
                start = datetime.now()
                op(fn, config.copy(), data)
                stop = datetime.now()
                elapsed = (stop - start).total_seconds()
                # derive descriptive columns from the configuration
                compression = '%s.%s' % (config.get('compression', 'none'),
                                         config.get('compression_opts', ''))
                chunk_height, chunk_width = config.get('chunks', (0, 0))
                size_on_disk = os.stat(fn).st_size
                records.append([rep, device, compression,
                                chunk_height, chunk_width,
                                chunk_height * chunk_width,
                                elapsed, size_on_disk,
                                data.nbytes / size_on_disk])
    columns = ['replicate', 'device', 'compression', 'chunk_height',
               'chunk_width', 'chunk_size', 'time', 'size_on_disk',
               'compression_ratio']
    return pandas.DataFrame.from_records(records, columns=columns)
def benchmark_blz(op, verbose=False):
    """Benchmark op over every (device, replicate, BLZ configuration) combination.

    The OS page cache is dropped before each run so timings reflect
    cold-cache disk performance. Size on disk comes from the barray's
    cbytes attribute. Returns a pandas DataFrame with one row per run.
    """
    records = []
    for device in devices:
        for rep, data in enumerate(data_replicates):
            for idx, config in enumerate(blz_configurations):
                # flush the OS page cache so each run starts cold
                sh.sudo.drop_cache()
                fn = os.path.join(device, '%s.%s.blz' % (rep, idx))
                if verbose:
                    log(rep, idx, fn, config)
                # time the operation under benchmark
                start = datetime.now()
                b = op(fn, config.copy(), data)
                stop = datetime.now()
                elapsed = (stop - start).total_seconds()
                compression = '%s.%s' % (config.get('cname'),
                                         config.get('clevel'))
                size_on_disk = b.cbytes
                records.append([rep, device, compression,
                                config.get('chunklen'), elapsed,
                                size_on_disk,
                                data.nbytes / size_on_disk])
    columns = ['replicate', 'device', 'compression', 'chunklen',
               'time', 'size_on_disk', 'compression_ratio']
    return pandas.DataFrame.from_records(records, columns=columns)
def op_write_h5(fn, config, data):
    """Write data to a fresh HDF5 file at fn as dataset 'test'.

    config supplies the chunk/compression keyword arguments. Returns the
    dataset handle (the file is closed on return; benchmark_h5 measures
    file size via os.stat rather than through this handle).
    """
    h5 = h5py.File(fn, mode='w')
    try:
        ds = h5.create_dataset('test', data=data, **config)
    finally:
        h5.close()
    return ds
def op_write_blz(fn, config, data):
    """Write data to a fresh on-disk BLZ barray rooted at fn.

    Compression-related keys are popped from config into a bparams
    object; whatever remains (e.g. chunklen) is passed straight through
    to the barray constructor. Returns the barray.
    """
    cname = config.pop('cname')
    clevel = config.pop('clevel')
    shuffle = config.pop('shuffle', False)
    params = blz.bparams(cname=cname, clevel=clevel, shuffle=shuffle)
    return blz.barray(data, rootdir=fn, mode='w', bparams=params, **config)
# run the write benchmarks for both formats (every configuration, both
# devices, three replicates each)
df_write_h5 = benchmark_h5(op_write_h5)
df_write_blz = benchmark_blz(op_write_blz)
# HDF5 write time vs chunk size, faceted by device and chunk width;
# np.min as estimator takes the fastest replicate at each point
grid = sns.factorplot(x='chunk_size', y='time',
                      row='device', col='chunk_width', hue='compression',
                      data=df_write_h5, kind='point', estimator=np.min)
grid.set(ylim=(0, 3))
grid.fig.suptitle('HDF5 write speed', fontsize=20, y=1.03);
# BLZ write time vs chunk length, faceted by device
grid = sns.factorplot(x='chunklen', y='time',
                      row='device', hue='compression',
                      data=df_write_blz, kind='point', estimator=np.min,
                      palette=sns.color_palette('hls', n_colors=len(blz_cnames)*len(blz_clevels)))
grid.set(ylim=(0, 3))
grid.fig.suptitle('BLZ write speed', fontsize=20, y=1.03);
# compression ratio achieved by each HDF5 configuration
grid = sns.factorplot(x='chunk_size', y='compression_ratio',
                      col='chunk_width', hue='compression',
                      data=df_write_h5, kind='point', estimator=np.min)
grid.fig.suptitle('HDF5 compression ratio', fontsize=20, y=1.03);
# compression ratio achieved by each BLZ configuration
grid = sns.factorplot(x='chunklen', y='compression_ratio',
                      hue='compression',
                      data=df_write_blz, kind='point', estimator=np.min,
                      palette=sns.color_palette('hls', n_colors=len(blz_cnames)*len(blz_clevels)))
grid.fig.suptitle('BLZ compression ratio', fontsize=20, y=1.03);
def op_read_h5(fn, *args):
    """Read the entire 'test' dataset from the HDF5 file at fn into memory.

    Extra positional arguments (config, data from the benchmark driver)
    are accepted and ignored. Returns the dataset handle (the file is
    closed on return; benchmark_h5 does not use the handle).
    """
    h5 = h5py.File(fn, mode='r')
    try:
        ds = h5['test']
        _ = ds[:]  # force a full sequential read into memory
    finally:
        h5.close()
    return ds
def op_read_blz(fn, *args):
    """Open the on-disk BLZ barray at fn and read it fully into memory.

    Extra positional arguments (config, data from the benchmark driver)
    are accepted and ignored. Returns the barray, which benchmark_blz
    uses to read the size on disk (cbytes).
    """
    b = blz.barray(rootdir=fn, mode='r')
    _ = b[:]  # force a full sequential read into memory
    return b
# run the sequential whole-array read benchmarks (page cache is dropped
# before every read, so these are cold-cache timings)
df_read_h5 = benchmark_h5(op_read_h5)
df_read_blz = benchmark_blz(op_read_blz)
# HDF5 time to read the whole array, per configuration
grid = sns.factorplot(x='chunk_size', y='time',
                      row='device', col='chunk_width', hue='compression',
                      data=df_read_h5, kind='point', estimator=np.min)
grid.set(ylim=(0, 2))
grid.fig.suptitle('HDF5 read all', fontsize=20, y=1.03);
# BLZ time to read the whole array, per configuration
grid = sns.factorplot(x='chunklen', y='time',
                      row='device', hue='compression',
                      data=df_read_blz, kind='point', estimator=np.min,
                      palette=sns.color_palette('hls', n_colors=len(blz_cnames)*len(blz_clevels)))
grid.set(ylim=(0, 2))
grid.fig.suptitle('BLZ read all', fontsize=20, y=1.03);