<![CDATA[#!/usr/bin/env python
# coding: utf-8

# # Combine raw data across all samples
# 
# In this notebook, we'll use the file manifest we previously assembled to retrieve all of the human PBMC data that we'll use to assemble our reference dataset.
# 
# Here, we'll retrieve data from each individual sample, stored in HDF5 format in HISE, and concatenate them into a single AnnData object.

# ## Load Packages
# 
# anndata: Data structures for scRNA-seq
# datetime: Date/time methods
# h5py: HDF5 file I/O
# hisepy: The HISE SDK for Python
# numpy: Mathematical data structures and computation
# os: Operating system calls
# pandas: DataFrame data structures
# re: Regular expressions
# scanpy: scRNA-seq analysis
# scipy.sparse: Sparse matrix data structures
# shutil: Shell utilities

# In[1]:


import anndata
from datetime import date
import h5py
import hisepy
import numpy as np
import os
import pandas as pd
from pandas.api.types import is_object_dtype
import re
import scanpy as sc
import scipy.sparse as scs
import shutil


# ## Helper functions
# 
# These functions assist in reading our pipeline .h5 file format into AnnData format:

# In[2]:


# define a function to read count data
def read_mat(h5_con):
    mat = scs.csc_matrix(
        (h5_con['matrix']['data'][:], # Count values
         h5_con['matrix']['indices'][:], # Row indices
         h5_con['matrix']['indptr'][:]), # Pointers for column positions
        shape = tuple(h5_con['matrix']['shape'][:]) # Matrix dimensions
    )
    return mat

# define a function to read observation metadata (i.e. cell metadata)
def read_obs(h5_con):
    bc = h5_con['matrix']['barcodes'][:]
    bc = [x.decode('UTF-8') for x in bc]
    
    # Initialize the DataFrame with cell barcodes
    obs_df = pd.DataFrame({ 'barcodes' : bc })
    
    # Get the list of available metadata columns
    obs_columns = h5_con['matrix']['observations'].keys()
    
    # For each column
    for col in obs_columns:
        # Read the values
        values = h5_con['matrix']['observations'][col][:]
        # Check for byte storage
        if(isinstance(values[0], (bytes, bytearray))):
            # Decode byte strings
            values = [x.decode('UTF-8') for x in values]
        # Add column to the DataFrame
        obs_df[col] = values
    
    obs_df = obs_df.set_index('barcodes', drop = False)
    
    return obs_df

# define a function to construct an AnnData object from an .h5 file
def read_h5_anndata(h5_con):
    #h5_con = h5py.File(h5_file, mode = 'r')
    # extract the expression matrix
    mat = read_mat(h5_con)
    # extract gene names
    genes = h5_con['matrix']['features']['name'][:]
    genes = [x.decode('UTF-8') for x in genes]
    # extract metadata
    obs_df = read_obs(h5_con)
    # construct anndata
    adata = anndata.AnnData(mat.T, obs = obs_df)
    # make sure the gene names are aligned
    adata.var_names = genes
    adata.var_names_make_unique()
    return adata


# ## Retrieve file list from HISE
# 
# First, we'll pull the manifest of samples and associated files that we want to retrieve for building our reference dataset. We previously assembled this file and loaded it into HISE via a Watchfolder.

# In[3]:


sample_meta_file_uuid = '2da66a1a-17cc-498b-9129-6858cf639caf'
file_query = hisepy.reader.read_files(
    [sample_meta_file_uuid]
)


# In[4]:


meta_data = file_query['values']


# For our .csv manifest, `read_files` returns a dictionary whose `values` entry holds the sample metadata as a DataFrame. When we retrieve .h5 files the same way, `values` contains a list of `h5py.File()` objects, which we can use directly to read each .h5 file into an AnnData object with our helper function, `read_h5_anndata()`, defined above.]]>
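<![CDATA[# Those helpers assume a specific HDF5 layout from our processing pipeline: `matrix/data`, `matrix/indices`, `matrix/indptr`, and `matrix/shape` for the CSC count matrix, plus `matrix/barcodes`, `matrix/observations/*`, and `matrix/features/name`. As an optional, minimal check (the `check_h5_layout()` helper below is only an illustrative sketch), we can confirm that an open file connection has those datasets before reading it in full:

# In[ ]:


# Optional, illustrative check: confirm an open h5py.File has the layout that
# read_mat(), read_obs(), and read_h5_anndata() expect.
def check_h5_layout(h5_con):
    expected = [
        'matrix/data', 'matrix/indices', 'matrix/indptr', 'matrix/shape',
        'matrix/barcodes', 'matrix/features/name', 'matrix/observations'
    ]
    # h5py groups support path-style membership tests
    missing = [key for key in expected if key not in h5_con]
    if missing:
        print('Missing datasets: {m}'.format(m = missing))
    else:
        obs_cols = list(h5_con['matrix']['observations'].keys())
        print('Layout OK; observation columns: {c}'.format(c = obs_cols))]]>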
<![CDATA[# In[5]:


def get_adata(uuid):
    # Load the file using HISE
    res = hisepy.reader.read_files([uuid])
    
    # Read the file to adata
    h5_con = res['values'][0]
    adata = read_h5_anndata(h5_con)
    
    # Clean up the file now that we're done with it
    h5_con.close()
    
    return(adata)


# Here, we'll iterate over each file in our manifest:

# In[6]:


adata_list = []
for i in range(meta_data.shape[0]):
    uuid = meta_data['file.id'][i]
    adata_list.append(get_adata(uuid))


# Concatenate all of the datasets into a single object:

# In[7]:


adata = anndata.concat(adata_list)


# ## Add sample metadata
# 
# When retrieved from HISE, our .h5 files have a sample identifier (`pbmc_sample_id`), but don't carry other sample and subject metadata. Let's add it from our `meta_data` DataFrame to make the cell metadata more complete.
# 
# First, we'll convert `pbmc_sample_id` to `sample.sampleKitGuid` using a regular expression. PBMC samples are derived from kits in our LIMS system, so both share the same numerical core. The difference is that there can be multiple PBMC samples collected at the same time, so PBMC samples are prefixed with `PB` to indicate their sample type, and suffixed with `-XX` to indicate an aliquot number.

# In[8]:


obs = adata.obs


# In[9]:


def sample_to_kit(sample):
    kit = re.sub('PB([0-9]+)-.+', 'KT\\1', sample)
    return(kit)


# In[10]:


obs['sample.sampleKitGuid'] = [sample_to_kit(sample) for sample in obs['pbmc_sample_id']]


# We only need to keep some of the metadata columns that pertain to cohort, subject, and sample. We'll also keep the originating File GUID to help us keep track of provenance. Let's select just these columns:

# In[11]:


keep_meta = [
    'cohort.cohortGuid',
    'subject.subjectGuid', 'subject.biologicalSex',
    'subject.race', 'subject.ethnicity', 'subject.birthYear',
    'sample.sampleKitGuid', 'sample.visitName', 'sample.drawDate',
    'file.id'
]


# In[12]:


selected_meta = meta_data[keep_meta]


# Now, we'll join this metadata to our observations using those sample kit IDs:

# In[13]:


obs = obs.merge(
    selected_meta,
    how = 'left',
    on = 'sample.sampleKitGuid'
)


# ## Retrieve labs, labels and doublet detection from HISE
# 
# In previous steps, we retrieved CMV and BMI clinical lab data and performed label transfer with CellTypist and Seurat, as well as doublet detection using Scrublet. We'll retrieve these results and add them to the observations in our AnnData object to assist with cell type labeling.
# 
# For this purpose, we'll use the CellTypist labels generated using the Immune All Low model, which has (ironically) high-resolution cell type labels.

# In[14]:


obs_uuids = {
    'cmv': 'be338216-0c90-47df-923c-7f4d7c35bac4',
    'bmi': '9e39e86d-025c-4ad1-bbfb-63302b99c3f1',
    'celltypist': 'dbe10e73-4959-480a-8618-e40652901ab7',
    'seurat': 'ef5baaf1-1364-4e07-b01c-0252fd917fbb',
    'scrublet': '69d4e3a1-ff03-4173-a7e1-e4acff1f3d09',
}


# In[15]:


obs_dfs = {}
for name, uuid in obs_uuids.items():
    hise_res = hisepy.reader.read_files([uuid])
    hise_df = hise_res['values']
    obs_dfs[name] = hise_df]]>
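<![CDATA[# Before selecting columns and joining, a quick optional check (just a sketch) can confirm what we retrieved, and whether the barcode-keyed results cover the cells in our combined object, so the left joins below will actually attach labels:

# In[ ]:


# Optional check: report the size of each retrieved table and, for barcode-keyed
# results, the fraction of our cells that have a matching entry.
for name, df in obs_dfs.items():
    msg = '{n}: {r} rows'.format(n = name, r = df.shape[0])
    if 'barcodes' in df.columns:
        covered = obs['barcodes'].isin(df['barcodes']).mean()
        msg = msg + '; {p:.1%} of cells covered'.format(p = covered)
    print(msg)]]>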
<![CDATA[# We only need to retain some of the columns from each to assemble our cell metadata. Let's make and apply some column selections:

# In[16]:


keep_columns = {
    'cmv': ['subject.subjectGuid', 'subject.cmv'],
    'bmi': ['subject.subjectGuid', 'subject.bmi'],
    'celltypist': ['barcodes', 'predicted_labels'],
    'seurat': ['barcodes',
               'predicted.celltype.l1', 'predicted.celltype.l1.score',
               'predicted.celltype.l2', 'predicted.celltype.l2.score',
               'predicted.celltype.l2.5', 'predicted.celltype.l2.5.score',
               'predicted.celltype.l3', 'predicted.celltype.l3.score'
              ],
    'scrublet': ['barcodes', 'predicted_doublet', 'doublet_score']
}


# In[17]:


for name, df in obs_dfs.items():
    keep_cols = keep_columns[name]
    selected_df = df[keep_cols]
    obs_dfs[name] = selected_df


# To make the sources clear later, let's rename the celltypist and seurat columns:

# In[18]:


obs_dfs['celltypist'] = obs_dfs['celltypist'].rename(
    columns = {'predicted_labels': 'celltypist.low'}
)
obs_dfs['seurat'] = obs_dfs['seurat'].rename(
    columns = {
        'predicted.celltype.l1': 'seurat.l1',
        'predicted.celltype.l1.score': 'seurat.l1.score',
        'predicted.celltype.l2': 'seurat.l2',
        'predicted.celltype.l2.score': 'seurat.l2.score',
        'predicted.celltype.l2.5': 'seurat.l2.5',
        'predicted.celltype.l2.5.score': 'seurat.l2.5.score',
        'predicted.celltype.l3': 'seurat.l3',
        'predicted.celltype.l3.score': 'seurat.l3.score'
    }
)


# And now we can join these labs, labels, and doublet calls to our cell metadata, using the appropriate key for each:

# In[19]:


join_on_dict = {
    'cmv': 'subject.subjectGuid',
    'bmi': 'subject.subjectGuid',
    'celltypist': 'barcodes',
    'seurat': 'barcodes',
    'scrublet': 'barcodes'
}


# In[20]:


for name, df in obs_dfs.items():
    join_col = join_on_dict[name]
    obs = obs.merge(
        df,
        how = 'left',
        on = join_col
    )


# To keep things tidy, we'll also drop the `seurat_pbmc_type`, `seurat_pbmc_type_score`, and UMAP coordinates generated by our processing pipeline.
# 
# These cell type assignments are from a now-outdated reference dataset, and the UMAP coordinates were generated for viewing individual samples, so they aren't helpful for our full dataset.

# In[21]:


obs = obs.drop(
    ['seurat_pbmc_type', 'seurat_pbmc_type_score',
     'umap_1', 'umap_2'],
    axis = 1
)


# Next, we'll convert all of our text columns to categorical. This makes storage of text data more efficient when we write our output file, since each unique string only needs to be stored once.
# 
# We do this for all columns except `barcodes`, which we need to retain as a string type for use as an index.

# In[22]:


# Work on a copy so `obs` keeps plain string columns for the .csv output below
cat_obs = obs.copy()
for col_name, col_type in cat_obs.dtypes.items():
    if col_name == 'barcodes':
        cat_obs[col_name] = cat_obs[col_name].astype(str)
    elif is_object_dtype(col_type):
        cat_obs[col_name] = cat_obs[col_name].astype('category')]]>
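<![CDATA[# As a quick, optional check of that efficiency claim (a rough sketch; exact numbers will vary, and on-disk savings differ from in-memory savings), we can compare the memory footprint of the string and categorical versions of the metadata:

# In[ ]:


# Optional: compare memory usage of the string (obs) and categorical (cat_obs) versions
obs_mb = obs.memory_usage(deep = True).sum() / 1024 ** 2
cat_mb = cat_obs.memory_usage(deep = True).sum() / 1024 ** 2
print('object dtypes: {o:.1f} MB; categorical: {c:.1f} MB'.format(o = obs_mb, c = cat_mb))]]>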
<![CDATA[# In[23]:


cat_obs.head()


# In[24]:


cat_obs.columns


# Finally, we need to reinstate the cell barcodes as the index of the DataFrame, and substitute our new metadata for the observations in the AnnData object.

# In[25]:


cat_obs = cat_obs.set_index('barcodes', drop = False)


# In[26]:


adata.obs = cat_obs


# In[27]:


out_dir = 'output'
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)


# Save the combined object to .h5ad:

# In[28]:


out_h5ad = 'output/ref_pbmc_raw_{d}.h5ad'.format(d = date.today())
adata.write_h5ad(out_h5ad)


# We'll also save just the observations as a .csv file in case we need them for generating figures about the full, raw dataset and labels.
# 
# For the CSV, we don't need the version with categorical columns, so we write `obs` rather than `cat_obs`.

# In[ ]:


out_csv = 'output/ref_pbmc_raw_meta_{d}.csv'.format(d = date.today())
obs.to_csv(out_csv)


# ## Upload assembled data to HISE
# 
# Finally, we'll use `hisepy.upload.upload_files()` to send a copy of our output to HISE to use for downstream analysis steps.

# In[30]:


study_space_uuid = '64097865-486d-43b3-8f94-74994e0a72e0'
title = 'Assembled Raw PBMC .h5ad {d}'.format(d = date.today())


# In[39]:


obs_uuid_list = list(obs_uuids.values())


# In[40]:


in_files = [sample_meta_file_uuid] + obs_uuid_list + meta_data['file.id'].to_list()


# In[42]:


out_files = [out_h5ad, out_csv]


# In[43]:


hisepy.upload.upload_files(
    files = out_files,
    study_space_id = study_space_uuid,
    title = title,
    input_file_ids = in_files
)


# In[44]:


import session_info
session_info.show()]]>