# @title Installations
!pip install nilearn --quiet
!pip install decord --quiet

# Imports
import os
import cv2
import glob
import time
import torch
import random
import urllib
import pickle
import argparse
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA, IncrementalPCA
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable as V
from torchvision import transforms as trn
from nilearn import surface
from nilearn import datasets
from nilearn import plotting
from decord import cpu
from decord import VideoReader

fsaverage = datasets.fetch_surf_fsaverage()

# @title Enter the dropbox link and run the cell
dropbox_link = 'https://www.dropbox.com/s/agxyxntrbwko7t1/participants_data.zip?dl=1'

# @title Run the cell to download the data
import requests, zipfile, io

# Use the dropbox link to download the data
if dropbox_link:
    fname1 = 'participants_data_v2021'
    fname2 = 'AlgonautsVideos268_All_30fpsmax'
    if not os.path.exists(fname1) or not os.path.exists(fname2):
        print('Data downloading...')
        r = requests.get(dropbox_link)
        z = zipfile.ZipFile(io.BytesIO(r.content))
        z.extractall()
        print('Data download is completed.')
    else:
        print('Data are already downloaded.')

    url = 'https://github.com/Neural-Dynamics-of-Visual-Cognition-FUB/Algonauts2021_devkit/raw/main/example.nii'
    fname = 'example.nii'
    if not os.path.exists(fname):
        r = requests.get(url, allow_redirects=True)
        with open(fname, 'wb') as fh:
            fh.write(r.content)
    else:
        print(f"{fname} file is already downloaded.")
else:
    print('You need to submit the form and get the dropbox link')

# @title Utility functions for data loading
def save_dict(di_, filename_):
    with open(filename_, 'wb') as f:
        pickle.dump(di_, f)


def load_dict(filename_):
    with open(filename_, 'rb') as f:
        # latin1 encoding is needed to unpickle files written with Python 2
        u = pickle._Unpickler(f)
        u.encoding = 'latin1'
        ret_di = u.load()
    return ret_di


def visualize_activity(vid_id, sub):
    # Setting up the paths for whole brain data
    fmri_dir = './participants_data_v2021'
    track = "full_track"
    # get the right track directory depending on whole brain/ROI choice
    track_dir = os.path.join(fmri_dir, track)
    # get the selected subject's directory
    sub_fmri_dir = os.path.join(track_dir, sub)
    # result directory to store nifti file
    results_dir = '/content/'
    # mapping the data to voxels and storing in a nifti file
    fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, "WB")
    visual_mask_3D = np.zeros((78, 93, 71))
    visual_mask_3D[voxel_mask == 1] = fmri_train_all[vid_id, :]
    brain_mask = './example.nii'
    nii_save_path = os.path.join(results_dir, 'vid_activity.nii')
    saveasnii(brain_mask, nii_save_path, visual_mask_3D)
    # visualizing saved nifti file
    plotting.plot_glass_brain(nii_save_path, title='fMRI response',
                              plot_abs=False, display_mode='lyr', colorbar=True)


def get_fmri(fmri_dir, ROI):
    """Load fMRI data into a numpy array for a given ROI.

    Parameters
    ----------
    fmri_dir : str
        path to fMRI data.
    ROI : str
        name of ROI.

    Returns
    -------
    np.array
        matrix of dimensions #train_vids x #voxels containing fMRI responses
        (averaged across repetitions) to train videos of a given ROI
    """
    # Loading ROI data
    ROI_file = os.path.join(fmri_dir, ROI + ".pkl")
    ROI_data = load_dict(ROI_file)
    # averaging ROI data across repetitions
    ROI_data_train = np.mean(ROI_data["train"], axis=1)
    if ROI == "WB":
        voxel_mask = ROI_data['voxel_mask']
        return ROI_data_train, voxel_mask
    return ROI_data_train


def saveasnii(brain_mask, nii_save_path, nii_data):
    img = nib.load(brain_mask)
    nii_img = nib.Nifti1Image(nii_data, img.affine, img.header)
    nib.save(nii_img, nii_save_path)
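# @title (Sketch) Inspect a raw ROI pickle before averaging
# A minimal sanity-check sketch, assuming the data above has been downloaded
# and that the ROI pickle follows the layout get_fmri() expects: a dict with a
# "train" array of shape (#videos, #repetitions, #voxels). The path below is
# just an illustrative example built from the directory scheme used in this
# notebook (mini_track / subject / ROI.pkl); adjust it to your own selection.
roi_file_example = './participants_data_v2021/mini_track/sub01/V1.pkl'
if os.path.exists(roi_file_example):
    roi_raw = load_dict(roi_file_example)
    print('keys in ROI pickle:', list(roi_raw.keys()))
    print('raw "train" shape (videos, repetitions, voxels):', roi_raw["train"].shape)
    # get_fmri() returns this repetition-averaged matrix
    print('shape after averaging repetitions:', np.mean(roi_raw["train"], axis=1).shape)
else:
    print('Example ROI file not found; download the data first.')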
# @title Loading fMRI data and inspecting dimensions

# Select Subject
sub = 'sub04'  # @param ["sub01","sub02","sub03","sub04","sub05","sub06","sub07","sub08","sub09","sub10"]

# Select ROI
ROI = 'V1'  # @param ["WB", "V1", "V2","V3", "V4", "LOC", "EBA", "FFA","STS", "PPA"]

######## fMRI data loader wrapper code ###################################
fmri_dir = './participants_data_v2021'
if ROI == "WB":  # Loading whole brain data
    track = "full_track"  # stored in full_track directory
else:  # Loading ROI data
    track = "mini_track"  # stored in mini_track directory

# get the right track directory depending on whole brain/ROI choice
track_dir = os.path.join(fmri_dir, track)
# get the selected subject's directory
sub_fmri_dir = os.path.join(track_dir, sub)

# Load the fMRI data for the selected subject and ROI
if track == "full_track":
    fmri_train_all, _ = get_fmri(sub_fmri_dir, ROI)
else:
    fmri_train_all = get_fmri(sub_fmri_dir, ROI)
######## fMRI data loader wrapper code ###################################

# Visualize the fMRI responses in a heatmap
f, ax = plt.subplots(figsize=(12, 5))
ax.set(xlabel="Voxel", ylabel="Stimulus")
heatmap = ax.imshow(fmri_train_all, aspect="auto", cmap='jet', vmin=-1, vmax=1)
f.colorbar(heatmap, shrink=.5, label="Response amplitude (Z)")
f.tight_layout()

# @title Visualize video
from IPython.display import HTML
from base64 import b64encode

vid_id = 1  # @param {type: "integer"}
video_dir = './AlgonautsVideos268_All_30fpsmax'

########### Video display code #################################################
video_list = glob.glob(video_dir + '/*.mp4')
video_list.sort()

mp4 = open(video_list[vid_id], 'rb').read()
data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
# Embed the base64-encoded video inline with an HTML5 <video> tag
HTML("""
<video width=400 controls>
  <source src="%s" type="video/mp4">
</video>
""" % data_url)
########### Video display code #################################################

# @title Visualize corresponding brain response
visualize_activity(vid_id, sub)
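# @title (Sketch) Sampling frames from a stimulus video with decord
# decord is installed and imported above but not yet used in this section. The
# sketch below shows one plausible way to read a few evenly spaced frames from
# the video selected by vid_id, e.g. as a first step toward extracting image
# features. The number of frames (8) and the reuse of video_list/vid_id from
# the cell above are illustrative choices, not part of the original notebook.
num_frames = 8
vr = VideoReader(video_list[vid_id], ctx=cpu(0))
# Evenly spaced frame indices over the whole clip
frame_idx = np.linspace(0, len(vr) - 1, num_frames).astype(int).tolist()
frames = vr.get_batch(frame_idx).asnumpy()  # (num_frames, H, W, 3) uint8 array
print('sampled frames:', frames.shape)

# Quick visual check of the sampled frames
fig, axes = plt.subplots(1, num_frames, figsize=(2 * num_frames, 2))
for ax_i, frame in zip(axes, frames):
    ax_i.imshow(frame)
    ax_i.axis('off')
fig.tight_layout()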