#!/usr/bin/env python
# coding: utf-8

# # Semantic Segmentation Input

# ## Install Dependencies

# In[10]:


get_ipython().system('pip install panoptica auxiliary rich numpy > /dev/null')


# If you installed the packages and requirements on your own machine, you can skip this section and start from the import section.
#
# ### Setup Colab environment (optional)
# Otherwise, you can follow and execute the tutorial in your browser.
# In order to start working on the notebook, click on the following button; this will open this page in the Colab environment and you will be able to execute the code on your own (*Google account required*).
#
#
# Open In Colab
#
#
# Now that you are viewing the notebook in Colab, run the next cell to install the packages we will use. There are a few things you should follow in order to properly set the notebook up:
# 1. Warning: This notebook was not authored by Google. Click on 'Run anyway'.
# 2. When the installation commands are done, there might be a "Restart runtime" button at the end of the output. Please click it.

# If you run the next cell in a Google Colab environment, it will **clone the 'tutorials' repository** into your Google Drive. This will create a **new folder** called "tutorials" in **your Google Drive**.
# All generated files will be created/uploaded to your Google Drive.
#
# After the first execution of the next cell, you might receive some warnings and notifications. Please follow these instructions:
# - 'Permit this notebook to access your Google Drive files?' Click on 'Yes', and select your account.
# - 'Google Drive for desktop wants to access your Google Account.' Click on 'Allow'.
#
# Afterwards, the "tutorials" folder has been created. You can navigate to it through the left-hand panel in Colab. You might also have received an email informing you about the access to your Google Drive.

# In[2]:


import sys

# Check if we are currently running in Google Colab
try:
    import google.colab

    colabFlag = True
except ImportError:
    colabFlag = False

# Execute certain steps only if we are in a Colab environment
if colabFlag:
    # Mount your Google Drive
    from google.colab import drive

    drive.mount("/content/drive")
    # clone the repository and set the path
    get_ipython().system('git clone https://github.com/BrainLesion/tutorials.git /content/drive/MyDrive/tutorials')
    BASE_PATH = "/content/drive/MyDrive/tutorials/panoptica"
    sys.path.insert(0, BASE_PATH)
else:  # normal Jupyter notebook environment
    BASE_PATH = "."  # the current working directory is used if you are not in Colab


# ## Setup Imports

# In[3]:


from auxiliary.nifti.io import read_nifti
from rich import print as pprint

from panoptica import (
    InputType,
    Panoptica_Evaluator,
    ConnectedComponentsInstanceApproximator,
    NaiveThresholdMatching,
)


# ## Load Data
# To demonstrate, we use a reference and a prediction of a spine segmentation without instances.
#
# ![semantic_figure](figures/semantic.png)

# In[4]:


ref_masks = read_nifti(f"{BASE_PATH}/spine_seg/semantic/ref.nii.gz")
pred_masks = read_nifti(f"{BASE_PATH}/spine_seg/semantic/pred.nii.gz")
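# As a quick sanity check, you can inspect the loaded arrays. Since this is semantic input, the masks contain class labels rather than instance IDs. The following cell is a minimal sketch added for illustration; it only assumes that `read_nifti` returns numpy arrays, as used in the cells below.

# In[ ]:


import numpy as np

# Sketch: report shape and unique labels of the loaded semantic masks
print("Reference shape:", ref_masks.shape, "labels:", np.unique(ref_masks))
print("Prediction shape:", pred_masks.shape, "labels:", np.unique(pred_masks))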
# To use your own data, please replace the example data with your own data.
#
# In order to successfully load your data, please use NIfTI files and the following file names within the "semantic" folder:
#
# ```panoptica/spine_seg/semantic/```
#
# - Reference data ("ref.nii.gz")
# - Prediction data ("pred.nii.gz")

# ## Run Evaluation

# In[5]:


evaluator = Panoptica_Evaluator(
    expected_input=InputType.SEMANTIC,
    instance_approximator=ConnectedComponentsInstanceApproximator(),
    instance_matcher=NaiveThresholdMatching(),
)


# ## Inspect Results
# The results object allows access to individual metrics and provides helper methods for further processing.
#

# In[6]:


# print all results
result, intermediate_steps_data = evaluator.evaluate(pred_masks, ref_masks, verbose=False)[
    "ungrouped"
]
print(result)


# In[7]:


# get a specific metric, e.g. pq
pprint(f"{result.pq=}")


# In[8]:


# get a dict for further processing, e.g. for pandas
pprint("results dict: ", result.to_dict())


# In[9]:


# To inspect the different processing phases, use the returned intermediate_steps_data object
import numpy as np

intermediate_steps_data.original_prediction_arr  # yields the input prediction array
intermediate_steps_data.original_reference_arr  # yields the input reference array

intermediate_steps_data.prediction_arr(
    InputType.MATCHED_INSTANCE
)  # yields the prediction array after instances have been matched
intermediate_steps_data.reference_arr(
    InputType.MATCHED_INSTANCE
)  # yields the reference array after instances have been matched

# This works with all InputType values
for i in InputType:
    print(i)
    pred = intermediate_steps_data.prediction_arr(i)
    ref = intermediate_steps_data.reference_arr(i)
    print("Prediction array shape =", pred.shape, "unique_values=", np.unique(pred))
    print("Reference array shape =", ref.shape, "unique_values=", np.unique(ref))
    print()
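# As mentioned above, the result dictionary can be fed into pandas for further processing. The following cell is a minimal sketch; it assumes pandas is available in your environment (it is not installed by the pip cell at the top) and that `result.to_dict()` yields a mapping of metric names to values.

# In[ ]:


import pandas as pd

# Sketch: collect the metrics of this evaluation into a one-row DataFrame
results_df = pd.DataFrame([result.to_dict()])
print(results_df.head())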