!pip install -U panoptica > /dev/null  # installs numpy and other dependencies
!pip install -U auxiliary > /dev/null
import numpy as np
from auxiliary.nifti.io import read_nifti
from rich import print as pprint
from panoptica import MatchedInstancePair, Panoptic_Evaluator
from panoptica.metrics import Metric
To demonstrate, we use a reference and a prediction of a spine segmentation with matched instances.
ref_masks = read_nifti("./spine_seg/matched_instance/ref.nii.gz")
pred_masks = read_nifti("./spine_seg/matched_instance/pred.nii.gz")
# labels are matching
np.unique(ref_masks), np.unique(pred_masks)
(array([ 0, 2, 3, 4, 5, 6, 7, 8, 26, 102, 103, 104, 105, 106, 107, 108, 202, 203, 204, 205, 206, 207, 208], dtype=uint8), array([ 0, 2, 3, 4, 5, 6, 7, 8, 26, 102, 103, 104, 105, 106, 107, 108, 202, 203, 204, 205, 206, 207, 208], dtype=uint8))
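Because the instances are matched, reference and prediction must contain identical label sets. A quick sanity check (a minimal sketch using the already-imported numpy):
# matched instances imply identical label sets in both masks
assert np.array_equal(np.unique(ref_masks), np.unique(pred_masks)), "label sets differ"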
To use your own data, replace the example data with your own.
To load your data successfully, use NIfTI files with the following file names within the folder "matched_instance":
panoptica/spine_seg/matched_instance/
├── ref.nii.gz
└── pred.nii.gz
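With that layout, loading your own files mirrors the example above (the folder my_data is a placeholder, not part of the package):
# hypothetical paths -- adapt them to your own folder structure
ref_masks = read_nifti("./my_data/matched_instance/ref.nii.gz")
pred_masks = read_nifti("./my_data/matched_instance/pred.nii.gz")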
# bundle prediction and reference into a matched-instance input pair
sample = MatchedInstancePair(prediction_arr=pred_masks, reference_arr=ref_masks)
evaluator = Panoptic_Evaluator(
    expected_input=MatchedInstancePair,  # the input type this evaluator expects
    decision_metric=Metric.IOU,  # metric used to decide which pairs count as true positives
    decision_threshold=0.5,  # pairs with IoU below 0.5 are not counted as matches
)
result, debug_data = evaluator.evaluate(sample)
────────────────────────────────────────── Thank you for using panoptica ──────────────────────────────────────────
Please support our development by citing
https://github.com/BrainLesion/panoptica#citation -- Thank you!
───────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Panoptic: Start Evaluation -- Got MatchedInstancePair, will evaluate instances -- evaluate took 2.918851613998413 seconds to execute.
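The decision_metric and decision_threshold control matching: here, a pair counts as a true positive only if its IoU is at least 0.5. Other metrics from the Metric enum can serve as the decision criterion; a sketch using Dice instead (assuming Metric.DSC exists, as the sq_dsc entries in the printed results below suggest):
# variant: accept matches by Dice score instead of IoU (Metric.DSC assumed available)
evaluator_dsc = Panoptic_Evaluator(
    expected_input=MatchedInstancePair,
    decision_metric=Metric.DSC,
    decision_threshold=0.5,
)
result_dsc, _ = evaluator_dsc.evaluate(sample)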
The results object allows access to individual metrics and provides helper methods for further processing.
# print all results
print(result)
+++ MATCHING +++
Number of instances in reference (num_ref_instances): 22
Number of instances in prediction (num_pred_instances): 22
True Positives (tp): 22
False Positives (fp): 0
False Negatives (fn): 0
Recognition Quality / F1-Score (rq): 1.0

+++ GLOBAL +++
Global Binary Dice (global_bin_dsc): 0.9744370224078394
Global Binary Centerline Dice (global_bin_cldsc): 0.9637064011802574

+++ INSTANCE +++
Segmentation Quality IoU (sq): 0.8328184295330796 +- 0.15186064004517466
Panoptic Quality IoU (pq): 0.8328184295330796
Segmentation Quality Dsc (sq_dsc): 0.900292616009954 +- 0.10253566174957332
Panoptic Quality Dsc (pq_dsc): 0.900292616009954
Segmentation Quality Assd (sq_assd): 0.250331887879225 +- 0.07696680402317076
# get specific metric, e.g. pq
pprint(f"{result.pq=}")
result.pq=0.8328184295330796
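Any key in the results dict below can be read as an attribute in the same way, for example:
# attribute names follow the keys shown by print(result)
pprint(f"{result.sq=}")
pprint(f"{result.rq=}")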
# get dict for further processing, e.g. for pandas
pprint("results dict: ", result.to_dict())
results dict:
{
    'num_ref_instances': 22,
    'num_pred_instances': 22,
    'tp': 22,
    'fp': 0,
    'fn': 0,
    'rq': 1.0,
    'global_bin_dsc': 0.9744370224078394,
    'global_bin_cldsc': 0.9637064011802574,
    'sq': 0.8328184295330796,
    'sq_std': 0.15186064004517466,
    'pq': 0.8328184295330796,
    'sq_dsc': 0.900292616009954,
    'sq_dsc_std': 0.10253566174957332,
    'pq_dsc': 0.900292616009954,
    'sq_assd': 0.250331887879225,
    'sq_assd_std': 0.07696680402317076
}
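The dict makes it easy to aggregate results across cases. A minimal sketch (assuming pandas is installed; the output file name is a placeholder):
import pandas as pd

# one row per evaluated case; columns are the metric names from to_dict()
df = pd.DataFrame([result.to_dict()])
df.to_csv("panoptica_results.csv", index=False)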