#!/usr/bin/env python
# coding: utf-8

# ## Install MMDetection
#
# #### Simple install (CUDA 12.1, torch 2.1.0)

# In[ ]:

get_ipython().run_line_magic('pip', 'install torch==2.1.0 torchvision==0.16.0 --index-url https://download.pytorch.org/whl/cu121')
get_ipython().run_line_magic('pip', 'install openmim pycocotools faster-coco-eval')
get_ipython().run_line_magic('pip', 'install mmcv==2.1.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.1/index.html')
get_ipython().system('python3 -m mim install mmdet')


# ## Download COCO VAL

# In[ ]:

get_ipython().system('wget -P COCO/DIR/ http://images.cocodataset.org/annotations/annotations_trainval2017.zip')
get_ipython().system('wget -P COCO/DIR/ http://images.cocodataset.org/zips/val2017.zip')


# ## Unzip COCO VAL

# In[ ]:

get_ipython().system('unzip -qq COCO/DIR/annotations_trainval2017.zip -d COCO/DIR/')
get_ipython().system('unzip -qq COCO/DIR/val2017.zip -d COCO/DIR/')


# ## Download model

# In[1]:

import mmdet
import mmengine
import os.path as osp

config_dir = osp.dirname(mmdet.__file__)
sub_config = "configs/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py"
config_file = osp.join(config_dir, ".mim", sub_config)
cfg = mmengine.Config.fromfile(config_file)

model_file = "https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco/rtmdet-ins_tiny_8xb32-300e_coco_20221130_151727-ec670f7e.pth"

print(f"{config_file=}")
print(f"{model_file=}")

get_ipython().system('mkdir -p -m 777 model')

# Dump the resolved config next to the checkpoint so both can be loaded from ./model.
cfg.dump(osp.join("model", osp.basename(config_file)))

get_ipython().system('wget -P model/ {model_file}')
get_ipython().system('ls -lah model')


# ## Validate

# In[2]:

from mmdet.apis import inference_detector, init_detector
from mmengine.registry import init_default_scope
from mmdet.datasets import CocoDataset
import tqdm
import os.path as osp
import os
import torch

# from coco_metric import CocoMetric
from mmdet.evaluation import CocoMetric
from mmdet.structures.mask import encode_mask_results

import pathlib
import copy
import time

from pycocotools.coco import COCO as pycocotools_COCO
from pycocotools.cocoeval import COCOeval as pycocotools_COCOeval

from faster_coco_eval import COCO as COCO_faster, COCOeval_faster

import pandas as pd
from IPython.display import display, Markdown


# In[3]:

init_default_scope("mmdet")


# In[4]:

import json

with open("./COCO/DIR/annotations/instances_val2017.json") as fd:
    instances_val2017 = json.load(fd)

image_id_for_eval = [image['id'] for image in instances_val2017['images']]
# image_id_for_eval = image_id_for_eval[:100]  # Uncomment to evaluate only the first 100 images

# Keep only the annotations and images that belong to the selected image ids.
annotations = [ann for ann in instances_val2017['annotations'] if ann['image_id'] in image_id_for_eval]
images = [image for image in instances_val2017['images'] if image['id'] in image_id_for_eval]

instances_val2017['annotations'] = annotations
instances_val2017['images'] = images

# Note: with the slice above commented out, this file contains the full val2017
# split despite the "_first_100" suffix.
with open("./COCO/DIR/annotations/instances_val2017_first_100.json", "w") as fd:
    json.dump(instances_val2017, fd)


# ## Init model

# In[5]:

model = init_detector(
    "./model/rtmdet-ins_tiny_8xb32-300e_coco.py",
    "./model/rtmdet-ins_tiny_8xb32-300e_coco_20221130_151727-ec670f7e.pth",
    device=("cuda" if torch.cuda.is_available() else "cpu"),
)


# ## Init dataset

# In[6]:

pipeline = [
    dict(type="LoadImageFromFile"),
    dict(type="mmdet.LoadAnnotations", with_bbox=True),
]

dataset = CocoDataset(
    data_root="./COCO/DIR/",
    ann_file="annotations/instances_val2017_first_100.json",
    data_prefix=dict(img="val2017/"),
    pipeline=pipeline,
)
len(dataset)


# In[7]:

metric = CocoMetric(metric=["bbox", "segm"])
metric.dataset_meta = model.dataset_meta
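

# Optional (sketch, not part of the original flow): a quick single-image smoke test
# using the same `inference_detector` call as the main loop below, to confirm the
# dumped config and downloaded checkpoint load and predict before processing
# the whole validation set.

# In[ ]:

_sample = dataset[0]
_smoke = inference_detector(model, _sample["img_path"])
print(f'{_sample["img_id"]=}, predictions: {len(_smoke.pred_instances)}')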
# In[8]:

_coco_api = COCO_faster(dataset.ann_file)
metric.cat_ids = _coco_api.get_cat_ids(cat_names=metric.dataset_meta["classes"])


# ## Process images

# In[9]:

images_path = pathlib.Path(dataset.data_prefix["img"])

# Remove per-image result files left over from previous runs.
files = list(images_path.rglob("*.segm.json"))
files += list(images_path.rglob("*.bbox.json"))

for file in tqdm.tqdm(files):
    os.remove(file.as_posix())


# In[10]:

max_images = len(dataset)

for i in tqdm.tqdm(range(max_images)):
    item = dataset[i]
    result = inference_detector(model, item["img_path"])

    # Move predictions to CPU so they can be serialized.
    for key in result.pred_instances.all_keys():
        result.pred_instances[key] = result.pred_instances[key].detach().cpu()

    dict_result = dict(
        result.pred_instances.to_dict(),
        **{"img_id": item["img_id"]}
    )

    # Encode instance masks as COCO RLE before dumping them.
    if "masks" in dict_result:
        dict_result["masks"] = encode_mask_results(
            dict_result["masks"].detach().cpu().numpy()
        )

    # Write per-image results next to the image as <name>.bbox.json / <name>.segm.json.
    metric.results2json(
        [dict_result], outfile_prefix=osp.splitext(item["img_path"])[0]
    )


# ## Convert results

# In[11]:

include_segm = "masks" in dict_result
print(f"{include_segm=}")


# In[12]:

dataset.data_prefix["img"]


# In[13]:

images_path = pathlib.Path(dataset.data_prefix["img"])

if include_segm:
    files = list(images_path.rglob("*.segm.json"))
else:
    files = list(images_path.rglob("*.bbox.json"))


# In[14]:

result_data = []

for file in tqdm.tqdm(files):
    result_data += COCO_faster.load_json(file)


# In[15]:

def load_faster_data(ann_file, result_data):
    cocoGt = COCO_faster(ann_file)
    cocoDt = cocoGt.loadRes(copy.deepcopy(result_data))
    return cocoGt, cocoDt


def process_faster(cocoGt, cocoDt, iouType):
    cocoEval = COCOeval_faster(cocoGt, cocoDt, iouType, print_function=print)

    ts = time.time()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    te = time.time()

    return te - ts


# In[16]:

def load_pycocotools_data(ann_file, result_data):
    cocoGt = pycocotools_COCO(ann_file)
    cocoDt = cocoGt.loadRes(copy.deepcopy(result_data))
    return cocoGt, cocoDt


def process_pycocotools(cocoGt, cocoDt, iouType):
    cocoEval = pycocotools_COCOeval(cocoGt, cocoDt, iouType)

    ts = time.time()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    te = time.time()

    return te - ts


# In[17]:

processors = [
    ["faster-coco-eval", load_faster_data, process_faster],
    ["pycocotools", load_pycocotools_data, process_pycocotools],
]


# ## Process eval

# In[18]:

result_table = {}

# Time both evaluation backends on the same result set for each metric type.
# The loop variable is named metric_type to avoid shadowing the CocoMetric instance above.
for metric_type in ["bbox", "segm"] if include_segm else ["bbox"]:
    if result_table.get(metric_type) is None:
        result_table[metric_type] = {}

    for _name, _load, _process in processors:
        if result_table[metric_type].get(_name) is None:
            result_table[metric_type][_name] = 0

        print(f"{metric_type=}; {_name=}")

        cocoGt, cocoDt = _load(dataset.ann_file, result_data)
        result_table[metric_type][_name] = _process(cocoGt, cocoDt, metric_type)
        print()

    print()


# ## Display results

# In[19]:

result_table


# In[20]:

df = pd.DataFrame(result_table).T.round(3)
df.index.name = "Type"
# "Profit" is the speed-up factor of faster-coco-eval over pycocotools.
df["Profit"] = (df["pycocotools"] / df["faster-coco-eval"]).round(3)
df


# In[21]:

print(df.to_markdown())


# In[22]:

display(Markdown(df.to_markdown()))


# In[38]:

# Keep only detections with a confidence score above 0.3 for the visual analysis below.
filtered_result_data = [ann for ann in result_data if ann.get("score", 0) > 0.3]
cocoGt, cocoDt = load_faster_data(dataset.ann_file, filtered_result_data)


# In[39]:

from faster_coco_eval.extra import Curves

cur = Curves(cocoGt, cocoDt, iou_tresh=0.5, iouType="bbox", useCats=False)

cur.plot_pre_rec()
cur.plot_f1_confidence()


# In[40]:

from faster_coco_eval.extra import PreviewResults

image_preview_count = 1

preview = PreviewResults(
    cocoGt, cocoDt, iouType="bbox", iou_tresh=0.5
)
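

# Optional sanity check (sketch): before rendering previews, count how many images
# still contain detections after the 0.3 score cut, using the filtered_result_data
# list built above. Purely informational.

# In[ ]:

_image_ids_with_dets = {ann["image_id"] for ann in filtered_result_data}
print(f"{len(_image_ids_with_dets)} / {len(cocoGt.imgs)} images have detections above the score threshold")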
# In[41]:

preview.display_tp_fp_fn(
    data_folder=dataset.data_prefix["img"],
    image_ids=list(cocoGt.imgs.keys())[10:10 + image_preview_count],
    display_gt=True,
)


# In[42]:

preview.display_matrix(normalize=True)
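

# Optional (sketch): re-run the bbox evaluation on the score-filtered detections,
# reusing the process_faster helper defined above, to see how the 0.3 confidence
# cut-off shifts the reported metrics relative to the unfiltered run.

# In[ ]:

elapsed = process_faster(cocoGt, cocoDt, "bbox")
print(f"filtered bbox evaluation took {elapsed:.3f}s")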