# --- Environment & data setup (IPython shell escapes) ---
# Install faster-coco-eval (accelerated drop-in for pycocotools COCO eval).
!pip3 install git+https://github.com/MiXaiLL76/faster_coco_eval
# Download COCO 2017 validation images and annotations into data/coco/.
!mkdir -pv data/coco/
!wget -P data/coco/ http://images.cocodataset.org/annotations/annotations_trainval2017.zip
!wget -P data/coco/ http://images.cocodataset.org/zips/val2017.zip
!unzip data/coco/annotations_trainval2017.zip -d data/coco/
!unzip data/coco/val2017.zip -d data/coco/
# Drop the archives once extracted to save disk space.
!rm -rf data/coco/*.zip
# Pretrained YOLOv3 (Darknet-53, 320px, 273 epochs) checkpoint from the
# OpenMMLab model zoo; saved under model/ for the test run below.
yolo3_model_path = "https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth"
!wget -P model {yolo3_model_path}
import os.path as osp
# Locations of the mmdetection config / checkpoint pair under test.
_BASE_CONFIG_DIR = "configs/"
CONFIG_FILE = "yolo/yolov3_d53_320_273e_coco.py"
CHECKPOINT_FILE = "model/yolov3_d53_320_273e_coco-421362b6.pth"
WORK_DIR = "."
# Set True to add a CPU flag to the test.py invocation below.
use_cpu = False
cfg_path = osp.join(_BASE_CONFIG_DIR, CONFIG_FILE)
print(f"{cfg_path=}")
# Captured notebook output of the print above:
cfg_path='configs/yolo/yolov3_d53_320_273e_coco.py'
# Extra CLI flags appended to the test.py command below.
# "--gpu-id -1" presumably makes mmdetection's test script run on CPU —
# TODO(review): confirm against test.py's argument parser.
# Fix: dropped the needless f-prefix from a literal with no placeholders
# (ruff F541).
_dop = ""
if use_cpu:
    _dop += " --gpu-id -1 "
# Run inference on COCO val2017 with the YOLOv3 checkpoint; --format-only
# skips metric computation, raw detections are dumped to yolo_result.pkl.
# Dataset paths are overridden via --cfg-options to match the data
# downloaded above.  (Continuation lines must stay contiguous.)
!python3 test.py \
{cfg_path} \
{CHECKPOINT_FILE} \
--format-only {_dop}\
--cfg-options data.test.ann_file=data/coco/annotations/instances_val2017.json \
data.test.img_prefix=data/coco/val2017 \
--out yolo_result.pkl
%%time
# Baseline: evaluate the dumped detections with the stock (pycocotools)
# COCO bbox evaluation.
!python3 eval_metric.py {cfg_path} yolo_result.pkl \
--eval bbox \
--cfg-options data.test.ann_file=data/coco/annotations/instances_val2017.json
# Captured notebook output — bbox mAP 0.279, ~26 s wall time:
loading annotations into memory... Done (t=0.28s) creating index... index created! Data uploaded for 1.057 sec. Evaluating bbox... Loading and preparing results... DONE (t=0.47s) creating index... index created! Running per image evaluation... Evaluate annotation type *bbox* DONE (t=16.69s). Accumulating evaluation results... DONE (t=3.02s). Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.279 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 0.491 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.283 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.105 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.301 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.438 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.395 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.395 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.395 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.185 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.423 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.574 OrderedDict([('bbox_mAP', 0.279), ('bbox_mAP_50', 0.491), ('bbox_mAP_75', 0.283), ('bbox_mAP_s', 0.105), ('bbox_mAP_m', 0.301), ('bbox_mAP_l', 0.438), ('bbox_mAP_copypaste', '0.279 0.491 0.283 0.105 0.301 0.438')]) Data validate for 22.854 sec. CPU times: user 160 ms, sys: 22.4 ms, total: 183 ms Wall time: 26 s
%%time
# Same bbox evaluation, but with data.test.type='FasterCocoDataset' so the
# faster-coco-eval backend is used.  Output below shows identical metrics
# (bbox mAP 0.279) at ~11.8 s wall vs ~26 s for the baseline cell.
!python3 eval_metric.py {cfg_path} yolo_result.pkl \
--eval bbox \
--cfg-options data.test.ann_file=data/coco/annotations/instances_val2017.json data.test.type='FasterCocoDataset'
# Captured notebook output:
loading annotations into memory... Done (t=0.28s) creating index... index created! Data uploaded for 1.065 sec. Evaluating bbox... Loading and preparing results... DONE (t=0.47s) creating index... index created! OrderedDict([('bbox_mAP', 0.279), ('bbox_mAP_50', 0.491), ('bbox_mAP_75', 0.283), ('bbox_mAP_s', 0.105), ('bbox_mAP_m', 0.301), ('bbox_mAP_l', 0.438), ('bbox_mAP_copypaste', '0.279 0.491 0.283 0.105 0.301 0.438')]) Data validate for 8.714 sec. CPU times: user 91.8 ms, sys: 5.82 ms, total: 97.6 ms Wall time: 11.8 s
# Second model: YOLACT (ResNet-50) instance-segmentation checkpoint from
# the OpenMMLab model zoo — used below to time segm evaluation.
# NOTE(review): variable is spelled "yoloact" (sic); kept as-is because
# later cells reference the same spelling.
yoloact_model_path = "https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth"
!wget -P model {yoloact_model_path}
# Point the config/checkpoint pair at YOLACT and rebuild cfg_path.
CONFIG_FILE = "yolact/yolact_r50_1x8_coco.py"
CHECKPOINT_FILE = "model/yolact_r50_1x8_coco_20200908-f38d58df.pth"
cfg_path = osp.join(_BASE_CONFIG_DIR, CONFIG_FILE)
print(f"{cfg_path=}")
# Captured notebook output of the print above:
cfg_path='configs/yolact/yolact_r50_1x8_coco.py'
# Extra CLI flags for the YOLACT test.py run (same pattern as the YOLOv3
# cell): "--gpu-id -1" presumably selects CPU inference —
# TODO(review): confirm against test.py's argument parser.
# Fix: dropped the needless f-prefix from a literal with no placeholders
# (ruff F541).
_dop = ""
if use_cpu:
    _dop += " --gpu-id -1 "
# Run inference on COCO val2017 with the YOLACT checkpoint; --format-only
# skips metric computation, raw results go to yoloact_result.pkl.
# (Continuation lines must stay contiguous.)
!python3 test.py \
{cfg_path} \
{CHECKPOINT_FILE} \
--format-only {_dop}\
--cfg-options data.test.ann_file=data/coco/annotations/instances_val2017.json \
data.test.img_prefix=data/coco/val2017 \
--out yoloact_result.pkl
%%time
# Baseline: evaluate YOLACT masks with the stock (pycocotools) COCO segm
# evaluation.
!python3 eval_metric.py {cfg_path} yoloact_result.pkl \
--eval segm \
--cfg-options data.test.ann_file=data/coco/annotations/instances_val2017.json
# Captured notebook output — segm mAP 0.290, ~39.6 s wall time (includes
# mmdet/pycocotools deprecation warnings from the environment):
loading annotations into memory... Done (t=0.28s) creating index... index created! Data uploaded for 2.235 sec. Evaluating segm... /opt/conda/lib/python3.9/site-packages/mmdet/datasets/coco.py:470: UserWarning: The key "bbox" is deleted for more accurate mask AP of small/medium/large instances since v2.12.0. This does not change the overall mAP calculation. warnings.warn( Loading and preparing results... DONE (t=1.21s) creating index... index created! Running per image evaluation... Evaluate annotation type *segm* DONE (t=22.02s). Accumulating evaluation results... /opt/conda/lib/python3.9/site-packages/pycocotools/cocoeval.py:378: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) DONE (t=3.49s). 
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.290 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 0.486 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.296 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.100 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.315 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.465 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.392 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.392 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.392 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.176 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.439 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.568 OrderedDict([('segm_mAP', 0.29), ('segm_mAP_50', 0.486), ('segm_mAP_75', 0.296), ('segm_mAP_s', 0.1), ('segm_mAP_m', 0.315), ('segm_mAP_l', 0.465), ('segm_mAP_copypaste', '0.290 0.486 0.296 0.100 0.315 0.465')]) Data validate for 35.356 sec. CPU times: user 235 ms, sys: 22 ms, total: 257 ms Wall time: 39.6 s
%%time
# Same segm evaluation with data.test.type='FasterCocoDataset'
# (faster-coco-eval backend).  Output below shows identical metrics
# (segm mAP 0.290) at ~22.7 s wall vs ~39.6 s for the baseline cell.
!python3 eval_metric.py {cfg_path} yoloact_result.pkl \
--eval segm \
--cfg-options data.test.ann_file=data/coco/annotations/instances_val2017.json data.test.type='FasterCocoDataset'
# Captured notebook output:
loading annotations into memory... Done (t=0.28s) creating index... index created! Data uploaded for 2.305 sec. Evaluating segm... /home/rdl/storage/eval/coco_fast.py:105: UserWarning: The key "bbox" is deleted for more accurate mask AP of small/medium/large instances since v2.12.0. This does not change the overall mAP calculation. warnings.warn( Loading and preparing results... DONE (t=1.25s) creating index... index created! OrderedDict([('segm_mAP', 0.29), ('segm_mAP_50', 0.486), ('segm_mAP_75', 0.296), ('segm_mAP_s', 0.1), ('segm_mAP_m', 0.315), ('segm_mAP_l', 0.465), ('segm_mAP_copypaste', '0.290 0.486 0.296 0.100 0.315 0.465')]) Data validate for 18.403 sec. CPU times: user 103 ms, sys: 52.8 ms, total: 156 ms Wall time: 22.7 s