%pip install ultralytics --quiet
Note: you may need to restart the kernel to use updated packages.
import time
from ultralytics.models.yolo.detect.val import DetectionValidator, check_requirements, LOGGER, Path
class BaseCustomDetectionValidator(DetectionValidator):
    """DetectionValidator that runs both faster_coco_eval and the reference pycocotools/lvis
    evaluators on the same predictions, printing wall-clock timings for comparison."""

    def eval_json_orig(self, stats):
        """Evaluate YOLO predictions in JSON format with pycocotools (COCO) or lvis (LVIS).

        Args:
            stats (dict): Metric dict to update in place with mAP50-95 and mAP50.

        Returns:
            dict: The updated stats dict (unchanged if evaluation was skipped or failed).
        """
        if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
            pred_json = self.save_dir / "predictions.json"  # predictions
            anno_json = (
                self.data["path"]
                / "annotations"
                / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
            )  # annotations
            pkg = "pycocotools" if self.is_coco else "lvis"
            LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                for x in pred_json, anno_json:
                    assert x.is_file(), f"{x} file not found"
                check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
                if self.is_coco:
                    from pycocotools.coco import COCO  # noqa
                    from pycocotools.cocoeval import COCOeval  # noqa

                    anno = COCO(str(anno_json))  # init annotations api
                    pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                    val = COCOeval(anno, pred, "bbox")
                else:
                    from lvis import LVIS, LVISEval

                    anno = LVIS(str(anno_json))  # init annotations api
                    pred = anno._load_json(str(pred_json))  # init predictions api (must pass string, not Path)
                    val = LVISEval(anno, pred, "bbox")
                val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
                val.evaluate()
                val.accumulate()
                val.summarize()
                if self.is_lvis:
                    val.print_results()  # explicitly call print_results
                # Update mAP50-95 and mAP50. For LVIS, results["AP"] is the 0.50:0.95 metric and
                # results["AP50"] the IoU=0.50 one, so they must map to keys[-1]/keys[-2] in that
                # order (the original code had the two swapped).
                stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = (
                    val.stats[:2] if self.is_coco else [val.results["AP"], val.results["AP50"]]
                )
            except Exception as e:
                LOGGER.warning(f"{pkg} unable to run: {e}")
        return stats

    def eval_json_faster(self, stats):
        """Evaluate YOLO predictions in JSON format with faster_coco_eval.

        Args:
            stats (dict): Metric dict to update in place with mAP50-95 and mAP50.

        Returns:
            dict: The updated stats dict (unchanged if evaluation was skipped or failed).
        """
        if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
            pred_json = self.save_dir / "predictions.json"  # predictions
            anno_json = (
                self.data["path"]
                / "annotations"
                / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
            )  # annotations
            pkg = "faster_coco_eval"
            LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                for x in pred_json, anno_json:
                    assert x.is_file(), f"{x} file not found"
                from faster_coco_eval import COCO, COCOeval_faster

                anno = COCO(str(anno_json))  # init annotations api
                # COCOeval_faster expects a results COCO object, so use loadRes for both dataset
                # styles (the original LVIS branch passed anno._load_json's raw dict).
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                if self.is_coco:
                    val = COCOeval_faster(anno, pred, "bbox", print_function=print)
                else:
                    # lvis_style reproduces the LVIS evaluation protocol, which caps
                    # detections at 300 per image.
                    val = COCOeval_faster(anno, pred, "bbox", lvis_style=True, print_function=print)
                    val.params.maxDets = [300]
                val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
                val.evaluate()
                val.accumulate()
                val.summarize()
                # update mAP50-95 and mAP50 (COCOeval_faster.stats follows pycocotools ordering)
                stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = val.stats[:2]
            except Exception as e:
                LOGGER.warning(f"{pkg} unable to run: {e}")
        return stats

    def eval_json(self, stats):
        """Run both evaluators on the same predictions and print their wall-clock timings.

        The faster_coco_eval result is computed first, then overwritten by the reference
        evaluator so the returned stats match the original pipeline.
        """
        tic_faster = time.time()
        stats = self.eval_json_faster(stats)  # keep the returned dict instead of discarding it
        print(f"Faster eval took {time.time() - tic_faster:.2f}s")
        tic_orig = time.time()
        stats = self.eval_json_orig(stats)
        print(f"Original eval took {time.time() - tic_orig:.2f}s")
        return stats
!rm -rf runs/
# Validate the pretrained detection checkpoint against the val-only COCO config.
args = {"model": "yolov8n.pt", "data": "./coco_val_only.yaml"}
validator = BaseCustomDetectionValidator(args=args)
validator()
Ultralytics YOLOv8.1.47 🚀 Python-3.10.12 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3080 Ti, 12288MiB) YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs Dataset 'coco_val_only.yaml' images not found ⚠️, missing path '/home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/val2017.txt' Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip to '/home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco2017labels-segments.zip'...
/home/mixaill76/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:456: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:80.) return F.conv2d(input, weight, bias, self.stride, 100%|██████████| 169M/169M [00:06<00:00, 25.8MB/s] Unzipping /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco2017labels-segments.zip to /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco...: 100%|██████████| 122232/122232 [00:07<00:00, 16920.33file/s]
Downloading http://images.cocodataset.org/zips/val2017.zip to '/home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/images/val2017.zip'...
Dataset download success ✅ (100.7s), saved to /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets
val: Scanning /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/labels/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100%|██████████| 5000/5000 [00:05<00:00, 864.61it/s]
val: New cache created: /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/labels/val2017.cache
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 313/313 [00:31<00:00, 9.86it/s]
all 5000 36335 0.629 0.476 0.521 0.37
person 5000 10777 0.751 0.678 0.745 0.515
bicycle 5000 314 0.694 0.411 0.466 0.269
car 5000 1918 0.656 0.527 0.566 0.364
motorcycle 5000 367 0.71 0.573 0.654 0.412
airplane 5000 143 0.755 0.776 0.845 0.654
bus 5000 283 0.73 0.664 0.739 0.621
train 5000 190 0.795 0.774 0.833 0.648
truck 5000 414 0.519 0.384 0.45 0.301
boat 5000 424 0.562 0.297 0.373 0.209
traffic light 5000 634 0.641 0.352 0.415 0.213
fire hydrant 5000 101 0.859 0.693 0.78 0.627
stop sign 5000 75 0.679 0.627 0.676 0.615
parking meter 5000 60 0.686 0.511 0.578 0.449
bench 5000 411 0.553 0.275 0.297 0.197
bird 5000 427 0.662 0.365 0.425 0.28
cat 5000 202 0.766 0.832 0.847 0.648
dog 5000 218 0.684 0.693 0.727 0.587
horse 5000 272 0.687 0.658 0.69 0.521
sheep 5000 354 0.616 0.669 0.669 0.466
cow 5000 372 0.714 0.604 0.674 0.48
elephant 5000 252 0.698 0.843 0.815 0.628
bear 5000 71 0.816 0.749 0.833 0.673
zebra 5000 266 0.802 0.807 0.879 0.661
giraffe 5000 232 0.857 0.836 0.884 0.686
backpack 5000 371 0.493 0.164 0.2 0.105
umbrella 5000 407 0.61 0.521 0.538 0.359
handbag 5000 540 0.474 0.122 0.161 0.0815
tie 5000 252 0.636 0.377 0.429 0.267
suitcase 5000 299 0.558 0.425 0.488 0.334
frisbee 5000 115 0.727 0.757 0.763 0.58
skis 5000 241 0.632 0.34 0.377 0.194
snowboard 5000 69 0.534 0.348 0.381 0.267
sports ball 5000 260 0.702 0.442 0.481 0.331
kite 5000 327 0.612 0.526 0.556 0.379
baseball bat 5000 145 0.555 0.372 0.411 0.214
baseball glove 5000 148 0.649 0.486 0.516 0.304
skateboard 5000 179 0.659 0.592 0.645 0.456
surfboard 5000 267 0.599 0.476 0.5 0.312
tennis racket 5000 225 0.676 0.596 0.661 0.403
bottle 5000 1013 0.603 0.382 0.454 0.297
wine glass 5000 341 0.667 0.328 0.407 0.263
cup 5000 895 0.571 0.437 0.485 0.346
fork 5000 215 0.596 0.312 0.375 0.257
knife 5000 325 0.448 0.16 0.166 0.0963
spoon 5000 253 0.437 0.129 0.162 0.0973
bowl 5000 623 0.586 0.485 0.526 0.393
banana 5000 370 0.554 0.319 0.374 0.228
apple 5000 236 0.427 0.231 0.221 0.151
sandwich 5000 177 0.563 0.467 0.475 0.359
orange 5000 285 0.472 0.421 0.361 0.274
broccoli 5000 312 0.507 0.359 0.367 0.21
carrot 5000 365 0.458 0.285 0.307 0.192
hot dog 5000 125 0.718 0.406 0.489 0.36
pizza 5000 284 0.655 0.616 0.658 0.502
donut 5000 328 0.611 0.491 0.516 0.413
cake 5000 310 0.559 0.406 0.45 0.3
chair 5000 1771 0.578 0.344 0.404 0.259
couch 5000 261 0.612 0.567 0.588 0.429
potted plant 5000 342 0.508 0.374 0.377 0.223
bed 5000 163 0.555 0.558 0.6 0.443
dining table 5000 695 0.524 0.43 0.428 0.287
toilet 5000 179 0.73 0.725 0.78 0.645
tv 5000 288 0.738 0.628 0.724 0.551
laptop 5000 231 0.69 0.662 0.699 0.578
mouse 5000 106 0.662 0.647 0.704 0.522
remote 5000 283 0.427 0.212 0.284 0.165
keyboard 5000 153 0.592 0.569 0.65 0.49
cell phone 5000 262 0.545 0.37 0.406 0.275
microwave 5000 55 0.661 0.564 0.624 0.499
oven 5000 143 0.643 0.497 0.54 0.361
toaster 5000 9 0.593 0.222 0.433 0.311
sink 5000 225 0.582 0.452 0.504 0.327
refrigerator 5000 126 0.684 0.595 0.659 0.506
book 5000 1129 0.458 0.108 0.191 0.0946
clock 5000 267 0.727 0.61 0.672 0.459
vase 5000 274 0.574 0.474 0.471 0.33
scissors 5000 36 0.74 0.333 0.342 0.277
teddy bear 5000 190 0.64 0.574 0.605 0.413
hair drier 5000 11 1 0 0.00606 0.00426
toothbrush 5000 57 0.434 0.211 0.218 0.137
Speed: 0.1ms preprocess, 1.2ms inference, 0.0ms loss, 0.9ms postprocess per image
Saving runs/detect/train/predictions.json...
Evaluating faster_coco_eval mAP using runs/detect/train/predictions.json and /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/annotations/instances_val2017.json...
Evaluate annotation type *bbox*
COCOeval_opt.evaluate() finished...
DONE (t=5.09s).
Accumulating evaluation results...
COCOeval_opt.accumulate() finished...
DONE (t=0.00s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.373
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.526
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.405
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.187
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.410
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.533
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.320
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.536
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.592
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.362
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.657
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.772
Average Recall (AR) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.814
Average Recall (AR) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.644
Faster eval took 7.34s
Evaluating pycocotools mAP using runs/detect/train/predictions.json and /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/annotations/instances_val2017.json...
loading annotations into memory...
Done (t=0.16s)
creating index...
index created!
Loading and preparing results...
DONE (t=0.96s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=25.35s).
Accumulating evaluation results...
DONE (t=6.93s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.373
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.526
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.405
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.187
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.410
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.533
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.320
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.536
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.592
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.362
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.657
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.772
Original eval took 34.35s
Results saved to runs/detect/train
{'metrics/precision(B)': 0.6292564172491455, 'metrics/recall(B)': 0.47631066232459646, 'metrics/mAP50(B)': 0.5255884728465783, 'metrics/mAP50-95(B)': 0.3731985247705999, 'fitness': 0.38551831525427444}
import time
from ultralytics.models.yolo.segment.val import SegmentationValidator, check_requirements, LOGGER, Path
class BaseCustomSegmentationValidator(SegmentationValidator):
    """SegmentationValidator that runs both faster_coco_eval and the reference pycocotools
    evaluator (bbox + segm) on the same predictions, printing wall-clock timings."""

    def eval_json_orig(self, stats):
        """Return COCO-style bbox and mask metrics computed with pycocotools.

        Args:
            stats (dict): Metric dict to update in place with Box/Mask mAP50-95 and mAP50.

        Returns:
            dict: The updated stats dict (unchanged if evaluation was skipped or failed).
        """
        if self.args.save_json and self.is_coco and len(self.jdict):
            anno_json = self.data["path"] / "annotations/instances_val2017.json"  # annotations
            pred_json = self.save_dir / "predictions.json"  # predictions
            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                check_requirements("pycocotools>=2.0.6")
                from pycocotools.coco import COCO  # noqa
                from pycocotools.cocoeval import COCOeval  # noqa

                for x in anno_json, pred_json:
                    assert x.is_file(), f"{x} file not found"
                anno = COCO(str(anno_json))  # init annotations api
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm")]):
                    if self.is_coco:
                        eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                    eval.evaluate()
                    eval.accumulate()
                    eval.summarize()
                    # metrics.keys interleaves Box then Mask entries; idx picks the
                    # mAP50 / mAP50-95 slots for the current annotation type.
                    idx = i * 4 + 2
                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
                        :2
                    ]  # update mAP50-95 and mAP50
            except Exception as e:
                LOGGER.warning(f"pycocotools unable to run: {e}")
        return stats

    def eval_json_faster(self, stats):
        """Return COCO-style bbox and mask metrics computed with faster_coco_eval.

        Args:
            stats (dict): Metric dict to update in place with Box/Mask mAP50-95 and mAP50.

        Returns:
            dict: The updated stats dict (unchanged if evaluation was skipped or failed).
        """
        if self.args.save_json and self.is_coco and len(self.jdict):
            anno_json = self.data["path"] / "annotations/instances_val2017.json"  # annotations
            pred_json = self.save_dir / "predictions.json"  # predictions
            # Log the evaluator actually in use (the original message said "pycocotools").
            LOGGER.info(f"\nEvaluating faster_coco_eval mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                from faster_coco_eval import COCO, COCOeval_faster

                for x in anno_json, pred_json:
                    assert x.is_file(), f"{x} file not found"
                anno = COCO(str(anno_json))  # init annotations api
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                evals = [
                    COCOeval_faster(anno, pred, "bbox", print_function=print),
                    COCOeval_faster(anno, pred, "segm", print_function=print),
                ]
                for i, eval in enumerate(evals):
                    if self.is_coco:
                        eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                    eval.evaluate()
                    eval.accumulate()
                    eval.summarize()
                    # Same key layout as eval_json_orig: Box entries then Mask entries.
                    idx = i * 4 + 2
                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
                        :2
                    ]  # update mAP50-95 and mAP50
            except Exception as e:
                LOGGER.warning(f"faster_coco_eval unable to run: {e}")
        return stats

    def eval_json(self, stats):
        """Run both evaluators on the same predictions and print their wall-clock timings.

        The faster_coco_eval result is computed first, then overwritten by the reference
        evaluator so the returned stats match the original pipeline.
        """
        tic_faster = time.time()
        stats = self.eval_json_faster(stats)  # keep the returned dict instead of discarding it
        print(f"Faster eval took {time.time() - tic_faster:.2f}s")
        tic_orig = time.time()
        stats = self.eval_json_orig(stats)
        print(f"Original eval took {time.time() - tic_orig:.2f}s")
        return stats
!rm -rf runs/
# Validate the pretrained segmentation checkpoint against the val-only COCO config.
args = {"model": "yolov8n-seg.pt", "data": "./coco_val_only.yaml"}
validator = BaseCustomSegmentationValidator(args=args)
validator()
Ultralytics YOLOv8.1.47 🚀 Python-3.10.12 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3080 Ti, 12288MiB)
/home/mixaill76/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:456: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:80.) return F.conv2d(input, weight, bias, self.stride,
YOLOv8n-seg summary (fused): 195 layers, 3404320 parameters, 0 gradients, 12.6 GFLOPs
val: Scanning /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/labels/val2017.cache... 4952 images, 48 backgrounds, 0 corrupt: 100%|██████████| 5000/5000 [00:00<?, ?it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 313/313 [10:36<00:00, 2.03s/it]
all 5000 36335 0.62 0.48 0.516 0.363 0.617 0.456 0.489 0.305
person 5000 10777 0.742 0.68 0.743 0.514 0.74 0.654 0.711 0.4
bicycle 5000 314 0.666 0.366 0.438 0.243 0.579 0.315 0.346 0.132
car 5000 1918 0.616 0.508 0.556 0.356 0.626 0.486 0.531 0.296
motorcycle 5000 367 0.709 0.569 0.668 0.405 0.686 0.523 0.589 0.287
airplane 5000 143 0.722 0.78 0.83 0.643 0.725 0.762 0.8 0.493
bus 5000 283 0.768 0.7 0.749 0.612 0.762 0.679 0.731 0.556
train 5000 190 0.791 0.775 0.839 0.629 0.785 0.758 0.817 0.6
truck 5000 414 0.52 0.403 0.446 0.302 0.521 0.372 0.416 0.252
boat 5000 424 0.544 0.343 0.373 0.201 0.525 0.309 0.333 0.148
traffic light 5000 634 0.6 0.341 0.385 0.198 0.623 0.325 0.387 0.183
fire hydrant 5000 101 0.856 0.707 0.784 0.614 0.866 0.703 0.783 0.57
stop sign 5000 75 0.741 0.648 0.681 0.616 0.747 0.63 0.681 0.578
parking meter 5000 60 0.793 0.512 0.603 0.447 0.767 0.483 0.579 0.418
bench 5000 411 0.54 0.27 0.304 0.2 0.522 0.243 0.267 0.135
bird 5000 427 0.581 0.365 0.41 0.266 0.575 0.349 0.39 0.218
cat 5000 202 0.778 0.817 0.841 0.639 0.803 0.807 0.853 0.626
dog 5000 218 0.661 0.672 0.707 0.565 0.681 0.665 0.703 0.524
horse 5000 272 0.694 0.652 0.711 0.534 0.695 0.621 0.668 0.37
sheep 5000 354 0.577 0.658 0.657 0.456 0.585 0.638 0.628 0.363
cow 5000 372 0.679 0.608 0.676 0.479 0.646 0.556 0.63 0.388
elephant 5000 252 0.721 0.849 0.824 0.628 0.738 0.849 0.831 0.554
bear 5000 71 0.751 0.803 0.82 0.658 0.732 0.771 0.8 0.632
zebra 5000 266 0.785 0.823 0.883 0.663 0.795 0.816 0.869 0.55
giraffe 5000 232 0.841 0.843 0.89 0.697 0.846 0.819 0.862 0.541
backpack 5000 371 0.419 0.167 0.193 0.0996 0.462 0.162 0.176 0.0848
umbrella 5000 407 0.618 0.518 0.554 0.368 0.681 0.525 0.577 0.379
handbag 5000 540 0.45 0.13 0.17 0.0916 0.497 0.126 0.167 0.0782
tie 5000 252 0.641 0.39 0.443 0.272 0.677 0.383 0.436 0.24
suitcase 5000 299 0.619 0.421 0.477 0.326 0.604 0.381 0.448 0.29
frisbee 5000 115 0.804 0.696 0.778 0.599 0.808 0.694 0.778 0.537
skis 5000 241 0.534 0.373 0.367 0.192 0.377 0.232 0.21 0.0431
snowboard 5000 69 0.544 0.415 0.391 0.276 0.52 0.362 0.347 0.167
sports ball 5000 260 0.657 0.462 0.476 0.329 0.657 0.442 0.462 0.278
kite 5000 327 0.589 0.538 0.575 0.382 0.58 0.489 0.534 0.276
baseball bat 5000 145 0.54 0.379 0.368 0.202 0.593 0.392 0.389 0.172
baseball glove 5000 148 0.61 0.473 0.507 0.312 0.64 0.48 0.511 0.297
skateboard 5000 179 0.661 0.654 0.659 0.457 0.626 0.592 0.591 0.27
surfboard 5000 267 0.597 0.513 0.52 0.318 0.618 0.491 0.493 0.241
tennis racket 5000 225 0.664 0.609 0.654 0.386 0.7 0.621 0.658 0.418
bottle 5000 1013 0.614 0.405 0.447 0.292 0.615 0.383 0.426 0.254
wine glass 5000 341 0.697 0.361 0.417 0.273 0.697 0.337 0.377 0.207
cup 5000 895 0.584 0.437 0.473 0.339 0.587 0.417 0.456 0.309
fork 5000 215 0.547 0.326 0.364 0.239 0.498 0.265 0.257 0.111
knife 5000 325 0.456 0.163 0.187 0.111 0.41 0.138 0.148 0.0749
spoon 5000 253 0.391 0.126 0.15 0.0923 0.41 0.119 0.139 0.0565
bowl 5000 623 0.596 0.495 0.527 0.397 0.561 0.444 0.467 0.275
banana 5000 370 0.511 0.314 0.344 0.215 0.487 0.287 0.3 0.162
apple 5000 236 0.396 0.216 0.21 0.149 0.393 0.199 0.196 0.131
sandwich 5000 177 0.542 0.458 0.456 0.345 0.536 0.429 0.42 0.313
orange 5000 285 0.492 0.404 0.372 0.291 0.497 0.375 0.357 0.255
broccoli 5000 312 0.518 0.365 0.368 0.209 0.555 0.355 0.381 0.183
carrot 5000 365 0.447 0.297 0.287 0.183 0.441 0.271 0.272 0.149
hot dog 5000 125 0.575 0.416 0.418 0.285 0.5 0.344 0.339 0.205
pizza 5000 284 0.693 0.623 0.664 0.5 0.691 0.613 0.635 0.455
donut 5000 328 0.538 0.497 0.506 0.399 0.552 0.497 0.497 0.366
cake 5000 310 0.481 0.358 0.422 0.278 0.507 0.355 0.414 0.262
chair 5000 1771 0.58 0.344 0.398 0.246 0.554 0.298 0.33 0.148
couch 5000 261 0.57 0.536 0.579 0.433 0.552 0.49 0.503 0.334
potted plant 5000 342 0.53 0.383 0.376 0.217 0.496 0.333 0.305 0.132
bed 5000 163 0.575 0.54 0.583 0.389 0.553 0.485 0.484 0.294
dining table 5000 695 0.51 0.435 0.414 0.277 0.415 0.327 0.276 0.103
toilet 5000 179 0.673 0.726 0.754 0.62 0.698 0.732 0.773 0.585
tv 5000 288 0.667 0.646 0.69 0.531 0.68 0.632 0.681 0.49
laptop 5000 231 0.603 0.641 0.667 0.548 0.588 0.602 0.604 0.381
mouse 5000 106 0.642 0.698 0.705 0.517 0.634 0.671 0.684 0.487
remote 5000 283 0.428 0.216 0.263 0.156 0.474 0.216 0.266 0.133
keyboard 5000 153 0.622 0.612 0.648 0.466 0.648 0.601 0.659 0.445
cell phone 5000 262 0.489 0.374 0.376 0.249 0.494 0.351 0.36 0.225
microwave 5000 55 0.58 0.545 0.654 0.512 0.585 0.545 0.649 0.48
oven 5000 143 0.619 0.483 0.531 0.344 0.6 0.434 0.474 0.28
toaster 5000 9 1 0.22 0.476 0.272 1 0.216 0.476 0.346
sink 5000 225 0.596 0.498 0.508 0.34 0.617 0.498 0.515 0.304
refrigerator 5000 126 0.712 0.609 0.665 0.526 0.7 0.593 0.668 0.493
book 5000 1129 0.45 0.106 0.174 0.0828 0.351 0.0737 0.112 0.0442
clock 5000 267 0.695 0.633 0.654 0.452 0.713 0.629 0.667 0.433
vase 5000 274 0.562 0.436 0.438 0.293 0.544 0.392 0.414 0.259
scissors 5000 36 0.607 0.25 0.295 0.225 0.625 0.25 0.293 0.159
teddy bear 5000 190 0.684 0.553 0.618 0.442 0.699 0.542 0.597 0.399
hair drier 5000 11 1 0 0.00352 0.00197 1 0 0.0164 0.00406
toothbrush 5000 57 0.503 0.211 0.232 0.141 0.585 0.228 0.234 0.0922
Speed: 0.2ms preprocess, 3.2ms inference, 0.0ms loss, 1.2ms postprocess per image
Saving runs/detect/train/predictions.json...
Evaluating pycocotools mAP using runs/detect/train/predictions.json and /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/annotations/instances_val2017.json...
Evaluate annotation type *bbox*
COCOeval_opt.evaluate() finished...
DONE (t=6.80s).
Accumulating evaluation results...
COCOeval_opt.accumulate() finished...
DONE (t=0.00s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.366
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.521
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.395
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.179
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.404
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.522
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.313
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.532
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.587
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.366
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.649
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.770
Average Recall (AR) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.808
Average Recall (AR) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.642
Evaluate annotation type *segm*
COCOeval_opt.evaluate() finished...
DONE (t=12.07s).
Accumulating evaluation results...
COCOeval_opt.accumulate() finished...
DONE (t=0.00s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.303
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.490
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.319
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.121
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.334
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.461
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.271
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.437
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.472
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.252
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.530
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.674
Average Recall (AR) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.751
Average Recall (AR) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.495
Faster eval took 26.51s
Evaluating pycocotools mAP using runs/detect/train/predictions.json and /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco/annotations/instances_val2017.json...
loading annotations into memory...
Done (t=0.85s)
creating index...
index created!
Done (t=0.02s)
Loading and preparing results...
DONE (t=3.47s)
creating index...
index created!
Done (t=0.18s)
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=30.61s).
Accumulating evaluation results...
DONE (t=8.41s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.366
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.521
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.395
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.179
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.404
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.522
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.313
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.532
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.587
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.366
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.649
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.770
Running per image evaluation...
Evaluate annotation type *segm*
DONE (t=33.81s).
Accumulating evaluation results...
DONE (t=7.48s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.303
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.490
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.319
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.121
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.334
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.461
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.271
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.437
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.472
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.252
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.530
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.674
Original eval took 86.79s
Results saved to runs/detect/train
{'metrics/precision(B)': 0.6199420382660138, 'metrics/recall(B)': 0.47978746163511454, 'metrics/mAP50(B)': 0.5207700497188987, 'metrics/mAP50-95(B)': 0.36648003262764034, 'metrics/precision(M)': 0.6168869715016637, 'metrics/recall(M)': 0.45590238258381943, 'metrics/mAP50(M)': 0.4896069455217768, 'metrics/mAP50-95(M)': 0.30289961138089894, 'fitness': 0.7018358719301855}
import time
from ultralytics.models.yolo.pose.val import PoseValidator, check_requirements, LOGGER, Path
class BaseCustomPoseValidator(PoseValidator):
def eval_json_orig(self, stats):
"""Evaluates object detection model using COCO JSON format."""
if self.args.save_json and self.is_coco and len(self.jdict):
anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json" # annotations
pred_json = self.save_dir / "predictions.json" # predictions
LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements("pycocotools>=2.0.6")
from pycocotools.coco import COCO # noqa
from pycocotools.cocoeval import COCOeval # noqa
for x in anno_json, pred_json:
assert x.is_file(), f"{x} file not found"
anno = COCO(str(anno_json)) # init annotations api
pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path)
for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "keypoints")]):
if self.is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval
eval.evaluate()
eval.accumulate()
eval.summarize()
idx = i * 4 + 2
stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
:2
] # update mAP50-95 and mAP50
except Exception as e:
LOGGER.warning(f"pycocotools unable to run: {e}")
return stats
def eval_json_faster(self, stats):
"""Evaluates object detection model using COCO JSON format."""
if self.args.save_json and self.is_coco and len(self.jdict):
anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json" # annotations
pred_json = self.save_dir / "predictions.json" # predictions
LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from faster_coco_eval import COCO, COCOeval_faster
for x in anno_json, pred_json:
assert x.is_file(), f"{x} file not found"
anno = COCO(str(anno_json)) # init annotations api
pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path)
for i, eval in enumerate([COCOeval_faster(anno, pred, "bbox", print_function=print), COCOeval_faster(anno, pred, "keypoints", print_function=print)]):
if self.is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval
eval.evaluate()
eval.accumulate()
eval.summarize()
idx = i * 4 + 2
stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
:2
] # update mAP50-95 and mAP50
except Exception as e:
LOGGER.warning(f"pycocotools unable to run: {e}")
return stats
def eval_json(self, stats):
    """Benchmark both COCO evaluation backends on the same predictions.

    Times ``eval_json_faster`` first (its result is discarded for the
    comparison; it mutates *stats* in place anyway), then times
    ``eval_json_orig`` and returns its result.

    Args:
        stats (dict): metrics dict passed to both evaluators.

    Returns:
        dict: stats as returned by the original pycocotools evaluation.
    """
    start = time.time()
    self.eval_json_faster(stats)
    print(f"Faster eval took {time.time() - start:.2f}s")

    start = time.time()
    stats = self.eval_json_orig(stats)
    print(f"Original eval took {time.time() - start:.2f}s")
    return stats
!rm -rf runs/
# Validate a YOLOv8n pose model against a val-only COCO-pose dataset config.
args = dict(model='yolov8n-pose.pt', data='./coco_pose_val_only.yaml')
# NOTE(review): BaseCustomPoseValidator is presumably defined in an earlier
# notebook cell (only BaseCustomDetectionValidator is visible here) — confirm.
validator = BaseCustomPoseValidator(args=args)
validator()
Ultralytics YOLOv8.1.47 🚀 Python-3.10.12 torch-2.1.2+cu118 CUDA:0 (NVIDIA GeForce RTX 3080 Ti, 12288MiB) YOLOv8n-pose summary (fused): 187 layers, 3289964 parameters, 0 gradients, 9.2 GFLOPs
/home/mixaill76/.local/lib/python3.10/site-packages/torch/nn/modules/conv.py:456: UserWarning: Applied workaround for CuDNN issue, install nvrtc.so (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:80.)
return F.conv2d(input, weight, bias, self.stride,
val: Scanning /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco-pose/labels/val2017.cache... 2346 images, 0 backgrounds, 0 corrupt: 100%|██████████| 2346/2346 [00:00<?, ?it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Pose(P R mAP50 mAP50-95): 100%|██████████| 147/147 [00:17<00:00, 8.18it/s]
all 2346 6352 0.867 0.821 0.909 0.694 0.834 0.749 0.798 0.509
Speed: 0.1ms preprocess, 1.3ms inference, 0.0ms loss, 1.0ms postprocess per image
Saving runs/detect/train/predictions.json...
Evaluating pycocotools mAP using runs/detect/train/predictions.json and /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco-pose/annotations/person_keypoints_val2017.json...
Evaluate annotation type *bbox*
COCOeval_opt.evaluate() finished...
DONE (t=0.36s).
Accumulating evaluation results...
COCOeval_opt.accumulate() finished...
DONE (t=0.00s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.526
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.713
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.584
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.181
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.648
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.787
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.207
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.542
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.626
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.257
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.749
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.858
Average Recall (AR) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.823
Average Recall (AR) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.697
Evaluate annotation type *keypoints*
COCOeval_opt.evaluate() finished...
DONE (t=2.45s).
Accumulating evaluation results...
COCOeval_opt.accumulate() finished...
DONE (t=0.00s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.505
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.799
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.541
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.431
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.623
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.579
Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.854
Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.621
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.488
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.705
Faster eval took 4.45s
Evaluating pycocotools mAP using runs/detect/train/predictions.json and /home/mixaill76/faster_coco_eval/examples/ultralytics/datasets/coco-pose/annotations/person_keypoints_val2017.json...
loading annotations into memory...
Done (t=0.09s)
creating index...
index created!
Done (t=0.01s)
Loading and preparing results...
DONE (t=1.38s)
creating index...
index created!
Done (t=0.02s)
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=4.41s).
Accumulating evaluation results...
DONE (t=0.43s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.526
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.713
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.584
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.181
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.648
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.787
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.207
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.542
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.626
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.257
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.749
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.858
Running per image evaluation...
Evaluate annotation type *keypoints*
DONE (t=3.61s).
Accumulating evaluation results...
DONE (t=0.10s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.505
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.799
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.541
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.431
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.623
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.579
Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.854
Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.621
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets= 20 ] = 0.488
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets= 20 ] = 0.705
Original eval took 10.17s
Results saved to runs/detect/train
{'metrics/precision(B)': 0.8673525355617393, 'metrics/recall(B)': 0.820528967254408, 'metrics/mAP50(B)': 0.7133781833288113, 'metrics/mAP50-95(B)': 0.5257107235960686, 'metrics/precision(P)': 0.8337325944732837, 'metrics/recall(P)': 0.7485831234256927, 'metrics/mAP50(P)': 0.799166115657517, 'metrics/mAP50-95(P)': 0.5049564874508521, 'fitness': 1.2539324350911183}
lib | model | time (s) | speedup |
---|---|---|---|
faster-coco-eval | bbox | 7.34 | 4.5x |
pycocotools | bbox | 34.35 | 1x |
---- | ----- | ---- | ------ |
faster-coco-eval | bbox+segm | 26.51 | 3.2x |
pycocotools | bbox+segm | 86.79 | 1x |
---- | ----- | ---- | ------ |
faster-coco-eval | bbox+keypoints | 4.45 | 2.3x |
pycocotools | bbox+keypoints | 10.17 | 1x |