import json
import logging
from faster_coco_eval import COCO, COCOeval_faster
logging.root.setLevel("INFO")
logging.debug("Record.")  # DEBUG is below the INFO level set above, so this record is not emitted
# Small helper: read a JSON file and return the parsed data.
def load(file):
    with open(file) as io:
        _data = json.load(io)
    return _data
prepared_coco_in_dict = load('../tests/data/eval_all_coco.json')
prepared_anns = load('../tests/data/result_annotations.json')
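# iouType selects the annotation representation to score: 'bbox', 'segm', or 'keypoints'.
# useCats toggles per-category matching; False gives a class-agnostic evaluation.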
iouType = 'segm'
useCats = False
cocoGt = COCO(prepared_coco_in_dict)  # ground truth
cocoDt = cocoGt.loadRes(prepared_anns)  # detections, indexed against the ground truth
cocoEval = COCOeval_faster(cocoGt, cocoDt, iouType)
cocoEval.params.maxDets = [len(cocoGt.anns)]  # single maxDets value: no per-image cap on detections
cocoEval.params.iouThrs = [0.5, 0.75]  # the attribute is iouThrs; a typo like `iouThr` is silently ignored
if not useCats:
    cocoEval.params.useCats = 0  # disable category labels (class-agnostic evaluation)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
INFO:faster_coco_eval.coco:loading annotations into memory...
INFO:faster_coco_eval.coco:Done (t=0.00s)
INFO:faster_coco_eval.coco:creating index...
INFO:faster_coco_eval.coco:index created!
INFO:faster_coco_eval.coco:Loading and preparing results...
INFO:faster_coco_eval.coco:DONE (t=0.01s)
INFO:faster_coco_eval.coco:creating index...
INFO:faster_coco_eval.coco:index created!
INFO:faster_coco_eval.faster_eval_api:Evaluate annotation type *segm*
INFO:faster_coco_eval.faster_eval_api:COCOeval_opt.evaluate() finished in 1.01 seconds.
INFO:faster_coco_eval.faster_eval_api:Accumulating evaluation results...
INFO:faster_coco_eval.faster_eval_api:COCOeval_opt.accumulate() finished in 0.00 seconds.
cocoEval.stats
array([0.265583  , 0.49315217, 0.26601502, 0.23487964, 0.3884678 ,
       0.75148515, 0.48837996, 0.        , 0.        , 0.4215085 ,
       0.6806913 , 0.75      ])
cocoEval.stats_as_dict
{'AP_all': 0.265582997070242,
 'AP_50': 0.49315216628619263,
 'AP_75': 0.2660150221663158,
 'AP_small': 0.23487963763725533,
 'AP_medium': 0.3884677964493054,
 'AP_large': 0.7514851485148515,
 'AR_all': 0.4883799569628035,
 'AR_second': 0.0,
 'AR_third': 0.0,
 'AR_small': 0.42150849564857024,
 'AR_medium': 0.6806912991656734,
 'AR_large': 0.75,
 'AR_50': 0.8628957885029204,
 'AR_75': 0.5075315093759607}
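# Optional minimal sketch: stats_as_dict mirrors the stats array above,
# so individual metrics can be read back by name.
for name, value in cocoEval.stats_as_dict.items():
    print(f"{name}: {value:.4f}")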