Replace the document detection model
paddle_detection/ppdet/metrics/coco_utils.py (new file, 188 lines added)
@@ -0,0 +1,188 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import numpy as np
import itertools

from ppdet.metrics.json_results import get_det_res, get_det_poly_res, get_seg_res, get_solov2_segm_res, get_keypoint_res, get_pose3d_res
from ppdet.metrics.map_utils import draw_pr_curve

from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)

def get_infer_results(outs, catid, bias=0):
    """
    Get result at the stage of inference.
    The output format is a dictionary containing bbox or mask results.

    For example, the bbox result is a list and each element contains
    image_id, category_id, bbox and score.
    """
    if outs is None or len(outs) == 0:
        raise ValueError(
            'The number of valid detection results is zero. Please use a reasonable model and check the input data.'
        )

    im_id = outs['im_id']

    infer_res = {}
    if 'bbox' in outs:
        if len(outs['bbox']) > 0 and len(outs['bbox'][0]) > 6:
            infer_res['bbox'] = get_det_poly_res(
                outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
        else:
            infer_res['bbox'] = get_det_res(
                outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)

    if 'mask' in outs:
        # mask post process
        infer_res['mask'] = get_seg_res(outs['mask'], outs['bbox'],
                                        outs['bbox_num'], im_id, catid)

    if 'segm' in outs:
        infer_res['segm'] = get_solov2_segm_res(outs, im_id, catid)

    if 'keypoint' in outs:
        infer_res['keypoint'] = get_keypoint_res(outs, im_id)
        outs['bbox_num'] = [len(infer_res['keypoint'])]

    if 'pose3d' in outs:
        infer_res['pose3d'] = get_pose3d_res(outs, im_id)
        outs['bbox_num'] = [len(infer_res['pose3d'])]

    return infer_res

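A minimal usage sketch for get_infer_results, not part of the committed file. All values are hypothetical; it assumes the paddle_detection directory is on PYTHONPATH so that ppdet is importable, and that detections use the usual [label, score, x1, y1, x2, y2] layout handled by get_det_res:

# Usage sketch (hypothetical values, not part of coco_utils.py):
# convert one batch of raw detections into COCO-style records and dump them.
import json

import numpy as np

from ppdet.metrics.coco_utils import get_infer_results

outs = {
    'im_id': np.array([[1]]),  # one image id per batch entry
    'bbox': np.array([[0, 0.9, 10., 20., 110., 220.]]),  # [label, score, x1, y1, x2, y2]
    'bbox_num': np.array([1]),  # number of detections per image
}
catid = {0: 1}  # model label index -> COCO category id (hypothetical mapping)

results = get_infer_results(outs, catid)
with open('bbox.json', 'w') as f:
    json.dump(results['bbox'], f)  # records with image_id, category_id, bbox, score

The bbox.json produced this way is the kind of file cocoapi_eval below consumes.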
def cocoapi_eval(jsonfile,
                 style,
                 coco_gt=None,
                 anno_file=None,
                 max_dets=(100, 300, 1000),
                 classwise=False,
                 sigmas=None,
                 use_area=True):
    """
    Args:
        jsonfile (str): Evaluation json file, eg: bbox.json, mask.json.
        style (str): COCOeval style, can be `bbox`, `segm`, `proposal`, `keypoints` and `keypoints_crowd`.
        coco_gt (COCO): Initialized COCO ground-truth API object,
            eg: coco_gt = COCO(anno_file). If None, it is built from anno_file.
        anno_file (str): COCO annotations file.
        max_dets (tuple): COCO evaluation maxDets.
        classwise (bool): Whether to compute per-category AP and draw the P-R curve.
        sigmas (nparray): keypoint labelling sigmas.
        use_area (bool): If gt annotations (eg. CrowdPose, AIC)
            do not have 'area', please set use_area=False.
    """
    assert coco_gt is not None or anno_file is not None
    if style == 'keypoints_crowd':
        # please install xtcocotools==1.6
        from xtcocotools.coco import COCO
        from xtcocotools.cocoeval import COCOeval
    else:
        from pycocotools.coco import COCO
        from pycocotools.cocoeval import COCOeval

    if coco_gt is None:
        coco_gt = COCO(anno_file)
    logger.info("Start evaluating...")
    coco_dt = coco_gt.loadRes(jsonfile)
    if style == 'proposal':
        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.params.useCats = 0
        coco_eval.params.maxDets = list(max_dets)
    elif style == 'keypoints_crowd':
        coco_eval = COCOeval(coco_gt, coco_dt, style, sigmas, use_area)
    else:
        coco_eval = COCOeval(coco_gt, coco_dt, style)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    if classwise:
        # Compute per-category AP and PR curve
        try:
            from terminaltables import AsciiTable
        except Exception as e:
            logger.error(
                'terminaltables not found, please install terminaltables, '
                'for example: `pip install terminaltables`.')
            raise e
        precisions = coco_eval.eval['precision']
        cat_ids = coco_gt.getCatIds()
        # precision: (iou, recall, cls, area range, max dets)
        assert len(cat_ids) == precisions.shape[2]
        results_per_category = []
        for idx, catId in enumerate(cat_ids):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            nm = coco_gt.loadCats(catId)[0]
            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            if precision.size:
                ap = np.mean(precision)
            else:
                ap = float('nan')
            results_per_category.append(
                (str(nm["name"]), '{:0.3f}'.format(float(ap))))
            pr_array = precisions[0, :, idx, 0, 2]
            recall_array = np.arange(0.0, 1.01, 0.01)
            draw_pr_curve(
                pr_array,
                recall_array,
                out_dir=style + '_pr_curve',
                file_name='{}_precision_recall_curve.jpg'.format(nm["name"]))

        num_columns = min(6, len(results_per_category) * 2)
        results_flatten = list(itertools.chain(*results_per_category))
        headers = ['category', 'AP'] * (num_columns // 2)
        results_2d = itertools.zip_longest(
            *[results_flatten[i::num_columns] for i in range(num_columns)])
        table_data = [headers]
        table_data += [result for result in results_2d]
        table = AsciiTable(table_data)
        logger.info('Per-category {} AP: \n{}'.format(style, table.table))
        logger.info("Per-category PR curves have been saved to the {} folder.".format(
            style + '_pr_curve'))
    # flush coco evaluation result
    sys.stdout.flush()
    return coco_eval.stats

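A minimal usage sketch for cocoapi_eval, not part of the committed file. The paths are hypothetical; it evaluates a dumped bbox.json against a COCO annotation file and returns the COCOeval stats array:

# Usage sketch (hypothetical paths, not part of coco_utils.py):
from ppdet.metrics.coco_utils import cocoapi_eval

stats = cocoapi_eval(
    'bbox.json',  # detections dumped by get_infer_results above
    'bbox',  # COCOeval style
    anno_file='annotations/instances_val.json',  # hypothetical annotation path
    classwise=True)  # also prints the per-category AP table; needs terminaltables
print('mAP(0.50:0.95) =', stats[0])  # stats[0] is AP at IoU 0.50:0.95, all areas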
def json_eval_results(metric, json_directory, dataset):
    """
    COCO-API evaluation with already existing proposal.json, bbox.json or mask.json.
    """
    assert metric == 'COCO'
    anno_file = dataset.get_anno()
    json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
    if json_directory:
        assert os.path.exists(
            json_directory), "The json directory {} does not exist".format(
                json_directory)
        for k, v in enumerate(json_file_list):
            json_file_list[k] = os.path.join(str(json_directory), v)

    coco_eval_style = ['proposal', 'bbox', 'segm']
    for i, v_json in enumerate(json_file_list):
        if os.path.exists(v_json):
            cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
        else:
            logger.info("{} does not exist!".format(v_json))
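A minimal usage sketch for json_eval_results, not part of the committed file. The dataset object below is a hypothetical stand-in; in practice a ppdet dataset is passed, and only its get_anno() method is used here:

# Usage sketch (hypothetical paths and stand-in dataset, not part of coco_utils.py):
# re-run COCO evaluation from already dumped proposal.json / bbox.json / mask.json.
from ppdet.metrics.coco_utils import json_eval_results


class AnnoOnlyDataset(object):
    # hypothetical stand-in; json_eval_results only calls get_anno()
    def get_anno(self):
        return 'annotations/instances_val.json'  # hypothetical annotation path


json_eval_results(
    'COCO', json_directory='output', dataset=AnnoOnlyDataset())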