import check_argument as utils
from hmean import eval_hmean
import eval_utils

import glob
import json
import numpy as np
import cv2

def parse_anno_info_pred(annotations):
        """Parse prediction annotations into the boundary-result format.

        Args:
            annotations (list[dict]): Annotation entries for one image. Each
                entry carries a "shape_type" ("rectangle" or "polygon") and a
                "points" field (list of [x, y] pairs). Entries with any other
                shape_type are silently skipped.

        Returns:
            dict: A dict with a single key, "boundary_result": an (N, 9)
                float32 array where each row is a flattened 8-value polygon
                boundary followed by a dummy confidence score of 1.0.
                Empty input yields a (0, 4) placeholder array.
        """
        boundaries = []
        for ann in annotations:
            if ann["shape_type"] == "rectangle":
                # box2polygon expands the 2-point rectangle to an 8-value
                # polygon; append a dummy confidence score of 1.0.
                boundaries.append(eval_utils.box2polygon(ann["points"]) + [1.0])
            elif ann["shape_type"] == "polygon":
                boundary = np.array(ann["points"]).reshape(-1).tolist()
                boundary.append(1.0)  # dummy confidence score
                boundaries.append(boundary)

        if boundaries:
            boundaries = np.array(boundaries, dtype=np.float32)
        else:
            # Keep the original (0, 4) empty placeholder expected downstream.
            boundaries = np.zeros((0, 4), dtype=np.float32)

        return dict(boundary_result=boundaries)

def parse_anno_info(annotations):
        """Parse ground-truth annotations into bbox/label/mask arrays.

        Args:
            annotations (list[dict]): Annotation entries for one image. Each
                entry carries a "shape_type" ("rectangle" or "polygon") and a
                "points" field (list of [x, y] pairs). Entries with any other
                shape_type are silently skipped.

        Returns:
            dict: A dict with keys:
                - bboxes: (N, 4) float32 array of boxes.
                - labels: (N,) int64 array, all ones (single text class).
                - bboxes_ignore: always an empty (0, 4) float32 array.
                - masks: list of nested 8-value polygon boundary lists.
                - masks_ignore: always an empty list.
        """
        gt_bboxes, gt_bboxes_ignore = [], []
        gt_masks, gt_masks_ignore = [], []
        gt_labels = []
        for ann in annotations:
            if ann["shape_type"] == "rectangle":
                pts = ann["points"]
                # Two corner points -> [x1, y1, x2, y2].
                box = [pts[0][0], pts[0][1], pts[1][0], pts[1][1]]
                gt_bboxes.append(box)
                gt_labels.append(1)
                poly = np.array(eval_utils.box2polygon(box)).reshape(-1, 8).tolist()
                gt_masks.append(poly)
            elif ann["shape_type"] == "polygon":
                pts = ann["points"]
                # Only quadrilaterals (multiples of 8 coordinate values) are
                # supported; skip polygons with any other point count.
                if np.array(pts).size % 8 != 0:
                    continue
                gt_masks.append(np.array(pts).reshape(-1, 8).tolist())
                # NOTE(review): cv2.boundingRect returns (x, y, w, h), which
                # differs from the [x1, y1, x2, y2] format used in the
                # rectangle branch above — confirm downstream expectations.
                box = cv2.boundingRect(np.array(pts, dtype=np.float32))
                gt_bboxes.append(box)
                gt_labels.append(1)

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        return dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks_ignore=gt_masks_ignore,
            masks=gt_masks)


def evaluate(metric='hmean-iou',
                 logger=None,
                 score_thr=0.3,
                 rank_list=None,
                 gt_path=r'C:\Users\Administrator\Desktop\标注好测试数据\营业执照改\gts',
                 pred_path=r'C:\Users\Administrator\Desktop\标注好测试数据\营业执照改\preds',
                 **kwargs):
        """Evaluate the hmean metric over GT/prediction labelme json files.

        Args:
            metric (str | list[str]): Metrics to be evaluated. Only
                'hmean-iou' and 'hmean-ic13' are supported; anything else
                is filtered out.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            score_thr (float): Score threshold forwarded to eval_hmean.
            rank_list (str): json file used to save eval result
                of each image after ranking.
            gt_path (str): Directory containing ground-truth json files
                (GBK-encoded labelme exports).
            pred_path (str): Directory containing prediction json files
                (UTF-8 encoded).

        Returns:
            dict[dict[str: float]]: The evaluation results.
        """
        img_infos = []
        ann_infos = []
        for gt_json_file in glob.glob(gt_path + '/*.json'):
            # GT exports are GBK-encoded; close each file promptly.
            with open(gt_json_file, 'r', encoding='gbk') as f:
                gt_json = json.load(f)
            img_infos.append({'filename': gt_json['imagePath']})
            ann_infos.append(parse_anno_info(gt_json["shapes"]))

        results = []
        for pred_json_file in glob.glob(pred_path + '/*.json'):
            with open(pred_json_file, 'r', encoding='utf-8') as f:
                pred_json = json.load(f)
            results.append(parse_anno_info_pred(pred_json['shapes']))

        metrics = metric if isinstance(metric, list) else [metric]
        # Restrict to the metrics eval_hmean understands.
        metrics = set(metrics) & {'hmean-iou', 'hmean-ic13'}

        return eval_hmean(
            results,
            img_infos,
            ann_infos,
            metrics=metrics,
            score_thr=score_thr,
            logger=logger,
            rank_list=rank_list)

if __name__ == "__main__":
    # Run the evaluation only when executed as a script, not on import.
    evaluate()