import os
import json
import numpy as np
import shutil
import itertools
from collections import OrderedDict
from mmengine.fileio import load
from mmengine.logging import MMLogger
from terminaltables import AsciiTable
from mmdet.evaluation import CocoMetric
from mmdet.registry import METRICS
from pycocotools.cocoeval import COCOeval

from .CoCoMetricDB import CoCoMetricDB
from .MetricUtils import tensor_to_list

def extract_tp_fp_fn(coco_eval, iou_thresholds=None):
    """Collect per-image TP/FP/FN match details from a finished COCOeval run.

    Args:
        coco_eval: A ``pycocotools.cocoeval.COCOeval`` instance on which
            ``evaluate()`` has already been called, so ``evalImgs`` is
            populated and ``cocoDt``/``cocoGt`` hold the annotations.
        iou_thresholds: Sequence of IoU thresholds to extract results for.
            Index ``i`` must correspond to row ``i`` of the per-image
            ``dtMatches``/``gtMatches`` arrays (i.e. it should match
            ``coco_eval.params.iouThrs``). Defaults to ``[0.5]``.

    Returns:
        dict: Nested mapping
        ``{img_id: {category_id: {area_range: {iou: {"TP": [...],
        "FP": [...], "FN": [...]}}}}}`` where each entry records the
        detection/ground-truth ids, their bboxes and the detection score
        (``None`` fields where not applicable).
    """
    if iou_thresholds is None:
        iou_thresholds = [0.5]

    per_image_results = {}
    # evalImgs contains None entries for (img, cat, area) combinations
    # without annotations; skip them.
    for eval_img in filter(None, coco_eval.evalImgs):
        img_id = int(eval_img['image_id'])
        category_id = int(eval_img['category_id'])
        area = tuple(eval_img['aRng'])

        # Lazily create the nested buckets for this (image, category,
        # area-range) combination.
        buckets = (per_image_results
                   .setdefault(img_id, {})
                   .setdefault(category_id, {})
                   .setdefault(area, {iou: {"TP": [], "FP": [], "FN": []}
                                      for iou in iou_thresholds}))

        dt_ids = eval_img['dtIds']
        gt_ids = eval_img['gtIds']
        dt_scores = eval_img['dtScores']
        # Hoist annotation lookups out of the per-IoU loop: an annotation
        # does not depend on the IoU threshold, so looking it up once per
        # image instead of once per threshold saves repeated API calls.
        dt_annos = [coco_eval.cocoDt.loadAnns(i)[0] for i in dt_ids]
        gt_annos = [coco_eval.cocoGt.loadAnns(i)[0] for i in gt_ids]

        for iou_idx, iou_thresh in enumerate(iou_thresholds):
            dt_matches = eval_img['dtMatches'][iou_idx]
            gt_matches = eval_img['gtMatches'][iou_idx]
            bucket = buckets[iou_thresh]

            # Detections: matched with a GT -> TP, otherwise -> FP.
            for dt_idx, dt_id in enumerate(dt_ids):
                dt_anno = dt_annos[dt_idx]
                score = dt_scores[dt_idx]
                if dt_matches[dt_idx] > 0:  # dtMatches stores matched gt id
                    matched_gt_id = int(dt_matches[dt_idx])
                    gt_anno = coco_eval.cocoGt.loadAnns(matched_gt_id)[0]
                    bucket["TP"].append({
                        'dt_id': dt_id,
                        'gt_id': matched_gt_id,
                        'dt_bbox': dt_anno['bbox'],
                        'gt_bbox': gt_anno['bbox'],
                        'score': score
                    })
                else:
                    bucket["FP"].append({
                        'dt_id': dt_id,
                        'gt_id': None,
                        'dt_bbox': dt_anno['bbox'],
                        'gt_bbox': None,
                        'score': score
                    })

            # Ground truths never matched at this threshold -> FN.
            for gt_idx in np.where(gt_matches == 0)[0]:
                bucket["FN"].append({
                    'dt_id': None,
                    'gt_id': gt_ids[gt_idx],
                    'dt_bbox': None,
                    'gt_bbox': gt_annos[gt_idx]['bbox'],
                    'score': None
                })
    return per_image_results

def _json_safe_keys(obj):
    """Recursively convert dict keys json cannot serialize into strings.

    ``extract_tp_fp_fn`` produces tuple area-range keys, which make
    ``json.dumps`` raise ``TypeError``; stringify such keys while leaving
    str/int/float/bool/None keys (which json coerces itself) untouched.
    """
    if isinstance(obj, dict):
        return {
            (k if isinstance(k, (str, int, float, bool)) or k is None
             else str(k)): _json_safe_keys(v)
            for k, v in obj.items()
        }
    if isinstance(obj, (list, tuple)):
        return [_json_safe_keys(v) for v in obj]
    return obj

def results_to_json(eval_results, per_image_results):
    """Serialize evaluation results to a pretty-printed JSON string.

    Args:
        eval_results: ``COCOeval`` instance whose ``eval`` dict holds the
            accumulated ``precision``/``recall`` numpy arrays.
        per_image_results: Nested per-image match dict as returned by
            ``extract_tp_fp_fn`` (may contain tuple keys).

    Returns:
        str: JSON document with ``per_image`` and ``pr_curve`` top-level keys.
    """
    pr_data = {
        "precision": eval_results.eval['precision'].tolist(),
        "recall": eval_results.eval['recall'].tolist(),
    }
    return json.dumps({
        # Tuple dict keys are not JSON-serializable; stringify them first.
        "per_image": _json_safe_keys(per_image_results),
        "pr_curve": pr_data
    }, indent=4)

@METRICS.register_module()
class CoCoExMetric(CocoMetric):
    """COCO metric that additionally dumps per-image evaluation details.

    Besides the standard COCO evaluation inherited from ``CocoMetric``,
    this metric writes into ``output_dir``:

    - ``data_samples.json``: JSON-serialized copies of every processed
      data sample,
    - ``eval_results.json``: the scalar metric results,
    - ``eval_details.db``: a sqlite database with per-image TP/FP/FN
      match pairs (via ``CoCoMetricDB``).
    """

    def __init__(self, output_dir, *args, **kwargs):
        """Initialize the metric.

        Args:
            output_dir (str): Directory that receives the extra evaluation
                artifacts. Created if missing; wiped and re-created at
                ``compute_metrics`` time.
            *args, **kwargs: Forwarded to ``CocoMetric``.
        """
        super().__init__(*args, **kwargs)

        self.output_dir = output_dir
        os.makedirs(self.output_dir, exist_ok=True)
        # JSON-serializable copies of all processed data samples.
        self.data_samples = []

    def process(self, data_batch, data_samples):
        # Let the parent metric collect its own intermediate results first.
        super().process(data_batch, data_samples)

        # Keep a serializable copy of the raw samples so they can be dumped
        # to disk in compute_metrics().
        results_serializable = tensor_to_list(data_samples)
        self.data_samples.extend(results_serializable)

    def compute_metrics(self, results):
        """Run COCO evaluation and dump extended diagnostics.

        Args:
            results: List of ``(gt, prediction)`` tuples collected by
                ``process``.

        Returns:
            OrderedDict: Metric name -> value.
        """
        # split gt and prediction list
        logger: MMLogger = MMLogger.get_current_instance()
        gts, preds = zip(*results)

        outfile_prefix = self.output_dir

        # handle lazy init
        if self.cat_ids is None:
            self.cat_ids = self._coco_api.get_cat_ids(
                cat_names=self.dataset_meta['classes'])
        if self.img_ids is None:
            self.img_ids = self._coco_api.get_img_ids()

        # convert predictions to coco format and dump to json file
        result_files = self.results2json(preds, outfile_prefix)
        eval_results = OrderedDict()
        # Keep a handle on the last COCOeval so per-image match details can
        # be extracted after the loop. Stays None when evaluation never ran
        # (empty metrics list or empty predictions), which previously caused
        # a NameError further below.
        coco_eval = None
        for metric in self.metrics:
            logger.info(f'Evaluating {metric}...')

            # evaluate proposal, bbox and segm
            iou_type = 'bbox' if metric == 'proposal' else metric
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                predictions = load(result_files[metric])
                if iou_type == 'segm':
                    # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331  # noqa
                    # When evaluating mask AP, if the results contain bbox,
                    # cocoapi will use the box area instead of the mask area
                    # for calculating the instance area. Though the overall AP
                    # is not affected, this leads to different
                    # small/medium/large mask AP results.
                    for x in predictions:
                        x.pop('bbox')
                coco_dt = self._coco_api.loadRes(predictions)
            except IndexError:
                logger.error(
                    'The testing results of the whole dataset is empty.')
                break

            coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)
            coco_eval.params.catIds = self.cat_ids
            coco_eval.params.imgIds = self.img_ids
            coco_eval.params.maxDets = list(self.proposal_nums)
            coco_eval.params.iouThrs = self.iou_thrs

            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            metric_items = self.metric_items
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item "{metric_item}" is not supported')

            if metric == 'proposal':
                # Proposal evaluation is class-agnostic.
                coco_eval.params.useCats = 0
                coco_eval.evaluate()
                coco_eval.accumulate()
                coco_eval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]

                for item in metric_items:
                    val = float(
                        f'{coco_eval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                coco_eval.evaluate()
                coco_eval.accumulate()
                coco_eval.summarize()
                if self.classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = coco_eval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, cat_id in enumerate(self.cat_ids):
                        t = []
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self._coco_api.loadCats(cat_id)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        t.append(f'{nm["name"]}')
                        t.append(f'{round(ap, 3)}')
                        eval_results[f'{nm["name"]}_precision'] = round(ap, 3)

                        # indexes of IoU  @50 and @75
                        for iou in [0, 5]:
                            precision = precisions[iou, :, idx, 0, -1]
                            precision = precision[precision > -1]
                            if precision.size:
                                ap = np.mean(precision)
                            else:
                                ap = float('nan')
                            t.append(f'{round(ap, 3)}')

                        # indexes of area of small, median and large
                        for area in [1, 2, 3]:
                            precision = precisions[:, :, idx, area, -1]
                            precision = precision[precision > -1]
                            if precision.size:
                                ap = np.mean(precision)
                            else:
                                ap = float('nan')
                            t.append(f'{round(ap, 3)}')
                        results_per_category.append(tuple(t))

                    num_columns = len(results_per_category[0])
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = [
                        'category', 'mAP', 'mAP_50', 'mAP_75', 'mAP_s',
                        'mAP_m', 'mAP_l'
                    ]
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    logger.info('\n' + table.table)

                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]

                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = coco_eval.stats[coco_metric_names[metric_item]]
                    eval_results[key] = float(f'{round(val, 3)}')

                ap = coco_eval.stats[:6]
                logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} '
                            f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                            f'{ap[4]:.3f} {ap[5]:.3f}')

        # Re-create the output directory so stale artifacts from a previous
        # run are removed. NOTE(review): results2json above uses
        # self.output_dir as a filename *prefix*, so its files appear to live
        # next to (not inside) this directory and survive the wipe — confirm.
        if os.path.exists(self.output_dir):
            shutil.rmtree(self.output_dir)
        os.makedirs(self.output_dir)

        # Dump the serialized data samples collected in process().
        data_samples_path = os.path.join(self.output_dir, 'data_samples.json')
        with open(data_samples_path, 'w') as f:
            json.dump(self.data_samples, f, indent=4)

        # Dump the scalar metric results.
        eval_results_path = os.path.join(self.output_dir, 'eval_results.json')
        with open(eval_results_path, 'w') as f:
            f.write(json.dumps(eval_results, indent=4))

        # Fetch per-image TP/FP/FN evaluation details from the last COCOeval
        # run; skip when evaluation never produced one.
        per_image_results = {}
        if coco_eval is not None:
            per_image_results = extract_tp_fp_fn(
                coco_eval, coco_eval.params.iouThrs)

        # Persist the evaluation details to a sqlite3 database.
        eval_details_path = os.path.join(self.output_dir, 'eval_details.db')
        db = CoCoMetricDB(eval_details_path)
        db.save_annotations(self.data_samples)
        db.save_match_pairs(per_image_results)
        db.close()

        return eval_results
