import os
import os.path as osp
import time
import warnings
from collections import OrderedDict, defaultdict
from functools import partial
from random import sample

import BboxToolkit as bt
import cv2
import mmcv
import numpy as np
from mmcv.utils import print_log
from prettytable import PrettyTable

from mmdet.core import (eval_arb_map, eval_arb_recalls, eval_metrics,
                        intersect_and_union, pre_eval_to_metrics)
from mmdet.ops.nms import nms
from mmdet.ops.nms_rotated import obb_nms, BT_nms

from ..builder import DATASETS
from ..custom import CustomDataset
from ..pipelines import Compose, LoadAnnotations


# MAPPING = OrderedDict({
#     'background': (0, 0, 0),
#     'ship': (0, 0, 63),
#     'storage_tank': (0, 191, 127),
#     'baseball_diamond': (0, 63, 0),
#     'tennis_court': (0, 63, 127),
#     'basketball_court': (0, 63, 191),
#     'ground_Track_Field': (0, 63, 255),
#     'bridge': (0, 127, 63),
#     'large_Vehicle': (0, 127, 127),
#     'small_Vehicle': (0, 0, 127),
#     'helicopter': (0, 0, 191),
#     'swimming_pool': (0, 0, 255),
#     'roundabout': (0, 63, 63),
#     'soccer_ball_field': (0, 127, 191),
#     'plane': (0, 127, 255),
#     'harbor': (0, 100, 155),
# })

# SEG_CLASSES = ('background', 'ship', 'storage_tank', 'baseball_diamond',
# 'tennis_court', 'basketball_court', 'ground_Track_Field',
# 'bridge', 'large_Vehicle', 'small_Vehicle', 'helicopter',
# 'swimming_pool', 'roundabout', 'soccer_ball_field', 'plane', 'harbor')

# Segmentation class names; the tuple order defines the class indices
# (0 = 'large_Vehicle', ..., 15 = 'background').
SEG_CLASSES = ('large_Vehicle', 'swimming_pool', 'helicopter', 'bridge',
               'plane', 'ship', 'soccer_ball_field', 'basketball_court',
               'ground_Track_Field', 'small_Vehicle', 'baseball_diamond',
               'tennis_court', 'roundabout', 'storage_tank', 'harbor',
               'background')


# Class name -> RGB color used in the color-coded segmentation maps.
# Insertion order must match SEG_CLASSES: it defines the class index.
MAPPING = OrderedDict([
    ('large_Vehicle', (0, 127, 127)),
    ('swimming_pool', (0, 0, 255)),
    ('helicopter', (0, 0, 191)),
    ('bridge', (0, 127, 63)),
    ('plane', (0, 127, 255)),
    ('ship', (0, 0, 63)),
    ('soccer_ball_field', (0, 127, 191)),
    ('basketball_court', (0, 63, 191)),
    ('ground_Track_Field', (0, 63, 255)),
    ('small_Vehicle', (0, 0, 127)),
    ('baseball_diamond', (0, 63, 0)),
    ('tennis_court', (0, 63, 127)),
    ('roundabout', (0, 63, 63)),
    ('storage_tank', (0, 191, 127)),
    ('harbor', (0, 100, 155)),
    ('background', (0, 0, 0)),
])

# RGB color -> class index lookup derived from MAPPING's insertion order.
rgb_to_class = {color: index for index, color in enumerate(MAPPING.values())}


def rgb_to_class_map(image):
    """Convert a color-coded segmentation image to a class-index map.

    Args:
        image (np.ndarray): H x W x 3 image in BGR channel order (OpenCV
            convention) whose colors follow ``MAPPING``.

    Returns:
        np.ndarray: H x W int8 array of class indices. Pixels whose color
        is not in ``MAPPING`` stay 0.
    """
    # Reverse the channel axis: BGR -> RGB, so pixels compare against
    # the RGB tuples stored in ``rgb_to_class``.
    rgb_image = image[:, :, ::-1]
    height, width = rgb_image.shape[:2]
    class_map = np.zeros((height, width), dtype=np.int8)

    # Stamp each class index wherever all three channels match its color.
    for color, index in rgb_to_class.items():
        class_map[(rgb_image == color).all(axis=2)] = index

    return class_map


@DATASETS.register_module()
class DOTADataset(CustomDataset):
    """DOTA dataset supporting oriented (Task1) and horizontal (Task2) boxes.

    The dataset is built from split image patches; both patch-level and
    original-image-level annotations are kept so that patch detections can
    be merged back to full-image coordinates for submission and evaluation.
    """

    def __init__(self,
                 task,
                 fp_ratio=0,
                 **kwargs):
        """
        Args:
            task (str): 'Task1' (oriented bboxes) or 'Task2' (horizontal
                bboxes).
            fp_ratio (int | str): Amount of background-only (false-positive)
                patches mixed into training: 0 keeps none, 'all' keeps every
                one, otherwise at most ``fp_ratio * num_positive_patches``
                are randomly sampled.
            **kwargs: Forwarded to :class:`CustomDataset`.
        """
        assert task in ['Task1', 'Task2']
        self.task = task
        self.fp_ratio = fp_ratio
        # Loader used by get_gt_seg_maps() to read semantic segmentation
        # annotations only (no boxes/labels/masks).
        self.gt_seg_map_loader = LoadAnnotations(
            with_bbox=False, with_label=False, with_mask=False,
            with_seg=True, with_cls_label=False, poly2mask=False)
        super(DOTADataset, self).__init__(**kwargs)

    @classmethod
    def get_classes(cls, classes=None):
        """Resolve class names through BboxToolkit.

        Args:
            classes (str | Sequence[str] | None): Dataset name, class names,
                or None to keep the default classes from the annotation file.

        Returns:
            tuple[str] | None: Resolved class names, or None.
        """
        if classes is None:
            cls.custom_classes = False
            return None

        cls.custom_classes = True
        return bt.get_classes(classes)

    def load_annotations(self, ann_file):
        """Load split config plus original- and patch-level annotations.

        Args:
            ann_file (str): Directory containing ``split_config.json``,
                ``ori_annfile.pkl`` and ``patch_annfile.pkl``.

        Returns:
            list[dict]: Patch infos. In test mode all patches are returned;
            in training mode, positive patches plus background patches
            sampled according to ``fp_ratio``.
        """
        split_config = osp.join(ann_file, 'split_config.json')
        self.split_info = mmcv.load(split_config)

        ori_annfile = osp.join(ann_file, 'ori_annfile.pkl')
        self.ori_infos = mmcv.load(ori_annfile)['content']

        patch_annfile = osp.join(ann_file, 'patch_annfile.pkl')
        patch_dict = mmcv.load(patch_annfile)
        classes, contents = patch_dict['cls'], patch_dict['content']
        self.ori_CLASSES = classes
        if self.CLASSES is None:
            self.CLASSES = classes

        if self.test_mode:
            return contents

        # Separate patches that contain objects (pp) from pure-background
        # patches (fp).
        self.pp_infos = []
        self.fp_infos = []
        for content in contents:
            if content['ann']['bboxes'].size != 0:
                self.pp_infos.append(content)
            else:
                self.fp_infos.append(content)
        data_infos = self.add_random_fp()
        return data_infos

    def _load_semantic_seg(self, seg_ann_files):
        """Load semantic segmentation maps from disk.

        Args:
            seg_ann_files (list[dict]): Infos carrying a 'filename' key,
                resolved relative to ``self.seg_prefix``.

        Returns:
            list[np.ndarray]: One segmentation map per entry.
        """
        gt_semantic_segs = []
        file_client = mmcv.FileClient(backend='disk')
        for seg_file in seg_ann_files:
            filename = osp.join(self.seg_prefix, seg_file['filename'])
            img_bytes = file_client.get(filename)
            gt_semantic_seg = mmcv.imfrombytes(
                img_bytes, flag='unchanged').squeeze()
            gt_semantic_segs.append(gt_semantic_seg)
        return gt_semantic_segs

    def get_gt_seg_maps(self, efficient_test=None):
        """Yield ground truth segmentation maps for evaluation, one by one."""
        if efficient_test is not None:
            warnings.warn(
                'DeprecationWarning: ``efficient_test`` has been deprecated '
                'since MMSeg v0.16, the ``get_gt_seg_maps()`` is CPU memory '
                'friendly by default. ')

        for idx in range(len(self)):
            ann_info = self.get_ann_info(idx)
            results = dict(ann_info=ann_info)
            self.pre_pipeline_seg(results)
            self.gt_seg_map_loader(results)
            yield results['gt_semantic_seg']

    def add_random_fp(self):
        """Mix background-only patches into the positives per ``fp_ratio``.

        Returns:
            list[dict]: Positive patch infos plus the sampled background
            patch infos.
        """
        if self.fp_ratio == 0:
            return self.pp_infos
        elif self.fp_ratio == 'all':
            return self.pp_infos + self.fp_infos
        else:
            # Never request more background patches than actually exist.
            num = min(self.fp_ratio * len(self.pp_infos), len(self.fp_infos))
            fp_infos = sample(self.fp_infos, k=int(num))
            return self.pp_infos + fp_infos

    def get_subset_by_classes(self):
        """Remap annotation labels from the original to the active classes."""
        bt.change_cls_order(self.data_infos, self.ori_CLASSES, self.CLASSES)
        return self.data_infos

    def pre_pipeline(self, results):
        """Inject split info and class names before the standard pipeline."""
        results['split_info'] = self.split_info
        results['cls'] = self.CLASSES
        super().pre_pipeline(results)

    def pre_pipeline_seg(self, results):
        """Prepare a results dict for the segmentation loading pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['seg_fields'] = []
        if self.custom_classes:
            results['label_map'] = self.label_map

    def format_results(self,
                       results,
                       with_merge=True,
                       ign_scale_ranges=None,
                       iou_thr=0.5,
                       nproc=4,
                       save_dir=None,
                       **kwargs):
        """Translate patch detections to full-image coordinates and merge.

        Args:
            results (list): Per-patch detection results; either dicts with a
                'det_out' entry or (dets, segments) tuples (Task1 masks are
                converted to oriented boxes).
            with_merge (bool): Merge patch results back to original images.
            ign_scale_ranges (list[tuple] | None): Per split-size
                (min, max) bbox scale ranges; detections outside the range
                of the patch's closest split size are dropped.
            iou_thr (float): IoU threshold of the per-class NMS used when
                merging overlapping patches.
            nproc (int): Number of merging processes (capped to CPU count).
            save_dir (str | None): If given, write a DOTA submission there.

        Returns:
            list[tuple]: (image id, per-class detection arrays) pairs.
        """
        nproc = min(nproc, os.cpu_count())
        task = self.task
        if mmcv.is_list_of(results, tuple):
            dets, segments = results
            if task == 'Task1':
                dets = _list_mask_2_obb(dets, segments)
        else:
            dets = [result['det_out'] for result in results]

        if not with_merge:
            # NOTE(review): this path pairs ids with the raw ``results``
            # (not the extracted ``dets``); confirm that is intended when
            # results also carry segmentations.
            results = [(data_info['id'], result)
                       for data_info, result in zip(self.data_infos, results)]
            if save_dir is not None:
                id_list, dets_list = zip(*results)
                bt.save_dota_submission(save_dir, id_list, dets_list, task,
                                        self.CLASSES)
            return results

        print('\nMerging patch bboxes into full image!!!')
        start_time = time.time()
        if ign_scale_ranges is not None:
            # One scale range is expected per (rate, size) split combination.
            assert len(ign_scale_ranges) == (len(self.split_info['rates']) *
                                             len(self.split_info['sizes']))
            split_sizes = []
            for rate in self.split_info['rates']:
                split_sizes += [int(size / rate)
                                for size in self.split_info['sizes']]

        collector = defaultdict(list)
        for data_info, result in zip(self.data_infos, dets):
            if ign_scale_ranges is not None:
                # Select the scale range of the split size closest (in log
                # space) to this patch's width.
                img_scale = data_info['width']
                scale_ratio = np.array(split_sizes) / img_scale
                inds = np.argmin(abs(np.log(scale_ratio)))

                min_scale, max_scale = ign_scale_ranges[inds]
                min_scale = 0 if min_scale is None else min_scale
                max_scale = np.inf if max_scale is None else max_scale

            x_start, y_start = data_info['x_start'], data_info['y_start']
            new_result = []
            # ``cls_dets`` was previously named ``dets``, shadowing the
            # outer per-image list; renamed for clarity.
            for i, cls_dets in enumerate(result):
                if ign_scale_ranges is not None:
                    bbox_scales = np.sqrt(bt.bbox_areas(cls_dets[:, :-1]))
                    valid_inds = ((bbox_scales > min_scale) &
                                  (bbox_scales < max_scale))
                    cls_dets = cls_dets[valid_inds]
                bboxes, scores = cls_dets[:, :-1], cls_dets[:, [-1]]
                # Shift patch-local coordinates to the original image frame.
                bboxes = bt.translate(bboxes, x_start, y_start)
                labels = np.zeros((bboxes.shape[0], 1)) + i
                new_result.append(np.concatenate(
                    [labels, bboxes, scores], axis=1))

            new_result = np.concatenate(new_result, axis=0)
            collector[data_info['ori_id']].append(new_result)

        merge_func = partial(
            _merge_func,
            CLASSES=self.CLASSES,
            iou_thr=iou_thr,
            task=task)
        if nproc <= 1:
            print('Single processing')
            # Materialize the generator returned by the progress tracker so
            # the results can be consumed more than once (submission saving
            # below plus mAP computation in ``evaluate``).
            merged_results = list(mmcv.track_iter_progress(
                (map(merge_func, collector.items()), len(collector))))
        else:
            print('Multiple processing')
            merged_results = mmcv.track_parallel_progress(
                merge_func, list(collector.items()), nproc)

        if save_dir is not None:
            id_list, dets_list = zip(*merged_results)
            bt.save_dota_submission(save_dir, id_list, dets_list, task,
                                    self.CLASSES)

        stop_time = time.time()
        print('Used time: %.1f s' % (stop_time - start_time))
        return merged_results

    def evaluate(self,
                 results,
                 metric_det='mAP',
                 metric_seg='mIoU',
                 logger=None,
                 with_merge=True,
                 ign_diff=True,
                 ign_scale_ranges=None,
                 save_dir=None,
                 merge_iou_thr=0.1,
                 use_07_metric=True,
                 scale_ranges=None,
                 eval_iou_thr=[0.5],
                 proposal_nums=(2000,),
                 nproc=10,
                 pre_eval=True):
        """Evaluate detection results with 'mAP' or 'recall'.

        Args:
            results (list): Per-patch detection results.
            metric_det (str | list[str]): Detection metric; a list must
                contain exactly one entry.
            metric_seg (str | list[str]): Kept for API compatibility with
                ``evaluate_seg``; unused here.
            logger (logging.Logger | None | str): Logger for printing.
            with_merge (bool): Merge patch results to full images first.
            ign_diff (bool): Treat 'difficult' instances as ignored regions.
            ign_scale_ranges (list | None): See ``format_results``.
            save_dir (str | None): Optional DOTA submission directory.
            merge_iou_thr (float): NMS IoU threshold used while merging.
            use_07_metric (bool): Use the VOC07 11-point mAP.
            scale_ranges (list | None): Scale ranges for mAP computation.
            eval_iou_thr (list[float] | float): Matching IoU threshold(s).
            proposal_nums (tuple[int]): Proposal counts for recall.
            nproc (int): Number of processes (capped to CPU count).
            pre_eval (bool): Kept for API compatibility; unused here.

        Returns:
            dict[str, float]: Metric name to value.
        """
        nproc = min(nproc, os.cpu_count())
        if not isinstance(metric_det, str):
            assert len(metric_det) == 1
            metric_det = metric_det[0]
        allowed_metrics = ['mAP', 'recall']
        if metric_det not in allowed_metrics:
            raise KeyError(f'metric {metric_det} is not supported')
        task = self.task

        eval_results = {}
        if metric_det == 'mAP':
            merged_results = self.format_results(
                results,
                nproc=nproc,
                with_merge=with_merge,
                ign_scale_ranges=ign_scale_ranges,
                iou_thr=merge_iou_thr,
                save_dir=save_dir)

            infos = self.ori_infos if with_merge else self.data_infos
            id_mapper = {ann['id']: i for i, ann in enumerate(infos)}
            det_results, annotations = [], []
            for k, v in merged_results:
                det_results.append(v)
                ann = infos[id_mapper[k]]['ann']
                gt_bboxes = ann['bboxes']
                gt_labels = ann['labels']
                # ``np.int`` was removed in NumPy 1.24; builtin int is the
                # documented replacement.
                diffs = ann.get(
                    'diffs', np.zeros((gt_bboxes.shape[0],), dtype=int))

                if task == 'Task2':
                    gt_bboxes = bt.bbox2type(gt_bboxes, 'hbb')

                gt_ann = {}
                if ign_diff:
                    gt_ann['bboxes_ignore'] = gt_bboxes[diffs == 1]
                    gt_ann['labels_ignore'] = gt_labels[diffs == 1]
                    gt_bboxes = gt_bboxes[diffs == 0]
                    gt_labels = gt_labels[diffs == 0]
                gt_ann['bboxes'] = gt_bboxes
                gt_ann['labels'] = gt_labels
                annotations.append(gt_ann)

            print('\nStart calculate mAP!!!')
            print('Result is Only for reference,',
                  'final result is subject to DOTA_devkit')
            mean_ap, _ = eval_arb_map(
                det_results,
                annotations,
                scale_ranges=scale_ranges,
                iou_thr=eval_iou_thr,
                use_07_metric=use_07_metric,
                dataset=self.CLASSES,
                logger=logger,
                nproc=nproc)
            eval_results['mAP'] = mean_ap
        elif metric_det == 'recall':
            assert mmcv.is_list_of(results, np.ndarray)
            gt_bboxes = []
            for info in self.data_infos:
                bboxes = info['ann']['bboxes']
                if ign_diff:
                    diffs = info['ann'].get(
                        'diffs', np.zeros((bboxes.shape[0],), dtype=int))
                    bboxes = bboxes[diffs == 0]
                gt_bboxes.append(bboxes)
            if isinstance(eval_iou_thr, float):
                eval_iou_thr = [eval_iou_thr]
            recalls = eval_arb_recalls(
                gt_bboxes, results, True, proposal_nums, eval_iou_thr,
                logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(eval_iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]

        return eval_results

    def evaluate_seg(self,
                     results,
                     metric_seg='mIoU',
                     logger=None,
                     gt_seg_maps=None,
                     num_classes=16,
                     ignore_index=255,
                     **kwargs):
        """Evaluate semantic segmentation results.

        Args:
            results (list[dict]): Per-image results carrying a 'seg_out'
                entry: predicted maps (ndarray), filenames (str), or
                pre_eval tuples.
            metric_seg (str | list[str]): 'mIoU', 'mDice' and/or 'mFscore'.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            gt_seg_maps (list[ndarray] | None): Custom gt seg maps. When
                None they are loaded from ``self.data_infos``. (Previously
                this argument was silently ignored and always reloaded.)
            num_classes (int): Number of segmentation classes.
            ignore_index (int): Label value excluded from evaluation.

        Returns:
            dict[str, float]: Metric values in [0, 1].
        """
        if gt_seg_maps is None:
            gt_seg_maps = self._load_semantic_seg(self.data_infos)
        print('\nStart calculate mIou!!!')
        if isinstance(metric_seg, str):
            metric_seg = [metric_seg]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if not set(metric_seg).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric_seg))

        eval_results = {}
        seg_results = [result['seg_out'] for result in results]
        # A list of predicted maps or files is evaluated directly...
        if (mmcv.is_list_of(seg_results, np.ndarray)
                or mmcv.is_list_of(seg_results, str)):
            ret_metrics = eval_metrics(
                seg_results,
                gt_seg_maps,
                num_classes,
                ignore_index,
                metric_seg,
                label_map=None,
                reduce_zero_label=False)
        # ...otherwise assume pre_eval tuples.
        else:
            ret_metrics = pre_eval_to_metrics(seg_results, metric_seg)

        # Class names are required for the per-class table.
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = SEG_CLASSES

        # Summary table: mean over classes, in percent.
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })

        # Per-class table (aAcc has no per-class breakdown).
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)

        # Render both tables for the logger.
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column('m' + key, [val])

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log('\n' + summary_table_data.get_string(), logger=logger)

        # Flatten both tables into the returned dict, scaled back to [0, 1].
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0

        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })

        return eval_results


def _merge_func(info, CLASSES, iou_thr, task):
    """Fuse the patch-level detections of one original image.

    Args:
        info (tuple): (image id, list of [label, *bbox, score] arrays), one
            array per patch.
        CLASSES (Sequence[str]): Class names; only the count is used.
        iou_thr (float): IoU threshold for the per-class NMS that removes
            duplicates introduced by overlapping patches.
        task (str): 'Task1' keeps the boxes as-is; 'Task2' converts the
            kept boxes to horizontal ('hbb') form.

    Returns:
        tuple: (image id, list of per-class detection arrays).
    """
    img_id, patch_dets = info
    stacked = np.concatenate(patch_dets, axis=0)
    cls_ids, raw_dets = stacked[:, 0], stacked[:, 1:]
    # Pick the NMS implementation matching the bbox representation.
    nms_func = bt.choice_by_type(nms, obb_nms, BT_nms,
                                 raw_dets, with_score=True)

    merged = []
    for cls_idx in range(len(CLASSES)):
        kept, _ = nms_func(raw_dets[cls_ids == cls_idx], iou_thr)

        if task == 'Task2':
            hbbs = bt.bbox2type(kept[:, :-1], 'hbb')
            kept = np.concatenate([hbbs, kept[:, -1:]], axis=1)
        merged.append(kept)
    return img_id, merged


def _list_mask_2_obb(dets, segments):
    """Convert per-instance segmentation masks into oriented bboxes.

    Args:
        dets (list): Per-class, per-image detection arrays whose last
            column is the score.
        segments (list): Matching per-instance binary masks.

    Returns:
        list: Same nesting as ``dets``, with each instance replaced by a
        5-parameter OBB (from its largest contour) plus the original score.
    """
    new_dets = []
    for cls_dets, cls_segments in zip(dets, segments):
        new_cls_dets = []
        for ds, segs in zip(cls_dets, cls_segments):
            _, scores = ds[:, :-1], ds[:, -1]
            new_bboxes = []
            for seg in segs:
                # OpenCV 3.x returns (img, contours, hierarchy); 4.x
                # returns (contours, hierarchy) — handle both.
                try:
                    contours, _ = cv2.findContours(
                        seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                except ValueError:
                    _, contours, _ = cv2.findContours(
                        seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                if contours:
                    max_contour = max(contours, key=len).reshape(1, -1)
                    new_bboxes.append(bt.bbox2type(max_contour, 'obb'))
                else:
                    # Empty mask yields no contours; ``max`` would raise
                    # and drop-out would desync bboxes from ``scores``.
                    # Emit a degenerate zero box to keep alignment.
                    new_bboxes.append(np.zeros((1, 5)))

            new_bboxes = np.zeros((0, 5)) if not new_bboxes else \
                np.concatenate(new_bboxes, axis=0)
            new_cls_dets.append(
                np.concatenate([new_bboxes, scores[:, None]], axis=1))
        new_dets.append(new_cls_dets)
    return new_dets
