# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import json
import logging
import os
from collections import OrderedDict

import numpy as np
import pycocotools.mask as mask_util
import torch
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from PIL import Image
from tabulate import tabulate

from maskdino.data.detection_utils import read_dianjiao_image

# Whether OpenCV could be imported; consulted by DianJiaoSegEvaluator to
# decide if Boundary IoU (which requires cv2) can be computed.
_CV2_IMPORTED = True
try:
    import cv2  # noqa
except ImportError:
    # OpenCV is an optional dependency at the moment
    _CV2_IMPORTED = False


class DianJiaoSegEvaluator(DatasetEvaluator):
    """
    Evaluate semantic segmentation metrics for the DianJiao (binary) task.

    ``process`` binarizes each predicted score map, aligns it with the
    ground truth, and stores per-image pixel accuracy / mean class accuracy /
    mean IoU; ``evaluate`` averages the stored per-image metrics.
    """

    def __init__(
        self,
        dataset_name,
        distributed=True,
        output_dir=None,
        *,
        sem_seg_loading_fn=read_dianjiao_image,
        num_classes=None,
        ignore_label=None,
        threshold=0.5
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
            distributed (bool): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): an output directory to dump results.
            sem_seg_loading_fn: function to read sem seg file and load into numpy array.
                Default provided, but projects can customize.
            num_classes, ignore_label: deprecated arguments; obtained from metadata.
            threshold (float): probability cutoff used to binarize predicted masks.
        """
        # Bug fix: self._logger was referenced below but its assignment was
        # commented out, raising AttributeError whenever a warning fired.
        self._logger = logging.getLogger(__name__)
        if num_classes is not None:
            # Logger.warn() is deprecated in the stdlib; use warning().
            self._logger.warning(
                "DianJiaoSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
            )
        if ignore_label is not None:
            self._logger.warning(
                "DianJiaoSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
            )
        self._dataset_name = dataset_name
        self._distributed = distributed
        self._output_dir = output_dir
        self.threshold = threshold

        self._cpu_device = torch.device("cpu")

        # Map each input image path to its ground-truth mask path.
        self.input_file_to_gt_file = {
            dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
            for dataset_record in DatasetCatalog.get(dataset_name)
        }

        meta = MetadataCatalog.get(dataset_name)
        # Dict that maps contiguous training ids to COCO category ids
        try:
            c2d = meta.stuff_dataset_id_to_contiguous_id
            self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
        except AttributeError:
            self._contiguous_id_to_dataset_id = None
        self._class_names = meta.stuff_classes
        self.sem_seg_loading_fn = sem_seg_loading_fn
        self._num_classes = len(meta.stuff_classes)
        if num_classes is not None:
            assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
        self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label

        # This is because cv2.erode did not work for int datatype. Only works for uint8.
        self._compute_boundary_iou = True
        if not _CV2_IMPORTED:
            self._compute_boundary_iou = False
            self._logger.warning(
                """Boundary IoU calculation requires OpenCV. B-IoU metrics are
                not going to be computed because OpenCV is not available to import."""
            )
        if self._num_classes >= np.iinfo(np.uint8).max:
            self._compute_boundary_iou = False
            self._logger.warning(
                f"""DianJiaoSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!
                B-IoU metrics are not going to be computed. Max allowed value (exclusive)
                for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.
                The number of classes of dataset {self._dataset_name} is {self._num_classes}"""
            )

    def reset(self):
        """Clear all per-image metric accumulators before a new evaluation run."""
        self._pixel_accuracy = []
        self._mean_accuracy = []
        self._mean_iou = []
        self._class_accuracy = []
        self._class_iou = []
        self._predictions = []

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a model.
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name".
            outputs: the outputs of a model. It is either list of semantic segmentation predictions
                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
                segmentation prediction in the same format.
        """
        for input_record, output in zip(inputs, outputs):
            assert len(output["sem_seg"]) == 1, "dianjiao only support binary classification."
            mask = output["sem_seg"][0].to(self._cpu_device)
            # Binarize the score map. Bug fix: use >= so pixels exactly at
            # the threshold are classified instead of keeping their raw score
            # (the original `>` / `<` pair left them untouched, producing
            # garbage labels after the inversion below).
            mask[mask >= self.threshold] = 255
            mask[mask < self.threshold] = 0
            # Invert so the mask matches the GT convention used below:
            # background = 255, foreground labels start at 0.
            # Bug fix: np.int was removed in NumPy 1.24; use an explicit int64.
            pred = np.array(255 - mask, dtype=np.int64)
            pred[pred == self._ignore_label] = self._num_classes

            gt = self._load_gt(input_record)
            self._accumulate(pred, gt)

    def _load_gt(self, input_record):
        """Load the GT mask for one record and convert it to the shared
        convention (background=255, labels from 0, ignore -> num_classes)."""
        gt_filename = self.input_file_to_gt_file[input_record["file_name"]]
        if "0_255_type" in input_record:
            gt = cv2.imread(gt_filename, cv2.IMREAD_GRAYSCALE)
            h, w = gt.shape
            if h > w:
                # Rotate portrait GT masks so orientation matches the prediction.
                gt = cv2.rotate(gt, cv2.ROTATE_90_COUNTERCLOCKWISE)
            gt = np.array(gt, dtype=np.int64)
        else:
            gt = np.array(self.sem_seg_loading_fn(gt_filename), dtype=np.int64)
        # Same as the prediction: make background 255 and labels start at 0.
        gt = 255 - gt
        gt[gt == self._ignore_label] = self._num_classes
        return gt

    def _accumulate(self, pred, gt):
        """Compute per-image metrics from aligned pred/gt label maps and
        append them to the accumulators."""
        predictions = pred.flatten()
        labels = gt.flatten()

        # Fold the 255 background value into class id 1 for the metrics.
        predictions[predictions == 255] = 1
        labels[labels == 255] = 1

        n_bins = self._num_classes + 1  # foreground classes + background

        pixel_accuracy = np.sum(predictions == labels) / len(labels)
        self._pixel_accuracy.append(pixel_accuracy)

        # Per-class pixel counts and true positives.
        class_counts = np.bincount(labels, minlength=n_bins)
        true_positive_counts = np.bincount(labels[predictions == labels], minlength=n_bins)

        # Per-class IoU. NOTE(review): a class absent from both pred and gt
        # yields 0/0 = NaN here, which propagates into the means — confirm
        # every image contains both classes, or filter NaNs upstream.
        union = class_counts + np.bincount(predictions, minlength=n_bins) - true_positive_counts
        iou = true_positive_counts / union

        class_accuracy = true_positive_counts / class_counts
        mean_accuracy = np.mean(class_accuracy)
        mean_iou = np.mean(iou)

        self._mean_accuracy.append(mean_accuracy)
        self._mean_iou.append(mean_iou)
        self._class_accuracy.append(class_accuracy)
        self._class_iou.append(iou)

        self._predictions.append({
            'Pixel Accuracy': pixel_accuracy,
            'Mean Accuracy': mean_accuracy,
            'Mean IoU': mean_iou,
            'Class Accuracy': class_accuracy,
            'Class IoU': iou,
        })

    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)

        Returns:
            OrderedDict: {"sem_seg": {"mACC": ..., "mIoU": ..., "pACC": ...}}
        """
        # NOTE(review): self._distributed is accepted by __init__ but results
        # are never gathered across ranks here — confirm single-process use.
        pACC = np.mean(self._pixel_accuracy) * 100
        mACC = np.mean(self._mean_accuracy) * 100
        mIoU = np.mean(self._mean_iou) * 100
        metric_rows = [
            ["mACC", mACC],
            ["mIoU", mIoU],
            ["pACC", pACC],
        ]

        n_bins = self._num_classes + 1
        # Bug fix: the reshape width was hard-coded to 2, which only worked
        # for exactly one foreground class; derive it from num_classes.
        acc_mean = np.concatenate(self._class_accuracy).reshape(-1, n_bins).mean(axis=0)
        class_acc_rows = [
            [f"ACC-{self._class_names[i]}", acc_mean[i] * 100]
            for i in range(self._num_classes)
        ]
        class_acc_rows.append(["ACC-background", acc_mean[-1] * 100])

        iou_mean = np.concatenate(self._class_iou).reshape(-1, n_bins).mean(axis=0)
        class_iou_rows = [
            [f"IoU-{self._class_names[i]}", iou_mean[i] * 100]
            for i in range(self._num_classes)
        ]
        class_iou_rows.append(["IoU-background", iou_mean[-1] * 100])

        res = dict(metric_rows)
        results = OrderedDict({"sem_seg": res})
        print("\n" + tabulate(metric_rows, headers=['Metric', 'Value'], tablefmt='grid'))
        print("\n" + tabulate(class_acc_rows, headers=['Class_ACC', 'Value'], tablefmt='grid'))
        print("\n" + tabulate(class_iou_rows, headers=['Class_IoU', 'Value'], tablefmt='grid'))
        print(results)
        return results


class DianJiaoSegEvaluator_DeepLab(DianJiaoSegEvaluator):
    """
    DianJiaoSegEvaluator variant for models (e.g. DeepLab heads) whose
    "sem_seg" output is raw logits rather than probabilities: a sigmoid is
    applied before the parent's thresholding and metric computation.
    """

    def process(self, inputs, outputs):
        """Apply a sigmoid to each "sem_seg" logit map, then delegate to
        DianJiaoSegEvaluator.process.

        The original implementation duplicated the parent's entire body
        verbatim except for the added sigmoid; delegating keeps the metric
        logic in one place. Applying sigmoid before the parent moves the
        tensor to CPU is numerically identical to the original order.
        """
        probabilistic = [
            {"sem_seg": torch.sigmoid(output["sem_seg"])} for output in outputs
        ]
        super().process(inputs, probabilistic)