#!/usr/bin/env python 
# -*- coding: utf-8 -*-
# @Time    : 2019年03月09日15:37:17
# @Author  : Tang Yang
# @Desc    : 模型评估器
# @File    : ModelEvaluator.py
import os
import logging
import traceback
import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util

from evaluator.detect_result_evaluator import DetectResultEvaluator, EvaluationMatrix
from utils.basic import get_file_name, get_overlap_area, cv_imread
from utils.detector import load_model
from utils.detector import run_detection
from utils.evaluator import read_xml_as_eval_info
from utils.io import get_label_from_pd_file
from utils.io import get_label_list_from_category_index

from object_detection import eval_util
import functools
from object_detection.core import prefetcher
from object_detection.core import standard_fields as fields
from object_detection.builders import dataset_builder
from object_detection.builders import model_builder
from object_detection.utils import config_util
from object_detection.utils import dataset_util


def merge_boxes(all_boxes, all_classes, all_scores, overlap_percent):
    """
    Merge boxes whose mutual overlap is too high, keeping the higher-scoring
    box of each overlapping pair.  All three lists are modified in place.

    :param all_boxes: list of boxes, parallel to ``all_classes``/``all_scores``;
        each box is indexable as (c0, c1, c2, c3) with area (c2-c0)*(c3-c1)
    :param all_classes: list of class ids, parallel to ``all_boxes``
    :param all_scores: list of confidence scores, parallel to ``all_boxes``
    :param overlap_percent: overlap ratio (overlap area / smaller box area)
        above which two boxes are treated as duplicates
    :return: None (the input lists are mutated)
    """
    need_del_idx = set()  # indices scheduled for deletion; set gives O(1) membership tests
    for idx, box in enumerate(all_boxes):
        if idx in need_del_idx:
            continue
        for id_2, box_2 in enumerate(all_boxes[idx + 1:], idx + 1):
            overlap_area = get_overlap_area(box, box_2)
            # Normalise the overlap by the smaller of the two box areas.
            src_area = min((box[2] - box[0]) * (box[3] - box[1]),
                           (box_2[2] - box_2[0]) * (box_2[3] - box_2[1]))
            # Guard against degenerate (near zero-area) boxes.
            overlap_rate = 1 if src_area <= 1e-6 else overlap_area / src_area
            if overlap_rate > overlap_percent:
                # Drop the lower-scoring box of the pair (ties keep the earlier box).
                if all_scores[idx] < all_scores[id_2]:
                    need_del_idx.add(idx)
                else:
                    need_del_idx.add(id_2)
    # Delete from the highest index down so earlier indices stay valid.
    for i in sorted(need_del_idx, reverse=True):
        del all_boxes[i]
        del all_classes[i]
        del all_scores[i]


class ModelEvaluator:
    """
    Evaluate a frozen (.pb) detection model.

    Runs the model over every png/jpg image in ``test_images_dir``, reads the
    matching ``<name>.xml`` annotation as groundtruth, and feeds predictions
    and groundtruth into a DetectResultEvaluator for metric computation
    (precision/recall, confusion matrix, missing matrix, mAP).
    """
    def __init__(self, model_path: str, label_map_path: str, test_images_dir: str):
        """
        :param model_path: path to the frozen inference graph (.pb file)
        :param label_map_path: path to the label map (.pbtxt file)
        :param test_images_dir: directory containing the test images plus one
            xml annotation file per image
        :raises ValueError: if any of the three paths does not exist
        """
        if not os.path.exists(model_path):
            raise ValueError("model path does not exist.")
        if not os.path.exists(label_map_path):
            raise ValueError("label_map path does not exist.")
        if not os.path.exists(test_images_dir):
            raise ValueError("test_images_dir does not exist.")
        self._model_path = model_path
        self._label_map_path = label_map_path
        self._test_images_dir = test_images_dir
        self._gt_boxes = []
        self._gt_classes = []
        self._pred_boxes = []
        self._pred_classes = []
        self._pred_scores = []
        self._run_detect()
        self._evaluator = DetectResultEvaluator(self._gt_boxes, self._gt_classes,
                                                self._pred_boxes, self._pred_classes,
                                                self._pred_scores)

    def _run_detect(self):
        """Run detection on every test image, filling the prediction and
        groundtruth lists on ``self``."""
        # Load the frozen graph and label definitions.  Failures here are
        # fatal, so exceptions are simply allowed to propagate.
        detection_graph = load_model(self._model_path)
        label_map_dict = label_map_util.get_label_map_dict(self._label_map_path)
        category_index = get_label_from_pd_file(self._label_map_path, len(label_map_dict))
        label_list = get_label_list_from_category_index(category_index)
        # Session configuration: cap CPU parallelism, grow GPU memory on demand.
        config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
        config.gpu_options.allow_growth = True
        self._pred_boxes, self._pred_classes, self._pred_scores = [], [], []
        self._gt_boxes, self._gt_classes = [], []
        with detection_graph.as_default():
            with tf.Session(graph=detection_graph, config=config) as sess:
                for image_name in os.listdir(self._test_images_dir):
                    # Only evaluate png/jpg files; everything else (e.g. the
                    # xml annotations themselves) is skipped.
                    if image_name.split('.')[-1] not in ('png', 'jpg'):
                        continue
                    img_head_name = get_file_name(image_name)  # name without extension
                    try:
                        img = cv_imread(os.path.join(self._test_images_dir, image_name))
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                        xml_path = os.path.join(self._test_images_dir, img_head_name + '.xml')
                        img = np.expand_dims(img, axis=0)  # add batch dimension
                        _boxes, _classes, _scores = run_detection(sess, detection_graph, img)
                        # Keep only detections with a positive confidence score.
                        boxes, classes, scores = [], [], []
                        for box, cls, sc in zip(_boxes, _classes, _scores):
                            if sc > 0.0:
                                boxes.append(box)
                                classes.append(cls)
                                scores.append(sc)
                        objects = read_xml_as_eval_info(xml_path, label_list)['objects']
                        if len(objects) == 0:
                            gt_boxes, gt_classes = [], []
                        else:
                            # Each object row is (class_id, c0, c1, c2, c3);
                            # split off the class column.
                            gt_classes, gt_boxes = np.hsplit(np.array(objects), [1])
                            # Label ids in the xml are 0-based; the evaluator
                            # expects 1-based ids.
                            gt_classes = [c + 1 for c in gt_classes.flatten()]
                            # Swap the coordinate pairs so groundtruth boxes match
                            # the detector's box ordering — presumably
                            # (x, y) -> (y, x); confirm against read_xml_as_eval_info.
                            gt_boxes = [list(row) for row in gt_boxes[:, (1, 0, 3, 2)]]
                        # Collapse near-duplicate detections (overlap ratio > 0.3).
                        merge_boxes(boxes, classes, scores, 0.3)
                        self._pred_boxes.append(boxes)
                        self._pred_classes.append(classes)
                        self._pred_scores.append(scores)
                        self._gt_boxes.append(gt_boxes)
                        self._gt_classes.append(gt_classes)
                    except Exception:
                        # Log and skip unreadable/broken samples.  Narrowed from
                        # BaseException so KeyboardInterrupt/SystemExit still abort.
                        logging.info(traceback.format_exc())
                        logging.info("Skip %s", image_name)

    def get_precison_and_recall(self, threshold) -> tuple:
        """
        Compute precision and recall.
        :threshold: minimum threshold applied to the detected bounding boxes
            (see DetectResultEvaluator for the exact semantics)
        :return: whatever DetectResultEvaluator.precision_and_recall returns
        """
        return self._evaluator.precision_and_recall(threshold)

    # Correctly spelled, backward-compatible alias for the method above.
    get_precision_and_recall = get_precison_and_recall

    def get_confusion_matrix(self, threshold) -> "EvaluationMatrix":
        """
        Return the confusion matrix.
        :threshold: minimum threshold applied to the detected bounding boxes
        :return: EvaluationMatrix
        """
        return self._evaluator.confusion_matrix(threshold)

    def get_missing_matrix(self, threshold) -> "EvaluationMatrix":
        """
        Compute the missed-detection matrix.
        :threshold: minimum threshold applied to the detected bounding boxes
        :return: a matrix whose entries count missed detections per class
        """
        return self._evaluator.missing_matrix(threshold)

    def get_mAP(self) -> float:
        """Return the PascalVOC-style mean average precision."""
        return self._evaluator.voc_ap()

    def get_detect_result(self) -> tuple:
        """
        Return the raw detection results:
        (pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes),
        one entry per evaluated image.
        """
        return self._pred_boxes, self._pred_classes, self._pred_scores, self._gt_boxes, self._gt_classes


class CkptEvaluator:
    """Evaluate a TF Object Detection API training checkpoint.

    Builds the evaluation graph from a pipeline config, restores the latest
    checkpoint found in ``ckpt_dir``, runs it over the eval dataset, and feeds
    the collected detections and groundtruth into a DetectResultEvaluator.
    """

    def __init__(self, ckpt_dir, pipeconfig_path, label_map_path):
        """
        :param ckpt_dir: directory containing model checkpoints (the latest is used)
        :param pipeconfig_path: path to the object_detection pipeline config file
        :param label_map_path: path to the label map (stored; not read directly here)
        """
        self._ckpt_dir = ckpt_dir
        self._pipeconfig_path = pipeconfig_path
        self._label_map_path = label_map_path
        self._gt_boxes = []
        self._gt_classes = []
        self._pred_boxes = []
        self._pred_classes = []
        self._pred_scores = []
        # Run evaluation once to populate the prediction/groundtruth lists above.
        self._get_detect_results()
        self._evaluator = DetectResultEvaluator(self._gt_boxes, self._gt_classes, self._pred_boxes, self._pred_classes,
                                                self._pred_scores)

    def _extract_predictions_and_losses(self, model, create_input_dict_fn,
                                        ignore_groundtruth=False):
        """Build the eval graph: input pipeline, model forward pass and losses.

        :param model: detection model built from the pipeline config
        :param create_input_dict_fn: zero-arg function returning the input tensor dict
        :param ignore_groundtruth: if True, skip groundtruth tensors and loss ops
        :return: (result_dict, losses_dict) — per-example result tensors plus
            loss tensors (losses_dict is empty when groundtruth is ignored)
        """
        input_dict = create_input_dict_fn()
        prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
        input_dict = prefetch_queue.dequeue()
        # Add a batch dimension of 1 — the model API expects batched images.
        original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
        preprocessed_image, true_image_shapes = model.preprocess(
            tf.to_float(original_image))
        prediction_dict = model.predict(preprocessed_image, true_image_shapes)
        detections = model.postprocess(prediction_dict, true_image_shapes)

        groundtruth = None
        losses_dict = {}
        if not ignore_groundtruth:
            groundtruth = {
                fields.InputDataFields.groundtruth_boxes:
                    input_dict[fields.InputDataFields.groundtruth_boxes],
                fields.InputDataFields.groundtruth_classes:
                    input_dict[fields.InputDataFields.groundtruth_classes],
                fields.InputDataFields.groundtruth_area:
                    input_dict[fields.InputDataFields.groundtruth_area],
                fields.InputDataFields.groundtruth_is_crowd:
                    input_dict[fields.InputDataFields.groundtruth_is_crowd],
                fields.InputDataFields.groundtruth_difficult:
                    input_dict[fields.InputDataFields.groundtruth_difficult]
            }
            if fields.InputDataFields.groundtruth_group_of in input_dict:
                groundtruth[fields.InputDataFields.groundtruth_group_of] = (
                    input_dict[fields.InputDataFields.groundtruth_group_of])
            # Only wire up mask/keypoint groundtruth when the model predicts them.
            groundtruth_masks_list = None
            if fields.DetectionResultFields.detection_masks in detections:
                groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
                    input_dict[fields.InputDataFields.groundtruth_instance_masks])
                groundtruth_masks_list = [
                    input_dict[fields.InputDataFields.groundtruth_instance_masks]]
            groundtruth_keypoints_list = None
            if fields.DetectionResultFields.detection_keypoints in detections:
                groundtruth[fields.InputDataFields.groundtruth_keypoints] = (
                    input_dict[fields.InputDataFields.groundtruth_keypoints])
                groundtruth_keypoints_list = [
                    input_dict[fields.InputDataFields.groundtruth_keypoints]]
            # Label-map class ids start at 1; shift to 0-based for one-hot encoding.
            label_id_offset = 1
            model.provide_groundtruth(
                [input_dict[fields.InputDataFields.groundtruth_boxes]],
                [tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes]
                            - label_id_offset, depth=model.num_classes)],
                groundtruth_masks_list, groundtruth_keypoints_list)
            losses_dict.update(model.loss(prediction_dict, true_image_shapes))

        result_dict = eval_util.result_dict_for_single_example(
            original_image,
            input_dict[fields.InputDataFields.source_id],
            detections,
            groundtruth,
            class_agnostic=(
                    fields.DetectionResultFields.detection_classes not in detections),
            scale_to_absolute=True)
        return result_dict, losses_dict

    def _repeated_checkpoint_run(self,
                                 tensor_dict,
                                 batch_processor=None,
                                 checkpoint_dirs=None,
                                 variables_to_restore=None,
                                 restore_fn=None,
                                 num_batches=1,
                                 max_number_of_evaluations=None,
                                 master='',
                                 save_graph=False,
                                 save_graph_dir='', ):
        """Evaluate the latest checkpoint in ``checkpoint_dirs[0]``.

        NOTE(review): despite the name, this runs a single evaluation pass
        (no repeated polling loop); it returns [] when no checkpoint exists yet.

        :raises ValueError: if max_number_of_evaluations is non-positive or
            checkpoint_dirs is empty
        :return: list of per-example result dicts (possibly empty)
        """
        if max_number_of_evaluations and max_number_of_evaluations <= 0:
            raise ValueError(
                '`number_of_steps` must be either None or a positive number.')

        if not checkpoint_dirs:
            raise ValueError('`checkpoint_dirs` must have at least one entry.')

        model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
        if not model_path:
            # No checkpoint written yet — nothing to evaluate.
            return []
        else:
            results = self._run_checkpoint_once(tensor_dict,
                                                batch_processor,
                                                checkpoint_dirs,
                                                variables_to_restore,
                                                restore_fn, num_batches,
                                                master, save_graph,
                                                save_graph_dir)
            return results

    def _run_checkpoint_once(self, tensor_dict,
                             batch_processor=None,
                             checkpoint_dirs=None,
                             variables_to_restore=None,
                             restore_fn=None,
                             num_batches=1,
                             master='',
                             save_graph=False,
                             save_graph_dir='',
                             losses_dict=None):
        """Restore one checkpoint and run ``batch_processor`` for ``num_batches``.

        Restores either via ``restore_fn`` (preferred when given) or by building
        a Saver over ``variables_to_restore`` against the latest checkpoint in
        ``checkpoint_dirs[0]``.  Stops early on tf.errors.OutOfRangeError (the
        eval dataset is exhausted).

        :return: list of the dicts returned by ``batch_processor``
        """
        results = []
        if save_graph and not save_graph_dir:
            raise ValueError('`save_graph_dir` must be defined.')
        sess = tf.Session(master, graph=tf.get_default_graph())
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.tables_initializer())
        if restore_fn:
            restore_fn(sess)
        else:
            if not checkpoint_dirs:
                raise ValueError('`checkpoint_dirs` must have at least one entry.')
            checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, checkpoint_file)

        if save_graph:
            # Dump the eval graph for debugging/inspection.
            tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')

        # Queue runners feed the prefetch queue built in
        # _extract_predictions_and_losses while batches are evaluated.
        with tf.contrib.slim.queues.QueueRunners(sess):
            try:
                for batch in range(int(num_batches)):
                    if (batch + 1) % 100 == 0:
                        logging.info('Running eval ops batch %d/%d', batch + 1, num_batches)
                    result_dict = batch_processor(
                        tensor_dict, sess, losses_dict=losses_dict)
                    results.append(result_dict)
            except tf.errors.OutOfRangeError:
                logging.info('Done evaluating -- epoch limit reached')
        sess.close()
        return results

    def _evaluate(self, create_input_dict_fn, create_model_fn, eval_config,
                  checkpoint_dir, eval_dir='', graph_hook_fn=None, ):
        """Build the model/eval graph and run it against ``checkpoint_dir``.

        :param create_input_dict_fn: zero-arg function producing the input dict
        :param create_model_fn: zero-arg function producing the detection model
        :param eval_config: eval_config proto from the pipeline config
        :param checkpoint_dir: directory to restore the latest checkpoint from
        :param eval_dir: where to write the graph if eval_config.save_graph is set
        :param graph_hook_fn: optional callback invoked after graph construction
        :return: list of per-example result dicts
        """
        model = create_model_fn()

        if eval_config.ignore_groundtruth and not eval_config.export_path:
            logging.fatal('If ignore_groundtruth=True then an export_path is '
                          'required. Aborting!!!')

        tensor_dict, losses_dict = self._extract_predictions_and_losses(
            model=model,
            create_input_dict_fn=create_input_dict_fn,
            ignore_groundtruth=eval_config.ignore_groundtruth)

        def _process_batch(tensor_dict, sess, losses_dict=None):
            # Evaluate one example; InvalidArgumentError (e.g. a corrupt
            # record) yields an empty dict so evaluation can continue.
            try:
                if not losses_dict:
                    losses_dict = {}
                result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict])
            except tf.errors.InvalidArgumentError:
                return {}
            return result_dict

        variables_to_restore = tf.global_variables()
        global_step = tf.train.get_or_create_global_step()
        variables_to_restore.append(global_step)

        if graph_hook_fn:
            graph_hook_fn()

        if eval_config.use_moving_averages:
            # Restore the moving-average shadow variables instead of the raw ones.
            variable_averages = tf.train.ExponentialMovingAverage(0.0)
            variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        def _restore_latest_checkpoint(sess):
            latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
            saver.restore(sess, latest_checkpoint)

        results = self._repeated_checkpoint_run(
            tensor_dict=tensor_dict,
            batch_processor=_process_batch,
            checkpoint_dirs=[checkpoint_dir],
            variables_to_restore=None,
            restore_fn=_restore_latest_checkpoint,
            num_batches=eval_config.num_examples,
            max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else
                                       eval_config.max_evals
                                       if eval_config.max_evals else None),
            master=eval_config.eval_master,
            save_graph=eval_config.save_graph,
            save_graph_dir=(eval_dir if eval_config.save_graph else ''))
        return results

    def _get_detect_results(self):
        """Parse the pipeline config, run evaluation, and copy detections and
        groundtruth from the result dicts into the lists on ``self``."""
        assert self._ckpt_dir, '`checkpoint_dir` is missing.'
        configs = config_util.get_configs_from_pipeline_file(
            self._pipeconfig_path)
        model_config = configs['model']
        eval_config = configs['eval_config']
        input_config = configs['eval_input_config']

        model_fn = functools.partial(
            model_builder.build,
            model_config=model_config,
            is_training=False)

        def get_next(config):
            # One-shot iterator over the eval dataset described by the config.
            return dataset_util.make_initializable_iterator(
                dataset_builder.build(config)).get_next()

        create_input_dict_fn = functools.partial(get_next, input_config)

        results = self._evaluate(create_input_dict_fn, model_fn, eval_config, self._ckpt_dir)

        for result in results:
            self._pred_boxes.append(list(result['detection_boxes']))
            self._pred_scores.append(list(result['detection_scores']))
            self._pred_classes.append(list(result['detection_classes']))
            self._gt_boxes.append(list(result['groundtruth_boxes']))
            self._gt_classes.append(list(result['groundtruth_classes']))
        return results

    def get_precison_and_recall(self, threshold) -> tuple:
        """
        Compute precision and recall.
        :threshold: minimum threshold applied to the detected bounding boxes
            (see DetectResultEvaluator for the exact semantics)
        :return: whatever DetectResultEvaluator.precision_and_recall returns
        """
        return self._evaluator.precision_and_recall(threshold)

    def get_confusion_matrix(self, threshold) -> EvaluationMatrix:
        """
        Return the confusion matrix.
        :threshold: minimum threshold applied to the detected bounding boxes
        :return: EvaluationMatrix
        """
        return self._evaluator.confusion_matrix(threshold)

    def get_missing_matrix(self, threshold) -> EvaluationMatrix:
        """
        Compute the missed-detection matrix.
        :threshold: minimum threshold applied to the detected bounding boxes
        :return: a matrix whose entries count missed detections per class
        """
        return self._evaluator.missing_matrix(threshold)

    def get_mAP(self) -> int:
        # NOTE(review): voc_ap presumably returns a float mAP; the declared
        # `int` return annotation looks wrong — confirm against DetectResultEvaluator.
        return self._evaluator.voc_ap()

    def get_detect_result(self) -> (list, list, list, list, list):
        """
        Return the raw detection results:
        (pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes),
        one entry per evaluated example.
        """
        return self._pred_boxes, self._pred_classes, self._pred_scores, self._gt_boxes, self._gt_classes
