import os
import time

import cv2
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_util
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

from beer.data.tools import check_xml_and_img_file
from utils.detector import run_detection
from utils.evaluator import eval_detect_result
from utils.evaluator import merge_region_prediction
from utils.evaluator import read_xml_as_eval_info
from utils.list import create_file_list


def _print_metrics(ground_truth, predictions):
    """
    Convert accumulated binary labels to int arrays, print them together with
    recall and precision, and return ``(recall, precision)``.

    Does not mutate its arguments (the caller keeps plain lists and continues
    appending to them between periodic reports).
    """
    gt = np.asarray(ground_truth).astype(np.int32)
    pred = np.asarray(predictions).astype(np.int32)
    print(gt)
    print(pred)
    recall = recall_score(gt, pred)
    print("recall is:", recall)
    precision = precision_score(gt, pred)
    print("precision is:", precision)
    return recall, precision


def predict_image_measure(checkpoint, image_list, score=0.4, percent=0.6,
                          output_dir='/home/admins/data/fisheye/test/center/image/res'):
    """
    Run detection over a list of (image, annotation) pairs with a frozen
    TensorFlow graph, save a visualization per image, and measure
    precision/recall against the XML ground truth.

    :param checkpoint: path to a frozen_inference_graph.pb file
    :param image_list: list of strings, each of the form
                       ``'<image_path>&!&<xml_path>'`` (the separator matches
                       the one used when the list was built)
    :param score: minimum confidence for a predicted box to be drawn and
                  counted in the evaluation
    :param percent: minimum overlap ratio used to merge two overlapping boxes
    :param output_dir: directory where the annotated figures are written as
                       ``<index>.jpg`` (defaults to the previous hard-coded path)
    :return: tuple ``(output_images, time_cost, recall, precision)``;
             ``output_images`` is currently always empty and kept only for
             backward compatibility of the return signature
    """
    output_images = []

    # Load the frozen inference graph once, up front.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(checkpoint, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')

    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all at session start.
    config.gpu_options.allow_growth = True
    category_index = {1: {'id': 1, 'name': 'p'}}
    label_list = ['person']
    ground_truth = []
    predictions = []
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            start_time = time.time()
            for idx, paths in enumerate(image_list):
                iter_start = time.time()
                img_path, xml_path = paths.split('&!&')
                # cv2 loads BGR; reorder channels to RGB for the model/plots.
                img = cv2.imread(img_path)[:, :, (2, 1, 0)]
                image_np = np.expand_dims(img, axis=0)
                boxes, classes, scores = run_detection(sess, detection_graph, image_np)
                _boxes, _classes, _scores = merge_region_prediction(
                    np.array(boxes), np.array(scores), np.array(classes), percent)
                _boxes = np.array(_boxes)
                _classes = np.array(_classes).astype(np.int32)
                _scores = np.array(_scores)
                vis_util.visualize_boxes_and_labels_on_image_array(
                    img, _boxes, _classes, _scores, category_index,
                    use_normalized_coordinates=True,
                    min_score_thresh=score,
                    line_thickness=1)
                plt.figure(figsize=(22, 10), dpi=100)
                plt.title(paths)
                plt.imshow(img)
                plt.savefig(os.path.join(output_dir, '{}.jpg'.format(idx)))
                plt.close()
                # Keep only predictions at or above the score threshold.
                comb = filter(lambda x: x[-1] >= score, zip(_boxes, _classes, _scores))
                _boxes, _classes, _ = np.hsplit(np.array(list(comb)), [1, 2])
                _classes = list(_classes.flatten())
                _boxes = list(map(lambda x: list(x[0]), _boxes))
                objects_info = read_xml_as_eval_info(xml_path.strip(), label_list)['objects']
                if len(objects_info) == 0:
                    gt_boxes = []
                    gt_classes = []
                else:
                    # First column is the class id, remaining four the corners.
                    gt_classes, gt_boxes = np.hsplit(np.array(objects_info), [1])
                    gt_classes = list(gt_classes.flatten())
                    # Reorder corners to the detector's box layout — presumably
                    # (xmin, ymin, xmax, ymax) -> (ymin, xmin, ymax, xmax);
                    # TODO confirm against read_xml_as_eval_info.
                    gt_boxes = gt_boxes[:, (1, 0, 3, 2)]
                    gt_boxes = list(map(list, gt_boxes))
                # XML classes are 0-based while the detector is 1-based, hence +1.
                gt, pred = eval_detect_result(gt_boxes, list(map(lambda x: x + 1, gt_classes)),
                                              _boxes, _classes, default_class=0)
                ground_truth += gt
                predictions += pred
                print(time.time() - iter_start)
                if idx % 100 == 0:
                    # Periodic progress report with running metrics.
                    print("cost time is:", time.time() - iter_start)
                    _print_metrics(ground_truth, predictions)

            time_cost = time.time() - start_time
    recall, precision = _print_metrics(ground_truth, predictions)
    return output_images, time_cost, recall, precision


if __name__ == '__main__':
    roots = '/home/admins/data/fisheye/test/no_mark1_done'
    image_lists, _ = create_file_list(roots, filtering=check_xml_and_img_file, params=[[], '&!&'])
    ckpts = '/home/admins/data/fisheye/models/frozen_inference_graph_fast_center_200000.pb'
    # BUG FIX: the label file path was previously passed as ``image_list``
    # (and the image list as ``score``), which would crash on the first
    # ``paths.split('&!&')``.  The signature is (checkpoint, image_list, ...),
    # and the label map is not consumed by predict_image_measure at all.
    predict_image_measure(ckpts, image_lists)
