#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/10/24 15:28
# @Author  : cj
# @File    : predict1.0.py
"""
@author: chenjun
@contact: gangkanli1219@gmail.com
@time: 1/3/18 5:18 PM
@desc: trying to speed up predicting
"""
from __future__ import division
import argparse
import logging
import os
import time
from src.detection.objectionutils import label_map_util
import cv2
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
from src.detection.objectionutils import visualization_utils as vis_utils
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from src.detection.utils.basic import get_file_name
from src.detection.utils.detector import load_model
from src.detection.utils.detector import run_detection
from src.detection.utils.evaluator import eval_detect_result
from src.detection.utils.evaluator import read_xml_as_eval_info
from src.detection.utils.io import get_label_from_pd_file
from src.detection.utils.io import get_label_list_from_category_index
from src.image_preprocess import project
def parse_args():
    """Parse command-line options for the prediction script.

    Returns:
        argparse.Namespace with attributes ``root``, ``model_edge``,
        ``model_center`` and ``output`` (all defaulting to '').
    """
    parser = argparse.ArgumentParser()
    # (flag, dest, help) triples — every option is a string defaulting to ''.
    options = [
        ('--image-root', 'root',
         'The directory where the image data and the annotation is stored.'
         '保存图像与标记数据的根目录, 图像与其相应标记应当在同一级目录。'),
        ('--model-edge', 'model_edge', '保存模型的目录edge'),
        ('--model-center', 'model_center', '保存模型的目录center'),
        ('--output-root', 'output',
         'the root for all the output.'
         '输出结果的根目录'),
    ]
    for flag, dest, help_text in options:
        parser.add_argument(flag, type=str, default='', dest=dest,
                            help=help_text)
    return parser.parse_args()





"""
# predict_image：函数功能，画出图片错标，漏标行人，黑色框为真实框，绿色为预测值，金色漏框，蓝色错框
#image_path:图片路径
#label_list：label_list=['person']
# checkpoint_center：中心模型的PB文件
# checkpoint_edge：边缘模型的PB文件
# score：置信度超过score才画出
框：[ymin, xmin, ymax, xmax]
"""


def predict_image(image_path, label_map_path, checkpoint='', score=0.5, percent=0.7):
    """Run detection on every image in *image_path* and report precision/recall.

    For each ``.png``/``.jpg`` image the frozen model is run, predictions with
    confidence >= *score* are kept, the annotated image is written to disk and
    the predictions are compared against the ground truth parsed from the
    per-image ``.xml`` file (same directory, same base name).

    Args:
        image_path: directory containing the images and their ``.xml`` files.
        label_map_path: path to the label-map (``.pbtxt``) file.
        checkpoint: path of the frozen inference graph (``.pb``) to load.
        score: confidence threshold for keeping/drawing a detection.
        percent: overlap ratio for merging duplicate boxes.
            NOTE(review): currently unused — kept for interface compatibility.

    Boxes are in ``[ymin, xmin, ymax, xmax]`` normalized order.
    """
    start_time = time.time()
    # Load the frozen detection model. (The original code loaded two graphs
    # but then referenced an undefined name `detection_graph`; a second
    # argument-less load_model() call was also dead code — both fixed here.)
    detection_graph = load_model(checkpoint)

    # category_index looks like:
    # {1: {'name': 'Sensodyne1', 'id': 1}, 2: {'name': 'Sensodyne2', 'id': 2}, ...}
    label_map_dict = label_map_util.get_label_map_dict(label_map_path)
    # NOTE(review): 127 is presumably the number of classes in the label map
    # — confirm against the .pbtxt file.
    category_index = get_label_from_pd_file(label_map_path, 127)
    label_list = get_label_list_from_category_index(category_index)

    # Session configuration: let GPU memory grow on demand.
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    ground_true = []
    predictions = []
    print('loading models completed!,time is', time.time() - start_time)

    start_time = time.time()
    # Iterate over every image file in the directory.
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            for image_name in os.listdir(image_path):
                # Accept upper-case extensions too (.PNG/.JPG).
                if not image_name.lower().endswith(('.png', '.jpg')):
                    continue
                print('predicting image:', (os.path.join(image_path, image_name)))
                # File name without extension; the annotation shares it.
                img_head_name = get_file_name(image_name)
                img = cv2.imread(os.path.join(image_path, image_name))
                output_img = img.copy()
                # OpenCV loads BGR; the model expects RGB.
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                xml_path = os.path.join(image_path, img_head_name + '.xml')

                # Run detection on a single-image batch.
                # boxes: [[ymin, xmin, ymax, xmax], ...]; classes: [1 1 ...];
                # scores: [0.99 0.98 ...]
                img = np.expand_dims(img, axis=0)
                _boxes, _classes, _scores = run_detection(sess, detection_graph, img)
                # Keep only detections whose confidence reaches the threshold.
                boxes, classes, scores = [], [], []
                for box, cls, sc in zip(_boxes, _classes, _scores):
                    if sc >= score:
                        boxes.append(box)
                        classes.append(cls)
                        scores.append(sc)
                print('boxes', boxes)

                objects = read_xml_as_eval_info(xml_path, label_list)['objects']
                if len(objects) == 0:
                    gt_boxes = []
                    gt_classes = []
                else:
                    # Split [class, xmin, ymin, xmax, ymax] rows into the
                    # class column and the box columns. (Moved inside the
                    # non-empty branch so an empty annotation cannot break
                    # the hsplit.)
                    gt_classes, gt_boxes = np.hsplit(np.array(objects), [1])
                    gt_classes = list(gt_classes.flatten())
                    # Reorder [xmin, ymin, xmax, ymax] -> [ymin, xmin, ymax, xmax].
                    gt_boxes = gt_boxes[:, (1, 0, 3, 2)]
                    gt_boxes = [list(b) for b in gt_boxes]

                # Draw the raw predictions; min_score_thresh applies the same
                # confidence cut-off as the filtering above.
                vis_utils.visualize_boxes_and_labels_on_image_array(
                    output_img, np.array(_boxes),
                    np.ones((len(_classes),), dtype=np.int32), _scores,
                    label_map_dict,
                    use_normalized_coordinates=True,
                    max_boxes_to_draw=200,
                    min_score_thresh=score,
                    line_thickness=3)
                plt.figure(figsize=(10, 10), dpi=100)
                plt.title(img_head_name)
                plt.imshow(output_img)

                # TODO(review): hard-coded output directory — make this a
                # parameter or use the --output-root option.
                cv2.imwrite(os.path.join(r"D:\360\2", image_name), output_img)
                plt.close()
                # Ground-truth classes are shifted by one because the XML
                # reader maps the first label to 0.
                gt, pred = eval_detect_result(
                    gt_boxes, [c + 1 for c in gt_classes],
                    boxes, classes, default_class=0)
                print('gt', gt)
                print('pred', pred)
                ground_true += gt
                predictions += pred

            recall = recall_score(ground_true, predictions, average='weighted')
            precision = precision_score(ground_true, predictions, average='weighted')
            print('Total Recall: ', recall)
            print('Total Precision: ', precision)




if __name__ == '__main__':
    # NOTE(review): paths are hard-coded for a local run; wire this to
    # parse_args() to drive it from the command line instead.
    # (Removed commented-out dead code that referenced an outdated
    # predict_image signature: checkpoint_center/checkpoint_edge/label_list.)
    predict_image(
        image_path=r"D:\360\1",
        label_map_path=r'F:\car\pdtxt\label_map_caffe.pdtxt',
        checkpoint=r"D:\360\caffemodel\frozen_inference_graph.pb",
        score=0.3,
        percent=0.8)
