
import os
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
import visualization_utils as vis_util
#from utils import visualization_utils as vis_util
#from utils import label_map_util
import image_classification
import random
# BUGFIX: plain string comparison is wrong for version numbers
# ('1.10.0' < '1.4.0' is True lexicographically); compare the numeric
# (major, minor) tuple instead.
if tuple(int(part) for part in tf.__version__.split('.')[:2]) < (1, 4):
    raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')

# Paths of the frozen inference graphs used below.
# Google SSDLite MobileNet v2 pre-trained weights
SSD_MODEL_FILE = 'res/models/ssdlite_mobilenet_v2.pb'
# Google Faster R-CNN NAS pre-trained weights
FRCNN_MODEL_FILE = 'res/models/faster_rcnn_nas.pb'
# Self-trained weights used for the direct (one-step) detection model
DIRECT_MODEL_FILE = 'res/models/ssd_mobilenet.pb'
# Label map file (one label per line, see init())
PATH_TO_LABELS = 'res/values/labels.txt'
# Three module-level graphs, one per model, populated by init()
ssd_graph = tf.Graph()
fastrcnn_graph = tf.Graph()
direct_graph = tf.Graph()
# Default output directory (may be overridden by detecter())
output_path = 'out'
# Label data lives in these two dicts: category_index is used when drawing
# results onto output images, category_dic maps class index -> label text.
# Two dicts are kept because training used numeric class ids (TensorFlow
# handles Chinese label text poorly).
category_index = {}
category_dic = {}
def _load_frozen_graph(graph, model_file):
    """Import the frozen GraphDef stored at *model_file* into *graph*."""
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_file, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        tf.import_graph_def(graph_def, name='')

def init():
    """Initialise global state: load the label map and the three model graphs.

    Fills category_index (id -> {'id', 'name'}, used when drawing results)
    and category_dic (id -> label text), then imports the SSD, Faster R-CNN
    and direct-detection frozen graphs into their module-level tf.Graph
    objects.  Must be called before any detection function.
    """
    # Label file format: one label per line, optionally "prefix:label";
    # only the part after the last ':' is kept.
    with open(PATH_TO_LABELS, 'rb') as f:
        for num, line in enumerate(f):
            label = line.decode('utf-8').strip().split(':')[-1]
            category_index[num] = {'id': num, 'name': label}
            category_dic[num] = label
    # The three loaders were previously triplicated inline; use one helper.
    _load_frozen_graph(ssd_graph, SSD_MODEL_FILE)
    _load_frozen_graph(fastrcnn_graph, FRCNN_MODEL_FILE)
    _load_frozen_graph(direct_graph, DIRECT_MODEL_FILE)
def get_class_name(cls):
    """Map a class index to its label text.

    cls: integer class index as produced by detection/classification.
    Returns: the label string loaded from the label map by init().
    Raises: KeyError if the index is not in the label map.
    """
    return category_dic[cls]
def load_image_into_numpy_array(image):
    """Convert a PIL image into an (H, W, 3) uint8 RGB numpy array.

    image: a PIL Image (RGB for jpg/bmp, RGBA for png).
    Returns: numpy array of shape (height, width, 3), dtype uint8.
        For RGBA input the alpha channel is dropped.
    """
    (im_width, im_height) = image.size
    if image.mode == 'RGB':
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
    # png data carries an alpha channel: keep only the RGB planes
    if image.mode == 'RGBA':
        tmp = np.array(image.getdata()).reshape(
            (im_height, im_width, 4))
        return tmp[:, :, :3].astype(np.uint8)
    # BUGFIX/generalization: other modes (grayscale 'L', palette 'P', ...)
    # previously fell through and returned None, crashing downstream;
    # convert them to RGB and retry instead.
    return load_image_into_numpy_array(image.convert('RGB'))
def nms(dets, scores, thresh):
    """Pure-Python non-maximum suppression.

    dets: (N, 4) array-like of boxes; treated as [x1, y1, x2, y2], but the
        IoU computation is symmetric in axes so [y1, x1, y2, x2] (the TF
        detection convention) works identically.
    scores: (N,) array-like of per-box confidences.
    thresh: IoU threshold; a lower-scored box overlapping a kept box by
        more than this is suppressed.
    Returns: list of indices (into dets) of the boxes to keep,
        highest score first.
    """
    # Generalization: accept plain Python lists as well as numpy arrays.
    dets = np.asarray(dets, dtype=np.float64)
    scores = np.asarray(scores, dtype=np.float64)
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    # Process boxes from highest to lowest score.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        # order[0] is the highest-scored remaining box: always kept.
        i = order[0]
        keep.append(i)
        # Intersection of box i with every other remaining box.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1)
        h = np.maximum(0.0, yy2 - yy1)
        inter = w * h
        # IoU = intersection / union.
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only boxes whose IoU with box i is at or below the threshold;
        # +1 maps indices in ovr (computed over order[1:]) back into order.
        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]
    return keep
'''此函数用于直接检测图中的车辆型号
    imgfile:图片全路径
    return：一个列表，包括检测出的所有车进行非最大值抑制后的类别、概率、坐标'''
def detect_img(imgfile):
    """Run the self-trained 'direct' model on one image.

    imgfile: full path of the image to detect.
    Returns: a flat list of (class, score, box) triples for every box
        kept by NMS; empty list if the image cannot be opened.
    """
    r_list = []
    detection_graph = direct_graph
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # NOTE(review): no input queues are defined by this code, so the
            # Coordinator/queue-runner pair looks unnecessary — confirm.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            # Standard TF Object Detection API tensor names.
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            try:
                image = Image.open(imgfile)
            except IOError as e:
                # Unreadable image: return the empty result list.
                return r_list
            image_np = load_image_into_numpy_array(image)
            # The model expects a batch dimension: (1, H, W, 3).
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes).astype(np.int32)
            scores = np.squeeze(scores)
            # Keep detections above 5%.  Truncating boxes/classes to
            # len(scores) assumes scores come back sorted descending
            # (the TF OD API convention) — TODO confirm for this model.
            scores = scores[scores > 0.05]
            boxes = boxes[0:len(scores), ]
            classes = classes[0:len(scores), ]
            # Non-maximum suppression with IoU threshold 0.5.
            keep = nms(boxes, scores, 0.5)
            for i in range(len(scores)):
                # Save class, score and box for every detection kept by NMS.
                if i in keep:
                    # -1: presumably detection class ids are 1-based while the
                    # label map is 0-based — confirm against the exported model.
                    r_list.append(classes[i]-1)
                    r_list.append(scores[i])
                    r_list.append(boxes[i])
            coord.request_stop()
            coord.join(threads)
            return r_list
def resize_box(box):
    """Enlarge a normalized bbox by 10% on each side (~20% per axis).

    Classifying a crop taken from the raw detection box works poorly;
    enlarging the box first improves classification results noticeably.

    box: [y1, x1, y2, x2] with normalized values in [0, 1].
    Returns: a new [y1, x1, y2, x2] list, clamped to [0, 1].
    """
    height = box[2] - box[0]
    width = box[3] - box[1]
    y1 = box[0] - height * 0.1
    x1 = box[1] - width * 0.1
    y2 = box[2] + height * 0.1
    x2 = box[3] + width * 0.1
    # Clamp the enlarged box back into the unit square.
    # (The original used a local variable that shadowed the function name.)
    return [max(0, y1), max(0, x1), min(1, y2), min(1, x2)]
'''此方法用于将输入图片进行车辆检测并抠出来保存成临时文件
    imgfile:需检测图片全路径
    model：选择的检测方法
    return:抠出的图片保存为临时图全路径的列表'''
def split_img(imgfile, model):
    """Detect vehicles in one image and crop each to a temp jpg file.

    imgfile: full path of the image to process.
    model: 'ssd' or 'frnn', selecting which detection graph to run.
    Returns: a flat list alternating (box, temp file path) per distinct
        vehicle; empty list for an unknown model or unreadable image.
    """
    vehicle_list = []
    # Split the path into directory / base name / extension.
    dirname, filename = os.path.split(imgfile)
    fname, fename = os.path.splitext(filename)
    # Choose the detection model.
    if model == 'ssd':
        detection_graph = ssd_graph
    elif model == 'frnn':
        detection_graph = fastrcnn_graph
    else:
        return vehicle_list
    # Run detection.
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # NOTE(review): no input queues are defined by this code, so the
            # Coordinator/queue-runner pair looks unnecessary — confirm.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            # Standard TF Object Detection API tensor names.
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            try:
                image = Image.open(imgfile)
            except IOError as e:
                return vehicle_list
            image_np = load_image_into_numpy_array(image)
            # The model expects a batch dimension: (1, H, W, 3).
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes)
            scores = np.squeeze(scores)
            # Keep detections above 40%.  Truncating boxes/classes to
            # len(scores) assumes scores are sorted descending — TODO confirm.
            scores = scores[scores > 0.4]
            boxes = boxes[0:len(scores), ]
            classes = classes[0:len(scores), ]
            index = 0
            # Non-maximum suppression with IoU threshold 0.8.
            keep = nms(boxes, scores, 0.8)
            print(keep)
            # Crop only the 'car' and 'truck' classes (per the original
            # author's comment; 3/8 match the COCO ids for car/truck).
            for mclass in classes:
                if int(mclass) == 3 or int(mclass) == 8:
                    # Only crop boxes that survived NMS.
                    if index in keep:
                    #    continue
                    # (older logic: save detections above 40%)
                    #if scores[index] > 0.4:
                        box = resize_box(boxes[index:index + 1][0])
                        # Normalized box -> pixel coordinates.
                        xmin = int(box[1] * image.size[0])
                        xmax = int(box[3] * image.size[0])
                        ymin = int(box[0] * image.size[1])
                        ymax = int(box[2] * image.size[1])
                        # Convert RGBA (png) data to RGB before saving as jpg.
                        # NOTE(review): both branches perform the same save;
                        # only the RGBA branch converts first.
                        if image.mode == 'RGBA':
                            image = image.convert("RGB")
                            image.crop((xmin, ymin, xmax, ymax)).save(".tmp/{}_{}.jpg".format(fname, index))
                        else:
                            image.crop((xmin, ymin, xmax, ymax)).save(".tmp/{}_{}.jpg".format(fname, index))
                        # If an identical box is already recorded, the object
                        # was detected as both car and truck — keep one copy.
                        same = False
                        for i in range(len(vehicle_list)//2):
                            if (boxes[index] == vehicle_list[i*2]).all():
                                same = True
                                break
                        if not same:
                            vehicle_list.append(boxes[index])
                            # Record the temp file path alongside its box.
                            vehicle_list.append("./.tmp/{}_{}.jpg".format(fname, index))
                index = index + 1
            coord.request_stop()
            coord.join(threads)
            return vehicle_list
def detecter(imgpath, model, out_path):
    """Detection entry point.

    imgpath: full path of an input image, or of a directory of images.
    model: 'direct' for the one-step detection model, otherwise
        'ssd'/'frnn' for the two-step detect-then-classify pipeline.
    out_path: output directory; '' keeps the current module default.
    Returns: dict mapping each input image path to a flat list of
        (class, score, box) triples, best score first.
    Raises: IOError if imgpath does not exist.
    """
    # Create the temp folder used for cropped vehicle images.
    if not os.path.exists('.tmp'):
        os.makedirs('.tmp')
    if not os.path.exists(imgpath):
        raise IOError('Input is Null, please check!')
    # Remember the requested output path in the module-level default.
    global output_path
    if len(out_path) != 0:
        output_path = out_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    result = {}
    # One-step method: a single detect pass per image.
    if model == 'direct':
        if os.path.isfile(imgpath):
            result[imgpath] = detect_img(imgpath)
        if os.path.isdir(imgpath):
            for file in os.listdir(imgpath):
                full = os.path.join(imgpath, file)
                result[full] = detect_img(full)
        return result
    # Two-step method: crop vehicles first ...
    img_dic = {}
    if os.path.isfile(imgpath):
        img_dic[imgpath] = split_img(imgpath, model)
    if os.path.isdir(imgpath):
        for file in os.listdir(imgpath):
            full = os.path.join(imgpath, file)
            img_dic[full] = split_img(full, model)
    # ... then classify every cropped temp file.
    for imagepath in img_dic:
        # BUGFIX: these lists were created once and shared across images
        # (results of earlier images leaked into later ones), and the inner
        # loop indexed img_dic with the function argument `imgpath` instead
        # of the loop variable `imagepath` (KeyError for directory input).
        classes = []
        scores = []
        boxes = []
        r_list = []
        for i in range(len(img_dic[imagepath]) // 2):
            cls, score = image_classification.classification(img_dic[imagepath][i * 2 + 1])
            # Keep classifications above 20%.
            if score > 0.2:
                classes.append(cls)
                scores.append(score)
                boxes.append(img_dic[imagepath][i * 2])
        # Emit (class, score, box) triples best-score first.
        order = np.array(scores).argsort()
        for i in range(len(scores), 0, -1):
            r_list.append(classes[order[i - 1]])
            r_list.append(scores[order[i - 1]])
            r_list.append(boxes[order[i - 1]])
        result[imagepath] = r_list
    return result
def save_result(image_dic):
    """Draw the final detection results onto each image and save as PNG.

    image_dic: dict mapping a source image path to a flat list of
        (class, score, box) triples for that image.
    Returns: full path of the last output file written, or None when
        image_dic is empty.
    """
    outpath = None
    # BUGFIX: the original returned inside the loop, so only the first
    # image was ever processed and saved.
    for imagepath in image_dic:
        image = Image.open(imagepath)
        dirname, filename = os.path.split(imagepath)
        fname, fename = os.path.splitext(filename)
        image_np = load_image_into_numpy_array(image)
        # Unpack the flat (class, score, box) triples into parallel arrays.
        triples = image_dic[imagepath]
        boxes = np.zeros((len(triples) // 3, 4))
        classes = []
        scores = []
        for index in range(len(triples) // 3):
            classes.append(triples[index * 3])
            scores.append(triples[index * 3 + 1])
            boxes[index] = triples[index * 3 + 2]
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            boxes,
            classes,
            scores,
            category_index,
            min_score_thresh=.01,
            use_normalized_coordinates=True,
            line_thickness=8)
        out_name = fname + '.png'
        outpath = os.path.join(output_path, out_name)
        plt.imsave(outpath, image_np)
    return outpath
