import tensorflow as tf
from model.yoloV3 import OutputParser
from utils.config_utils import parse_anchors

# 定义预测类
class Predictor(object):
    """Wraps a trained YOLOv3 Keras model and turns its raw multi-scale
    outputs into final bounding boxes for a single input image
    (letterbox preprocessing + confidence filtering + greedy NMS)."""

    # Anchor boxes, loaded once at class-definition time from the config file.
    # parse_anchors presumably returns a (9, 2) array — TODO confirm. The nine
    # anchors are split into three groups of three; zipping with
    # reversed(range(3)) maps output scale 2 -> anchors[0:3],
    # scale 1 -> anchors[3:6], scale 0 -> anchors[6:9].
    YOLOv3_anchor = parse_anchors("config/anchor")
    anchors = dict(zip(reversed(range(3)),
                       [YOLOv3_anchor[0:3].tolist(), YOLOv3_anchor[3:6].tolist(), YOLOv3_anchor[6:9].tolist()]))

    def __init__(self, input_shape=(416, 416, 3), class_num=80, yolov3=""):
        """Load the YOLOv3 model and build one output parser per scale.

        Args:
            input_shape: (height, width, channels) expected by the network.
            class_num: number of object classes.
            yolov3: path to the saved Keras H5 model file (required).

        Raises:
            AssertionError: if no model path is given.
        """
        self.input_shape = input_shape
        self.class_num = class_num
        # BUG FIX: the original tested `yolov3 is ""` — an identity comparison
        # with a literal that relies on string interning and raises a
        # SyntaxWarning on Python >= 3.8. Use equality instead.
        if yolov3 == "":
            raise AssertionError('please give H5 file.')
        self.yolov3 = tf.keras.models.load_model(yolov3, compile=False)
        # One OutputParser per detection scale; each receives that scale's
        # output shape (batch dimension stripped) and its anchor group.
        self.parsers = [OutputParser(tuple(
            self.yolov3.outputs[scale].shape[1:]), self.input_shape, self.anchors[scale]) for scale in range(3)]

    def predict(self, image, conf_thres=0.5, nms_thres=0.5):
        """Run detection on a single image.

        Args:
            image: single HWC image (original resolution).
            conf_thres: minimum box confidence for a detection to be kept.
            nms_thres: IoU threshold above which overlapping boxes are
                suppressed during NMS.

        Returns:
            Tensor of shape (num_detections, 6) whose rows are
            (x_min, y_min, x_max, y_max, confidence, class_id) in the
            original image's pixel coordinates.
        """
        # Add a batch dimension: (H, W, C) -> (1, H, W, C).
        images = tf.expand_dims(image, axis=0)
        # Letterbox step 1: resize preserving aspect ratio, so at most one
        # dimension reaches the network input size.
        resize_images = tf.image.resize(
            images, self.input_shape[:2], method=tf.image.ResizeMethod.BICUBIC, preserve_aspect_ratio=True)
        # Size of the image after the aspect-preserving resize.
        resize_shape = resize_images.shape[1:3]
        # Letterbox step 2: padding needed on each side to reach input_shape.
        top_pad = (self.input_shape[0] - resize_shape[0]) // 2
        bottom_pad = self.input_shape[0] - resize_shape[0] - top_pad
        left_pad = (self.input_shape[1] - resize_shape[1]) // 2
        right_pad = self.input_shape[1] - resize_shape[1] - left_pad
        # Fill the padded border with gray (pixel value 128).
        resize_images = tf.pad(resize_images, [[0, 0], [top_pad, bottom_pad], [
                               left_pad, right_pad], [0, 0]], constant_values=128)
        # Normalized (x, y) offset introduced by the left/top padding.
        # NOTE: the original comment called this "standard deviation"; it is
        # actually the letterbox offset subtracted from predicted box centers.
        deviation = tf.constant([left_pad / self.input_shape[1],
                                 top_pad / self.input_shape[0], 0, 0], dtype=tf.float32)
        # Scale factor undoing the aspect-preserving resize, per coordinate
        # (x, y, w, h).
        scale = tf.constant([
            self.input_shape[1] /
            resize_shape[1], self.input_shape[0] / resize_shape[0],
            self.input_shape[1] /
            resize_shape[1], self.input_shape[0] / resize_shape[0]
        ], dtype=tf.float32)
        # Normalize pixel values to [0, 1].
        images_data = tf.cast(resize_images, tf.float32) / 255.
        # Forward pass: three output tensors, one per scale.
        outputs = self.yolov3(images_data)
        # Accumulator for detections from all scales, shape (0, 6) initially.
        whole_targets = tf.zeros((0, 6), dtype=tf.float32)
        # Collect detections from each of the three scales.
        for i in range(3):
            # Parse predicted center, size, confidence and class scores.
            pred_xy, pred_wh, pred_box_confidence, pred_class = self.parsers[i](
                outputs[i])
            # Box as (x, y, w, h).
            pred_box = tf.keras.layers.Concatenate(axis=-1)([pred_xy, pred_wh])
            # Mask of boxes above the confidence threshold:
            # target_mask.shape = (h, w, anchor num)
            target_mask = tf.greater(pred_box_confidence, conf_thres)
            # Keep the confidences of surviving boxes:
            # pred_box_confidence.shape = (pred target num,)
            pred_box_confidence = tf.boolean_mask(
                pred_box_confidence, target_mask)
            # Append a trailing axis -> (pred target num, 1).
            pred_box_confidence = tf.expand_dims(pred_box_confidence, axis=-1)
            # Surviving boxes: pred_box.shape = (pred target num, 4)
            pred_box = tf.boolean_mask(pred_box, target_mask)
            # Undo the letterbox transform and map back to original image
            # pixel coordinates.
            pred_box = (pred_box - deviation) * scale * \
                [image.shape[1], image.shape[0], image.shape[1], image.shape[0]]
            # Class scores of surviving boxes.
            pred_class = tf.boolean_mask(pred_class, target_mask)
            # Most likely class index per box.
            pred_class = tf.math.argmax(pred_class, axis=-1)
            # Cast to float so it can be concatenated with the box tensor.
            pred_class = tf.cast(tf.expand_dims(
                pred_class, axis=-1), dtype=tf.float32)
            # Assemble per-scale detections: targets.shape = (pred target num, 6)
            targets = tf.keras.layers.Concatenate(
                axis=-1)([pred_box, pred_box_confidence, pred_class])
            # Append this scale's detections to the running total.
            whole_targets = tf.keras.layers.Concatenate(
                axis=0)([whole_targets, targets])
        # Greedy NMS: process detections in descending confidence order.
        descend_idx = tf.argsort(whole_targets[..., 4], direction='DESCENDING')
        i = 0
        # descend_idx shrinks as overlapping boxes are discarded.
        while i < descend_idx.shape[0]:
            # Index of the current (highest-confidence remaining) box.
            idx = descend_idx[i]
            # Current box corners from its center/size representation.
            cur_upper_left = whole_targets[idx, 0:2] - whole_targets[idx, 2:4] / 2
            cur_down_right = cur_upper_left + whole_targets[idx, 2:4]
            # Current box width/height and area.
            wh = whole_targets[idx, 2:4]
            area = wh[..., 0] * wh[..., 1]
            # All lower-confidence boxes still in play.
            following_idx = descend_idx[i+1:]
            following_targets = tf.gather(whole_targets, following_idx)
            # Their corners, sizes and areas.
            following_upper_left = following_targets[..., 0:2] - following_targets[..., 2:4] / 2
            following_down_right = following_upper_left + following_targets[..., 2:4]
            following_wh = following_targets[..., 2:4]
            following_area = following_wh[..., 0] * following_wh[..., 1]
            # Intersection rectangle of the current box with each follower.
            max_upper_left = tf.math.maximum(cur_upper_left, following_upper_left)
            min_down_right = tf.math.minimum(cur_down_right, following_down_right)
            intersect_wh = min_down_right - max_upper_left
            # Clamp negative extents (no overlap) to zero.
            intersect_wh = tf.where(tf.math.greater(intersect_wh, 0), intersect_wh, tf.zeros_like(intersect_wh))
            intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
            # Intersection over union with each remaining box.
            overlap = intersect_area / (area + following_area - intersect_area)
            # Keep only boxes whose IoU with the current box is below the
            # NMS threshold; the rest are suppressed.
            indices = tf.where(tf.less(overlap, nms_thres))
            following_idx = tf.gather_nd(following_idx, indices)
            # Rebuild the ordering: processed boxes + survivors.
            descend_idx = tf.concat([descend_idx[:i + 1], following_idx], axis=0)
            i += 1
        # Gather the surviving detections in confidence order.
        whole_targets = tf.gather(whole_targets, descend_idx)
        # Convert (x, y, w, h) centers to corner coordinates.
        upper_left = (whole_targets[..., 0:2] - whole_targets[..., 2:4] / 2)
        down_right = (upper_left + whole_targets[..., 2:4])
        # Final result: (x_min, y_min, x_max, y_max, confidence, class_id).
        boundings = tf.keras.layers.Concatenate(axis=-1)([upper_left, down_right, whole_targets[..., 4:]])
        return boundings