#! -*- coding:utf-8 -*-
# !/env/bin/python3
from sys import argv
import keras
from keras.layers import Conv2D, MaxPooling2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from os.path import exists

import numpy as np
import random
import cv2
import yolt_utils

# Number of values stored per predicted box: (cx, cy, w, h, confidence).
# Enforced in NNYOlt.__init__ (output depth // boxes-per-cell must equal this).
PARAM_NMM = 5


class NNYOlt(object):
    '''YOLT network model class.

    Main responsibilities: building the network (full or tiny variant),
    training it, and running predictions.
    Architecture reference: https://arxiv.org/pdf/1805.09512.pdf
    '''

    def __init__(self, input_shape_whd, output_shape_whd, box_num_per_cell):
        '''Construct function
        Parameters:
            input_shape_whd: input data shape with format (width, height, channels)
            output_shape_whd: output data shape with format (width, height, depth)
            box_num_per_cell: predict boxes number for each grid cell
        '''
        # NOTE: assert statements are stripped under ``python -O``; these are
        # developer sanity checks rather than runtime input validation.
        assert (isinstance(input_shape_whd, tuple))
        assert (isinstance(output_shape_whd, tuple))
        assert (isinstance(box_num_per_cell, int))
        # Each predicted box must consist of exactly PARAM_NMM values
        # (see __extractBoxesFromOutputs, which reads 5 values per box).
        assert ((output_shape_whd[2] // box_num_per_cell) == PARAM_NMM)

        self.input_shape_whd = input_shape_whd
        self.output_shape_whd = output_shape_whd
        self.box_num_per_cell = box_num_per_cell
        self.__net = None  # Sequential model; created by construct*Architecture()
        self.MAX_BATCH_SIZE = 10  # hard upper bound on batch_size (see train())
        self.MAX_ROUND_SIZE = 1500  # max samples loaded into memory per training round

    def __space2Depth_x2(self, x):
        """Thin wrapper for Tensorflow space_to_depth with block_size=2."""
        # Import currently required to make Lambda work.
        # See: https://github.com/fchollet/keras/issues/5088#issuecomment-273851273
        import tensorflow as tf
        return tf.space_to_depth(x, block_size=2)

    def __space2Depth_x2_output_shape_whd(self, input_shape_whd):
        """Determine space_to_depth output shape for block_size=2.

        ``input_shape_whd`` here is the full Keras shape tuple including the
        batch dimension at index 0: (batch, height, width, channels).
        Spatial dimensions are halved and the channel count quadrupled; when
        the spatial size is unknown (None) it is propagated as None.
        """
        return (input_shape_whd[0], input_shape_whd[1] // 2, input_shape_whd[2] // 2, 4 * input_shape_whd[3]) \
            if input_shape_whd[1] else (input_shape_whd[0], None, None, 4 * input_shape_whd[3])

    def constructFullyArchitecture(self):
        '''Build the standard YOLT network architecture.

        Stacks BatchNorm + Conv2D blocks (with max-pooling after the early
        stages and a space_to_depth passthrough near the end), then compiles
        the model with MSE loss and Adagrad.
            ref: https://arxiv.org/pdf/1805.09512.pdf
        '''

        # construct network's architecture
        padding_same = 'same'
        # NOTE(review): one LeakyReLU layer *instance* is shared as the
        # ``activation`` of every Conv2D below — confirm this shared-instance
        # pattern is intended for this Keras version.
        leaky_relu = LeakyReLU(0.1)
        self.__net = Sequential()

        self.__net.add(
            Conv2D(32, (3, 3), activation=leaky_relu, padding=padding_same, input_shape=self.input_shape_whd))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(64, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(128, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(128, (1, 1), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (1, 1), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (1, 1), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(1024, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(1024, (3, 3), activation=leaky_relu, padding=padding_same))

        # Passthrough Layer: space_to_depth halves the spatial grid while
        # quadrupling channels (YOLOv2/YOLT-style fine-grained feature reuse).
        self.__net.add(Lambda(self.__space2Depth_x2,
                              output_shape=self.__space2Depth_x2_output_shape_whd,
                              name='space_to_depth'))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(1024, (3, 3), activation=leaky_relu, padding=padding_same))

        # Final 1x1 conv maps features to the prediction grid depth
        # (box_num_per_cell * PARAM_NMM channels), linear activation.
        self.__net.add(BatchNormalization())
        self.__net.add(Conv2D(
            self.output_shape_whd[2], (1, 1), activation='linear', padding=padding_same))

        self.__net.compile(loss=keras.losses.mse,
                           optimizer=keras.optimizers.Adagrad(),
                           metrics=['accuracy'])

        net_out_shape = self.__net.output_shape[1:]
        print("Net output_size: ", net_out_shape)

        # verify networks's output size against the requested output_shape_whd
        is_output_mathes = (net_out_shape[0] == self.output_shape_whd[0]) and (
            (net_out_shape[1] == self.output_shape_whd[1])) and (net_out_shape[2] == self.output_shape_whd[2])
        if not is_output_mathes:
            # Built network's grid does not match the requested output shape.
            raise Exception()
        print("Construct yolt network success!")

    def constructTinyArchitecture(self):
        '''Build the tiny YOLT network architecture (fewer/narrower layers
        than constructFullyArchitecture, for faster training/inference).
        '''
        # construct network's architecture
        padding_same = 'same'
        # NOTE(review): single shared LeakyReLU instance, as in
        # constructFullyArchitecture — confirm intended.
        leaky_relu = LeakyReLU(0.1)
        self.__net = Sequential()

        self.__net.add(
            Conv2D(32, (3, 3), activation=leaky_relu, padding=padding_same, input_shape=self.input_shape_whd))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(64, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(128, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(128, (1, 1), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(MaxPooling2D(pool_size=(2, 2)))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (1, 1), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(256, (1, 1), activation=leaky_relu, padding=padding_same))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        # Passthrough Layer (space_to_depth, block_size=2)
        self.__net.add(Lambda(self.__space2Depth_x2, output_shape=self.__space2Depth_x2_output_shape_whd,
                              name='space_to_depth'))

        self.__net.add(BatchNormalization())
        self.__net.add(
            Conv2D(512, (3, 3), activation=leaky_relu, padding=padding_same))

        # Final 1x1 conv to the prediction grid depth, linear activation.
        self.__net.add(BatchNormalization())
        self.__net.add(Conv2D(
            self.output_shape_whd[2], (1, 1), activation='linear', padding=padding_same))

        self.__net.compile(loss=keras.losses.mse,
                           optimizer=keras.optimizers.Adagrad(),
                           metrics=['accuracy'])

        net_out_shape = self.__net.output_shape[1:]
        print("Net output_size: ", net_out_shape)

        # verify networks's output size against the requested output_shape_whd
        is_output_mathes = (net_out_shape[0] == self.output_shape_whd[0]) and (
            (net_out_shape[1] == self.output_shape_whd[1])) and (net_out_shape[2] == self.output_shape_whd[2])
        if not is_output_mathes:
            # Built network's grid does not match the requested output shape.
            raise Exception()
        print("Construct tiny yolt network success!")

    def loadWeights(self, weights_file_path):
        '''Load model weights from ``weights_file_path`` if the file exists;
        otherwise print an error and leave the current weights untouched.
        Must be called after one of the construct*Architecture methods.
        '''
        if exists(weights_file_path):
            self.__net.load_weights(weights_file_path)
            print("Load weights success!")
        else:
            print("Error! File not exits!")

    def train(self, training_data_path, model_name, begin_index=0, train_num=-1, batch_size=1, epoch=1):
        '''Train yolt network and return None
            Parameters:
                training_data_path: the directory of training data
                model_name: file path used to save the weights after each epoch
                begin_index: index of the first training sample to use
                train_num: total number of samples to train on per epoch
                batch_size: training batch size (clamped to MAX_BATCH_SIZE)
                epoch: training epoch
            Return None

        Data is loaded in rounds of at most MAX_ROUND_SIZE samples to bound
        memory usage; each round is shuffled before fitting.
        '''

        batch_size = self.MAX_BATCH_SIZE if batch_size > self.MAX_BATCH_SIZE else batch_size
        # NOTE(review): with the default train_num=-1, floor division yields
        # round_num=-1 and remaining_num=MAX_ROUND_SIZE-1, so a single round
        # of 1499 samples is requested — confirm this is the intended
        # "train on everything" behavior.
        round_num = train_num // self.MAX_ROUND_SIZE
        remaining_num = train_num % self.MAX_ROUND_SIZE

        def reorder(x, y):
            # Shuffle x and y in unison (in place) so samples stay paired
            # with their labels.
            tar = list(zip(x, y))
            random.shuffle(tar)
            x[:], y[:] = zip(*tar)
            return x, y

        def trainOnce():
            # One pass over [begin_index, begin_index + train_num) in
            # memory-bounded rounds.
            if round_num >= 1:
                for r in range(round_num):
                    print("Round: %d/%d" % (r, round_num))

                    x_train, y_train = self.__getTrainData(
                        training_data_path, begin_index + r * self.MAX_ROUND_SIZE, self.MAX_ROUND_SIZE)
                    x_train, y_train = reorder(x_train, y_train)
                    self.__net.fit(x_train, y_train, batch_size, 1, verbose=1)

            # processing the remaining samples
            if remaining_num > 0:
                x_train, y_train = self.__getTrainData(
                    training_data_path, begin_index + round_num * self.MAX_ROUND_SIZE, remaining_num)
                x_train, y_train = reorder(x_train, y_train)
                self.__net.fit(x_train, y_train, batch_size, 1, verbose=1)

            # save weights after every epoch (overwrites model_name)
            self.__net.save_weights(model_name)

        for e in range(epoch):
            print("Epoch: %d/%d" % (e, epoch))
            trainOnce()

    def __getTrainData(self, training_data_path, begin_index, num):
        '''Get training data
            Parameters:
                begin_index: the first training sample's index
                num: number of training samples to return
            Return [tuple]: a tuple of (x_train, y_train) as float ndarrays

        Images (.png) and labels (.txt) are paired by their position in the
        directory listing returned by yolt_utils.getFilePathsInDir.
        '''
        # extract x_train and y_train
        image_paths = yolt_utils.getFilePathsInDir(
            training_data_path, '.png', begin_index, num)
        txt_paths = yolt_utils.getFilePathsInDir(
            training_data_path, '.txt', begin_index, num)
        x_train = []
        y_train = []
        for image_path, txt_path in zip(image_paths, txt_paths):
            x_train.append(cv2.imread(image_path))
            y_train.append(yolt_utils.getLabelTxtData(
                txt_path, self.output_shape_whd[0], self.output_shape_whd[1], self.output_shape_whd[2]))
        x_train = np.asarray(x_train, dtype='float')
        y_train = np.asarray(y_train, dtype='float')

        return (x_train, y_train)

    def __extractBoxesFromOutputs(self, outputs_data, iou_thresh=0.5):
        '''
        Extract the bounding boxes whose confidence exceeds iou_thresh from
        the raw network output grids.
        :param outputs_data: batch of output grids, each indexed [row, col, depth]
        :param iou_thresh: minimum (rescaled) confidence for a box to be kept
        :return: list of bounding box lists, one per output; each box is a
            (cx, cy, w, h, iou) tuple in input-image pixel coordinates
        '''
        i_w, i_h, i_d = self.input_shape_whd
        o_w, o_h, o_d = self.output_shape_whd

        # Values stored per box (equals PARAM_NMM by the __init__ assertion).
        PARAM_NUM = o_d // self.box_num_per_cell
        # Input-image pixels covered by one output grid cell.
        CELL_WIDTH = i_w // o_w
        CELL_HEIGHT = i_h // o_h

        boxes = []
        for output_data in outputs_data:
            curr_output_boxes = []
            for grid_ny in range(o_h):
                for grid_nx in range(o_w):
                    for box in range(self.box_num_per_cell):
                        # NOTE(review): the stored confidence (index 4) is
                        # rescaled by CELL_WIDTH / 2 before thresholding —
                        # confirm this matches the label encoding produced by
                        # yolt_utils.getLabelTxtData.
                        iou = output_data[grid_ny, grid_nx, box *
                                          PARAM_NUM + 4] / (CELL_WIDTH / 2)
                        if iou > iou_thresh:
                            # (cx, cy) are cell-relative offsets; convert to
                            # absolute pixel coordinates. (w, h) are stored in
                            # cell units and scaled back to pixels.
                            cx = grid_nx * CELL_WIDTH + \
                                 output_data[grid_ny, grid_nx, box * PARAM_NUM + 0]
                            cy = grid_ny * CELL_HEIGHT + \
                                 output_data[grid_ny, grid_nx, box * PARAM_NUM + 1]
                            w = output_data[grid_ny, grid_nx, box * PARAM_NUM + 2] * CELL_WIDTH
                            h = output_data[grid_ny, grid_nx, box * PARAM_NUM + 3] * CELL_HEIGHT

                            curr_output_boxes.append((cx, cy, w, h, iou))
            boxes.append(curr_output_boxes)

        return boxes

    def predict(self, x_tests, batch_size=1, iou_thresh=0.5):
        '''Run the network on ``x_tests`` and decode the output grids.

        Parameters:
            x_tests: batch of input images as a float ndarray
            batch_size: prediction batch size
            iou_thresh: confidence threshold forwarded to the box decoder
        Return: list (one entry per input) of (cx, cy, w, h, iou) box lists
        '''
        y_predicts = self.__net.predict(x_tests, batch_size=batch_size, verbose=1)
        predict_boxes_cxcywh = self.__extractBoxesFromOutputs(outputs_data=y_predicts, iou_thresh=iou_thresh)
        return predict_boxes_cxcywh


def testModelOnTrainData(model, data_dir, begin_index, test_num):
    '''Evaluate a YOLT model on training samples and visualise the result.

    Parameters:
        model: the NNYOlt model under test
        data_dir: root directory holding the data (labels and images live in
            the same directory; a sample's label and image share a file stem)
        begin_index: index of the first image to test
        test_num: number of images to test

    Predictions are drawn in green and ground truth in red; press ``q`` in
    the image window to stop early.
    '''
    assert isinstance(model, NNYOlt)
    i_w, i_h, i_d = model.input_shape_whd
    o_w, o_h, o_d = model.output_shape_whd
    o_box_num = model.box_num_per_cell

    # Collect the test set: image files plus their ground-truth label files.
    image_paths = yolt_utils.getFilePathsInDir(
        data_dir, '.png', begin_index=begin_index, num=test_num)
    label_paths = yolt_utils.getFilePathsInDir(
        data_dir, '.txt', begin_index=begin_index, num=test_num)

    x_test, y_true = [], []
    for img_path, lbl_path in zip(image_paths, label_paths):
        x_test.append(cv2.imread(img_path))
        y_true.append(yolt_utils.getLabelTxtData(label_txt_path=lbl_path,
                                                 row=o_h, col=o_w, depth=o_d))
    x_test = np.asarray(x_test, 'float')

    iou_thresh = 0.24
    boxes = model.predict(x_tests=x_test, batch_size=1, iou_thresh=iou_thresh)
    print("Boxes: ", len(boxes))

    GREEN = (0, 255, 0)  # predicted boxes
    RED = (0, 0, 255)    # ground-truth boxes
    for image, predict_boxes, truth in zip(x_test, boxes, y_true):
        truth_boxes = yolt_utils.extractBoundingBoxesFromLabel(
            label_data=truth, image_width=i_w, image_height=i_h,
            box_num=o_box_num, iou_thresh=iou_thresh)

        canvas = np.uint8(image)
        canvas = yolt_utils.drawBoxes(image=canvas, boxes=predict_boxes, color=GREEN, line_width=2)
        canvas = yolt_utils.drawBoxes(image=canvas, boxes=truth_boxes, color=RED, line_width=2)

        cv2.imshow("Image", canvas)
        if cv2.waitKey(0) & 0xFF == ord('q'):
            break


def nms(boxes_xyxy, iou_thresh):
    """Apply non-maximum suppression to a set of candidate boxes.

    :param boxes_xyxy: list of (x1, y1, x2, y2, score) boxes
    :param iou_thresh: boxes overlapping a kept box with IoU above this
        value are suppressed
    :return: the surviving boxes, ordered by descending score
    """
    if len(boxes_xyxy) <= 1:
        return boxes_xyxy

    coords = np.asarray(boxes_xyxy, dtype='float')
    x1, y1, x2, y2, scores = (coords[:, c] for c in range(5))

    # Per-box area, using the classic +1 pixel convention.
    areas = (np.abs(x2 - x1) + 1) * (np.abs(y2 - y1) + 1)
    # Candidate indices sorted by descending confidence.
    order = scores.argsort()[::-1]

    keep = []  # indices of the boxes that survive suppression
    while order.size > 0:
        best = order[0]
        keep.append(best)  # highest-scoring remaining box always survives
        rest = order[1:]

        # Intersection rectangle between the winner and every remaining box.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])

        # Overlap area; clamped to zero for disjoint boxes.
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        # IoU = intersection / (area_best + area_other - intersection).
        iou = inter / (areas[best] + areas[rest] - inter)

        # Only boxes with small enough overlap remain candidates.
        order = rest[np.where(iou <= iou_thresh)[0]]

    return [boxes_xyxy[i] for i in keep]


def predictSingleImage(model, raw_image, overlap_percentage=0.15, iou_thresh=0.5):
    '''Run detection on a full-size image by tiling it into model-sized
    crops and merging the per-tile predictions.

    :param model: the NNYOlt model used for prediction
    :param raw_image: the input image (may be resized by the tiling step)
    :param overlap_percentage: overlap between neighbouring tiles
    :param iou_thresh: threshold used both for box decoding and for NMS
    :return: (raw_image, global_boxes_xyxy) where raw_image is the possibly
        resized input and global_boxes_xyxy are (x1, y1, x2, y2, score)
        boxes in raw_image coordinates after non-maximum suppression
    '''
    i_w, i_h, i_d = model.input_shape_whd

    # Cut raw_image into model-sized tiles.  The keys of small_images are
    # the (offset_x, offset_y) of each tile within raw_image.
    raw_image, small_images = yolt_utils.cuttingImage(raw_image=raw_image,
                                                      small_image_w_h=(i_w, i_h),
                                                      overlap_percentage=overlap_percentage)

    # Predict the (cx, cy, w, h, iou) boxes inside every tile.
    tiles = np.asarray(list(small_images.values()), 'float')
    small_boxes_cxcywh = model.predict(x_tests=tiles, batch_size=1, iou_thresh=iou_thresh)

    # Translate tile-relative centre boxes into image-relative corner boxes.
    global_boxes_xyxy = []
    for (ox, oy), tile_boxes in zip(small_images.keys(), small_boxes_cxcywh):
        for cx, cy, w, h, iou in tile_boxes:
            gx = cx + ox
            gy = cy + oy
            global_boxes_xyxy.append(
                [gx - w / 2, gy - h / 2, gx + w / 2, gy + h / 2, iou])

    # Suppress duplicate detections arising from overlapping tiles.
    global_boxes_xyxy = nms(boxes_xyxy=global_boxes_xyxy, iou_thresh=iou_thresh)
    return (raw_image, global_boxes_xyxy)


def testModelOnTestData(model, test_image_dir, begin_index, test_num=-1):
    '''Run the model on held-out test images and visualise the detections.

    :param model: the NNYOlt model under test
    :param test_image_dir: directory containing the test images (.png)
    :param begin_index: index of the first image to test
    :param test_num: number of images to test (-1 for the directory default)
    :return: None.  Press ``q`` in the image window to stop early.
    '''

    assert isinstance(model, NNYOlt)

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    # BUGFIX: was 5 — an IoU/confidence threshold must lie in [0, 1].
    # With 5, nms() (which computes inter/union <= 1) suppressed nothing and
    # the confidence filter rejected every box.
    iou_thresh = 0.5
    GREEN = (0, 255, 0)  # colour for predicted boxes

    test_image_paths = yolt_utils.getFilePathsInDir(
        dir_path=test_image_dir, suffix='.png', begin_index=begin_index, num=test_num)

    for image_path in test_image_paths:
        print(image_path)
        raw_image = cv2.imread(image_path)
        raw_image, predict_boxes_xyxy = predictSingleImage(
            model=model, raw_image=raw_image, overlap_percentage=0.15, iou_thresh=iou_thresh)

        # Visualise the predictions.
        raw_image = yolt_utils.drawBoxes_xyxy(image=raw_image, boxes_xyxy=predict_boxes_xyxy, color=GREEN,
                                              line_width=2)
        cv2.imshow("Image", raw_image)
        if cv2.waitKey(0) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()


if __name__ == "__main__":
    # Model geometry: 416x416 RGB input, 13x13 output grid, 4 boxes per cell.
    IMAGE_WIDTH = 416
    IMAGE_HEIGHT = 416
    IMAGE_CHANNELS = 3
    CELL_NX = 13
    CELL_NY = 13
    BOX_NUM = 4

    if len(argv) <= 1:
        # BUGFIX: the usage text previously advertised dash-prefixed flags
        # ("-train"), but the code compares bare words (opt == "train").
        print("Usage: yolt.py train [use_tiny_model] [begin_index] [train_num] [batch_size] [epoch]")
        print("       yolt.py test [use_tiny_model] [begin_index] [test_num]")

        exit(0)

    training_data_path = "../../datas/train/yolt/"
    test_data_dir = "../../datas/test/images/"
    yolt = NNYOlt(input_shape_whd=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS),
                  output_shape_whd=(CELL_NX, CELL_NY, BOX_NUM * PARAM_NMM),
                  box_num_per_cell=BOX_NUM)

    # Positional CLI arguments; unset ones fall back to these defaults.
    opt = argv[1]
    use_tiny_model = False
    begin_index = 0
    test_or_train_num = 1000
    if len(argv) >= 3:
        use_tiny_model = int(argv[2]) > 0
    if len(argv) >= 4:
        begin_index = int(argv[3])
    if len(argv) >= 5:
        test_or_train_num = int(argv[4])

    if opt == "train":

        batch_size = 10
        epoch = 1

        if len(argv) >= 6:
            batch_size = int(argv[5])
        if len(argv) >= 7:
            epoch = int(argv[6])

        if use_tiny_model:
            model_name = "tiny_yolt_weights.h5"
            yolt.constructTinyArchitecture()
        else:
            model_name = "yolt_weights.h5"
            yolt.constructFullyArchitecture()

        # Resume from previous weights if the file exists.
        yolt.loadWeights(model_name)

        yolt.train(training_data_path=training_data_path,
                   model_name=model_name,
                   begin_index=begin_index,
                   train_num=test_or_train_num,
                   batch_size=batch_size,
                   epoch=epoch)

    elif opt == "test":
        if use_tiny_model:
            yolt.constructTinyArchitecture()
            model_name = "tiny_yolt_weights.h5"
        else:
            yolt.constructFullyArchitecture()
            model_name = "yolt_weights.h5"

        yolt.loadWeights(model_name)

        testModelOnTestData(model=yolt,
                            test_image_dir=test_data_dir,
                            begin_index=begin_index,
                            test_num=test_or_train_num)

    else:
        # BUGFIX: unknown options previously did nothing silently.
        print("Unknown option: %s" % opt)
        exit(1)