# -*- coding: utf-8 -*-

import cv2
import tensorflow as tf
import keras
import numpy as np
from keras.layers import Dense
from keras.layers import Activation
from keras.layers import Flatten
from keras.layers import Conv2D, MaxPooling2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras import backend as K

from os.path import exists
import random
import getpass

from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D 
from keras.layers import Flatten, Dense, Dropout,BatchNormalization 
from keras.layers import Input, concatenate, add
from keras.models import Model,load_model 
from keras.preprocessing.image import ImageDataGenerator 
from keras.utils import plot_model,np_utils 
from keras import regularizers 
import keras.metrics as metric 
import os


import cutImage
from loadLabels import LoadLabels as lls
from writeTest import writeTest


def compute_iou(rec1, rec2):
    """
    Intersection-over-union of two axis-aligned rectangles.

    :param rec1: (y0, x0, y1, x1), i.e. (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :return: scalar IoU in [0, 1]; 0 when the rectangles do not overlap
    """
    # Individual rectangle areas.
    area_1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area_2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])

    # Edges of the (possibly empty) intersection rectangle.
    inter_left = max(rec1[1], rec2[1])
    inter_right = min(rec1[3], rec2[3])
    inter_top = max(rec1[0], rec2[0])
    inter_bottom = min(rec1[2], rec2[2])

    # Disjoint rectangles: no overlap at all.
    if inter_left >= inter_right or inter_top >= inter_bottom:
        return 0

    inter_area = (inter_right - inter_left) * (inter_bottom - inter_top)
    # IoU = overlap / (union) = overlap / (sum of areas - overlap).
    return inter_area / (area_1 + area_2 - inter_area)

def nms(boxes_xyxy, iou_thresh):
    """
    Non-maximum suppression over a list of candidate boxes.

    :param boxes_xyxy: list of boxes; each box is a 5-sequence of four corner
        coordinates (min/min/max/max per axis) followed by a confidence score
    :param iou_thresh: candidates overlapping a kept box by more than this
        IoU are discarded
    :return: surviving boxes (original objects), best score first
    """
    if len(boxes_xyxy) <= 1:
        return boxes_xyxy

    mat = np.asarray(boxes_xyxy, dtype='float')
    c0, c1, c2, c3 = mat[:, 0], mat[:, 1], mat[:, 2], mat[:, 3]
    conf = mat[:, 4]

    # Pixel-style area of every candidate (+1 for inclusive coordinates).
    box_areas = (abs(c2 - c0) + 1) * (abs(c3 - c1) + 1)
    # Candidate indices ordered by descending confidence.
    order = conf.argsort()[::-1]

    survivors = []
    while order.size > 0:
        best = order[0]
        survivors.append(best)  # highest-scoring remaining box is kept
        rest = order[1:]
        # Intersection rectangle of the winner with every remaining candidate.
        ix0 = np.maximum(c0[best], c0[rest])
        iy0 = np.maximum(c1[best], c1[rest])
        ix1 = np.minimum(c2[best], c2[rest])
        iy1 = np.minimum(c3[best], c3[rest])
        # Overlap area; clamped at 0 when there is no intersection.
        overlap = np.maximum(0.0, ix1 - ix0 + 1) * np.maximum(0.0, iy1 - iy0 + 1)
        iou = overlap / (box_areas[best] + box_areas[rest] - overlap)
        # Keep only candidates below the IoU threshold; `iou` is aligned with
        # `rest` (one shorter than `order`), so index into `rest` directly.
        order = rest[np.where(iou <= iou_thresh)[0]]

    return [boxes_xyxy[k] for k in survivors]

class Net():
    """YOLT-style face detector on a ResNet-like Keras backbone.

    The model is built by _netYolt(); training/evaluation go through the
    project-local helpers ``cutImage``, ``loadLabels`` and ``writeTest``.
    Reference: https://arxiv.org/pdf/1805.09512.pdf
    """

    def __init__(self):
        self._net = None                 # Keras Model, populated by _netYolt()
        self.input_shape_whd = None      # input tensor shape (w, h, depth)
        self.output_shape_whd = None     # output grid shape (w, h, depth)
        self.box_num_per_cell = None     # boxes predicted per grid cell

    def _netYolt(self, input_shape, output_shape):
        '''
        Build and compile the YOLT network.

        Reference: https://arxiv.org/pdf/1805.09512.pdf

        :param input_shape: (width, height, channels) of the input image
        :param output_shape: (grid_w, grid_h, depth) of the prediction grid;
            depth is 5 values (cx, cy, w, h, confidence) per predicted box
        :return: the compiled Keras model (also stored in self._net)
        '''

        def Conv2d_BN(x, nb_filter, kernel_size, strides=(1, 1), padding='same', name=None):
            # Convolution (ReLU) followed by batch normalisation on the
            # channel axis.
            if name is not None:
                bn_name = name + '_bn'
                conv_name = name + '_conv'
            else:
                bn_name = None
                conv_name = None
            x = Conv2D(nb_filter, kernel_size, padding=padding, strides=strides, activation='relu', name=conv_name)(x)
            x = BatchNormalization(axis=3, name=bn_name)(x)
            return x

        def identity_Block(inpt, nb_filter, kernel_size, strides=(1, 1), with_conv_shortcut=False):
            # ResNet identity block; a convolutional shortcut is needed when
            # the spatial size or channel count changes.
            x = Conv2d_BN(inpt, nb_filter=nb_filter, kernel_size=kernel_size, strides=strides, padding='same')
            x = Conv2d_BN(x, nb_filter=nb_filter, kernel_size=kernel_size, padding='same')
            if with_conv_shortcut:
                shortcut = Conv2d_BN(inpt, nb_filter=nb_filter, strides=strides, kernel_size=kernel_size)
                return add([x, shortcut])
            return add([x, inpt])

        self.input_shape_whd = input_shape
        self.output_shape_whd = output_shape
        # Each predicted box carries 5 values: cx, cy, w, h, confidence.
        self.box_num_per_cell = output_shape[2] // 5

        # Network architecture.
        input_in = Input(shape=input_shape)
        x = Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=input_shape)(input_in)
        # conv1
        x = Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(1, 1), padding='same')
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
        # conv2_x
        x = identity_Block(x, nb_filter=64, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=64, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=64, kernel_size=(3, 3))
        # conv3_x
        x = identity_Block(x, nb_filter=128, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
        x = identity_Block(x, nb_filter=128, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=128, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=128, kernel_size=(3, 3))
        # conv4_x
        x = identity_Block(x, nb_filter=256, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
        x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=256, kernel_size=(3, 3))
        # conv5_x
        x = identity_Block(x, nb_filter=512, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
        x = identity_Block(x, nb_filter=512, kernel_size=(3, 3))
        x = identity_Block(x, nb_filter=512, kernel_size=(3, 3))
        # 1x1 projection down to the YOLT prediction depth.
        x = Conv2d_BN(x, nb_filter=output_shape[2], kernel_size=(1, 1), strides=(1, 1), padding='same')
        self._net = Model(inputs=input_in, outputs=x)

        self._net.compile(loss=keras.losses.mse,
                          optimizer=keras.optimizers.Adagrad(),
                          metrics=['accuracy'])

        print("Construct yolt network success!")
        return self._net

    def _netResNet(self, image):
        # Placeholder for an alternative backbone (never implemented).
        # _model = tiny_face_model()
        # _score = _model.tiny_face(image)
        pass

    def destruct(self):
        # Placeholder for resource cleanup.
        pass

    def loadModel(self, load_model):
        # Load weights from `load_model` if the file exists.
        # NOTE: the parameter name shadows keras.models.load_model inside
        # this method; harmless here, kept for caller compatibility.
        if load_model and exists(load_model):
            self._net.load_weights(load_model)

    def extractBoxesFromOutputs(self, outputs_data, iou_thresh=0.5):
        '''
        Extract bounding boxes whose predicted confidence exceeds the
        threshold from the raw network output grids.

        :param outputs_data: iterable of output grids, each of shape
            (grid_h, grid_w, box_num_per_cell * 5)
        :param iou_thresh: minimum confidence (channel 4 of each box) to keep
        :return: list (one entry per output grid) of lists of
            (cx, cy, w, h, confidence) tuples in input-image pixels
        '''
        i_w, i_h, i_d = self.input_shape_whd
        o_w, o_h, o_d = self.output_shape_whd

        PARAM_NUM = o_d // self.box_num_per_cell
        # Size of one output grid cell measured in input pixels.
        CELL_WIDTH = i_w // o_w
        CELL_HEIGHT = i_h // o_h

        boxes = []
        for output_data in outputs_data:
            curr_output_boxes = []
            for grid_ny in range(o_h):
                for grid_nx in range(o_w):
                    for box in range(self.box_num_per_cell):
                        # Channel 4 holds the box confidence score.
                        conf = output_data[grid_ny, grid_nx, box *
                                           PARAM_NUM + 4]
                        if conf > iou_thresh:
                            # Centre offsets are relative to the cell origin.
                            cx = grid_nx * CELL_WIDTH + \
                                output_data[grid_ny, grid_nx,
                                            box * PARAM_NUM + 0]
                            cy = grid_ny * CELL_HEIGHT + \
                                output_data[grid_ny, grid_nx,
                                            box * PARAM_NUM + 1]
                            w = output_data[grid_ny, grid_nx,
                                            box * PARAM_NUM + 2]
                            h = output_data[grid_ny, grid_nx,
                                            box * PARAM_NUM + 3]

                            curr_output_boxes.append((cx, cy, w, h, conf))
            boxes.append(curr_output_boxes)

        return boxes

    def train(self, label_file, root_dir, batch_size, epoch, start=0, end=-1, times_set=(0.5, 1, 2), load_model=None, model_name='faces_weight', saver_step=10):
        '''
        Train the network.

        Parameters
        ----------
        - label_file: label file path
        - root_dir: dataset root directory
        - batch_size / epoch: passed on to Keras fit()
        - start / end: slice of the dataset to use
        - times_set: candidate scale factors; one is sampled per image
        - load_model: optional weight file to resume from
        - model_name: prefix for saved weight files
        - saver_step: save weights every `saver_step` epochs
        '''
        if load_model and exists(load_model):
            self._net.load_weights(load_model)
        labels = lls(label_file, root_dir, start, end)
        _saver_step = 0
        for i in range(epoch):
            labels.shuffleData()
            print('Epoch ' + str(i + 1) + '/' + str(epoch))
            while True:
                if not labels.load():
                    break
                image_file_name = labels.getFileName()
                image_origin = cv2.imread(image_file_name)
                if image_origin is None:
                    # Missing/corrupt image: skip instead of crashing in resize().
                    continue

                print('load one: ', image_file_name)
                # Multi-scale training: randomly pick one scale per image.
                times_using = [random.choice(times_set)]
                for times in times_using:
                    label = labels.getBoundingBoxesListX(times)
                    image = cv2.resize(image_origin, (int(image_origin.shape[1] * times), int(image_origin.shape[0] * times)))
                    patches_img, patches_label = cutImage.cutImage(image, label, cutImage.labelFunV1, '')
                    if not patches_label:
                        continue
                    label_yolt = cutImage.converLabelYOLT(patches_label, True)

                    origin_patches = np.asarray([img for img in patches_img.values()], dtype='float')
                    origin_label = np.asarray(label_yolt, dtype='float')
                    # Keep only patches whose confidence channel is not
                    # all-zero, i.e. that contain at least one labelled object.
                    # BUGFIX: the loop variable must not be named `i` -- it
                    # previously shadowed the epoch counter used for the
                    # final-save check below.
                    patches, label = [], []
                    for idx, _label in enumerate(origin_label):
                        if np.sum(np.abs(_label[:, :, 4])) < 1:
                            continue
                        patches.append(origin_patches[idx])
                        label.append(_label)

                    patches = np.asarray(patches, dtype='float')
                    label = np.asarray(label, dtype='float')

                    if patches.shape[0] < 1:
                        continue
                    self._net.fit(patches, label, batch_size, 1, verbose=1)
            _saver_step += 1
            # Periodic checkpoint, plus a final save after the last epoch.
            if _saver_step % saver_step == 0:
                self._net.save_weights(model_name + '_' + str(_saver_step))
            if (i + 1) >= epoch:
                self._net.save_weights(model_name + '_' + str(epoch))

    def test(self, label_file, root_dir, batch_size, threshold, model='faces_weight', start=0, end=-1, times_set=(0.5, 1, 2)):
        '''
        Evaluate a saved model: run multi-scale detection on every labelled
        image, apply NMS, and print running true-positive / predicted-positive
        ratios (except for the local debug user, who gets a visualisation).
        '''
        if exists(model):
            self._net.load_weights(model)
        else:
            return
        t_good = 0    # ground-truth boxes matched by a prediction
        p_good = 0    # predictions matched by a ground-truth box
        all_good = 0  # total ground-truth boxes
        all_pred = 0  # total predicted boxes
        labels = lls(label_file, root_dir, start, end)
        write_test = writeTest(model + '_test.out')
        while True:
            if not labels.load():
                break
            image_file_name = labels.getFileName()
            image_origin = cv2.imread(image_file_name)
            pred_boxes_xyxy = []
            for times in times_set:
                label = labels.getBoundingBoxesListX(times)
                image = cv2.resize(image_origin, (int(image_origin.shape[1] * times), int(image_origin.shape[0] * times)))
                patches_img, _ = cutImage.cutImage(image, label, cutImage.labelFunV1, '')
                patches = np.asarray([img for img in patches_img.values()], dtype='float')
                # Predict bounding boxes for every image patch.
                predict_out = self._net.predict(patches)
                small_boxes_cxcywh = self.extractBoxesFromOutputs(predict_out, threshold)
                # Convert patch-relative boxes to raw-image coordinates
                # (undoing the scale factor `times`).
                for (ox, oy), boxes_cxcywh in zip(patches_img.keys(), small_boxes_cxcywh):
                    if len(boxes_cxcywh) <= 0:
                        continue
                    for box_cxcywh in boxes_cxcywh:
                        cx = box_cxcywh[0] + ox
                        cy = box_cxcywh[1] + oy
                        w = box_cxcywh[2]
                        h = box_cxcywh[3]
                        # Reject implausibly small or large detections.
                        if (w < 10) or (w > 400) or (h < 10) or (h > 400):
                            continue
                        iou = box_cxcywh[4]
                        global_box_xyxy = [cy / times, cx / times, (cy + h) / times, (cx + w) / times, iou]
                        pred_boxes_xyxy.append(global_box_xyxy)

                # Non-maximum suppression after each scale pass.
                pred_boxes_xyxy = nms(
                    boxes_xyxy=pred_boxes_xyxy, iou_thresh=0.3)

            if getpass.getuser() == 'jerry':
                # Local debugging: draw detections and wait for a key press.
                image = cv2.imread(image_file_name)
                for box in pred_boxes_xyxy:
                    cv2.rectangle(image, (int(box[1]), int(box[0])), (int(box[3]), int(box[2])), (0, 255, 255), 2)
                cv2.imshow("test", image)
                cv2.waitKey(0)
            else:
                # Rebuild ground-truth boxes at scale 1 through the same
                # patch/label pipeline so they are comparable to predictions.
                label = labels.getBoundingBoxesListX()
                patches_img, patches_label = cutImage.cutImage(image_origin, label, cutImage.labelFunV1, '')
                label_yolt = cutImage.converLabelYOLT(patches_label)
                origin_boxes_cxcywh = self.extractBoxesFromOutputs(label_yolt, threshold)
                origin_boxes_xyxy = []
                for (ox, oy), boxes_cxcywh in zip(patches_img.keys(), origin_boxes_cxcywh):
                    if len(boxes_cxcywh) <= 0:
                        continue
                    for box_cxcywh in boxes_cxcywh:
                        cx = box_cxcywh[0] + ox
                        cy = box_cxcywh[1] + oy
                        w = box_cxcywh[2]
                        h = box_cxcywh[3]
                        if (w < 10) or (w > 400) or (h < 10) or (h > 400):
                            continue
                        iou = box_cxcywh[4]
                        origin_box_xyxy = [cy, cx, (cy + h), (cx + w), iou]
                        origin_boxes_xyxy.append(origin_box_xyxy)
                        all_good += 1

                all_pred += len(pred_boxes_xyxy)
                # Ground-truth recall: count ground-truth boxes matched by at
                # least one prediction.
                for origin_box in origin_boxes_xyxy:
                    for pred_box in pred_boxes_xyxy:
                        iou = compute_iou(pred_box, origin_box)
                        if iou > 0.1:
                            t_good += 1
                            write_test.write(min((origin_box[2] - origin_box[0]), (origin_box[3] - origin_box[1])))
                            break

                # Precision numerator: predictions matched by ground truth.
                for pred_box in pred_boxes_xyxy:
                    for origin_box in origin_boxes_xyxy:
                        iou = compute_iou(pred_box, origin_box)
                        if iou > 0.1:
                            p_good += 1
                            break

                # Guard against division by zero when nothing was found yet.
                if all_good > 0 and all_pred > 0:
                    print('tp: ', t_good / (all_good + 0.0), '  ep: ', p_good / (all_pred + 0.0))

if __name__ == '__main__':
    # Per-user dataset locations and training hyper-parameters.
    # (The previous unconditional initial assignment was dead code: both
    # branches immediately reassign all four values.)
    if getpass.getuser() == 'jerry':
        label_file = "/home/jerry/dataSet/faces/wider_face_split/wider_face_train_bbx_gt.txt"
        root_dir = "/home/jerry/dataSet/faces/WIDER_train/images/"
        batch_size, epoch, start, end = 5, 1, 0, 0
    else:
        label_file = "/home/huster2/data/faces/wider_face_split/wider_face_train_bbx_gt.txt"
        root_dir = "/home/huster2/data/faces/WIDER_train/images/"
        batch_size, epoch, start, end = 10, 50, 0, 1000  # 100 200

    # Network geometry: 416x416 RGB input, 26x26 prediction grid with
    # 10 output channels (2 boxes x 5 values per cell).
    input_shape = [416, 416, 3]
    output_shape = [26, 26, 10]

    net = Net()
    net_net = net._netYolt(input_shape, output_shape)
    # plot_model(net_net,to_file='model.png',show_shapes=True,show_layer_names=False)

    net.train(label_file, root_dir, batch_size, epoch, start, end,
              times_set=[0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2], model_name='faces_weight_v10')
    # net.loadModel('faces_weight_1')
    # net_test = net.test(label_file, root_dir, batch_size, 0.8, 'faces_weight_50', start, end, times_set=[0.5, 1, 2])
