#! /usr/bin/env python3
import numpy as np
np.random.seed(3) # fix the initial random seed for reproducibility

import keras
import keras.backend as K
from keras.layers import (
    Input, Conv2D, MaxPool2D, LeakyReLU, BatchNormalization,
    LocallyConnected2D, Flatten, Dense, Dropout, Reshape, ZeroPadding2D,
    UpSampling2D, Conv2DTranspose, Concatenate)
from keras.models import Model

from tqdm import tqdm
from PIL import Image
import os, pickle, argparse

from libs.datasetFuncs import getDatas, datas2X, getTargets, targets2TrainY
from libs.tools import drawAreaRect, calcAreaBoxs, formatImage, calcSourceBoxs

def NMSE(y_true, y_pred):
    """Normalized mean squared error loss.

    The residuals are scaled by ``k = 0.5 / mean(y_true)`` before squaring
    and the mean is rescaled by ``1/k`` afterwards, so the loss magnitude
    is expressed relative to the average level of the targets.
    """
    baseline = K.mean(y_true) + K.epsilon()  # epsilon guards against division by zero
    scale = 0.5 / baseline
    unscale = 1 / scale
    return unscale * K.mean(K.square(scale * (y_true - y_pred)))

def trainingAreaDetect(data_path, target_path,
                       model_dir, model_filename,
                       clean, training, pcolors):
    """Build, optionally train, save and evaluate the area-detection model.

    Parameters
    ----------
    data_path : str
        Directory containing the source (input) images.
    target_path : str
        Directory containing the labelled (mask) images.
    model_dir : str
        Directory the model file is loaded from / saved to.
    model_filename : str
        File name of the model inside ``model_dir``.
    clean : bool-like
        When truthy, ignore any existing saved model and start from fresh weights.
    training : bool-like
        When truthy, train the model and save it afterwards.
    pcolors : str
        Whitespace-separated RGB integers (e.g. "255 0 0 0 255 0");
        every 3 consecutive values form one marker colour.
    """
    # Local constants used below:
    # - `image_shape`: shape of the input image data;
    # - `batch_size`: training batch size, normally does not need changing;
    # - `epochs_num`: number of training epochs; adjust as needed, but keep it moderate.

    USE_CLEAN = clean # start from clean weights, i.e. do NOT reuse a saved model
    USE_TRAINING = training # whether to train the model in this run

    image_shape = (128, 128, 3) # image (height, width, RGB channels)
    batch_size = 50
    epochs_num = 300

    # ## Load the data set
    datas = getDatas(data_path)
    train_X = datas2X(datas)

    # Parse the flat "R G B R G B ..." string into an (N, 3) colour array.
    pcolors = list(map(int, pcolors.split()))
    pcolors = np.array(pcolors).reshape(-1, 3)

    targets, classes_num = getTargets(target_path, pcolors=pcolors)
    train_Y = targets2TrainY(targets)

    # ## Build the network
    input_image = Input(shape=image_shape, name='input_image') # model input

    # Encoder: four conv + 2x2 max-pool stages shrink 128x128 down to 8x8.
    conv1 = Conv2D(1, (1, 1), padding='same')(input_image)
    pool1 = MaxPool2D((2, 2))(conv1)

    conv2 = LeakyReLU(0.3)(Conv2D(3, (3, 3), padding='same')(pool1))
    pool2 = MaxPool2D((2, 2))(conv2)

    # NOTE(review): axis=1 normalizes over the height axis; for channels-last
    # input (as used here) the usual Keras convention is axis=-1 — confirm
    # this is intentional.
    normalized1 = BatchNormalization(axis=1)(pool2)
    conv3 = LeakyReLU(0.3)(Conv2D(5, (3, 3), padding='same')(normalized1))
    pool3 = MaxPool2D((2, 2))(conv3)

    conv4 = LeakyReLU(0.3)(Conv2D(8, (3, 3), padding='same')(pool3))
    pool4 = MaxPool2D((2, 2))(conv4)

    normalized2 = BatchNormalization(axis=1)(pool4)

    # Dense bottleneck branch: flatten the 8x8x8 encoding, project through a
    # dropout-regularized hidden layer, and produce 8*8*8 = 512 features.
    flattened = Flatten()(normalized2)
    dense1 = Dense(256, activation='relu')(flattened)
    droped = Dropout(0.5)(dense1)
    dense2 = Dense(8 * 8**2, activation='relu')(droped)

    # Locally-connected branch: unshared 3x3 convolutions (each shrinks the
    # map by 2), zero-padded back to 8x8 so the two branches can be merged.
    lcon1 = LocallyConnected2D(8, (3, 3), activation='relu')(normalized2)
    padd1 = ZeroPadding2D()(lcon1)
    lcon2 = LocallyConnected2D(8, (3, 3), activation='relu')(padd1)
    padd2 = ZeroPadding2D()(lcon2)

    # Merge both branches channel-wise at 8x8 resolution.
    reshaped = Reshape((8, 8, 8))(dense2)
    concatenated1 = Concatenate()([reshaped, padd2])

    # Decoder: stride-2 transposed convolutions upsample 8x8 back to 64x64.
    decv1 = Conv2DTranspose(8, (3, 3), strides=2, padding='same', activation='relu')(concatenated1)
    decv2 = Conv2DTranspose(5, (3, 3), strides=2, padding='same', activation='relu')(decv1)
    decv3 = Conv2DTranspose(3, (3, 3), strides=2, padding='same', activation='relu')(decv2)

    # Skip connection from the first pooling stage, then a final upsample to
    # 128x128 and a per-class output mask (one channel per marker colour).
    concatenated2 = Concatenate()([pool1, decv3])
    decv4 = Conv2DTranspose(classes_num+1, (3, 3), strides=2, padding='same', activation='relu')(concatenated2)
    output_mask = Conv2DTranspose(classes_num, (3, 3), padding='same', activation='relu')(decv4)

    # Reuse a saved model when allowed and present; otherwise build (and
    # later compile) a fresh one.
    if not USE_CLEAN and os.path.isfile(os.path.join(model_dir, model_filename)):
        model = keras.models.load_model(os.path.join(model_dir, model_filename),
                                        custom_objects={'NMSE':NMSE})
    else:
        USE_CLEAN = True
        model = Model(inputs=input_image, outputs=output_mask)

    # Only a freshly built model needs compiling; a loaded model already
    # carries its optimizer/loss state.
    if USE_CLEAN:
        model.compile(optimizer='Adam', loss=NMSE, metrics=['mae'])

    # ## Train the neural network model
    def generator_ex_dataset(train_X, train_Y, batch_size, width_shift_range=10, height_shift_range=10):
        # Infinite batch generator with random-shift augmentation: each batch
        # is translated by the same random (height_shift, width_shift) offset,
        # applied identically to X and Y so the masks stay aligned.
        # NOTE(review): the shift copies in place, so the vacated border
        # keeps the pre-shift pixel values (it is not zero-filled) — confirm
        # this is the intended augmentation behaviour.  Also note that
        # np.random.randint excludes the upper bound, so the positive shift
        # never reaches `*_shift_range` itself.
        width = train_X.shape[2]
        height = train_X.shape[1]
        while True:
            shuffle_index = np.random.permutation(train_X.shape[0])
            batch_count = shuffle_index.size // batch_size
            for i in range(batch_count):
                batch_index = shuffle_index[i * batch_size:(i + 1) * batch_size]
                batch_X = train_X[batch_index]
                batch_Y = train_Y[batch_index]
                width_shift = np.random.randint(-width_shift_range, width_shift_range)
                height_shift = np.random.randint(-height_shift_range, height_shift_range)
                # The four branches cover the sign combinations of the two
                # shifts; each copies the kept region to its shifted position.
                if width_shift >= 0:
                    if height_shift >= 0:
                        # shift down and right
                        batch_X[:, height_shift:, width_shift:, :] =\
                        batch_X[:, :height-height_shift, :width-width_shift, :]
                        batch_Y[:, height_shift:, width_shift:, :] =\
                        batch_Y[:, :height-height_shift, :width-width_shift, :]
                    else:
                        # shift up and right
                        batch_X[:, :height+height_shift, width_shift:, :] =\
                        batch_X[:, -height_shift:, :width-width_shift, :]
                        batch_Y[:, :height+height_shift, width_shift:, :] =\
                        batch_Y[:, -height_shift:, :width-width_shift, :]
                else:
                    if height_shift >= 0:
                        # shift down and left
                        batch_X[:, height_shift:, :width+width_shift, :] =\
                        batch_X[:, :height-height_shift, -width_shift:, :]
                        batch_Y[:, height_shift:, :width+width_shift, :] =\
                        batch_Y[:, :height-height_shift, -width_shift:, :]
                    else:
                        # shift up and left
                        batch_X[:, :height+height_shift, :width+width_shift, :] =\
                        batch_X[:, -height_shift:, -width_shift:, :]
                        batch_Y[:, :height+height_shift, :width+width_shift, :] =\
                        batch_Y[:, -height_shift:, -width_shift:, :]
                yield batch_X, batch_Y

    if USE_TRAINING:
        # NOTE(review): fit_generator is deprecated in newer Keras versions
        # in favour of model.fit — fine for the Keras version this file targets.
        tbCallback = keras.callbacks.TensorBoard()
        model.fit_generator(
                generator_ex_dataset(train_X, train_Y, batch_size),
                steps_per_epoch=train_X.shape[0] // batch_size, epochs=epochs_num, callbacks=[tbCallback])

    # ### Save the trained model
    # **PS:** saving may require the h5py library (`pip install h5py`).
    if USE_TRAINING:
        if not os.path.isdir(model_dir): os.mkdir(model_dir)
        model.save(os.path.join(model_dir, model_filename))

    # Reload the saved model so evaluation runs against exactly what is on disk.
    if os.path.isfile(os.path.join(model_dir, model_filename)):
        model = keras.models.load_model(os.path.join(model_dir, model_filename),
                                        custom_objects={'NMSE': NMSE})

    # ## Evaluate the network on the training set
    # Returns [loss value, metric value].  **If enough data is available,
    # a held-out test set should be used instead!**
    print('Validation in train set')
    print(model.evaluate(train_X, train_Y, batch_size=batch_size))
    print('Finished!')

if __name__ == '__main__':
    # Command-line front end: collect paths/flags and launch training.
    cli = argparse.ArgumentParser(description='区域检测模型训练工具')
    # Positional arguments (order matters for the CLI).
    cli.add_argument('datas', metavar='原始图片集', type=str,
                     help='原始图片集所在的目录路径')
    cli.add_argument('targets', metavar='标记图片集', type=str,
                     help='标记图片集所在的目录路径')
    cli.add_argument('basename', metavar='模型文件名称', type=str,
                     help='载入/存储的模型文件名称，如：layout.h5')
    cli.add_argument('colors', metavar='标记颜色', type=str,
                     help='指定标记颜色RGB值，如："255 0 0 255 255 0 0 0 255"'
                          '为(255, 0, 0) (255, 255, 0) (0, 0, 255)')
    # Optional switches; `nargs='?'` with `const=True` makes -c / -t act as flags.
    cli.add_argument('-d', dest='directory', type=str, default='./models/',
                     help='指定模型载入/存储的目录，默认为 "./models/"')
    cli.add_argument('-c', dest='clean', nargs='?', const=True, default=False,
                     help='指定是否重新训练模型，若不启用则为否')
    cli.add_argument('-t', dest='training', nargs='?', const=True, default=False,
                     help='指定是否训练模型，若不启用则为否')

    opts = cli.parse_args()
    trainingAreaDetect(
            opts.datas, opts.targets, opts.directory, opts.basename,
            opts.clean, opts.training, opts.colors)
