# -*-coding:utf-8-*-

from __future__ import print_function
import sys
import numpy as np
import keras.backend as K
from keras.utils import multi_gpu_model
from keras.optimizers import Adam
from keras.models import Model
from keras.callbacks import ModelCheckpoint, Callback
from keras.layers import Bidirectional, TimeDistributed, Conv2D, MaxPooling2D, Input, GRU, Dense, Activation, Dropout, Reshape, Permute, GlobalAveragePooling2D

import utils.metrics as metrics
from utils.common import logger, plot_functions
from models.networks import crnn, crnn_v1, crnn_v2, inception_crnn, inception
from models.loss import binary_focal_loss, categorical_focal_loss
sys.setrecursionlimit(10000)        # deep model graphs / recursive serialization can exceed the default limit
np.set_printoptions(threshold=sys.maxsize)      # print arrays in full (no "..." truncation) when debugging
CURRENT_VERBOSITY = 0               # extra verbosity level attached to every logger.info call below


def build_model(data_in, data_out, is_delta, loss_type, network_type,
                filters, pool_size_lists, rnn_lists, dnn_lists, dropout_rate, label_level='frame', num_gpu=1):
    """Build and compile a (possibly multi-GPU) Keras model for event detection.

    Args:
        data_in: training input array; only its shape is used. Assumed
            channels-first, i.e. (..., channels, time, freq) -- TODO confirm
            against the data loader.
        data_out: training target array; its last dimension is the class count.
        is_delta: if True, force 3 input channels (static + delta + delta-delta).
        loss_type: 'normal' | 'sigmoid_focal' | 'softmax_focal'; any other value
            falls back to binary cross-entropy.
        network_type: 'inception_crnn' | 'inception' | 'crnn_v1' | 'crnn_v2';
            any other value uses the plain crnn network.
        filters, pool_size_lists, rnn_lists, dnn_lists, dropout_rate:
            architecture hyper-parameters forwarded to the network builders.
        label_level: granularity of the labels (default 'frame').
        num_gpu: if != 1, wrap the model with multi_gpu_model.

    Returns:
        (model, single_gpu_model): the compiled model, and -- when num_gpu != 1 --
        the underlying single-GPU template model (None otherwise), which is what
        should be checkpointed (see ParallelModelCheckpoint).
    """
    # Input, channels-first layout
    f_dim = data_in.shape[-1]
    t_dim = None                                            # None time dim: allow variable-length inputs
    channels = data_in.shape[-3]
    if is_delta:
        channels = 3
    num_classes = data_out.shape[-1]
    spec_start = Input(shape=(channels, t_dim, f_dim))
    spec_x = spec_start

    # Network selection; argument lists differ per builder, so keep explicit dispatch.
    if network_type == 'inception_crnn':
        output = inception_crnn(spec_x, num_classes, label_level, dropout_rate=dropout_rate, filters=64)
    elif network_type == 'inception':
        output = inception(spec_x, num_classes, label_level, dropout_rate=dropout_rate, dnn_lists=dnn_lists)
    elif network_type == 'crnn_v1':
        output = crnn_v1(spec_x, num_classes, label_level)
    elif network_type == 'crnn_v2':
        output = crnn_v2(spec_x, num_classes, label_level, dropout_rate, filters, pool_size_lists, rnn_lists, dnn_lists)
    else:
        output = crnn(spec_x, num_classes, label_level, dropout_rate, filters, pool_size_lists, rnn_lists, dnn_lists)

    # TODO: clip_norm
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

    model = Model(inputs=spec_start, outputs=output)
    single_gpu_model = None      # stays None on single GPU; otherwise holds the template model
    if num_gpu != 1:
        single_gpu_model = model
        model = multi_gpu_model(model, gpus=num_gpu)

    # The original if/elif chain duplicated the 'normal' and fallback branches;
    # a lookup with a default expresses the same mapping without the dead branch.
    loss_by_type = {
        'normal': 'binary_crossentropy',
        'sigmoid_focal': binary_focal_loss,
        'softmax_focal': categorical_focal_loss,
    }
    model.compile(optimizer=adam, loss=loss_by_type.get(loss_type, 'binary_crossentropy'))
    model.summary()
    return model, single_gpu_model


class TrainCallback(Callback):
    """Training callback combining three jobs:

    1. After every epoch, evaluate frame-level F1 / error-rate on (X, Y).
    2. From ``start_epoch`` on, track ``monitor`` and apply a manual
       ReduceLROnPlateau-style decay; after ``early_stop_patience`` LR
       reductions without improvement, stop training.
    3. At the end of training, plot loss/metric curves into ``figure_dir``.
    """

    def __init__(self, X, Y, num_epochs, figure_dir, frames_1_sec, reduce_lr_patience=2, factor=0.8, start_epoch=80,
                 monitor='val_er_overall_1sec', early_stop_patience=20, min_lr=1e-5, min_delta=1e-4, verbose=1):
        # BUG FIX: the original called super(Callback, self).__init__(), which
        # resolves past Callback and skips Callback.__init__ entirely. Name the
        # subclass so the base class is initialized properly.
        super(TrainCallback, self).__init__()
        self.X = X                                    # validation inputs used by eval_map()
        self.Y = Y                                    # validation targets used by eval_map()
        self.num_epochs = num_epochs
        self.figure_dir = figure_dir
        self.frames_1_sec = frames_1_sec              # frames per second, for 1-sec segment scoring
        self.reduce_lr_patience = reduce_lr_patience  # epochs without improvement before an LR cut
        self.wait = 0                                 # epochs since last improvement
        self.factor = factor                          # multiplicative LR decay factor
        self.current_reduce_nb = 0                    # how many LR reductions happened so far
        self.start_epoch = start_epoch                # monitoring starts only after this epoch
        self.monitor = monitor
        self.early_stop_patience = early_stop_patience  # max LR reductions before early stop
        self.min_lr = min_lr
        self.min_delta = min_delta                    # minimum change that counts as improvement
        self.verbose = verbose

        # F1 is maximized; everything else (ER, val_loss) is minimized.
        if self.monitor == 'val_f1_overall_1sec':
            self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
            self.best_score = -np.inf
        else:
            self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
            self.best_score = np.inf

        self.best_epoch = 0
        # Pre-sized per-epoch histories, filled in on_epoch_end and plotted in on_train_end.
        self.tr_loss, self.val_loss, self.f1_overall_1sec_list, self.er_overall_1sec_list = [0] * num_epochs, [0] * num_epochs, [0] * num_epochs, [0] * num_epochs

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: the original used a mutable default (logs={}) and then
        # mutated it (logs['lr'] = ...), sharing state across calls.
        logs = {} if logs is None else logs
        tr_loss = logs.get('loss')
        val_loss = logs.get('val_loss')
        f1_overall_1sec, er_overall_1sec = self.eval_map()
        self.tr_loss[epoch] = tr_loss
        self.val_loss[epoch] = val_loss
        self.f1_overall_1sec_list[epoch] = f1_overall_1sec
        self.er_overall_1sec_list[epoch] = er_overall_1sec
        logs['lr'] = K.get_value(self.model.optimizer.lr)

        if epoch >= self.start_epoch:
            if self.monitor == 'val_er_overall_1sec':
                current_score = er_overall_1sec
            elif self.monitor == 'val_f1_overall_1sec':
                current_score = f1_overall_1sec
            else:
                current_score = val_loss
            if self.monitor_op(current_score, self.best_score):
                self.wait = 0
                # self.current_reduce_nb = 0
                self.best_epoch = epoch
                self.best_score = current_score
            else:
                if self.wait >= self.reduce_lr_patience:
                    self.current_reduce_nb += 1
                    if self.current_reduce_nb <= self.early_stop_patience:
                        self.wait = 0       # reset wait to slow down the rate of LR decay
                        old_lr = float(K.get_value(self.model.optimizer.lr))
                        new_lr = max(old_lr*self.factor, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                    else:
                        if self.verbose > 0:
                            logger.info("Epoch %d: early stopping" % (epoch), extra={"verbosity": CURRENT_VERBOSITY})
                        self.model.stop_training = True
                self.wait += 1
        logger.info('LR: {:.6}, F1_overall: {:.4}, ER_overall: {:.4}, Best ER: {:.4}, best_epoch: {}'.format(
            logs['lr'], f1_overall_1sec, er_overall_1sec, self.best_score, self.best_epoch), extra={"verbosity": CURRENT_VERBOSITY})

    def on_train_end(self, logs=None):
        # Dump loss / F1 / ER curves for the whole run.
        plot_functions(self.figure_dir, self.num_epochs, self.tr_loss, self.val_loss, self.f1_overall_1sec_list, self.er_overall_1sec_list)

    def eval_map(self):
        """Predict on (X, Y) and return (f1_overall_1sec, er_overall_1sec)."""
        posterior_thresh = 0.5                              # sigmoid posterior threshold for binarization
        # sample_indexs = np.random.choice(self.X.shape[0], 64, replace=False)    # (optional) random 64-sample subset
        # X = self.X[sample_indexs]
        # y_true = self.Y[sample_indexs]
        X = self.X
        y_true = self.Y
        y_pred = self.model.predict(X)
        y_pred = y_pred > posterior_thresh                  # binarize: >0.5 -> 1, otherwise 0
        score_list = metrics.compute_scores(y_pred, y_true, frames_in_1_sec=self.frames_1_sec)       # scores are computed frame-wise
        f1_overall_1sec = score_list['f1_overall_1sec']
        er_overall_1sec = score_list['er_overall_1sec']

        return f1_overall_1sec, er_overall_1sec


class ParallelModelCheckpoint(ModelCheckpoint):
    """ModelCheckpoint variant for multi-GPU training.

    Always checkpoints the single-GPU template model rather than the
    multi_gpu_model wrapper, so saved weights can be reloaded anywhere.
    """

    def __init__(self, model, filepath, monitor='val_loss', verbose=0,
                 save_best_only=True, save_weights_only=False,
                 mode='auto', period=1):
        # Keep a handle to the original (single-GPU) model; this is what gets saved.
        self.single_model = model
        # FIX: forward options by keyword. The original passed them all
        # positionally, which misbinds silently if the parent's parameter
        # order ever differs between Keras versions.
        super(ParallelModelCheckpoint, self).__init__(
            filepath, monitor=monitor, verbose=verbose,
            save_best_only=save_best_only, save_weights_only=save_weights_only,
            mode=mode, period=period)

    def set_model(self, model):
        # Deliberately ignore the model Keras hands us (possibly the multi-GPU
        # wrapper) and always point the checkpoint at the template model.
        super(ParallelModelCheckpoint, self).set_model(self.single_model)


def fix_random_seed(gpu_id=None, SEED=0):
    '''Seed every RNG (hash, random, numpy, tensorflow) and configure a
    single-threaded TF session for reproducible results.

    Learn From: https://stackoverflow.com/questions/32419510/how-to-get-reproducible-results-in-keras

    Args:
        gpu_id: value for CUDA_VISIBLE_DEVICES (e.g. '0'). BUG FIX: the
            original assigned it unconditionally, raising TypeError when the
            default None was used (environ values must be str); now it is
            only set when provided.
        SEED: seed shared by all RNGs.
    '''
    import os
    import random
    import numpy as np
    import tensorflow as tf
    from keras import backend as K

    # 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
    os.environ['PYTHONHASHSEED'] = str(SEED)

    # 2. Set the `python` built-in pseudo-random generator at a fixed value
    random.seed(SEED)

    # 3. Set the `numpy` pseudo-random generator at a fixed value
    np.random.seed(SEED)

    # 4. Pin the visible GPU(s); skip when no gpu_id was given
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # 5. Set the `tensorflow` pseudo-random generator at a fixed value (TF1 API)
    tf.set_random_seed(SEED)

    # 6. Configure a new global `tensorflow` session; single-threaded ops
    #    avoid nondeterministic parallel reductions.
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
