# -*-coding:utf-8-*-
'''
Date: 2019.07.09
'''

from __future__ import print_function
import os
import sys
import time
import math
import numpy as np
import matplotlib.pyplot as plot
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import keras.backend as K
import tensorflow as tf
from keras.utils import multi_gpu_model
from keras.optimizers import Adam
from keras.models import Model, load_model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau, Callback
from keras.layers import Bidirectional, TimeDistributed, Conv2D, MaxPooling2D, Input, GRU, Dense, Activation, Dropout, Reshape, Permute

import utils.metrics as metrics
import utils.common as common
from utils.vad2 import vad_core
from utils.prepare_data import prepare_data
from utils.common import logger, get_wav_len, my_argmax
from utils.metrics import postprocess
plot.switch_backend('agg')
sys.setrecursionlimit(10000)
K.set_image_data_format('channels_first')
np.set_printoptions(threshold=sys.maxsize)
CURRENT_VERBOSITY = 0


class LrReducer(Callback):
    """Reduce the learning rate when validation accuracy stops improving.

    After `patience` epochs without improvement the LR is multiplied by
    `reduce_rate`; after `reduce_nb` reductions training is stopped early.
    """

    def __init__(self, patience=0, reduce_rate=0.5, reduce_nb=10, verbose=1):
        # BUG FIX: the original called super(Callback, self).__init__(),
        # which skipped Callback's own initializer entirely.
        super(LrReducer, self).__init__()
        self.patience = patience            # epochs to wait before reducing
        self.wait = 0                       # epochs since last improvement
        self.best_score = -1.               # best val_acc seen so far
        self.reduce_rate = reduce_rate      # multiplicative LR decay factor
        self.current_reduce_nb = 0          # reductions performed so far
        self.reduce_nb = reduce_nb          # max reductions before early stop
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs={}):
        current_score = logs.get('val_acc')
        if current_score is None:
            # val_acc unavailable (e.g. accuracy metric not compiled);
            # comparing None would raise, so just skip this epoch.
            return
        if current_score > self.best_score:
            self.best_score = current_score
            self.wait = 0
            if self.verbose > 0:
                print('---current best val accuracy: %.3f' % current_score)
        else:
            if self.wait >= self.patience:
                self.current_reduce_nb += 1
                # BUG FIX: the limit was hard-coded to 10 instead of using
                # the reduce_nb parameter.
                if self.current_reduce_nb <= self.reduce_nb:
                    # Use the backend-neutral API: the Theano-style
                    # .get_value()/.set_value() variable methods fail under
                    # the TensorFlow backend this script configures.
                    lr = K.get_value(self.model.optimizer.lr)
                    K.set_value(self.model.optimizer.lr, lr * self.reduce_rate)
                else:
                    if self.verbose > 0:
                        print("Epoch %d: early stopping" % (epoch))
                    self.model.stop_training = True
            self.wait += 1


class ParallelModelCheckpoint(ModelCheckpoint):
    """ModelCheckpoint variant for multi-GPU training.

    Keras hands callbacks the `multi_gpu_model` wrapper; this subclass
    always checkpoints the original single-GPU template model instead,
    so the saved file can be reloaded without the parallel wrapper.
    """

    def __init__(self, model, filepath, monitor='val_loss', verbose=0,
                 save_best_only=True, save_weights_only=False,
                 mode='auto', period=1):
        # Remember the template model to save in place of the wrapper.
        self.single_model = model
        super(ParallelModelCheckpoint, self).__init__(
            filepath,
            monitor=monitor,
            verbose=verbose,
            save_best_only=save_best_only,
            save_weights_only=save_weights_only,
            mode=mode,
            period=period)

    def set_model(self, model):
        # Ignore the model Keras passes in; bind the checkpoint logic to
        # the stored single-GPU template instead.
        super(ParallelModelCheckpoint, self).set_model(self.single_model)


class LearningRateTracker(Callback):
    """Log the effective (time-decayed) learning rate after each epoch."""

    def on_epoch_end(self, epoch, logs=None):
        optimizer = self.model.optimizer
        # Reproduce the optimizer's time-based decay schedule:
        #   lr_t = lr / (1 + decay * iterations)
        # K.cast replaces the deprecated tf.to_float (removed in TF 2.x),
        # matching the alternative the original author left in a comment.
        iterations = K.cast(optimizer.iterations, K.dtype(optimizer.decay))
        lr = K.eval(optimizer.lr * (1. / (1. + optimizer.decay * iterations)))
        logger.info('LR: {:.6f}\n'.format(lr), extra={"verbosity": CURRENT_VERBOSITY})


def get_test_acc(result_file):
    """Compute utterance-level accuracy from a decode result file.

    Each well-formed line is "<wav_path> <predicted_label>"; the reference
    label is the parent directory name of the wav path. Lines that do not
    split into exactly two fields still count toward the total, matching
    the original scoring behaviour.

    :param result_file: path to the result file written during decoding.
    :return: true_count / count (approaches 0.0 for an empty file).
    """
    count = 0
    true_count = 0
    with open(result_file, 'r') as f:
        # Idiomatic line iteration instead of a manual readline() loop.
        for line in f:
            count += 1
            tmp = line.strip().split()
            if len(tmp) == 2:
                wav_path, pred = tmp
                # Directory layout is .../<label>/<file>.wav
                label = wav_path.split('/')[-2]
                if label == pred:
                    true_count += 1
    # eps guards against division by zero on an empty file.
    return true_count/(count + 1e-8)


# TODO
# make the learning-rate schedule configurable
def build_model(data_in, data_out, _cnn_nb_filt, _cnn_pool_size, _rnn_nb, _fc_nb, num_gpu=1, _dropout_rate=None):
    """Build and compile the CRNN sound-event-detection model.

    Architecture: [Conv2D + BN + ReLU + frequency-only max-pool + dropout]
    repeated len(_cnn_pool_size) times -> bidirectional GRU stack ->
    time-distributed FC stack -> per-class sigmoid output at every frame.

    :param data_in: training input array; only its shape is read
        (channels = shape[-3], feature dim = shape[-1]).
    :param data_out: training target array; shape[-1] is the class count.
    :param _cnn_nb_filt: number of filters in every CNN layer.
    :param _cnn_pool_size: frequency pool size per CNN layer; its length
        sets the CNN depth.
    :param _rnn_nb: GRU widths; its length sets the RNN depth.
    :param _fc_nb: FC widths; its length sets the FC depth.
    :param num_gpu: wrap with multi_gpu_model when != 1.
    :param _dropout_rate: dropout probability; defaults to the module-level
        `dropout_rate` to stay backward compatible with existing callers.
    :return: the compiled Keras model.
    """
    if _dropout_rate is None:
        _dropout_rate = dropout_rate        # fall back to the global setting
    f_dim = data_in.shape[-1]
    t_dim = None                            # None allows variable-length input along time
    channels = data_in.shape[-3]
    spec_start = Input(shape=(channels, t_dim, f_dim))
    spec_x = spec_start
    for _pool in _cnn_pool_size:
        spec_x = Conv2D(filters=_cnn_nb_filt, kernel_size=(3, 3), padding='same')(spec_x)
        spec_x = BatchNormalization(axis=1)(spec_x)     # axis=1: channels_first layout
        spec_x = Activation('relu')(spec_x)
        spec_x = MaxPooling2D(pool_size=(1, _pool))(spec_x)     # pool along frequency only; time resolution kept
        spec_x = Dropout(_dropout_rate)(spec_x)
    spec_x = Permute((2, 1, 3))(spec_x)     # -> [batch_size, t_len, channels, f_dim]
    # _keras_shape is a private Keras attribute, kept because the symbolic
    # shape is needed to collapse channels x frequency into one feature axis.
    out_shape = spec_x._keras_shape
    spec_x = Reshape((-1, out_shape[-1] * out_shape[-2]))(spec_x)

    for _r in _rnn_nb:
        spec_x = Bidirectional(
            GRU(_r, activation='tanh', dropout=_dropout_rate, recurrent_dropout=_dropout_rate, return_sequences=True),
            merge_mode='mul')(spec_x)

    for _f in _fc_nb:
        spec_x = TimeDistributed(Dense(_f))(spec_x)
        spec_x = Dropout(_dropout_rate)(spec_x)

    spec_x = TimeDistributed(Dense(data_out.shape[-1]))(spec_x)
    out = Activation('sigmoid', name='strong_out')(spec_x)

    model = Model(inputs=spec_start, outputs=out)
    if num_gpu != 1:
        model = multi_gpu_model(model, gpus=num_gpu)
    # Multi-label frame targets, hence sigmoid outputs + binary cross-entropy.
    # (Removed the unused clip_norm local from the original.)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(optimizer=adam, loss='binary_crossentropy')
    model.summary()
    return model


def plot_functions(_nb_epoch, _tr_loss, _val_loss, _f1, _er, extension=''):
    """Save the training curves (losses, F1 and ER per epoch) as one figure.

    Writes `<model_dir>/<fig_name><extension>`; relies on the module-level
    `model_dir` and `fig_name` globals.

    :param _nb_epoch: number of epochs (x-axis length).
    :param _tr_loss: per-epoch training loss values.
    :param _val_loss: per-epoch validation loss values.
    :param _f1: per-epoch F1 scores.
    :param _er: per-epoch error rates.
    :param extension: optional suffix appended to the figure file name.
    """
    epochs = range(_nb_epoch)       # hoisted: shared x-axis for all curves
    plot.figure()

    # Upper panel: training vs. validation loss.
    plot.subplot(211)
    plot.plot(epochs, _tr_loss, label='train loss')
    plot.plot(epochs, _val_loss, label='val loss')
    plot.legend()
    plot.grid(True)

    # Lower panel: F1 score and error rate.
    plot.subplot(212)
    plot.plot(epochs, _f1, label='f')
    plot.plot(epochs, _er, label='er')
    plot.legend()
    plot.grid(True)

    # os.path.join instead of manual '/' concatenation.
    plot.savefig(os.path.join(model_dir, fig_name + extension))
    plot.close()


def load_data(feat_dir, _mono):
    """Load pre-extracted fbank features and labels for train/dev/test.

    :param feat_dir: directory containing fbank.npz and label.npz, each
        holding three arrays (train, dev, test) as arr_0..arr_2.
    :param _mono: unused; kept for backward compatibility with callers.
    :return: X_train, Y_train, X_dev, Y_dev, X_test, Y_test
    """
    # os.path.join instead of manual '/' concatenation.
    fbanks = np.load(os.path.join(feat_dir, 'fbank.npz'), allow_pickle=True)
    labels = np.load(os.path.join(feat_dir, 'label.npz'), allow_pickle=True)
    X_train, X_dev, X_test = fbanks['arr_0'], fbanks['arr_1'], fbanks['arr_2']
    Y_train, Y_dev, Y_test = labels['arr_0'], labels['arr_1'], labels['arr_2']
    return X_train, Y_train, X_dev, Y_dev, X_test, Y_test


def preprocess_data(_X, _Y, _X_test, _Y_test, _chunk_size, _nb_ch):
    """Chunk features/labels into fixed-length sequences and split the
    feature arrays into per-channel inputs for the CNN front-end."""
    # Cut every array into sequences of _chunk_size frames.
    _X, _Y, _X_test, _Y_test = (
        common.split_in_seqs(arr, _chunk_size)
        for arr in (_X, _Y, _X_test, _Y_test)
    )
    # Only the feature arrays are split per channel; labels stay as-is.
    _X = common.split_multi_channels(_X, _nb_ch)
    _X_test = common.split_multi_channels(_X_test, _nb_ch)
    return _X, _Y, _X_test, _Y_test


#######################################################################################
# MAIN SCRIPT STARTS HERE
#######################################################################################
# Data parameters
speed_list = None                   # speed-perturbation factors, e.g. [0.9, 1.1]; None disables augmentation
is_vad = True                       # forwarded to prepare_data; presumably enables VAD during feature extraction — confirm in utils.prepare_data

# Feature parameters
sample_rate = 44100
num_frame_len_fft = 2048            # FFT frame length in samples
num_frame_shift_fft = 1024          # FFT frame shift (hop) in samples
power = 2                           # use the power spectrum
num_mel_bands = 128                  # number of mel filterbank bands
frames_1_sec = int(sample_rate/num_frame_shift_fft)   # Number of frames in 1 second, required to calculate F and ER for 1 sec segments.

# Train parameters
num_gpu = 1
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
is_mono = True                      # True: mono-channel input, False: binaural input
nb_ch = 1 if is_mono else 2
batch_size = 64                     # Decrease this if you want to run on smaller GPU's, default: 128
chunk_size = 256                    # Frame sequence length. Input to the CRNN.
nb_epoch = 100                      # Training epochs
reduce_lr_patience = 2
early_stop_patience = 10
posterior_thresh = 0.5              # sigmoid posterior threshold for per-frame classification
utt_percent_thresh = 0.5            # decoding: tag an utterance with a class when its average per-frame activity exceeds this threshold
chunk_in_sec = int(chunk_size*num_frame_shift_fft/sample_rate)

# CRNN model parameters
cnn_nb_filt = 128                   # CNN filter size
cnn_pool_size = [5, 2, 2]           # Maxpooling across frequency. Length of cnn_pool_size =  number of CNN layers
rnn_nb = [32, 32]                   # Number of RNN nodes.  Length of rnn_nb =  number of RNN layers
fc_nb = [32]                        # Number of FC nodes.  Length of fc_nb =  number of FC layers
dropout_rate = 0.3                  # Dropout after each layer

# Control parameters
is_data_prepare = False              # Whether make data
is_train = True
is_test = True

# Dirs
data_set = 'BBC'       # SED_SMALL | BBC
audio_dir = '/home/xiaorong/Data/' + data_set
project_dir = '/home/xiaorong/workstation/sed-crnn-master'
exp_name = 'bs_' + str(batch_size) + '_cs_' + str(chunk_size) + '_dr_' + str(dropout_rate) + '_cf_' + str(cnn_nb_filt) + '_script_' + '1.1'
dir_affix = project_dir + '/exp/' + data_set
feat_dir = os.path.join(dir_affix, 'feat')
model_dir = os.path.join(dir_affix, exp_name)   # Folder for saving model and training curves
result_dir = os.path.join(dir_affix, exp_name)
result_file = os.path.join(result_dir, 'test.result')
check_point = os.path.join(model_dir, 'best_model.h5')

# Vars
avg_er = list()
avg_f1 = list()
common.create_folder(model_dir)
common.create_folder(result_dir)
fig_name = '{}_{}'.format('mon' if is_mono else 'bin', time.strftime("%Y_%m_%d_%H_%M_%S"))

if is_data_prepare:
    logger.info("Start making feature & label", extra={"verbosity": CURRENT_VERBOSITY})
    prepare_data(audio_dir, feat_dir, num_frame_len_fft, num_frame_shift_fft,
                 num_mel_bands, sample_rate, power, is_vad, speed_list)
    logger.info('Successfully dumping feature & label in %s' % (feat_dir), extra={"verbosity": CURRENT_VERBOSITY})

# Load feature and labels, pre-process it.
# NOTE(review): X_test/Y_test are deliberately left unchunked here — the
# decoding section below processes them per utterance via postprocess().
X, Y, X_dev, Y_dev, X_test, Y_test = load_data(feat_dir, is_mono)
X, Y, X_dev, Y_dev = preprocess_data(X, Y, X_dev, Y_dev, chunk_size, nb_ch)
if is_train:
    logger.info("Start Training", extra={"verbosity": CURRENT_VERBOSITY})
    # Build model
    model = build_model(X, Y, cnn_nb_filt, cnn_pool_size, rnn_nb, fc_nb, num_gpu)

    # Training bookkeeping: best-score trackers and per-epoch curve buffers.
    best_epoch, pat_cnt, best_er, f1_for_best_er, best_conf_mat = 0, 0, 99999, None, None
    tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list = [0] * nb_epoch, [0] * nb_epoch, [0] * nb_epoch, [0] * nb_epoch
    # tensorboard = TensorBoard(log_dir=model_dir + '/logs', write_graph=True)
    # earlystop = EarlyStopping(monitor='val_loss', patience=early_stop_patience, verbose=1)
    if num_gpu == 1:
        checkpoint = ModelCheckpoint(check_point, monitor='val_loss', save_best_only=True, save_weights_only=False, mode='auto', period=1, verbose=0)
    else:
        # BUG FIX: the original called ModelCheckpoint(model, check_point),
        # which passed the model object as the filepath argument. Use the
        # ParallelModelCheckpoint defined above, which takes (model, filepath)
        # and saves the single-GPU template model with the same default
        # monitor/save options.
        checkpoint = ParallelModelCheckpoint(model, check_point)
    lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=reduce_lr_patience, mode='auto', cooldown=0, min_lr=1e-5, verbose=1)
    lr_printer = LearningRateTracker()
    callbacks_list = [checkpoint, lr_schedule, lr_printer]

    logger.info('Train Parameters: frames_1_sec: {}, chunk_len: {}'.format(
        frames_1_sec, chunk_in_sec), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('Feature Parameters: chunk_size: {}, batch_size: {}, nb_epoch: {}'.format(
        chunk_size, batch_size, nb_epoch), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('Model Parameters: cnn_nb_filt: {}, cnn_pool_size: {}, rnn_nb: {}, fc_nb: {}, dropout_rate: {}\n'.format(
        cnn_nb_filt, cnn_pool_size, rnn_nb, fc_nb, dropout_rate), extra={"verbosity": CURRENT_VERBOSITY})

    # TODO
    # refactor the whole training procedure:
    # compute the metrics below via callbacks instead of this manual loop
    for i in range(nb_epoch):
        logger.info('Test Epoch : {}|{}'.format(i, nb_epoch), extra={"verbosity": CURRENT_VERBOSITY})
        # Reload the best checkpoint before every epoch so training always
        # resumes from the best model saved so far (see the save below).
        if os.path.isfile(check_point):
            model = load_model(check_point)
        hist = model.fit(
            X, Y,
            batch_size=batch_size,
            validation_data=[X_dev, Y_dev],
            epochs=1,
            verbose=2,
            callbacks=callbacks_list
        )
        # hist.history is the standard Keras dict (val_loss, val_acc, loss, acc).
        val_loss[i] = hist.history.get('val_loss')[-1]
        tr_loss[i] = hist.history.get('loss')[-1]

        # Calculate the predictions on test data, in order to calculate ER and F scores
        pred = model.predict(X_dev)
        pred_thresh = pred > posterior_thresh	                                                    # binarize: posteriors above the threshold become 1, the rest 0
        score_list = metrics.compute_scores(pred_thresh, Y_dev, frames_in_1_sec=frames_1_sec)       # scores are computed frame-wise

        f1_overall_1sec_list[i] = score_list['f1_overall_1sec']
        er_overall_1sec_list[i] = score_list['er_overall_1sec']

        # Calculate confusion matrix over per-frame active-class counts,
        # row-normalized to proportions.
        test_pred_cnt = np.sum(pred_thresh, 2)
        Y_dev_cnt = np.sum(Y_dev, 2)
        conf_mat = confusion_matrix(Y_dev_cnt.reshape(-1), test_pred_cnt.reshape(-1))
        conf_mat = conf_mat / (common.eps + np.sum(conf_mat, 1)[:, None].astype('float'))
        pat_cnt += 1

        # Track the best epoch by overall error rate and snapshot the model.
        # NOTE(review): the ModelCheckpoint callback also writes check_point
        # (monitoring val_loss), so the two saving criteria can overwrite each
        # other between epochs — confirm this interplay is intended.
        if er_overall_1sec_list[i] < best_er:
            best_conf_mat = conf_mat
            best_er = er_overall_1sec_list[i]
            f1_for_best_er = f1_overall_1sec_list[i]
            model.save(check_point)
            best_epoch = i
            pat_cnt = 0

        logger.info('tr Loss: {:.4}, val Loss: {:.4}, F1_overall: {:.4}, ER_overall: {:.4}, Best ER: {:.4}, best_epoch: {}'.format(
                tr_loss[i], val_loss[i], f1_overall_1sec_list[i], er_overall_1sec_list[i], best_er, best_epoch), extra={"verbosity": CURRENT_VERBOSITY})
        plot_functions(nb_epoch, tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list)
        # Manual early stopping driven by the ER-based patience counter.
        if pat_cnt > early_stop_patience:
            break
    avg_er.append(best_er)
    avg_f1.append(f1_for_best_er)
    logger.info('saved model for the best_epoch: {} with best_f1: {:.4} f1_for_best_er: {:.4}'.format(
        best_epoch, best_er, f1_for_best_er), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('best_conf_mat: {}'.format(best_conf_mat), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('best_conf_mat_diag: {}'.format(np.diag(best_conf_mat)), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('\n\nMETRICS : avg_er: {}, avg_f1: {}'.format(avg_er, avg_f1), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('MODEL AVERAGE : avg_er: {}, avg_f1: {}'.format(np.mean(avg_er), np.mean(avg_f1)), extra={"verbosity": CURRENT_VERBOSITY})

if is_test:
    logger.info('Start decoding', extra={"verbosity": CURRENT_VERBOSITY})
    model = load_model(check_point)

    # Build the id -> label-name map from label_info (one "label id" per line).
    RevLABELS = {}
    with open(feat_dir + '/label_info', 'r') as f:
        while True:
            line = f.readline()
            if line == '':
                break
            label, id = line.strip().split()
            RevLABELS[int(id)] = label
    # Extra catch-all class for utterances with no detected activity.
    no_activity_label = len(RevLABELS.keys())
    RevLABELS[no_activity_label] = 'Other'
    print(RevLABELS)

    # TODO
    # output per-segment scores together with each segment's exact times
    # TODO
    # enforce a minimum event duration
    with open(result_file, 'w') as wf, \
            open(feat_dir + '/test.list', 'r') as rf:
        y_true_list = []
        y_pred_list = []
        label_name_list = []
        # NOTE(review): comprehension used for its side effect only;
        # equivalent to label_name_list = list(RevLABELS.values()).
        [label_name_list.append(v) for k, v in RevLABELS.items()]
        for i in range(len(X_test)):
            wav_path = rf.readline().strip()
            true_label, pred_label, pred = postprocess(wav_path, X_test[i], Y_test[i], model,
                                                       posterior_thresh, utt_percent_thresh, no_activity_label)
            y_true_list.append(true_label)
            y_pred_list.append(pred_label)
            label_text = ''
            # NOTE(review): this inner loop shadows the outer `i`. Harmless in
            # Python (the outer for rebinds i from its iterator each pass) but
            # confusing — consider renaming.
            for i in range(len(pred)):
                if pred[i] == 1:
                    label_text += RevLABELS[i] + ' '
            label_text += '\n'
            wf.write(wav_path + ' ' + label_text)
        # Utterance-level confusion matrix and per-class report.
        matrix = confusion_matrix(y_true_list, y_pred_list)
        report = classification_report(y_true_list, y_pred_list, target_names=label_name_list, labels=list(RevLABELS.keys()))
        print(matrix)
        print(report)

    acc = get_test_acc(result_file)
    logger.info('Test utt-level acc: %f' % (acc), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('End decoding', extra={"verbosity": CURRENT_VERBOSITY})