# -*-coding:utf-8-*-
'''
Date: 2019.07.30
Notes:
    - Uses the original network
    - Uses the 'complicate' data generator
    - Speed perturbation
    - Temporal downsampling added
        - Test utt-level acc: 0.877064
    - New data generator with delta features, but without image augmentation
        - Test utt-level acc: 0.9109
'''

from __future__ import print_function
import os
import sys
import time
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import keras.backend as K
from keras.models import load_model
from keras.callbacks import ModelCheckpoint

from utils.prepare_data import prepare_data
from utils.common import logger, create_folder
from utils.metrics import postprocess, get_test_acc
from models.loss import binary_focal_loss, categorical_focal_loss
from utils.data_generator import RatioDataGenerator, SimpleDataGenerator, ComplicateDataGenerator
from utils.feature import load_train_data, load_test_data
from models.model_base import ParallelModelCheckpoint, build_model, TrainCallback, fix_random_seed
# Raise the recursion limit — presumably needed for deep model (de)serialization; TODO confirm.
sys.setrecursionlimit(10000)
# Print numpy arrays in full (no "..." truncation) in logs and debug output.
np.set_printoptions(threshold=sys.maxsize)
CUDA_VISIBLE_DEVICES = "2"  # GPU id, forwarded to fix_random_seed() below
CURRENT_VERBOSITY = 0       # verbosity tag attached to every logger call via `extra`


#######################################################################################
# MAIN SCRIPT STARTS HERE
#######################################################################################
# Data parameters
speed_list = [0.85, 1.15]                   # speed-perturbation factors, e.g. [0.85, 1.15]
is_vad = False                              # whether to use VAD-trimmed audio
is_delta = True                             # append delta features to the input
mixup_alpha = 0.3                   # mixup interpolation factor (alpha)
generator_type = 'complicate'            # 'ratio' | 'simple' | 'complicate'

# Feature parameters
sample_rate = 44100
num_frame_len_fft = 2048            # FFT points per analysis frame (frame length)
num_frame_shift_fft = 1024          # FFT points per frame shift (hop length)
power = 2                           # use the power spectrum
num_mel_bands = 128                 # number of mel bands
frames_1_sec = int(sample_rate/num_frame_shift_fft)   # Number of frames in 1 second, required to calculate F and ER for 1 sec segments.

# Train parameters
num_gpu = 1
loss_type = 'normal'         # 'normal' | 'sigmoid_focal' | 'softmax_focal'
label_level = 'frame'               # 'frame' | 'chunk'
image_data_format = 'channels_first'  # 'channels_first' | 'channels_last'
time_factor = 16                     # temporal downsampling factor
is_mono = True                      # True: mono-channel input, False: binaural input
nb_ch = 1 if is_mono else 2
batch_size = 64                     # Decrease this if you want to run on smaller GPU's, default: 128
chunk_size = 256                    # Frame sequence length. Input to the CRNN.
num_epochs = 300                      # Training epochs
reduce_lr_patience = 2
early_stop_patience = 100           # large on purpose: effectively disables early stopping
posterior_thresh = 0.5              # sigmoid classification probability threshold
utt_percent_thresh = 0.4            # decoding: tag an utterance with a class when its average frame-level rate for that class exceeds this threshold
chunk_in_sec = int(chunk_size*num_frame_shift_fft/sample_rate)

# CRNN model parameters
network_type = 'crnn_v2'               # 'crnn' | 'inception_crnn' | 'inception'
cnn_nb_filt = 128                   # CNN filter size
cnn_pool_size = [5, 2, 2]           # Maxpooling across frequency. Length of cnn_pool_size =  number of CNN layers
rnn_nb = [32, 32]                   # Number of RNN nodes.  Length of rnn_nb =  number of RNN layers
fc_nb = [32]                        # Number of FC nodes.  Length of fc_nb =  number of FC layers
dropout_rate = 0.3                  # Dropout after each layer

# Control parameters
is_data_prepare = False             # Whether make data
is_train = True
is_test = True

# Dirs
data_set = 'Urban_Sound'       # SED_SMALL | BBC | Urban_Sound
audio_dir = '/home/xiaorong/Data/' + data_set
data_set_suffix = '_Test2_sp'                                 # dataset suffix
vad_audio_dir = audio_dir + '_VAD' + data_set_suffix
project_dir = '/home/xiaorong/workstation/sed-crnn-master'
exp_name = 'bs_' + str(batch_size) + '_cs_' + str(chunk_size) + '_dr_' + str(dropout_rate) + '_cf_' + str(cnn_nb_filt) + '_script_' + '5.6'
fig_name = '{}_{}'.format('mon' if is_mono else 'bin', time.strftime("%Y_%m_%d_%H_%M_%S"))
dir_affix = project_dir + '/exp/' + data_set
feat_dir = os.path.join(dir_affix, 'feat' + data_set_suffix)
model_dir = os.path.join(dir_affix, exp_name)   # Folder for saving model and training curves
result_dir = os.path.join(dir_affix, exp_name)
result_file = os.path.join(result_dir, 'test.result')
check_point = os.path.join(model_dir, 'best_model.h5')
figure_dir = os.path.join(model_dir, fig_name)

# Vars
avg_er = list()   # NOTE(review): appears unused in this script — TODO confirm before removing
avg_f1 = list()   # NOTE(review): appears unused in this script — TODO confirm before removing
create_folder(model_dir)
create_folder(result_dir)

# Environment setup: fix the random seeds and select the GPU.
fix_random_seed(gpu_id=CUDA_VISIBLE_DEVICES)

# Tell Keras which axis of the input tensors carries the channel dimension.
K.set_image_data_format(
    'channels_first' if image_data_format == 'channels_first' else 'channels_last')

# Optional one-off feature/label extraction pass over the raw audio.
if is_data_prepare:
    logger.info("Start making feature & label", extra={"verbosity": CURRENT_VERBOSITY})
    prepare_data(
        audio_dir=audio_dir,
        feat_dir=feat_dir,
        vad_audio_dir=vad_audio_dir,
        num_frame_len_fft=num_frame_len_fft,
        num_frame_shift_fft=num_frame_shift_fft,
        num_mel_bands=num_mel_bands,
        sample_rate=sample_rate,
        power=power,
        is_vad=is_vad,
        speed_list=speed_list,
    )
    logger.info('Successfully dumping feature & label in %s' % (feat_dir), extra={"verbosity": CURRENT_VERBOSITY})

if is_train:
    logger.info("Start Training", extra={"verbosity": CURRENT_VERBOSITY})
    # Load feature and labels, pre-process it
    X, Y, X_dev, Y_dev = load_train_data(feat_dir, chunk_size, nb_ch, is_delta)
    if label_level == 'chunk':
        Y = np.squeeze(Y[:, 0, :])              # use the first frame of each chunk as the chunk-level label
        Y_dev = np.squeeze(Y_dev[:, 0, :])
    elif time_factor != 1:
        target_len = int(chunk_size/time_factor)
        # Frame level: keep only the first chunk_size/time_factor label frames,
        # presumably to match the model's temporally-downsampled output — TODO confirm alignment.
        Y = np.squeeze(Y[:, 0:target_len, :])
        Y_dev = np.squeeze(Y_dev[:, 0:target_len, :])

    # Build model
    model, single_gpu_model = build_model(X, Y, is_delta, loss_type, network_type,
                                          cnn_nb_filt, cnn_pool_size, rnn_nb, fc_nb, dropout_rate, label_level, num_gpu)

    # Training
    # NOTE(review): the trackers below are assigned but never read later in this script — TODO confirm before removing.
    best_epoch, pat_cnt, best_er, f1_for_best_er, best_conf_mat = 0, 0, 99999, None, None
    tr_loss, val_loss, f1_overall_1sec_list, er_overall_1sec_list = [0] * num_epochs, [0] * num_epochs, [0] * num_epochs, [0] * num_epochs
    # tensorboard = TensorBoard(log_dir=model_dir + '/logs', write_graph=True)
    if num_gpu == 1:
        checkpoint = ModelCheckpoint(check_point, monitor='val_loss', save_best_only=True, save_weights_only=False, mode='auto', period=1, verbose=0)
    else:
        # Multi-GPU training: checkpoint only the underlying single-GPU model.
        checkpoint = ParallelModelCheckpoint(single_gpu_model, check_point)
    # Monitor choices: val_er_overall_1sec | val_f1_overall_1sec | val_loss
    train_callback = TrainCallback(X_dev, Y_dev, num_epochs, figure_dir=figure_dir, frames_1_sec=frames_1_sec, monitor='val_er_overall_1sec',
                                   reduce_lr_patience=reduce_lr_patience, early_stop_patience=early_stop_patience)
    callbacks_list = [checkpoint, train_callback]

    logger.info('Train Parameters: frames_1_sec: {}, chunk_in_sec: {}'.format(
        frames_1_sec, chunk_in_sec), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('Feature Parameters: chunk_size: {}, batch_size: {}, num_epochs: {}'.format(
        chunk_size, batch_size, num_epochs), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('Model Parameters: cnn_nb_filt: {}, cnn_pool_size: {}, rnn_nb: {}, fc_nb: {}, dropout_rate: {}\n'.format(
        cnn_nb_filt, cnn_pool_size, rnn_nb, fc_nb, dropout_rate), extra={"verbosity": CURRENT_VERBOSITY})

    # Pick the batch generator selected by `generator_type`; each generator object is
    # called to obtain the actual Python generator consumed by fit_generator.
    steps_per_epoch = X.shape[0]//batch_size
    if generator_type == 'ratio':
        generator = RatioDataGenerator(X, Y, batch_size=batch_size, alpha=mixup_alpha, image_data_format=image_data_format)
        generator_fun = generator()
    elif generator_type == 'complicate':
        generator = ComplicateDataGenerator(X, Y, batch_size=batch_size, alpha=mixup_alpha, image_data_format=image_data_format, imgaug_prob=0.0)
        generator_fun = generator()
    else:
        generator = SimpleDataGenerator(X, Y, batch_size=batch_size, image_data_format=image_data_format)
        generator_fun = generator()

    if steps_per_epoch == 0:
        logger.warning('steps_per_epoch is 0, maybe the batch_size is too large.', extra={"verbosity": CURRENT_VERBOSITY})
        steps_per_epoch = 1

    model.fit_generator(
        generator=generator_fun,
        validation_data=[X_dev, Y_dev],
        steps_per_epoch=steps_per_epoch,
        epochs=num_epochs,
        verbose=2,
        callbacks=callbacks_list
    )

if is_test:
    logger.info('Start decoding', extra={"verbosity": CURRENT_VERBOSITY})
    # Load test features and labels.
    X_test, Y_test = load_test_data(feat_dir)

    # Restore the best checkpoint. The focal losses are custom objects and must be
    # registered for deserialization; 'normal' (and any unknown loss_type) loads plainly.
    if loss_type == 'sigmoid_focal':
        model = load_model(check_point, custom_objects={'binary_focal_loss': binary_focal_loss})
    elif loss_type == 'softmax_focal':
        model = load_model(check_point, custom_objects={'categorical_focal_loss': categorical_focal_loss})
    else:
        model = load_model(check_point)

    # Build class-id -> class-name map from label_info ("<label> <id>" per line).
    # Blank lines are skipped instead of crashing on the unpack.
    RevLABELS = {}
    with open(feat_dir + '/label_info', 'r') as f:
        for line in f:
            if not line.strip():
                continue
            label, label_id = line.strip().split()   # avoid shadowing builtin `id`
            RevLABELS[int(label_id)] = label
    # Reserve one extra id for utterances the decoder cannot classify.
    unknow_id = len(RevLABELS)
    RevLABELS[unknow_id] = 'Unknown'

    # TODO: report per-segment scores and the exact time span of each segment.
    # TODO: enforce a minimum event duration.
    with open(result_file, 'w') as wf, \
            open(feat_dir + '/test.list', 'r') as rf:
        y_true_list = []
        y_pred_list = []
        label_name_list = list(RevLABELS.values())
        for utt_idx in range(len(X_test)):
            # test.list lines are aligned with X_test order, so read one per iteration
            # (including skipped utterances) to stay in sync.
            wav_path = rf.readline().strip()
            wav_frames = Y_test[utt_idx].shape[0]
            # Skip utterances too short for the temporal downsampling factor.
            if wav_frames <= 16 or wav_frames < time_factor:
                logger.warning('discard too short wav: %s' % (wav_path), extra={"verbosity": CURRENT_VERBOSITY})
                continue
            true_label, pred_label, pred = postprocess(wav_path, X_test[utt_idx], Y_test[utt_idx], model,
                                                       posterior_thresh, utt_percent_thresh, unknow_id, label_level, is_delta)
            y_true_list.append(true_label)
            y_pred_list.append(pred_label)
            # One output line per utterance: "<wav_path> <label1> <label2> ... \n".
            label_text = ''.join(RevLABELS[class_id] + ' '
                                 for class_id, hit in enumerate(pred) if hit == 1)
            wf.write(wav_path + ' ' + label_text + '\n')
        matrix = confusion_matrix(y_true_list, y_pred_list)
        report = classification_report(y_true_list, y_pred_list, target_names=label_name_list, labels=list(RevLABELS.keys()))
        print(matrix)
        print(report)

    # Utterance-level accuracy computed from the written result file.
    acc = get_test_acc(result_file)
    logger.info('Test utt-level acc: %f' % (acc), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('End decoding', extra={"verbosity": CURRENT_VERBOSITY})
