# encoding=utf-8
import sys

import torch
import torchvision
from torch import nn, optim, Tensor
import numpy as np
import logging
import math
import os

from sklearn.model_selection import train_test_split
from torch.utils.data.dataloader import DataLoader

from models import *
from utils import *
from losses import *
from data_load import load_data, ListData

# Config path is the single required CLI argument.
cfg_file = sys.argv[1]
# cfg_file = "config/mobile_att.json"

cfg = read_cfg(cfg_file)

# Initial learning rate; `train()` declares it global and may rebind it.
lr = cfg.train.learning_rate
if cfg.train.cudnn_enabled == False:
    torch.backends.cudnn.enabled = False
# NOTE(review): cudnn_enabled doubles as the "use GPU at all" flag throughout
# this file (see the .cuda() calls below) — confirm that is intended.
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.train.gpu_divices
if not os.path.exists("./{}".format(cfg.train.save_name)):
    os.makedirs("./{}".format(cfg.train.save_name))

# Console gets DEBUG via basicConfig; the per-run file handler only keeps INFO+.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s[%(levelname)s]: %(message)s')
logger = logging.getLogger('Training a phoneme classification model')
handler = logging.FileHandler("./{}/train.log".format(cfg.train.save_name))
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)


def print_params(cfg):
    """Log the run's hyper-parameters between banner lines.

    Emits exactly one INFO line per setting, in the file's established
    ``key:value`` format.
    """
    settings = [
        ("learning_rate", cfg.train.learning_rate),
        ("epoch", cfg.train.epoch),
        ("batch_size", cfg.train.batch_size),
        ("lr_decay", cfg.train.lr_decay),
        ("eval_steps", cfg.train.eval_steps),
        ("print_steps", cfg.train.print_steps),
        ("cudnn_enabled", torch.backends.cudnn.enabled),
        ("save_name", cfg.train.save_name),
        ("save_file", cfg.train.save_file),
        ("gpu_divices", cfg.train.gpu_divices),
        ("trainscp_path", cfg.data.train.scp_path),
    ]
    logger.info("--------------------------paragrams--------------------------------")
    for key, value in settings:
        logger.info("{}:{}".format(key, value))
    logger.info("-------------------------------------------------------------------")


def show_model(nnet):
    """Log every named submodule of *nnet*, framed by banner lines."""
    logger.info("==============================Model Structures==============================")
    submodules = (mod for name, mod in nnet.named_modules() if name != '')
    for mod in submodules:
        logger.info(mod)
    logger.info("============================================================================")


def calc_pearson_(outputs, labels):
    """Pearson correlation between the argmax classes of outputs and labels.

    Both tensors are collapsed along their last axis via argmax, cast to
    float, and given a leading singleton dimension before being handed to
    ``pearsonr`` (project helper — presumably returns a 2-D result, since
    the [0][0] indexing below expects one; verify against utils).
    """
    predicted = torch.unsqueeze(outputs.argmax(axis=-1).float(), dim=0)
    reference = torch.unsqueeze(labels.argmax(axis=-1).float(), dim=0)
    return pearsonr(predicted, reference)[0][0]

def calc_pearson(data_loader, net, calc_loss):
    """Run *net* over *data_loader* and return (pearson, mean batch loss).

    The net is switched to eval mode for the pass (no grad) and restored
    to train mode afterwards. When cfg.train.cudnn_enabled is set, every
    batch is moved to the GPU first.
    """
    net.eval()
    total_loss = 0.0
    all_outputs, all_labels = [], []
    with torch.no_grad():
        for inputs1, inputs2, labels in data_loader:
            if cfg.train.cudnn_enabled:
                inputs1 = inputs1.cuda()
                inputs2 = inputs2.cuda()
                labels = labels.cuda()
            # The model takes a (features, token-ids) tuple; ids must be long.
            outputs = net((inputs1, inputs2.long()))
            total_loss += calc_loss(outputs, labels)
            all_outputs.append(outputs)
            all_labels.append(labels)

    net.train()
    stacked_outputs = torch.vstack(all_outputs)
    stacked_labels = torch.vstack(all_labels)
    pearson = calc_pearson_(stacked_outputs, stacked_labels)
    mean_loss = total_loss / len(data_loader)
    return pearson, mean_loss

def test():
    """Load the saved checkpoint (if any) and report loss/pearson on the test set."""
    net = MODEL_REGISTER[cfg.model.name](cfg.model.num_classes)
    calc_loss = LOSE_REGISTER[cfg.loss]()
    save_model = "{}/{}".format(cfg.train.save_name, cfg.train.save_file)
    if os.path.exists(save_model):
        net_dict = net.state_dict()

        pretrained_dict = torch.load(save_model)

        # Checkpoints are written from a DataParallel wrapper (see train()),
        # so keys carry a "module." prefix; strip the first dotted component
        # and keep only keys the bare model actually has.
        pretrained_dict = {k.split(".", 1)[1]: v for k, v in pretrained_dict.items() if k.split(".", 1)[1] in net_dict}
        net_dict.update(pretrained_dict)
        net.load_state_dict(net_dict)
        logger.info("CNN weights loaded")
    # BUG FIX: calc_pearson moves each batch to CUDA when
    # cfg.train.cudnn_enabled is set, but the net previously stayed on the
    # CPU, so the forward pass crashed with a device mismatch.
    if cfg.train.cudnn_enabled:
        net = net.cuda()
    test_wav_scp = load_data(cfg.test.scp_path, cfg.test.ref_text, cfg.test.label_file)
    testset = ListData(test_wav_scp)
    # Evaluation order does not affect the metrics, so no shuffling needed.
    test_loader = DataLoader(testset, batch_size=cfg.train.batch_size, shuffle=False, num_workers=0)
    test_acc, test_loss = calc_pearson(test_loader, net, calc_loss)
    logger.info("test loss:{:.5f} | test acc:{:.2f}%".format(test_loss, test_acc * 100))



def initialize_weights(net):
    """Initialise every module of *net* in place with standard schemes.

    Conv2d: Kaiming-normal (fan_out) weights, zero bias when present.
    BatchNorm2d / GroupNorm: unit weight, zero bias.
    Linear: N(0, 0.01) weights, zero bias.
    """
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out')
            if module.bias is not None:
                nn.init.zeros_(module.bias)
            continue
        if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.01)
            nn.init.zeros_(module.bias)

def compute_exp_lr_base(start_lr, final_lr, train_size, batch_size, epoch_times):
    """Per-step multiplicative factor carrying start_lr down to final_lr.

    The exponential decay is spread over int(train_size / batch_size) *
    epoch_times optimizer steps. When the implied first-step absolute drop
    (start_lr * (1 - factor)) would be at most 1e-8, a conservative near-one
    fallback factor is returned instead of the exact one.
    """
    min_decay_factor = 1e-8
    total_steps = int(train_size / batch_size) * epoch_times
    exponent = math.log(float(final_lr / start_lr), 10) / total_steps
    factor = math.pow(10, exponent)
    if start_lr * (1 - factor) > min_decay_factor:
        return factor
    return (1 - min_decay_factor / start_lr) * (1 - 2 / total_steps)


def train():
    """Full training loop: data prep, model init/resume, optimisation, checkpointing.

    Reads every hyper-parameter from the module-level ``cfg`` and rebinds the
    module-level ``lr``. Saves the DataParallel-wrapped state dict after each
    epoch (so keys carry a "module." prefix — the resume code strips it).
    """
    global lr
    assert os.path.exists(cfg.data.train.scp_path)

    print_params(cfg)

    # Fixed-seed 90/10 train/dev split so runs are comparable.
    train_wav_scp = load_data(cfg.data.train.scp_path, cfg.data.train.ref_text, cfg.data.train.label_file, debug=cfg.debug)
    train, dev = train_test_split(train_wav_scp, test_size=0.1, random_state=7)
    logger.info("Train dataset: {}, dev dataset: {}".format(len(train), len(dev)))

    trainset = ListData(train, label_type=cfg.label_type)
    devset = ListData(dev, label_type=cfg.label_type)
    train_loader = DataLoader(trainset, batch_size=cfg.train.batch_size, shuffle=True, num_workers=cfg.train.num_workers)
    dev_loader = DataLoader(devset, batch_size=cfg.train.batch_size, shuffle=True, num_workers=cfg.train.num_workers)

    net = MODEL_REGISTER[cfg.model.name](cfg.model.num_classes)

    # Resume from a checkpoint when one exists, otherwise initialise fresh.
    save_model = "{}/{}".format(cfg.train.save_name, cfg.train.save_file)
    if os.path.exists(save_model):
        net_dict = net.state_dict()

        pretrained_dict = torch.load(save_model)

        # Checkpoints come from a DataParallel wrapper; strip the leading
        # "module." component and keep only keys the bare model has.
        pretrained_dict = {k.split(".", 1)[1]: v for k, v in pretrained_dict.items() if k.split(".", 1)[1] in net_dict}
        net_dict.update(pretrained_dict)
        net.load_state_dict(net_dict)
    else:
        initialize_weights(net)

    logger.info("CNN weights initialized")

    show_model(net)

    net = nn.DataParallel(net)
    if cfg.train.cudnn_enabled:
        net = net.cuda()
    net.train()

    # loss and optimizer
    calc_loss = LOSE_REGISTER[cfg.loss]()
    optimizer = optim.Adam(net.parameters(), lr=lr)

    loss_policy = False
    lr_decay_factor = compute_exp_lr_base(lr, cfg.train.final_lr, len(train), cfg.train.batch_size, cfg.train.epoch)
    # NOTE(review): the scheduler is always stepped with the train pearson
    # below (higher is better), yet this default branch builds a 'min'-mode
    # scheduler — confirm whether 'max' was intended when loss_policy=False.
    if loss_policy:
        plateau_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=lr_decay_factor,
                                                                       verbose=True, cooldown=1,
                                                                       threshold_mode='rel', threshold=0.01,
                                                                       min_lr=cfg.train.final_lr, patience=5)
    else:
        plateau_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=lr_decay_factor,
                                                                       verbose=True, cooldown=1,
                                                                       threshold_mode='rel', threshold=0.01,
                                                                       min_lr=cfg.train.final_lr, patience=5)

    # training
    for epoch in range(1, cfg.train.epoch + 1):
        loss_sum = 0.0
        logger.info("-----------------EPOCH {}------------------".format(epoch))
        for step, batch_data in enumerate(train_loader, 1):
            inputs1, inputs2, labels = batch_data
            if cfg.train.cudnn_enabled:
                inputs1, inputs2, labels = inputs1.cuda(), inputs2.cuda(), labels.cuda()
            optimizer.zero_grad()

            # forward >> backward >> optimizer
            outputs = net((inputs1, inputs2.long()))
            loss = calc_loss(outputs, labels)
            # BUG FIX: accumulate the Python float, not the loss tensor —
            # summing tensors keeps autograd history referenced across steps
            # and steadily grows memory over the epoch.
            loss_sum += loss.item()
            loss.backward()
            optimizer.step()

            if step % cfg.train.print_steps == 0:
                train_pearson = calc_pearson_(outputs, labels)
                plateau_scheduler.step(train_pearson)
                new_lr = optimizer.param_groups[0]['lr']
                avg_train_loss = loss_sum / cfg.train.print_steps
                logger.info("STEP {} | loss:{:.5f} | train pearsonr:{:.2f}%,lr:{:.10f}".format(step, avg_train_loss, train_pearson * 100,new_lr))
                loss_sum = 0.0

        # End-of-epoch evaluation over the full train and dev sets.
        epoch_train_acc, epoch_train_loss = calc_pearson(train_loader, net, calc_loss)
        epoch_dev_acc, epoch_dev_loss = calc_pearson(dev_loader, net, calc_loss)
        logger.info(
            "EPOCH {} | train loss:{:.5f} | train pearsonr:{:.2f}%".format(epoch, epoch_train_loss, epoch_train_acc * 100))
        logger.info("EPOCH {} | dev loss:{:.5f} | dev pearsonr:{:.2f}%".format(epoch, epoch_dev_loss, epoch_dev_acc * 100))

        # Checkpoint in eval mode, then flip back for the next epoch.
        net.eval()
        torch.save(net.state_dict(), "./{}/{}".format(cfg.train.save_name, cfg.train.save_file))
        logger.info("Model saved in {}/{}".format(cfg.train.save_name, cfg.train.save_file))
        net.train()

    logger.info("Train finished!")

def test_model():
    r"""
    Smoke-test: run a fresh CNN forward pass on random dummy inputs and
    print the input/output sizes. (Note this calls net(input1, input2)
    with two positional args, unlike the tuple form used in train().)
    """
    net = CNN()
    initialize_weights(net)
    features = torch.randn(16, 1, 500, 80)
    token_ids = torch.randint(51, (16, 20))
    output = net(features, token_ids)
    print('size of in/out {} {} {}\n'.format(features.size(), output.size(), output))

# Entry point: run the full training loop (uncomment test_model() for a
# quick forward-pass smoke test instead).
if __name__ == "__main__":
    train()
    # test_model()


