#encoding=utf-8
import torch
import torchvision
import torchaudio
from torch import nn, optim, Tensor
import data_prepare_by_name
import numpy as np
import logging

import os, sys
import random
import argparse

import data_prepare
from data_load import W2LData_byname, collate_fn, collate_fn_chunk, ChunkData, compute_dev_ratio, compute_exp_lr_base
from data_prepare_by_name import generate_data
from sklearn.model_selection import train_test_split
from torch.utils.data.dataloader import DataLoader
from model import *
from torchaudio.models import Wav2Letter
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda import amp
from ftdnn.models_mix import *
from accelerate import Accelerator



# ---------------------------------------------------------------------------
# Command-line interface.
# NOTE(review): the help text of --input_num says "output dimension" and
# "--gpu_divices" looks like a typo for "--gpu_devices"; both are part of the
# user-visible CLI surface, so they are only flagged here, not changed.
# NOTE(review): argparse's type=bool does not parse "False" as falsy — any
# non-empty string makes --cudnn_enabled True; consider a store_false flag.
parser = argparse.ArgumentParser(description=" ")
parser.add_argument("--hidden_num", type=int, dest='hidden_num', default=1024, help="gru size")
parser.add_argument("--output_num", type=int, dest='output_num', default=40, help="output dimension")
parser.add_argument("--input_num", type=int, dest='input_num', default=240, help="output dimension")
parser.add_argument("--start_lr", type=float, dest='start_lr', default=0.0002, help="learning rate")
parser.add_argument("--final_lr", type=float, dest='final_lr', default=0.00005, help="learning rate")
parser.add_argument("--epoch", type=int, dest='epoch', default=20, help="Number of training")
parser.add_argument("--batch_size", type=int, dest='batch_size', default=256, help="batch size")
parser.add_argument("--gru_layers", type=int, dest='gru_layers', default=3, help="gru_layers")
parser.add_argument("--eval_steps", type=int, dest='eval_steps', default=1000, help="eval_steps")
parser.add_argument("--print_steps", type=int, dest='print_steps', default=20, help="print_steps")
parser.add_argument("--cudnn_enabled", type=bool, dest='cudnn_enabled', default=True, help="use cudnn to accelerate")
parser.add_argument("--save_interval", type=int, dest='save_interval', default=500, help="interval to save model")
parser.add_argument("--save_dir", type=str, dest='save_dir', default='model1', help="save_name")
parser.add_argument("--save_file", type=str, dest='save_file', default="model.pt", help="model save dir")
parser.add_argument("--restart_model", type=str, dest='restart_model', default="", help="model has been trained")
parser.add_argument("--gpu_divices", type=str, dest='gpu_divices', default="4,5,6,7", help="gpu divices")
parser.add_argument("--feats_scp_list", type=str, dest='feats_scp_list', default="./final_feats_scps.list", help="")
parser.add_argument("--labels_scp_list", type=str, dest='labels_scp_list', default="./final_labels_scps.list", help="")
Arg = parser.parse_args()

# Module-level learning rate; train_part_of_params() mutates it via `global`.
lr = Arg.start_lr
# Optionally disable cuDNN (e.g. for deterministic debugging).
if Arg.cudnn_enabled == False:
    torch.backends.cudnn.enabled = False
# Restrict the visible GPUs before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = Arg.gpu_divices
print('+++++++++++++++++++++++++ Training on device : {} ++++++++++++++++++++\n'.format(Arg.gpu_divices))
# Create the checkpoint/log directory if it does not exist yet.
if not os.path.exists("./{}".format(Arg.save_dir)):
    os.makedirs("./{}".format(Arg.save_dir))

# Console logging at DEBUG level; a file handler under save_dir captures INFO+.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s[%(levelname)s]: %(message)s')
logger = logging.getLogger('Training a state level acoustic model')
handler = logging.FileHandler("./{}/train.log".format(Arg.save_dir))
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

def print_params(Arg):
    """Log every hyper-parameter of this run to the training log.

    Args:
        Arg: the parsed ``argparse.Namespace`` holding the CLI options.
    """
    # Bug fix: the banner said "paragrams" and the lr lines were labelled
    # "start_rate"/"final_rate", which do not match the actual flag names.
    logger.info("--------------------------parameters--------------------------------")
    logger.info("hidden_num:{}".format(Arg.hidden_num))
    logger.info("output_num:{}".format(Arg.output_num))
    logger.info("input_num:{}".format(Arg.input_num))
    logger.info("start_lr:{}".format(Arg.start_lr))
    logger.info("final_lr:{}".format(Arg.final_lr))
    logger.info("epoch:{}".format(Arg.epoch))

    logger.info("batch_size:{}".format(Arg.batch_size))
    logger.info("gru_layers:{}".format(Arg.gru_layers))
    logger.info("eval_steps:{}".format(Arg.eval_steps))

    logger.info("print_steps:{}".format(Arg.print_steps))
    # Log the effective cuDNN state rather than the raw flag, since the flag
    # may have been overridden at startup.
    logger.info("cudnn_enabled:{}".format(torch.backends.cudnn.enabled))
    logger.info("save_dir:{}".format(Arg.save_dir))
    logger.info("save_interval:{}".format(Arg.save_interval))

    logger.info("save_file:{}".format(Arg.save_file))
    logger.info("gpu_divices:{}".format(Arg.gpu_divices))
    logger.info("Data scp list:{}".format(Arg.feats_scp_list))
    logger.info("Label scp list:{}".format(Arg.labels_scp_list))
    logger.info("-------------------------------------------------------------------")


def show_model(nnet):
    """Dump every named sub-module of *nnet* to the training log."""
    logger.info("==============================Model Structures==============================")
    # The empty name refers to the model itself; log only real sub-modules.
    for name, module in nnet.named_modules():
        if name:
            logger.info(module)
    logger.info("============================================================================")


def evaluate(data, net, calc_loss):
    """Score *net* frame-by-frame on a list of utterance file pairs.

    Args:
        data: sequence of items where item[1] is a feature ``.npy`` path and
            item[2] is the matching label ``.npy`` path.
        net: the acoustic model (already on GPU; it outputs (B, T, C) logits).
        calc_loss: loss criterion, e.g. ``nn.CrossEntropyLoss()``.

    Returns:
        (acc, avg_loss): frame accuracy and mean per-utterance loss.
    """
    total_fream, loss_sum, count_corret = 0, 0.0, 0
    # Switch to eval mode once instead of toggling every iteration, and skip
    # gradient tracking entirely during scoring.
    net.eval()
    with torch.no_grad():
        for i in range(len(data)):
            voice = np.load(data[i][1])
            label = np.load(data[i][2])
            # Features and labels may disagree in length by a few frames;
            # truncate both to the shorter one.
            min_len = min(voice.shape[0], label.shape[0])
            voice = torch.as_tensor(voice[:min_len], dtype=torch.float32).unsqueeze(0).cuda()
            label = torch.as_tensor(label[:min_len], dtype=torch.float32).unsqueeze(0).cuda()

            output = net(voice)
            output = output.permute(0, 2, 1)  # (B, C, T) as CrossEntropyLoss expects

            # argmax over logits equals argmax over softmax probabilities, so
            # the explicit softmax of the original is unnecessary.
            pred_label = torch.argmax(output, dim=1).float()

            count_corret += (pred_label == label).sum().item()
            total_fream += min_len

            # Bug fix: the original re-created a CrossEntropyLoss here every
            # iteration, shadowing the `calc_loss` parameter — use the
            # parameter the caller supplied.
            loss = calc_loss(output, label.long())
            # .item() keeps a Python float; summing tensors would retain the
            # autograd graph of every utterance.
            loss_sum += loss.item()

            print("Evaluating:{}/{}           \r".format(i, len(data)), end="")

    net.train()

    print()
    acc = count_corret / total_fream
    avg_loss = loss_sum / len(data)

    logger.info("acc:{:.2f}%  testset_avg_loss:{:.5f}".format(acc * 100, avg_loss))
    return acc, avg_loss


def calc_acc(data_loader, net, calc_loss):
    """Compute frame accuracy and mean batch loss of *net* over a loader.

    Args:
        data_loader: yields (features, labels) batches.
        net: the acoustic model; outputs (B, T, C) logits.
        calc_loss: criterion applied to (B, C, T) logits and long labels.

    Returns:
        (acc, loss): frame accuracy and average per-batch loss as floats.
    """
    net.eval()
    running_loss = 0.0
    frames_seen = 0
    frames_correct = 0
    with torch.no_grad():
        for feats, targets in data_loader:
            feats = feats.cuda()
            targets = targets.cuda()
            targets = targets.squeeze()

            logits = net(feats).permute(0, 2, 1)  # (B, C, T) for the loss
            running_loss += calc_loss(logits, targets.long())

            predictions = torch.argmax(logits, dim=1).float()
            frames_correct += (predictions == targets).sum().item()
            frames_seen += targets.size(0) * targets.size(1)
    net.train()
    accuracy = frames_correct / frames_seen
    mean_loss = (running_loss / len(data_loader)).item()
    return accuracy, mean_loss


def calc_loss_distributed(data_loader, net, calc_loss):
    """Distributed-evaluation entry point.

    The original body was a line-for-line copy of :func:`calc_acc`; delegate
    to it so the two code paths cannot drift apart.

    Returns:
        (acc, loss) exactly as produced by ``calc_acc``.
    """
    return calc_acc(data_loader, net, calc_loss)


def train():
    """Main training loop, driven by the global CLI options in ``Arg``.

    Builds the train/dev split, wraps model/optimizer/loaders with
    ``accelerate``, trains for ``Arg.epoch`` epochs under a
    ReduceLROnPlateau schedule, and checkpoints both periodically and
    whenever dev accuracy improves.
    """
    print_params(Arg)
    lr = Arg.start_lr
    final_lr = Arg.final_lr
    accelerator = Accelerator(split_batches=True)

    data_set_list = data_prepare_by_name.from_list_get_data_list(Arg.feats_scp_list, Arg.labels_scp_list)
    dev_ratio = compute_dev_ratio(data_set_list)
    # Renamed so the locals do not shadow this function's own name.
    train_list, dev_list = train_test_split(data_set_list, test_size=dev_ratio, random_state=7)
    # Bug fix: the original format string had three placeholders for four
    # arguments, so dev[0] was never printed and the message was scrambled.
    print('Size of train {}, train[0] {}, size of dev {}, dev[0] {}\n'.format(
        len(train_list), train_list[0], len(dev_list), dev_list[0]))

    trainset = ChunkData(train_list)
    devset = ChunkData(dev_list)
    train_loader = DataLoader(trainset, collate_fn=collate_fn_chunk, batch_size=Arg.batch_size, shuffle=True, num_workers=20, drop_last=True)
    # The dev loader yields the whole dev set as a single batch.
    dev_loader = DataLoader(devset, collate_fn=collate_fn_chunk, batch_size=len(dev_list), shuffle=True, num_workers=10, drop_last=True)

    net = TDNNF(Arg.input_num, Arg.hidden_num, Arg.output_num)

    if Arg.restart_model != "":
        # Checkpoints were saved from a DataParallel wrapper; wrap before
        # loading so the "module." key prefixes line up.
        net = nn.DataParallel(net)
        net.load_state_dict(torch.load(Arg.restart_model))
        print('checkpoint {}\n'.format(net))

    logger.info("MODEL weights initialized")
    show_model(net)
    net.train()

    # Loss and optimizer.
    calc_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=lr)
    print('+++++++++++++++ {} iters / EPOCH ++++++++++++++++\n'.format(len(train_list) / Arg.batch_size))
    lr_decay_factor = compute_exp_lr_base(lr, final_lr, len(train_list), Arg.batch_size, Arg.epoch)
    # When loss_policy is True the scheduler tracks (maximised) train
    # accuracy; otherwise it tracks (minimised) train loss. The two original
    # scheduler constructions differed only in mode, so they are merged.
    loss_policy = False
    plateau_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'max' if loss_policy else 'min', factor=lr_decay_factor,
        verbose=True, cooldown=1, threshold_mode='rel', threshold=0.01,
        min_lr=final_lr, patience=5)

    net, optimizer, train_loader = accelerator.prepare(net, optimizer, train_loader)
    dev_loader = accelerator.prepare(dev_loader)

    # Training.
    prev_dev_acc = 0
    for epoch in range(1, Arg.epoch + 1):
        loss_sum, count_corret, count_freams = 0.0, 0, 0
        logger.info("-----------------EPOCH {}, BATCH_SIZE {}, LR DECAY FACTOR {}------------------".format(epoch, Arg.batch_size, lr_decay_factor))
        for step, batch_data in enumerate(train_loader, 1):
            train_feats_batch, train_label_batch = batch_data
            inputs, labels = train_feats_batch.cuda(), train_label_batch.cuda()
            labels = labels.squeeze()  # (batch, frames)
            optimizer.zero_grad()

            # forward >> backward >> optimizer
            outputs = net(inputs)
            outputs = outputs.permute(0, 2, 1)  # (B, C, T) as CrossEntropyLoss expects
            loss = calc_loss(outputs, labels.long())
            # Bug fix: accumulate a Python float — summing the loss tensor
            # kept every step's autograd graph alive and leaked memory.
            loss_sum += loss.item()

            accelerator.backward(loss)
            optimizer.step()

            # argmax over logits == argmax over softmax; skip the softmax.
            pred_label = torch.argmax(outputs, dim=1).float()
            count_corret += (pred_label == labels).sum().item()
            count_freams += int(labels.size()[1])

            if step % Arg.print_steps == 0:
                train_acc = count_corret / (Arg.batch_size * count_freams)
                if loss_policy:
                    plateau_scheduler.step(train_acc)
                else:
                    plateau_scheduler.step(loss.item())
                avg_train_loss = loss_sum / Arg.print_steps
                new_lr = optimizer.param_groups[0]['lr']
                logger.info("STEP {} | loss:{:.5f} | train acc:{:.2f}%, lr:{:.10f}".format(step, avg_train_loss, train_acc * 100, new_lr))
                loss_sum, count_corret, count_freams = 0.0, 0, 0

            if step % Arg.save_interval == 0:
                interval_model_name = "{}/epoch_{}_iter_{}_model.pt".format(Arg.save_dir, epoch, step)
                accelerator.wait_for_everyone()
                unwrapped_model = accelerator.unwrap_model(net)
                accelerator.save(unwrapped_model.state_dict(), interval_model_name)

        epoch_dev_acc, epoch_dev_loss = calc_acc(dev_loader, net, calc_loss)
        logger.info("EPOCH {} | dev loss:{:.5f} | dev acc:{:.2f}%".format(epoch, epoch_dev_loss, epoch_dev_acc * 100))
        # Checkpoint only when dev accuracy improves.
        if epoch_dev_acc > prev_dev_acc:
            net.eval()
            # Bug fix: the checkpoint was written to the CWD while the log
            # message claimed save_dir — write it into save_dir for real.
            checkpoint_model = '{}/epoch_{}_model_eval.pt'.format(Arg.save_dir, epoch)
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(net)
            accelerator.save(unwrapped_model.state_dict(), checkpoint_model)
            logger.info("Model saved in {}".format(checkpoint_model))
            net.train()
            prev_dev_acc = epoch_dev_acc

    logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>> Train finished! <<<<<<<<<<<<<<<<<<<<<<<<<<<")
    

def train_part_of_params():
    """Fine-tune a pre-trained W2LSTM with the acoustic front-end frozen.

    Loads matching weights from a fixed checkpoint, freezes every
    "acoustic_model.*" parameter for the first 5 epochs, then unfreezes
    them and continues with a fresh Adam optimizer.

    NOTE(review): this path is currently disabled in __main__ and appears
    broken as written — it reads Arg.trainscp_path, Arg.testscp_path,
    Arg.lr_decay and Arg.save_name, none of which are defined by the
    argparse parser above, so running it would raise AttributeError.
    """
    global lr
    assert os.path.exists(Arg.trainscp_path)
    assert os.path.exists(Arg.testscp_path)

    print_params(Arg)

    # 1% of the training scp entries are held out as a dev set.
    train_wav_scp = generate_data(Arg.trainscp_path)
    train, dev = train_test_split(list(train_wav_scp.items()), test_size=0.01, random_state=7)

    trainset = W2LData_byname(train)
    devset = W2LData_byname(dev)
    train_loader = DataLoader(trainset, collate_fn=collate_fn, batch_size=Arg.batch_size, shuffle=True, num_workers=10)
    dev_loader = DataLoader(devset, collate_fn=collate_fn, batch_size=Arg.batch_size, shuffle=True, num_workers=10)

    #net = BLSTM(Arg.input_num, Arg.hidden_num, Arg.gru_layers, Arg.output_num).cuda()
    net = W2LSTM().cuda()
    
    net_dict = net.state_dict()

    # NOTE(review): hard-coded checkpoint path — consider making this a CLI
    # option instead.
    pretrained_dict = torch.load("/ai-cephfs/Share/zps_share/native_en/wav2letter_acc0.85.pt")
    
    # Strip the leading wrapper prefix (e.g. "module.") from checkpoint keys
    # and keep only those that exist in the current model.
    pretrained_dict = {k.split(".",1)[1]: v for k, v in pretrained_dict.items() if k.split(".",1)[1] in net_dict}
    net_dict.update(pretrained_dict)
    net.load_state_dict(net_dict)

    # Freeze the pre-trained acoustic front-end; only the new layers train.
    for k,v in net.named_parameters():
        if k.startswith("acoustic_model"):
            v.requires_grad=False
            logger.info("Param [ {} ] become static".format(k))
    
    for m in net.modules():
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight.data)  # initialize linear-layer weights
            m.bias.data.fill_(0)
            logger.info("Linear weights initialized")

    # Orthogonal init for the recurrent layer's first-layer weights.
    # NOTE(review): the attribute is named GRU_layer but the log below says
    # "LSTM weights initialized" — confirm which cell type W2LSTM uses.
    nn.init.orthogonal_(net.GRU_layer.weight_ih_l0)
    nn.init.orthogonal_(net.GRU_layer.weight_hh_l0)
    nn.init.constant_(net.GRU_layer.bias_ih_l0, 0)
    nn.init.constant_(net.GRU_layer.bias_hh_l0, 0)
    logger.info("LSTM weights initialized")

    show_model(net)

    net = nn.DataParallel(net)
    net = net.cuda()
    net.train()

    #loss and optimizer
    calc_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=lr)

    # training
    for epoch in range(1, Arg.epoch + 1):
        loss_sum, count_corret, count_freams = 0.0, 0, 0
        logger.info("-----------------EPOCH {}------------------".format(epoch))
        for step, batch_data in enumerate(train_loader, 1):
            train_voices_batch, train_label_batch = batch_data
            inputs, labels = train_voices_batch.cuda(), train_label_batch.cuda()
            labels = labels.squeeze()  # (64,80,350)
            optimizer.zero_grad()

            # forward >> backward >> optimizer
            outputs = net(inputs)
            outputs = outputs.permute(0,2,1)
            loss = calc_loss(outputs, labels.long())
            loss_sum += loss
            loss.backward()
            optimizer.step()
            
            # train acc
            softmax_func=nn.Softmax(dim=1)
            soft_output=softmax_func(outputs)
            pred_label = torch.argmax(soft_output, dim=1).float()
            count_corret += (pred_label == labels).sum().item()
            count_freams += int(labels.size()[1])

            

            # Step-wise lr halving, floored at 1e-5. NOTE(review): this
            # replaces Adam with Adadelta and resets optimizer state each
            # time it fires — presumably unintended; confirm.
            if step % Arg.lr_decay == 0:
                lr = max((lr * 0.5), 0.00001)
                optimizer = optim.Adadelta(net.parameters(), lr=lr)
                logger.info("Adjust learning rate: {:.6f}".format(lr))

            # acc, avg_loss = evaluate(random.sample(data.test_data, 1000), net, calc_loss)

            if step % Arg.print_steps == 0:
                train_acc = count_corret / (Arg.batch_size * count_freams)
                avg_train_loss = loss_sum / Arg.print_steps
                logger.info("STEP {} | loss:{:.5f} | train acc:{:.2f}%".format(step, avg_train_loss, train_acc * 100))
                loss_sum, count_corret, count_freams = 0.0, 0, 0

        epoch_train_acc, epoch_train_loss = calc_acc(train_loader, net, calc_loss)
        epoch_dev_acc, epoch_dev_loss = calc_acc(dev_loader, net, calc_loss)
        logger.info("EPOCH {} | train loss:{:.5f} | train acc:{:.2f}%".format(epoch, epoch_train_loss, epoch_train_acc * 100))
        logger.info("EPOCH {} | dev loss:{:.5f} | dev acc:{:.2f}%".format(epoch, epoch_dev_loss, epoch_dev_acc * 100))
        
        # Save after every epoch (unconditionally, unlike train() above).
        net.eval()
        torch.save(net.state_dict(), "./{}/{}".format(Arg.save_name, Arg.save_file))
        logger.info("Model saved in {}/{}".format(Arg.save_name, Arg.save_file))
        net.train()

        # After 5 epochs, unfreeze the acoustic front-end and fine-tune the
        # whole network with a smaller fixed learning rate.
        if epoch == 5:
            for k, v in net.named_parameters():
                if k.startswith("acoustic_model"):
                    v.requires_grad = True
                    logger.info("Param [ {} ] become trainable".format(k))
            optimizer = optim.Adam(net.parameters(), lr=0.0001)

    logger.info("Train finished!")
    

if __name__ == "__main__":
    # Default entry point: full training with accelerate. The partial
    # fine-tuning path is kept for reference but disabled.
    train()
    # train_part_of_params()
    
