import time
import torch
import torch.nn as nn
import torch.optim as optim
from model.BiLSTM import *
from model.BiLSTM_CRF import *
from utils.data_loader import *
from tqdm import tqdm
from sklearn.metrics import precision_score,recall_score,f1_score,classification_report
from config import *

# Load configuration parameters
conf = Config()

# Training entry point
def model2train():
    """Train the NER model selected by ``conf.model`` and save the best checkpoint.

    Builds either a BiLSTM or a BiLSTM-CRF model (per ``conf.model``), trains it
    for ``conf.epochs`` epochs on the training dataloader, evaluates on the dev
    dataloader after every epoch, and saves the weights to
    ``save_model/{conf.model}_best.pth`` whenever the weighted F1 improves.

    Side effects: prints the model, periodic loss logs, the classification
    report on improvement, and the total training time; writes checkpoint files.
    """
    # Fetch the train/dev dataloaders (each batch: inputs, labels, mask).
    train_dataloader, dev_dataloader = get_data()

    # Instantiate the configured model and move it to the target device.
    models = {"BiLSTM": NERBiLSTM,
              "BiLSTM_CRF": NERBiLSTM_CRF}
    model = models[conf.model](conf.embedding_dim, conf.hidden_dim,
                               conf.dropout, word2id, conf.tag2id)
    model = model.to(conf.device)
    print(model)

    # Cross-entropy is only used by the plain BiLSTM; the CRF model computes
    # its own negative log-likelihood internally.
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=conf.lr)

    # The two model variants share the whole training loop; they differ only
    # in mask dtype, loss computation, and gradient clipping.
    is_crf = conf.model == "BiLSTM_CRF"

    start_time = time.time()
    best_f1 = -1000
    for epoch in range(conf.epochs):
        model.train()
        for index, (inputs, labels, mask) in enumerate(tqdm(train_dataloader, desc=conf.model)):
            inputs = inputs.to(conf.device)
            labels = labels.to(conf.device)
            if is_crf:
                # torchcrf expects a boolean mask.
                mask = mask.to(torch.bool).to(conf.device)
                # Forward pass + loss in one call (mean NLL over the batch).
                loss = model.log_likelihood(inputs, labels, mask).mean()
            else:
                mask = mask.to(conf.device)
                output = model(inputs, mask)
                # Flatten (batch, seq, tags) -> (batch*seq, tags) for CE loss.
                loss = loss_func(output.view(-1, len(conf.tag2id)), labels.view(-1))

            optimizer.zero_grad()
            loss.backward()
            if is_crf:
                # Clip gradients to guard against exploding gradients in the CRF.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
            optimizer.step()

            # Log the loss every 200 batches.
            if index % 200 == 0:
                print(f'当前批次{epoch + 1}，损失{loss.item():.3f}')

        # Evaluate on the dev set; keep only the best-F1 checkpoint.
        precision, recall, f1, report = model2dev(dev_dataloader, model, loss_func)
        if f1 > best_f1:
            best_f1 = f1
            torch.save(model.state_dict(), f'save_model/{conf.model}_best.pth')
            print(report)

    end_time = time.time()
    print(f'训练总耗时：{end_time - start_time:.4f}')


def model2dev(dev_dataloader, model, loss_function=None):
    """Evaluate a trained NER model on the dev set.

    Dispatches on ``model.name`` ("BiLSTM" or "BiLSTM_CRF") to compute
    predictions and loss, trims predictions/labels to each sentence's true
    (unpadded) length, and scores them with weighted sklearn metrics.

    :param dev_dataloader: dev-set dataloader yielding (inputs, labels, mask)
    :param model: model instance exposing a ``name`` attribute
    :param loss_function: loss object; required for the plain BiLSTM,
        unused for BiLSTM_CRF (which supplies its own log-likelihood)
    :return: (precision, recall, f1, report) — weighted precision/recall/F1
        floats and the sklearn classification report string
    """
    av_loss = 0  # accumulated dev loss (currently diagnostic only, not returned)
    pred, gold = [], []
    model.eval()
    # Inference only: disable autograd to avoid building graphs during eval.
    with torch.no_grad():
        for inputs, labels, mask in tqdm(dev_dataloader):
            inputs = inputs.to(conf.device)
            labels = labels.to(conf.device)
            mask = mask.to(conf.device)
            predict = []
            if model.name == "BiLSTM":
                output = model(inputs, mask)
                predict = torch.argmax(output, dim=-1).tolist()
                # Flatten (batch, seq, tags) -> (batch*seq, tags) for CE loss.
                loss = loss_function(output.view(-1, len(conf.tag2id)), labels.view(-1))
                av_loss += loss.item()
            elif model.name == "BiLSTM_CRF":
                mask = mask.to(torch.bool)  # torchcrf expects a boolean mask
                predict = model(inputs, mask)
                loss = model.log_likelihood(inputs, labels, mask)
                av_loss += loss.mean().item()

            # True length of each sentence = count of token ids > 0
            # (assumes 0 is the padding id — same rule as the original loop).
            lengths = (inputs > 0).sum(dim=1).tolist()

            # Keep only the unpadded prefix of each predicted tag sequence.
            for i, seq in enumerate(predict):
                pred.extend(seq[:lengths[i]])

            # Same trimming for the gold labels.
            for i, seq in enumerate(labels.tolist()):
                gold.extend(seq[:lengths[i]])

    # Weighted metrics account for tag-class imbalance (O vs entity tags).
    precision = precision_score(gold, pred, average='weighted')
    recall = recall_score(gold, pred, average='weighted')
    f1 = f1_score(gold, pred, average='weighted')
    report = classification_report(gold, pred)
    return precision, recall, f1, report

# Script entry point: run training when executed directly (not on import).
if __name__ == '__main__':
    model2train()