# coding: utf-8
import time

import torch
import torch.nn as nn  # aliased: this file calls nn.CrossEntropyLoss
import torch.optim as optim
from sklearn.metrics import f1_score, precision_score, recall_score, classification_report
from tqdm import tqdm

from data_loader import *
from model.BiLSTM import *
from model.BiLSTM_CRF import *

# Hyper-parameters read once from the shared config object
# (presumably brought into scope by a wildcard import above — verify).
embedding_dim = config.embedding_dim
hidden_dim = config.hidden_dim
dropout = config.dropout
tag2id = config.tag2id  # tag -> integer id; len(tag2id) is the tag-set size


def model_train():
    """Train the model selected by ``config.model`` and checkpoint the best one.

    Builds a BiLSTM or BiLSTM_CRF model, trains it for ``config.epochs``
    epochs, evaluates on the dev set after every epoch, and saves the
    ``state_dict`` whenever the weighted F1 improves.

    Side effects: reads data via :func:`get_data`, writes a ``.pth``
    checkpoint file, prints progress/loss/reports to stdout.
    """
    # 获取数据 (fetch the train / dev dataloaders)
    train_dataloader, dev_dataloader = get_data()
    # 创建模型 (instantiate the configured model class)
    model_classes = {
        "BiLSTM": BiLSTM,
        "BiLSTM_CRF": BiLSTM_CRF
    }
    train_model = model_classes[config.model](embedding_dim, hidden_dim, dropout, word2id, tag2id)
    # Loss (only used for the plain BiLSTM path) and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(train_model.parameters(), lr=config.lr)
    # Move the model to the configured device (GPU when available).
    train_model = train_model.to(config.device)

    use_crf = config.model != "BiLSTM"
    desc = "BiLSTM_CRF训练" if use_crf else "BiLSTM训练"
    save_path = (r"mystudy_knowledge_graph\save_model\bilstm_crf_best.pth"
                 if use_crf else r"mystudy_knowledge_graph\save_model\bilstm_best.pth")
    print("BiLSTM_CRF开始训练..." if use_crf else "开始训练...")

    best_f1 = -1000  # renamed: the original shadowed sklearn's f1_score
    start_time = time.time()
    for epoch in range(config.epochs):
        train_model.train()
        for step, (batch_x, batch_y, batch_mask) in enumerate(tqdm(train_dataloader, desc=desc)):
            val_x = batch_x.to(config.device)
            val_y = batch_y.to(config.device)
            batch_mask = batch_mask.to(config.device)
            if use_crf:
                # CRF training needs only the (negative) log-likelihood;
                # the original also ran an unused decoding forward pass here.
                loss = train_model.log_likelihood(val_x, val_y, batch_mask).mean()
            else:
                logits = train_model(val_x, batch_mask)
                # CrossEntropyLoss expects [batch*seq, tag_size] vs [batch*seq],
                # so flatten both the logits and the labels.
                loss = criterion(logits.view(-1, len(tag2id)), val_y.view(-1))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step % 100 == 0:
                print("epoch:{},index:{},loss:{}".format(epoch, step, loss.item()))

        # Evaluate after each epoch; checkpoint on F1 improvement.
        if use_crf:
            f1, precision_s, recall_s, report_s = model_dev(dev_dataloader, train_model)
        else:
            f1, precision_s, recall_s, report_s = model_dev(dev_dataloader, train_model, criterion)
        if f1 > best_f1:
            best_f1 = f1
            torch.save(train_model.state_dict(), save_path)
            print(report_s)

    end_time = time.time()
    print("训练结束，总耗时：{}".format(end_time - start_time))


def model_dev(dev_dataloader, train_model, criterion=None):
    """Evaluate *train_model* on the dev set over non-padding tokens.

    Args:
        dev_dataloader: yields ``(input_ids, labels, mask)`` batches.
        train_model: model whose ``name`` attribute ("BiLSTM" or
            "BiLSTM_CRF") selects the evaluation path.
        criterion: loss for the plain-BiLSTM path (e.g. CrossEntropyLoss);
            ignored for BiLSTM_CRF.

    Returns:
        Tuple ``(f1, precision, recall, report)`` with weighted-average
        sklearn metrics and a text classification report.
    """
    average_loss = 0  # NOTE(review): accumulated but never returned — kept for parity
    preds, golds = [], []
    train_model.eval()
    with torch.no_grad():  # evaluation only — no autograd graph needed
        for batch_x, batch_y, batch_mask in tqdm(dev_dataloader, desc="测试集验证"):
            val_x = batch_x.to(config.device)
            val_y = batch_y.to(config.device)
            batch_mask = batch_mask.to(config.device)
            predict = []
            if train_model.name == "BiLSTM":
                logits = train_model(val_x, batch_mask)
                predict = torch.argmax(logits, dim=-1).tolist()
                var_loss = criterion(logits.view(-1, len(tag2id)), val_y.view(-1))
                average_loss += var_loss.item()
            elif train_model.name == "BiLSTM_CRF":
                # NOTE(review): decodes by argmax over raw emission scores
                # (lstm2linear), not CRF Viterbi decoding — confirm intended.
                emissions = train_model.lstm2linear(val_x)
                predict = torch.argmax(emissions, dim=-1).tolist()
                var_loss = train_model.log_likelihood(val_x, val_y, batch_mask)
                average_loss += var_loss.mean().item()
            # Real (un-padded) length of each sequence: tokens with id > 0.
            # (The original collected the token ids themselves but only ever
            # used their count.)
            lengths = [sum(1 for tok in seq if tok.item() > 0) for seq in val_x.cpu()]
            # Keep only the predictions / gold labels within the real length.
            for i, p in enumerate(predict):
                preds.extend(p[:lengths[i]])
            for i, gold_seq in enumerate(val_y.tolist()):
                golds.extend(gold_seq[:lengths[i]])

    f1_s = f1_score(golds, preds, average='weighted')
    precision_s = precision_score(golds, preds, average='weighted')
    recall_s = recall_score(golds, preds, average='weighted')
    report_s = classification_report(golds, preds)
    return f1_s, precision_s, recall_s, report_s


def debug_dev():
    """Build the configured model and run a single dev-set evaluation pass.

    Debug helper: constructs an untrained model and calls :func:`model_dev`
    on it so the evaluation code path can be exercised in isolation.
    """
    # 获取数据 (only the dev loader is actually used here)
    train_dataloader, dev_dataloader = get_data()
    # 创建模型 (instantiate the configured model class)
    model_classes = {
        "BiLSTM": BiLSTM,
        "BiLSTM_CRF": BiLSTM_CRF
    }
    train_model = model_classes[config.model](embedding_dim, hidden_dim, dropout, word2id, tag2id)
    # Loss for the plain-BiLSTM eval path; no optimizer is needed for eval
    # (the original built an unused Adam optimizer here).
    criterion = nn.CrossEntropyLoss()
    # 将模型放在GPU上 (move to the configured device)
    train_model = train_model.to(config.device)
    model_dev(dev_dataloader, train_model, criterion)


if __name__ == '__main__':
    # Script entry point: train the model selected by config.model.
    model_train()
    # debug_dev()
