import os
import sys
# Resolve the project root: take this file's absolute directory and go two
# levels up, then add it to sys.path so project-local packages can be imported.
project_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..')
print(f'project_root-->{project_root}')
sys.path.append(project_root)
import time
import torch.optim as optim
from model.BiLSTM import *
from model.BiLSTM_CRF import *
from utils.data_loader import *
from tqdm import tqdm
# classification_report can also return a dict (pass output_dict=True), which
# can then be saved to CSV.
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
from config import *
from itertools import chain
conf = Config()

def model2train():
    """Train the model selected by ``conf.model`` ('BiLSTM' or 'BiLSTM_CRF').

    Loads the train/dev iterators, builds the model, loss and optimizer,
    runs the epoch loop, evaluates on the dev set after every epoch and
    checkpoints the weights whenever the weighted F1 improves.
    """
    # Get train/dev DataLoaders from the project data loader.
    train_dataloader, dev_dataloader = get_data()
    # Model registry: pick the class by the name configured in conf.model.
    models = {'BiLSTM': NERLSTM,
              'BiLSTM_CRF': NERLSTM_CRF}
    model = models[conf.model](conf.embedding_dim, conf.hidden_dim, conf.dropout, word2index, conf.tag2id).to(
        conf.device)
    # Cross-entropy is only used by the plain BiLSTM branch; index 11 is the
    # [PAD] tag, which must contribute neither to the loss nor to gradients.
    criterion = nn.CrossEntropyLoss(ignore_index=11)
    optimizer = optim.Adam(model.parameters(), lr=conf.lr)
    start_time = time.time()  # wall-clock start of training
    # Best dev F1 seen so far. NOTE: previously named `f1_score`, which
    # shadowed the imported sklearn.metrics.f1_score inside this function.
    best_f1 = float('-inf')
    if conf.model == 'BiLSTM':
        for epoch in range(conf.epochs):
            model.train()
            for index, (inputs, labels, mask) in enumerate(tqdm(train_dataloader, desc='BiLSTM==>')):
                # inputs, labels and mask all share the shape [batch, seq_len].
                x = inputs.to(conf.device)
                mask = mask.to(conf.device)
                y = labels.to(conf.device)
                pred = model(x, mask)
                # CrossEntropyLoss expects 2-D logits [N, n_tags] and 1-D
                # targets [N], so flatten batch and sequence dimensions.
                pred = pred.view(-1, len(conf.tag2id))
                my_loss = criterion(pred, y.view(-1))
                optimizer.zero_grad()   # clear accumulated gradients
                my_loss.backward()      # backprop
                optimizer.step()        # update parameters
                if index % 200 == 0 and index != 0:
                    print('epoch:%04d,---n_batch:%02d---loss:%f---time:%.4f' % (
                        epoch, index, my_loss.item(), time.time() - start_time))
            dev_loss, precision, recall, f1, report = model2dev(dev_dataloader, model, criterion)
            if f1 > best_f1:
                best_f1 = f1
                torch.save(model.state_dict(), os.path.join(project_root, 'P03_NER/LSTM_CRF/save_model/bilstm_best.pth'))
                print(f'\ndev_loss={dev_loss}\nprecision={precision}\nrecall={recall}\nf1={f1}\n{report}')
    elif conf.model == 'BiLSTM_CRF':
        for epoch in range(conf.epochs):
            model.train()
            for index, (inputs, labels, mask) in enumerate(tqdm(train_dataloader, desc='BiLSTM_CRF==>')):
                x = inputs.to(conf.device)
                # The CRF layer requires a boolean mask.
                mask = mask.to(torch.bool).to(conf.device)
                tags = labels.to(conf.device)
                # log_likelihood already returns a per-sample loss for the CRF.
                loss = model.log_likelihood(x, tags, mask).mean()
                optimizer.zero_grad()
                loss.backward()
                # Clip gradients before stepping — CRF training can blow up.
                torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=10)
                optimizer.step()
                if index % 200 == 0 and index != 0:
                    print('epoch:%04d,------------loss:%f' % (epoch, loss.item()))
            dev_loss, precision, recall, f1, report = model2dev(dev_dataloader, model)
            if f1 > best_f1:
                best_f1 = f1
                torch.save(model.state_dict(), os.path.join(project_root, 'P03_NER/LSTM_CRF/save_model/bilstm_crf_best.pth'))
                print(report)
    print(f'训练总耗时：{time.time() - start_time:.4f}')

def model2dev(dev_iter, model, criterion=None):
    """Evaluate *model* on the dev set and report tagging metrics.

    Args:
        dev_iter: dev DataLoader yielding (inputs, labels, mask) batches.
        model: model whose ``.name`` is 'BiLSTM' or 'BiLSTM_CRF'.
        criterion: loss used by the plain BiLSTM branch; unused for the CRF
            model (which computes its own log-likelihood loss).

    Returns:
        Tuple of (average dev loss, weighted precision, weighted recall,
        weighted f1, classification report string).
    """
    aver_loss = 0
    preds, golds = [], []
    model.eval()
    for inputs, labels, mask in tqdm(dev_iter, desc="测试集验证"):
        val_x = inputs.to(conf.device)
        mask = mask.to(conf.device)
        val_y = labels.to(conf.device)

        predict = []
        if model.name == "BiLSTM":
            pred = model(val_x, mask)  # logits [batch, seq_len, n_tags]
            predict = torch.argmax(pred, dim=-1).tolist()  # [batch, seq_len]
            # CrossEntropyLoss wants 2-D logits and 1-D targets.
            pred = pred.view(-1, len(conf.tag2id))
            val_loss = criterion(pred, val_y.view(-1))
            aver_loss += val_loss.item()
        elif model.name == "BiLSTM_CRF":
            # NOTE(review): the training loop casts mask to torch.bool before
            # the CRF call but this path does not — confirm whether the CRF
            # layer accepts a non-bool mask here.
            predict = model(val_x, mask)
            loss = model.log_likelihood(val_x, val_y, mask)
            aver_loss += loss.mean().item()

        # Collect only positions of real tokens: input id 0 is padding, so a
        # boolean mask over val_x selects the valid labels directly.
        leng_bool = (val_x != 0)
        if model.name == "BiLSTM":
            # torch.tensor keeps the integer dtype of the argmax output
            # (torch.Tensor(...) would build float32 and need a lossy cast).
            predict = torch.tensor(predict)[leng_bool]
            preds.extend(predict.tolist())
            golds.extend(val_y[leng_bool].tolist())
        elif model.name == "BiLSTM_CRF":
            # The CRF decoder returns one variable-length tag list per
            # sample; flatten them into a single sequence.
            preds.extend(chain(*predict))
            golds.extend(val_y[leng_bool].tolist())

    aver_loss /= len(dev_iter)
    # Weighted averages account for class imbalance across tag types.
    precision = precision_score(golds, preds, average='weighted')
    recall = recall_score(golds, preds, average='weighted')
    f1 = f1_score(golds, preds, average='weighted')
    report = classification_report(golds, preds)
    return aver_loss, precision, recall, f1, report

# Script entry point: run the full training + per-epoch dev evaluation loop.
if __name__ == '__main__':
    model2train()
