import torch
import torch.optim as optim
import numpy as np
import pandas as pd
from torch.nn.utils.rnn import pad_sequence
from datasets import load_from_disk
from transformers import BertTokenizer
import matplotlib.pyplot as plt

from bilstm_crf import NER


# Select the compute device once for the whole module: CUDA when available,
# otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def pad_batch_inputs(data, labels, tokenizer, device=None):
    """
    Pad a batch of variable-length samples so they can be fed to the model.

    Args:
        data: iterable of raw text samples (whitespace-separated characters).
        labels: iterable of per-token label-id sequences, aligned with ``data``.
        tokenizer: object exposing ``encode`` (e.g. a ``BertTokenizer``).
        device: optional ``torch.device`` for the produced tensors. Defaults to
            CUDA when available, otherwise CPU — the same choice as the
            module-level ``device``, so existing callers are unaffected.

    Returns:
        tuple ``(pad_inputs, sorted_labels, sorted_length)``:
            pad_inputs: tensor of shape ``(max_len, batch)`` —
                ``pad_sequence`` is used with its default ``batch_first=False``.
            sorted_labels: list of 1-D label tensors, longest sample first.
            sorted_length: list of int lengths, sorted descending.
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    data_inputs, data_length, data_labels = [], [], []

    for data_input, data_label in zip(data, labels):
        # Convert the text to token ids.
        # BUG FIX: the keyword is ``return_tensors`` (plural). The original
        # ``return_tensor`` was unrecognized and ignored, so ``encode``
        # returned a plain list and the subsequent ``.to(device)`` failed.
        data_input_encode = tokenizer.encode(data_input,
                                             return_tensors='pt',
                                             add_special_tokens=False)
        data_input_encode = data_input_encode.to(device)

        # squeeze() drops the leading batch dimension: (1, seq) -> (seq,)
        data_inputs.append(data_input_encode.squeeze())

        # Sample length = number of non-whitespace characters.
        data_input = ''.join(data_input.split())
        data_length.append(len(data_input))

        # Keep the labels as a tensor on the same device as the inputs.
        data_labels.append(torch.tensor(data_label, device=device))

    # Sort the batch by length, longest first (the order packed-RNN code
    # downstream expects).
    sorted_index = np.argsort(-np.asarray(data_length))
    sorted_inputs, sorted_labels, sorted_length = [], [], []

    for index in sorted_index:
        sorted_inputs.append(data_inputs[index])
        sorted_labels.append(data_labels[index])
        sorted_length.append(data_length[index])

    # Pad every sample in the batch up to the batch maximum length.
    pad_inputs = pad_sequence(sorted_inputs)

    return pad_inputs, sorted_labels, sorted_length



# BIO tag scheme for the two entity types handled by this model:
#   dis = disease, sym = symptom; "O" marks non-entity tokens.
label_to_index = {"O": 0, "B-dis": 1, "I-dis": 2, "B-sym": 3, "I-sym": 4}



def train():
    """
    Train the BiLSTM-CRF NER model.

    Loads the preprocessed training split from disk, builds the tokenizer and
    model, then runs ``num_epoch`` passes over the data. Batching is driven by
    ``datasets.Dataset.map`` with ``batched=True``: the mapped function performs
    one optimizer step per batch as a side effect. A checkpoint is written
    after every epoch.
    """
    # Load the preprocessed training split.
    train_data = load_from_disk('doctor_offline/ner_model/ner_data/bilstm_crf_data')['train']
    # Tokenizer built from the project's custom vocabulary file.
    tokenizer = BertTokenizer(vocab_file='doctor_offline/ner_model/ner_data/bilstm_crf_vocab_aidoc.txt')
    # Build the model and move it to the selected device.
    # BUG FIX: use ``.to(device)`` instead of ``.cuda(device)`` so the CPU
    # fallback chosen at module level actually works on machines without a GPU
    # (``.cuda`` raises when ``device`` is the CPU).
    model = NER(vocab_size=tokenizer.vocab_size, label_num=len(label_to_index)).to(device)

    # Batch size for each Dataset.map chunk.
    batch_size = 16

    # Optimizer over all model parameters.
    optimizer = optim.AdamW(model.parameters(), lr=3e-5)

    # Number of full passes over the training data.
    num_epoch = 700

    def start_train(data_inputs, data_labels, tokenizer):
        """Run a single optimization step on one batch (invoked via Dataset.map)."""
        # Pad the batch to a uniform length (sorted by length, longest first).
        pad_inputs, sorted_labels, sorted_length = pad_batch_inputs(data_inputs, data_labels, tokenizer)

        # The model's forward pass returns the training loss directly.
        loss = model(pad_inputs, sorted_labels, sorted_length)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate the epoch loss in the enclosing scope.
        nonlocal total_loss
        total_loss += loss.item()

    for epoch in range(num_epoch):
        # Running loss for this epoch, updated by start_train.
        total_loss = 0.0

        # ``map`` is used for its batching and progress bar only; start_train
        # updates the model as a side effect and returns nothing.
        # NOTE(review): Dataset.map caches results by function fingerprint and
        # may skip re-execution on later epochs — verify, or pass
        # load_from_cache_file=False if training silently stalls.
        train_data.map(start_train, input_columns=['data_inputs', 'data_labels'],
                       batched=True,
                       batch_size=batch_size,
                       fn_kwargs={'tokenizer': tokenizer},
                       desc='epoch: %d' % (epoch + 1))

        print('epoch: %d loss: %.3f' % (epoch + 1, total_loss))

        # Save a checkpoint after every epoch.
        # TODO: save only on metric improvement (or use early stopping) instead
        # of writing all num_epoch checkpoints.
        model.save_model('model/BiLSTM-CRF-%d.bin' % (epoch + 1))