import torch
from tqdm import tqdm

from fine_tuning import BertClass
from bert_config_movie import device, LEARNING_RATE, model_dir
from fine_tuning_data_formatting import training_loader, val_loader

# Instantiate the BertClass model and move it to the target compute device (GPU or CPU)
model = BertClass()
model.to(device)
# Cross-entropy loss for multi-class classification
loss_function = torch.nn.CrossEntropyLoss()
# Adam optimizer over all model parameters, with the configured learning rate
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)


# Helper: count matching predictions
def calcuate_accuracy(preds, targets):
    """Count how many predictions match their targets.

    Args:
        preds: tensor of predicted class indices.
        targets: tensor of ground-truth class indices, same shape as preds.

    Returns:
        Number of positions where the two tensors agree, as a plain int.
    """
    matches = torch.eq(preds, targets)
    return int(matches.sum())


# Model training function
def train(epoch, training_loader):
    """Run one full training epoch over `training_loader`.

    Uses the module-level `model`, `loss_function` and `optimizer`.
    Logs running loss/accuracy every 100 steps and prints epoch totals.

    Args:
        epoch: epoch index, used only for logging.
        training_loader: DataLoader yielding dicts with 'ids', 'mask',
            'token_type_ids' and 'targets' tensors.
    """
    tr_loss = 0          # cumulative training loss
    n_correct = 0        # cumulative number of correct predictions
    nb_tr_steps = 0      # number of batches processed so far
    nb_tr_examples = 0   # number of samples processed so far
    # Enable training-only behavior (dropout, batch-norm updates, etc.)
    model.train()
    for step, data in tqdm(enumerate(training_loader)):
        # Move the batch tensors to the target compute device (GPU or CPU)
        ids = data['ids'].to(device, dtype=torch.long)
        mask = data['mask'].to(device, dtype=torch.long)
        token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.long)
        # Forward pass through BERT; outputs are per-class logits
        outputs = model(ids, mask, token_type_ids)
        # Loss between logits and ground-truth labels
        loss = loss_function(outputs, targets)
        tr_loss += loss.item()
        # Predicted class = argmax over logits (max values themselves unused)
        _, big_idx = torch.max(outputs.data, dim=1)
        n_correct += calcuate_accuracy(big_idx, targets)
        nb_tr_steps += 1
        nb_tr_examples += targets.size(0)

        if step % 100 == 0:
            # Running averages since the start of the epoch
            loss_step = tr_loss / nb_tr_steps
            accu_step = (n_correct * 100) / nb_tr_examples
            # Fixed: messages previously said "per 500 steps" although
            # logging actually happens every 100 steps.
            print(f"Training Loss per 100 steps: {loss_step}")
            print(f"Training Accuracy per 100 steps: {accu_step}")
        # Standard optimization step: clear old grads, backprop, update params
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Epoch-level summary
    print(f'The Total Accuracy for Epoch {epoch}: {(n_correct * 100) / nb_tr_examples}')
    epoch_loss = tr_loss / nb_tr_steps
    epoch_accu = (n_correct * 100) / nb_tr_examples
    print(f"Training Loss Epoch: {epoch_loss}")
    print(f"Training Accuracy Epoch: {epoch_accu}")


# Number of full passes over the training data
EPOCHS = 2
for epoch in range(EPOCHS):
    train(epoch, training_loader)


# Model validation function
def valid(model, testing_loader):
    """Evaluate `model` on `testing_loader` and return its accuracy.

    Uses the module-level `loss_function` and `device`. Logs running
    loss/accuracy every 5000 steps and prints epoch totals at the end.

    Args:
        model: the classifier to evaluate.
        testing_loader: DataLoader yielding dicts with 'ids', 'mask',
            'token_type_ids' and 'targets' tensors.

    Returns:
        Validation accuracy over the whole loader, in percent.
    """
    # Disable training-only behavior (dropout, batch-norm updates, etc.)
    model.eval()
    n_correct = 0        # cumulative number of correct predictions
    tr_loss = 0          # cumulative validation loss
    nb_tr_steps = 0      # number of batches processed so far
    nb_tr_examples = 0   # number of samples processed so far
    # No gradients needed for evaluation: saves memory and compute
    with torch.no_grad():
        for step, data in tqdm(enumerate(testing_loader)):
            # Move the batch tensors to the target compute device (GPU or CPU)
            ids = data['ids'].to(device, dtype=torch.long)
            mask = data['mask'].to(device, dtype=torch.long)
            token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
            targets = data['targets'].to(device, dtype=torch.long)
            # Forward pass through BERT; outputs are per-class logits
            outputs = model(ids, mask, token_type_ids)
            # Loss between logits and ground-truth labels
            loss = loss_function(outputs, targets)
            tr_loss += loss.item()
            # Predicted class = argmax over logits (max values themselves unused)
            _, big_idx = torch.max(outputs.data, dim=1)
            n_correct += calcuate_accuracy(big_idx, targets)

            nb_tr_steps += 1
            nb_tr_examples += targets.size(0)

            if step % 5000 == 0:
                # Running averages since the start of evaluation
                loss_step = tr_loss / nb_tr_steps
                accu_step = (n_correct * 100) / nb_tr_examples
                # Fixed: messages previously said "per 100 steps" although
                # logging actually happens every 5000 steps.
                print(f"Validation Loss per 5000 steps: {loss_step}")
                print(f"Validation Accuracy per 5000 steps: {accu_step}")
    # Whole-dataset summary
    epoch_loss = tr_loss / nb_tr_steps
    epoch_accu = (n_correct * 100) / nb_tr_examples

    print(f"Validation Loss Epoch: {epoch_loss}")
    print(f"Validation Accuracy Epoch: {epoch_accu}")
    # Return the overall validation accuracy (percent)
    return epoch_accu


# Evaluate the model's accuracy on the validation dataset
acc = valid(model, val_loader)
print("Accuracy on validation data = %0.2f%%" % acc)
# Persist the trained model to disk (expected to be a .pth path).
# NOTE(review): torch.save(model, ...) pickles the entire module object (class
# definition + weights), which ties the checkpoint to this exact code layout;
# saving model.state_dict() is the more portable convention — confirm how the
# checkpoint is loaded elsewhere before changing.
torch.save(model, model_dir)
