# -*- coding: utf-8 -*-            
# @Time : 2025/5/9 14:59
# @Author: ZZC
# @proj: BERT_NO1


# 基于Bert-base-chinese训练多分类文本模型
# 代码来自https://blog.csdn.net/m0_58700887/article/details/141865364

import pandas as pd
import numpy as np
import joblib
import torch
import time

from transformers import BertTokenizer, BertForSequenceClassification
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, random_split
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

# Load the dataset (switch encoding='gbk' if the CSV was saved that way)
data = pd.read_csv('../data/ChnSentiCorp_htl_all.csv', encoding='utf-8')

# Directory where the best-performing model checkpoint is saved
best_model_path = '../model_save'

X = data['review']  # feature column: raw review text
y = data['label'].values  # label column

# Encode string labels into integer class ids
print("1、开始编码转换啦~")
# First run: fit a fresh encoder and persist it for later runs:
#     label_encoder = LabelEncoder()
#     y_encoded = label_encoder.fit_transform(y)
#     joblib.dump(label_encoder, '../data/encoder.joblib')
# Subsequent runs (this path): reuse the persisted encoder so class ids stay stable.
label_encoder = joblib.load('../data/encoder.joblib')
# BUG FIX: a loaded encoder must only transform() — the original called
# fit_transform(), which re-fits the encoder on the current data; that can
# silently reassign class ids and defeats the purpose of loading the saved one.
y_encoded = label_encoder.transform(y)
print(f'分类数：{len(label_encoder.classes_)} \n')  # number of label classes

# Split the dataset 9:1 into train/validation; the fixed random_state=42
# makes the split reproducible across runs
X_train, X_val, y_train, y_val = train_test_split(X, y_encoded, test_size=0.1,
                                                  random_state=42)

# Load the BERT tokenizer from the local checkpoint, and copy it alongside the
# best model so the saved directory is self-contained at inference time
local_model_path = '../bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(local_model_path)
tokenizer.save_pretrained(best_model_path)


# BERT预处理 -- 将文本数据转换成BERT模型能够理解的格式
def preprocess_for_bert(data, labels):
    """Turn raw sentences into BERT-ready tensors.

    Each sentence is tokenized, padded/truncated to 256 tokens, and paired
    with an attention mask.  NaN entries (missing reviews) are replaced by
    the placeholder text '空' so tokenization never fails.

    Args:
        data: iterable of sentences (may contain NaN values).
        labels: array-like of integer class ids, one per sentence.

    Returns:
        (input_ids, attention_masks, labels) as three torch tensors.
    """
    all_ids = []
    all_masks = []

    for sentence in data:
        # Guard against missing text in the source CSV
        if pd.isna(sentence):
            print("Found NaN, skipping...")
            sentence = '空'

        # encode_plus adds [CLS]/[SEP], pads to max_length with zeros, and
        # returns a mask distinguishing real tokens from padding
        encoded = tokenizer.encode_plus(
            text=sentence,
            add_special_tokens=True,
            max_length=256,
            padding='max_length',
            return_attention_mask=True,
            truncation=True,
        )
        all_ids.append(encoded.get('input_ids'))
        all_masks.append(encoded.get('attention_mask'))

    # Convert everything to PyTorch tensors for the DataLoader
    return (torch.tensor(all_ids),
            torch.tensor(all_masks),
            torch.tensor(labels))


# Preprocess both splits into BERT input tensors
print("2、开始预处理数据啦~")
train_inputs, train_masks, train_labels = preprocess_for_bert(X_train, y_train)
val_inputs, val_masks, val_labels = preprocess_for_bert(X_val, y_val)

# Build DataLoaders: random order for training, fixed order for validation
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=16)

validation_data = TensorDataset(val_inputs, val_masks, val_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=16)

# Load the BERT classification model; num_labels resizes the classifier head,
# and ignore_mismatched_sizes lets the (freshly sized) head load despite not
# matching the pretrained checkpoint
print("3、开始预加载模型啦~")
model = BertForSequenceClassification.from_pretrained(local_model_path, num_labels=len(label_encoder.classes_),
                                                      ignore_mismatched_sizes=True)
model.cuda()  # uses the first visible GPU; NOTE(review): crashes on CPU-only machines — confirm a GPU is always available

# Optimizer and linear-decay LR scheduler
EPOCHS = 5  # number of training epochs; tune as needed
optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)  # AdamW with the usual BERT fine-tuning LR
total_steps = len(train_dataloader) * EPOCHS  # one scheduler step per batch
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)


# Accuracy helper: fraction of rows whose argmax prediction matches the label
def flat_accuracy(preds, labels):
    """Return the share of correct predictions.

    Args:
        preds: 2-D array of per-class scores/logits, shape (batch, classes).
        labels: 1-D array of true integer class ids.

    Returns:
        float in [0, 1] — matching predictions divided by batch size.
    """
    predicted = preds.argmax(axis=1).flatten()
    actual = labels.flatten()
    return np.sum(predicted == actual) / len(actual)


# Training and evaluation loop: train one epoch, evaluate, keep the best checkpoint
print("4、开始训练啦~")
best_val_accuracy = 0
for epoch in range(EPOCHS):
    print(f'Epoch {epoch + 1}')
    now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())  # timestamp the start of the epoch
    print("start time:", now_time)

    model.train()  # training mode (enables dropout etc.)
    total_train_loss = 0  # accumulated training loss for this epoch

    for step, batch in enumerate(train_dataloader):
        b_input_ids = batch[0].cuda()
        b_input_mask = batch[1].cuda()
        b_labels = batch[2].cuda().long()

        model.zero_grad()  # clear gradients from the previous step

        outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask,
                        labels=b_labels)  # forward pass; passing labels makes the model return a loss
        loss = outputs.loss  # cross-entropy loss used for backprop
        total_train_loss += loss.item()
        loss.backward()  # backward pass: compute gradients
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # clip gradient norm to stabilise training
        optimizer.step()
        scheduler.step()  # advance the linear LR schedule one step per batch

    avg_train_loss = total_train_loss / len(train_dataloader)  # mean training loss per batch (original "update LR" comment here was misplaced)

    torch.cuda.empty_cache()  # free cached GPU memory after each training epoch
    # Evaluation: compute validation loss and accuracy
    model.eval()
    total_eval_accuracy = 0
    total_eval_loss = 0

    for batch in validation_dataloader:
        b_input_ids = batch[0].cuda()
        b_input_mask = batch[1].cuda()
        b_labels = batch[2].cuda().long()

        with torch.no_grad():  # no gradients needed during evaluation
            outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)

        loss = outputs.loss
        total_eval_loss += loss.item()
        logits = outputs.logits
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        total_eval_accuracy += flat_accuracy(logits, label_ids)

    avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)  # mean of per-batch accuracies (slightly biased if the last batch is smaller)
    avg_val_loss = total_eval_loss / len(validation_dataloader)

    torch.cuda.empty_cache()  # free cached GPU memory after validation

    print(f'Training loss: {avg_train_loss}')
    print(f'Validation loss: {avg_val_loss}')
    print(f'Validation Accuracy: {avg_val_accuracy}')  # primary metric; ~90%+ is typically usable in production

    # Keep only the checkpoint with the best validation accuracy seen so far
    if avg_val_accuracy > best_val_accuracy:
        best_val_accuracy = avg_val_accuracy
        # save_pretrained overwrites best_model_path with the current weights
        model.save_pretrained(best_model_path)

    now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    print("end time:", now_time)
    print("-------------------")

# NOTE(review): this guard is vacuous — every statement above already runs at
# module import time, so nothing is protected by it.
if __name__ == '__main__':
    pass
