"""
系统环境
print(torch.cuda.is_available())
print(torch.version.cuda) # cuda version
print(torch.__version__) # torch version
True
11.6
1.13.0+cu116
"""
import pandas as pd
# BertForSequenceClassification bundles BERT with the final classification head
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
import torch
from torch.utils.data import  Dataset,random_split
from sklearn.metrics import roc_auc_score

# Load the training data.
train_file = "D:/code/datasets/train_v2_drcat_02/train_v2_drcat_02.csv"
# Keep only the two columns the model needs.
df = pd.read_csv(train_file, usecols=['text', 'label'])

# Text preprocessing helper.
# NOTE(review): never called in this script — TextDataset tokenizes on the fly
# (and with max_len=128, not 512); confirm which length is intended.
def preprocess_function(examples):
    """Tokenize the batch's 'text' field, padded/truncated to 512 tokens.

    Relies on the module-level ``tokenizer`` being initialized first.
    """
    encoded = tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=512,
    )
    return encoded

# Initialize the tokenizer and model from a local BERT checkpoint.
# BertForSequenceClassification adds a (2-class by default) classification head.
print("开始初始化bert模型")

tokenizer = BertTokenizer.from_pretrained('D:/code/models/bert')
model = BertForSequenceClassification.from_pretrained('D:/code/models/bert')

# 定义数据集类
class TextDataset(Dataset):
    """Torch ``Dataset`` over a DataFrame with 'text' and 'label' columns.

    Each item is tokenized lazily in ``__getitem__`` and returned as a dict of
    flattened tensors suitable for the HF ``Trainer`` default collator (the raw
    'text' string is kept for inspection; the collator skips string fields).
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.dataframe = dataframe
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.texts = self.dataframe.text
        self.targets = self.dataframe.label

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, item):
        # Use positional (.iloc) access: the original label-based
        # ``self.texts[item]`` raises KeyError whenever the DataFrame index is
        # not a clean 0..n-1 range (e.g. after filtering rows).
        text = str(self.texts.iloc[item])
        target = self.targets.iloc[item]
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        return {
            'text': text,
            # flatten(): encode_plus with return_tensors='pt' yields (1, max_len).
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': torch.tensor(target, dtype=torch.long)
        }

# Build the torch Dataset.
# NOTE(review): max_len=128 here, while the unused preprocess_function uses
# 512 — confirm which sequence length is intended.
dataset = TextDataset(df, tokenizer, max_len=128)
# Split ratios. With little data an 8:1:1 train/test/val split is typical,
# but this script only uses train/validation.
train_ratio = 0.8  # fraction of data used for training
val_ratio = 0.2    # fraction used for validation (fixed typo: was "val_ration")
# Derive sizes so the two splits always sum to len(dataset) exactly.
train_data_size = int(len(dataset) * train_ratio)
val_data_size = len(dataset) - train_data_size
train_dataset, val_dataset = random_split(
    dataset=dataset,
    lengths=[train_data_size, val_data_size]
)
# Training hyper-parameters for the HF Trainer.
training_args = TrainingArguments(
    output_dir='E:/code/kaggle/results',  # checkpoint/output directory
    num_train_epochs=1,  # number of training epochs
    per_device_train_batch_size=32,  # training batch size per device
    per_device_eval_batch_size=64,  # evaluation batch size per device
    warmup_steps=500,  # learning-rate warmup steps
    weight_decay=0.01,  # weight decay
    logging_dir='E:/code/kaggle/logs',  # log directory
    logging_steps=100,  # log every 100 steps
    # Evaluate once per epoch. NOTE(review): older transformers versions name
    # this parameter `evaluation_strategy` — confirm against the pinned version.
    eval_strategy="epoch"
)
# Build the Trainer. No compute_metrics is supplied, so evaluation reports
# loss only; AUC would require a compute_metrics callback (see commented code below in file).
trainer = Trainer(
    model=model,  # model to fine-tune
    args=training_args,  # training arguments
    train_dataset=train_dataset,  # training split
    eval_dataset=val_dataset  # validation split
)

# Run fine-tuning, then predict on the validation split.
trainer.train()
predictions = trainer.predict(val_dataset)
print(predictions.label_ids)

# Save the fine-tuned model after training.
# NOTE(review): the directory name contains a typo ("classsfication") —
# confirm nothing else depends on this exact path before fixing it.
trainer.save_model('D:/code/models/bert_classsfication')
#
# # Predict on the validation set
# predictions = trainer.predict(val_dataset)
# print(predictions,"11111111")
# predictions = torch.nn.functional.softmax(torch.stack(predictions.predictions), dim=1)[:, 1].detach().cpu().numpy()
# print(predictions,"22222222")
# Compute AUC
# auc = roc_auc_score(val_df['label'], predictions)
# print(f'AUC: {auc}')

# # Save prediction results
# val_df['predicted'] = predictions
# val_df[['text', 'label', 'predicted']].to_csv('predictions.csv', index=False)
