import torch
from transformers import AutoTokenizer,AutoModelForSequenceClassification,DataCollatorWithPadding,TrainingArguments,Trainer
from datasets import load_dataset
from torch.utils.data import DataLoader
import evaluate
from torch.optim import Adam
import os

# Load the ChnSentiCorp hotel-review CSV as a single "train" split.
# Fix: use a raw string for the Windows path — the original "D:\datasets\..."
# relied on "\d"/"\C" not being recognized escapes, which raises SyntaxWarning
# on modern Python and silently corrupts paths containing "\n", "\t", etc.
dataset = load_dataset("csv", data_files=r"D:\datasets\ChnSentiCorp\ChnSentiCorp_htl_all.csv", split="train")
# Drop rows with a missing review; tokenizing None would fail downstream.
dataset = dataset.filter(lambda x: x["review"] is not None)
# 90/10 train/validation split (result keys: "train" and "test").
train_data = dataset.train_test_split(test_size=0.1)
tokenizer = AutoTokenizer.from_pretrained(r"D:\models\rbt3")

def process_fun(examples):
    """Tokenize a batch of reviews and carry the sentiment labels along.

    `examples` is a batched dataset slice with "review" (str) and "label"
    (int) columns; the returned encoding gains a "labels" key so the
    Trainer can compute the classification loss.
    """
    encoded = tokenizer(
        examples["review"],
        max_length=128,
        truncation=True,
    )
    encoded["labels"] = examples["label"]
    return encoded

# Tokenize both splits; drop the raw columns so only model inputs remain.
tokenizer_dataset = train_data.map(process_fun, batched=True, remove_columns=train_data["train"].column_names)
trainset, validset = tokenizer_dataset["train"], tokenizer_dataset["test"]
# NOTE(review): these two DataLoaders are never used below — the Trainer
# builds its own loaders from train_dataset/eval_dataset. Kept for manual
# inspection/debugging only.
trainloader = DataLoader(trainset, batch_size=200, shuffle=True, collate_fn=DataCollatorWithPadding(tokenizer))
# Fix: never shuffle the validation set — evaluation should be deterministic
# so inspected batches are reproducible run-to-run.
validloader = DataLoader(validset, batch_size=200, shuffle=False, collate_fn=DataCollatorWithPadding(tokenizer))

# Load rbt3 (3-layer Chinese RoBERTa) with a sequence-classification head.
model = AutoModelForSequenceClassification.from_pretrained(r"D:\models\rbt3")
if torch.cuda.is_available():
    model = model.cuda()
# NOTE(review): this optimizer is never handed to the Trainer below, so the
# Trainer silently creates its own AdamW instead — confirm intent; pass
# optimizers=(optimizer, None) to Trainer if this Adam was meant to be used.
optimizer = Adam(model.parameters(),lr=1e-5)

# Accuracy and F1 metrics loaded from local copies of the evaluate scripts.
acc_metric = evaluate.load(r"D:\Code\sshcode\HuggingFace\evaluate-main\metrics\accuracy")
f1_metric = evaluate.load(r"D:\Code\sshcode\HuggingFace\evaluate-main\metrics\f1")

import torch  # NOTE(review): duplicate of the top-of-file import — harmless but redundant
import numpy as np  # ensure numpy is imported (used for prediction post-processing)


def eva_metric(eval_predict):
    """Compute accuracy and binary F1 for a Trainer evaluation pass.

    Args:
        eval_predict: (logits, labels) pair of numpy arrays, as handed to
            ``compute_metrics`` by the Trainer.

    Returns:
        dict merging the "accuracy" and "f1" keys from the two metrics.
    """
    logits, labels = eval_predict
    # Class prediction = argmax over the last (class) axis. Working directly
    # in numpy removes the original numpy -> torch -> numpy round trip.
    predictions = np.argmax(logits, axis=-1)
    acc = acc_metric.compute(predictions=predictions, references=labels)
    # average="binary" because this is a 2-class sentiment task.
    f1 = f1_metric.compute(predictions=predictions, references=labels, average="binary")
    # compute() returns plain dicts; merge them into one metrics dict.
    return {**acc, **f1}

# SwanLab experiment-tracking configuration (read by the swanlab integration).
os.environ["SWANLAB_PROJECT"] = "test"
os.environ["SWANLAB_WORKSPACE"] = "XYLCURRY30"

train_args = TrainingArguments(
    output_dir=r"D:\Code\sshcode\HuggingFace\output",   # output directory
    num_train_epochs=3,                                 # number of training epochs
    per_device_train_batch_size=100,                    # training batch size per device
    per_device_eval_batch_size=50,                      # evaluation batch size per device
    save_total_limit=3,                                 # keep at most this many checkpoints
    save_strategy="epoch",                              # checkpoint once per epoch
    eval_strategy="epoch",                              # evaluate once per epoch
    metric_for_best_model="f1",                         # metric used to pick the best model
    logging_steps=100,                                  # log every 100 steps
    learning_rate=1e-5,                                 # learning rate
    weight_decay=0.01,                                  # weight decay
    load_best_model_at_end=True,                        # reload best checkpoint when training ends
    fp16=True                                           # mixed-precision training
)

# The Trainer wires model, data, collator and metric function together; it
# builds its own DataLoaders from train_dataset/eval_dataset.
trainer = Trainer(
     model = model,
     args = train_args,
     train_dataset = trainset,
     eval_dataset = validset,
     tokenizer = tokenizer,    # saved alongside checkpoints; newer transformers names this `processing_class`
     data_collator = DataCollatorWithPadding(tokenizer),    # dynamic per-batch padding
     compute_metrics = eva_metric
)

if __name__ == '__main__':
    # Fine-tune; checkpoints and the best model land in output_dir.
    trainer.train()
    # trainer.evaluate(validset)  # optional standalone model evaluation