from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import TrainingArguments, Trainer
import numpy as np
from sklearn.metrics import accuracy_score
from evaluate import load

# Local filesystem path to a pre-downloaded chinese-roberta-wwm-ext checkpoint
# (cached via ModelScope). NOTE(review): machine-specific absolute path — will
# break on other hosts; consider making it configurable.
model_name = '/home/mbk/.cache/modelscope/hub/models/dienstag/chinese-roberta-wwm-ext'


tokenizer = AutoTokenizer.from_pretrained(model_name)
# Number of sentiment classes. Passing num_labels attaches a fresh, randomly
# initialized classification head on top of the pretrained encoder.
num_labels = 5
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=num_labels)

# Trainer hyper-parameters.
# NOTE(review): the `eval_strategy` keyword requires a recent transformers
# version (it was called `evaluation_strategy` in older releases) — confirm
# the pinned version matches.
training_args = TrainingArguments(
    report_to="none",                # disable external experiment trackers (W&B, TensorBoard, ...)
    output_dir="./results_sentiment",          # output directory for checkpoints and the final model
    num_train_epochs=5,              # total number of training epochs
    per_device_train_batch_size=16,  # training batch size per device
    per_device_eval_batch_size=64,   # evaluation batch size per device
    warmup_steps=500,                # warm-up steps: LR ramps from 0 up to the configured value
    weight_decay=0.01,               # weight decay (L2 regularization)
    logging_dir="./logs_sentiment",            # directory for training logs
    logging_steps=10,                # log metrics every N steps
    eval_strategy="epoch",     # run evaluation at the end of every epoch
    save_strategy="epoch",           # checkpoint at the end of every epoch (must match eval_strategy for load_best_model_at_end)
    load_best_model_at_end=True,     # reload the best checkpoint once training finishes
    metric_for_best_model="accuracy", # must match the key returned by compute_metrics
)

def compute_metrics(eval_pred):
    """Compute classification accuracy for the Trainer's evaluation loop.

    Args:
        eval_pred: A ``(predictions, labels)`` pair; ``transformers.EvalPrediction``
            supports tuple unpacking. ``predictions`` are the raw model logits of
            shape (num_samples, num_labels) — or a tuple of arrays with the logits
            first, when the model returns extra outputs. ``labels`` are the
            integer class ids.

    Returns:
        dict: ``{"accuracy": float}`` — the key must match the
        ``metric_for_best_model="accuracy"`` setting in TrainingArguments.
    """
    logits, labels = eval_pred
    # Trainer may hand over a tuple of model outputs; the logits come first.
    if isinstance(logits, tuple):
        logits = logits[0]
    predictions = np.argmax(logits, axis=-1)
    # For hard class predictions, the mean of exact matches equals
    # sklearn.metrics.accuracy_score — no need to pull in scikit-learn
    # for a one-line computation.
    return {"accuracy": float(np.mean(predictions == labels))}


# Project-local preprocessing module; expected to expose already-tokenized
# train/eval datasets. NOTE(review): presumably tokenized with this same
# checkpoint's tokenizer — verify proc_dataset uses the identical model path.
import proc_dataset

tokenized_train_dataset = proc_dataset.tokenized_train_dataset
tokenized_eval_dataset = proc_dataset.tokenized_eval_dataset

trainer = Trainer(
    model=model,                         # the model to fine-tune
    args=training_args,                  # training hyper-parameters defined above
    train_dataset=tokenized_train_dataset, # training split
    eval_dataset=tokenized_eval_dataset,   # evaluation split
    compute_metrics=compute_metrics,     # metric function run at each evaluation
    tokenizer=tokenizer,                 # used for padding/data collation.
                                         # NOTE(review): `tokenizer=` is deprecated in favor of
                                         # `processing_class=` in recent transformers — confirm version.
)

# Kick off fine-tuning; with load_best_model_at_end=True the trainer holds the
# best checkpoint (by eval accuracy) when this returns.
print("开始微调模型...")
trainer.train()

print("模型微调完成！")

# Persist the fine-tuned weights/config plus the tokenizer files together, so
# the directory can later be reloaded with from_pretrained(save_directory).
save_directory = "./fine_tuned_roberta_sentiment_chinese"
trainer.save_model(save_directory)
tokenizer.save_pretrained(save_directory)
print(f"微调后的模型已保存到: {save_directory}")