import torch

from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments

# Load the pretrained model and tokenizer.
# Qwen checkpoints ship custom modeling/tokenization code on the Hub, so
# trust_remote_code=True is required for AutoModel/AutoTokenizer to load them.
model_name = "qwen/Qwen-7B-Chat"
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Prepare the dataset.
# Assumes a 'dataset.jsonl' file containing the fine-tuning examples.
# NOTE(review): the raw JSON records must still be tokenized (e.g. via
# dataset.map with the tokenizer) before Trainer can consume them —
# confirm preprocessing happens upstream or add it here.
train_dataset = load_dataset('json', data_files='dataset.jsonl', split='train')

# Training hyper-parameters for the Hugging Face Trainer.
_trainer_config = {
    "output_dir": "./results",            # where checkpoints/outputs are written
    "num_train_epochs": 3,
    "per_device_train_batch_size": 4,
    "per_device_eval_batch_size": 4,
    "warmup_steps": 500,                  # linear learning-rate warmup
    "weight_decay": 0.01,
    "logging_dir": './logs',              # TensorBoard log directory
    "logging_steps": 10,                  # log every 10 optimizer steps
}
training_args = TrainingArguments(**_trainer_config)

# Metric function handed to the Trainer.
def compute_metrics(pred):
    """Compute token-level accuracy for a Trainer ``EvalPrediction``.

    Args:
        pred: object exposing ``label_ids`` (int array of gold token ids)
            and ``predictions`` (logits whose last axis is the vocabulary).

    Returns:
        dict with a single ``'accuracy'`` float in [0, 1].
    """
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # -100 is the Hugging Face "ignore" label id (padding / prompt tokens,
    # matching CrossEntropyLoss ignore_index); exclude those positions so
    # they don't count as errors. Guard against an all-ignored batch.
    mask = labels != -100
    # NOTE(review): for causal LMs, logits at position i predict token i+1 —
    # confirm the labels are shifted accordingly upstream.
    acc = float((preds[mask] == labels[mask]).mean()) if mask.any() else 0.0
    return {
        'accuracy': acc,
    }

# Assemble the Trainer. Passing the tokenizer lets the Trainer save it
# alongside each checkpoint and handle padding when a collator needs it.
# NOTE(review): no data_collator is supplied — causal-LM fine-tuning
# normally needs DataCollatorForLanguageModeling(mlm=False) or similar so
# batches are padded and labels are produced; confirm upstream preprocessing.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)

# Run fine-tuning.
trainer.train()

# Save the fine-tuned model AND the tokenizer: the tokenizer files must
# ship alongside the weights, otherwise the saved directory cannot be
# reloaded with from_pretrained for inference.
output_path = './my_finetuned_qwen'
model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)