from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding
)
from datasets import load_from_disk
from peft import LoraConfig, get_peft_model
import torch
from sklearn.metrics import accuracy_score, f1_score
import os

from transformers.trainer_utils import SaveStrategy

# 1. Load the ChnSentiCorp sentiment dataset from local disk.
#    NOTE(review): hard-coded Windows path; the script later indexes
#    "train"/"test" splits — confirm they exist in this saved dataset.
dataset = load_from_disk(r"D:\pythonWork\python_demo\dataset\ChnSentiCorp")

# 2. Initialize tokenizer and a BERT model with a 2-class
#    sequence-classification head (binary sentiment).
model_name = "bert-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# 3. Data preprocessing
def tokenize_function(examples):
    """Tokenize a batch of raw texts for BERT.

    Truncates to 128 tokens but deliberately does NOT pad here: the Trainer
    below uses DataCollatorWithPadding, which pads each batch dynamically to
    its longest member. Padding to max_length at tokenize time would make the
    collator a no-op and waste compute on mostly-padding sequences.
    """
    return tokenizer(examples["text"], truncation=True, max_length=128)


# Apply tokenization over all splits; drop the raw "text" column so the
# collator only sees model inputs (input_ids, attention_mask, labels, ...).
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=["text"])

# 4. Attach LoRA adapters to the BERT attention projections.
lora_config = LoraConfig(
    r=8,                                # rank of the low-rank update matrices
    lora_alpha=32,                      # scaling factor for the LoRA updates
    target_modules=["query", "value"],  # inject into attention Q/V projections
    lora_dropout=0.1,
    task_type="SEQ_CLS",                # sequence classification task head
)
model = get_peft_model(model, lora_config)

# 5. Training hyper-parameters (kept compatible with older transformers).
#    evaluation_strategy and save_strategy must match so that
#    load_best_model_at_end can pair every checkpoint with an eval result.
training_args = TrainingArguments(
    output_dir="./output",
    learning_rate=3e-4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    num_train_epochs=5,

    # Evaluate and checkpoint every 500 optimizer steps. Plain strings are
    # accepted by all transformers versions; the SaveStrategy enum imported
    # above exists only in recent releases, which defeats the old-version
    # compatibility this script aims for — so use "steps" for both.
    evaluation_strategy="steps",
    eval_steps=500,
    save_strategy="steps",
    save_steps=500,
    # (Alternatively set both strategies to "epoch".)

    load_best_model_at_end=True,
    metric_for_best_model="eval_f1",  # compute_metrics returns "f1" -> logged as "eval_f1"
    greater_is_better=True,

    logging_steps=100,
    fp16=True,  # NOTE(review): requires a CUDA GPU; disable on CPU-only machines
    remove_unused_columns=False,
)


# 6. Custom evaluation metrics
def compute_metrics(p):
    """Compute accuracy and binary F1 from an EvalPrediction.

    `p.predictions` holds the raw logits; an argmax over the last axis
    yields predicted class ids, which are scored against `p.label_ids`.
    """
    predicted_labels = p.predictions.argmax(-1)
    true_labels = p.label_ids
    return {
        "accuracy": accuracy_score(true_labels, predicted_labels),
        "f1": f1_score(true_labels, predicted_labels, average="binary"),
    }


# 7. Assemble the Trainer: model, data, metrics, and a dynamic-padding collator.
data_collator = DataCollatorWithPadding(tokenizer)
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    compute_metrics=compute_metrics,
)

# 8. Fine-tune.
trainer.train()

# 9. Save only the trainable parameters (LoRA adapters + classification head).
#    Detach and move each tensor to CPU so the checkpoint carries no autograd
#    state and loads on machines without a GPU — the original saved live
#    Parameter objects straight off the (possibly CUDA) model.
#    (`model.save_pretrained(...)` is the PEFT-native alternative, producing
#    an adapter directory instead of a single .pt file.)
lora_weights = {
    name: param.detach().cpu()
    for name, param in model.named_parameters()
    if param.requires_grad
}
torch.save(lora_weights, "lora_weights.pt")