import evaluate
import numpy as np
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, TaskType
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments

# 1. Load the IMDb sentiment-classification dataset (binary labels: 0 = neg, 1 = pos).
dataset = load_dataset("imdb")

# 2. Load the tokenizer matching the base checkpoint used below.
model_name = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# 3. Data preprocessing
def preprocess_function(examples):
    """Tokenize a batch of raw reviews to fixed-length encodings.

    Every example is truncated/padded to exactly 256 tokens so batches
    are rectangular.

    Args:
        examples: batch dict from ``datasets.map(batched=True)``; only the
            ``"text"`` field is read.

    Returns:
        The tokenizer's encoding dict (``input_ids``, ``attention_mask``, ...).
    """
    encoded = tokenizer(
        examples["text"],
        truncation=True,
        padding="max_length",
        max_length=256,
    )
    return encoded

# Tokenize small random subsets (2000 train / 400 eval) so the demo runs
# quickly; remove `.select(...)` to use the full 25k/25k IMDb splits.
# (Previously the full-split variants lived here as commented-out code;
# deleted as dead duplication of the live lines below.)
train_dataset = dataset["train"].shuffle(seed=42).select(range(2000)).map(preprocess_function, batched=True)
eval_dataset = dataset["test"].shuffle(seed=42).select(range(400)).map(preprocess_function, batched=True)
# 4. Load the base model with a freshly initialized 2-way classification head.
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# 5. Apply LoRA fine-tuning: wrap the model so only low-rank adapter
# weights (plus the new head) are trained instead of all parameters.
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,  # sequence-classification task head
    inference_mode=False,  # training mode: adapter weights stay trainable
    r=8,  # rank of the low-rank update matrices
    lora_alpha=16,  # LoRA scaling factor (effective scale = alpha / r = 2)
    lora_dropout=0.1,  # dropout applied inside the LoRA layers
    target_modules=["q_lin", "v_lin"]  # DistilBERT attention query/value projections
)

lora_model = get_peft_model(model, peft_config)

# 6. Evaluation metric (accuracy)
accuracy_metric = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    """Compute accuracy from a Trainer-style ``(logits, labels)`` pair.

    Args:
        eval_pred: pair of (logits array, integer label array) as passed
            by ``Trainer.evaluate``.

    Returns:
        Dict with a single ``"accuracy"`` entry.
    """
    logits, labels = eval_pred
    predicted_classes = np.argmax(logits, axis=-1)
    return accuracy_metric.compute(
        predictions=predicted_classes,
        references=labels,
    )

# 7. Training hyper-parameters.
# NOTE(review): lr=2e-5 is a full-fine-tuning value; LoRA usually tolerates a
# larger lr (1e-4..3e-4) — worth sweeping before trusting final accuracy.
training_args = TrainingArguments(
    output_dir="./results",
    save_strategy="no",  # no intermediate checkpoints; final model is saved explicitly at the end
    learning_rate=2e-5,
    # NOTE(review): with 2000 samples and effective batch 8 there are only
    # ~2000 optimizer steps total, so 1000 warmup steps is half of training —
    # confirm this is intended.
    warmup_steps=1000,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    num_train_epochs=8,
    weight_decay=0.01,
    logging_steps=5,
    gradient_accumulation_steps=2,  # effective train batch size = 4 * 2 = 8
    fp16=False,
    # Fix: the original hard-coded bf16=True, which raises at startup on any
    # backend without bfloat16 support (CPU, pre-Ampere GPUs). Enable bf16
    # only when the current CUDA device actually supports it; otherwise fall
    # back to full fp32 training.
    bf16=torch.cuda.is_available() and torch.cuda.is_bf16_supported(),
)

# 8. Build the Trainer around the LoRA-wrapped model.
trainer = Trainer(
    model=lora_model,  # PEFT-wrapped model: only adapter + head params are trainable
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,  # NOTE(review): `tokenizer=` is deprecated in recent transformers (use `processing_class=`) — confirm installed version
    compute_metrics=compute_metrics
)

# 9. Train, then run a final evaluation on the held-out subset.
trainer.train()
eval_results = trainer.evaluate()
print("最终评估结果：", eval_results)

# 10. Save the final model.
# NOTE(review): with a PEFT-wrapped model this saves the adapter weights and
# config (not the full base model) — confirm that is the intended artifact.
trainer.save_model("./lora_finetuned_model")
