import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, \
    DataCollatorForLanguageModeling
from peft import LoraConfig, get_peft_model
import json
import os

# 1. Load the SFT dataset and hold out 10% as the eval split.
# NOTE(review): "data" is passed as the dataset path/builder argument —
# presumably the JSONL lives under ./data; confirm, otherwise the explicit
# form would be load_dataset("json", data_files="data/DISC-Law-SFT-Pair.jsonl").
dataset = load_dataset("data", data_files="DISC-Law-SFT-Pair.jsonl", split="train")
dataset = dataset.train_test_split(test_size=0.1)

# 2. Load model and tokenizer from a local checkpoint directory.
model_name = r"./Qwen3-0.6B"
# Right-side padding: for causal-LM training the prompt tokens stay left-aligned.
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='right')
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",            # let accelerate place layers on available devices
    torch_dtype=torch.bfloat16,   # bf16 weights to halve memory vs fp32
    # attn_implementation="flash_attention_2"  # enable only if flash-attn is installed
)
# The tokenizer ships without a dedicated pad token; reuse eos for padding.
# NOTE(review): pad_id == eos_id means the LM collator will mask eos-as-pad
# positions to -100 — the final "real" eos of each sample is affected too; verify
# this is acceptable for your training objective.
tokenizer.pad_token = tokenizer.eos_token


# 3. Data formatting — render each record as a ChatML conversation string.
def format_data(example):
    """Render one SFT record into a single ChatML training string.

    Expects *example* to carry ``input`` (case description) and ``output``
    (judgment text); returns a dict with the formatted ``text`` field so
    ``datasets.map`` adds it as a new column.
    """
    system_turn = (
        "<|im_start|>system\n"
        "你是一名专业的法律AI助手，请根据提供的法律参考分析案件并预测判决结果。<|im_end|>"
    )
    user_turn = (
        "<|im_start|>user\n"
        f"案件描述：\n{example['input']}\n"
        "请分析此案件并预测可能的判决结果。<|im_end|>"
    )
    assistant_turn = f"<|im_start|>assistant\n{example['output']}<|im_end|>"
    return {"text": "\n".join((system_turn, user_turn, assistant_turn))}


# Apply the chat-template formatting to every example (adds a "text" column).
dataset = dataset.map(format_data)


# 4. Tokenization — truncation only; padding and label creation are left to
# the data collator, which pads each batch dynamically and masks pad
# positions in the labels with -100.
def tokenize_func(examples):
    """Tokenize a batch of formatted chat strings.

    Args:
        examples: batch dict from ``datasets.map(batched=True)`` with a
            ``"text"`` column of ChatML-formatted strings.

    Returns:
        dict with ``input_ids`` and ``attention_mask`` as Python lists
        (``datasets`` stores them in Arrow; tensors are built by the collator).

    Labels are intentionally NOT created here:
    ``DataCollatorForLanguageModeling(mlm=False)`` clones ``input_ids`` into
    labels and masks pad tokens to -100 at batch time. The previous manual
    ``labels = input_ids.clone()`` left pad positions unmasked (loss computed
    on padding) and was overwritten by the collator anyway; static
    ``padding="max_length"`` also inflated every example to 1024 tokens where
    dynamic per-batch padding suffices.
    """
    return tokenizer(
        examples["text"],
        truncation=True,
        max_length=1024,
    )


# Tokenize the whole dataset; drop raw columns so only model inputs remain.
# NOTE(review): assumes the source JSONL rows have exactly id/input/output
# fields — datasets.map raises if a listed remove_column is absent; verify
# against the actual file schema.
tokenized_ds = dataset.map(
    tokenize_func,
    batched=True,
    remove_columns=["text", "input", "output", "id"],
    batch_size=8  # smaller map batches keep peak memory down
)

# 5. Configure LoRA: low-rank adapters on the attention projections only.
peft_config = LoraConfig(
    r=4,                # adapter rank — small to minimize trainable params
    lora_alpha=32,      # scaling factor (effective scale alpha/r = 8)
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
    bias="none"         # bias terms stay frozen
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # sanity check: only adapter params trainable

# 6. Training configuration.
training_args = TrainingArguments(
    output_dir="./qwen_finetuned",
    per_device_train_batch_size=2,  # small per-device batch to fit memory
    per_device_eval_batch_size=2,
    num_train_epochs=3,
    learning_rate=1e-4,  # slightly higher LR, typical for LoRA fine-tuning
    logging_dir="./logs",
    logging_steps=10,
    eval_strategy="epoch",  # evaluate once per epoch
    save_strategy="epoch",  # must match eval_strategy for load_best_model_at_end
    bf16=True,  # bfloat16 mixed precision (matches the model's torch_dtype)
    gradient_accumulation_steps=8,  # effective batch = 2 * 8 per device
    report_to="none",  # disable wandb/tensorboard reporting
    save_total_limit=2,  # keep only the two most recent checkpoints
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
    greater_is_better=False  # lower eval_loss is better
)

# 7. Built-in collator: pads each batch and, with mlm=False, builds causal-LM
# labels from input_ids with pad positions masked to -100.
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False  # causal language modeling, not masked LM
)

# 8. Create the Trainer wiring model, data, and collator together.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_ds["train"],
    eval_dataset=tokenized_ds["test"],
    data_collator=data_collator,
    # label_names=["labels"]  # uncomment to pin the label column name explicitly
)

# 9. Run training (resumes nothing; starts from the loaded checkpoint).
trainer.train()

# 10. Persist weights and tokenizer.
# NOTE(review): model is a PEFT wrapper, so this presumably saves only the
# LoRA adapter — merge into the base model before standalone inference if a
# full checkpoint is needed.
model.save_pretrained("./final_model")
tokenizer.save_pretrained("./final_model")