import json
import torch
import os
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from peft import LoraConfig, get_peft_model, TaskType
from datasets import Dataset

# Pick the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Model path and output directory (local filesystem paths on the training box).
# model_name = "/home/liuzhongzhong/data/models/Qwen/Qwen3-0___6B"
# output_dir = "/home/liuzhongzhong/data/save_modles"

model_name = "/root/autodl-tmp/modles/models/Qwen/Qwen3-0___6B"
output_dir = "/root/autodl-tmp/save_modles"

# Make sure the save directory exists before training writes checkpoints.
os.makedirs(output_dir, exist_ok=True)

# Load tokenizer and base model.
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # no dedicated pad token; reuse EOS for padding
# NOTE(review): with pad == eos, the LM data collator cannot distinguish real
# EOS tokens from padding when masking labels — confirm this is acceptable.

# Load weights in bfloat16 and place the entire model on one explicit device
# (device_map with the empty-string key maps the whole model).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map={"": device.index if device.type == "cuda" else "cpu"}  # explicit single-device placement
)

# Load the raw training data: a JSON file expected to contain a list of
# conversations, each a list of {"role": ..., "content": ...} turns.
with open("/root/autodl-tmp/data/test2.json", "r", encoding="utf-8") as f:
    data = json.load(f)

# 将对话数据转换为训练文本格式
def format_conversation(conversation):
    """Render one conversation as ChatML-style training text.

    Args:
        conversation: list of turn dicts, each with "role" and "content" keys.

    Returns:
        A single string with every turn wrapped as
        ``<|im_start|>{role}\\n{content}<|im_end|>\\n``.
    """
    # Use each turn's actual role tag. The original collapsed every
    # non-"user" role to "assistant", which mislabels e.g. "system" turns;
    # for user/assistant-only data the output is identical.
    return "".join(
        f"<|im_start|>{turn['role']}\n{turn['content']}<|im_end|>\n"
        for turn in conversation
    )

# Flatten every conversation into one training string, then wrap the
# strings in a HuggingFace Dataset with a single "text" column.
training_texts = [format_conversation(conversation) for conversation in data]
dataset = Dataset.from_dict({"text": training_texts})

# 对数据集进行tokenize
def tokenize_function(examples):
    """Tokenize a batch of formatted conversation strings for causal LM training.

    Args:
        examples: batch dict from ``Dataset.map(batched=True)`` with a
            "text" column of strings.

    Returns:
        Dict with "input_ids" and "attention_mask", truncated to 512 tokens.

    Notes:
        - No padding here: the ``DataCollatorForLanguageModeling`` used at
          training time pads each batch dynamically, which is tighter than
          padding every map-batch to a shared length as the original did.
        - No manual "labels" either: the collator (mlm=False) derives causal
          LM labels from input_ids itself. The original built labels with a
          shallow ``.copy()`` of a list of lists, so the label rows aliased
          the input_ids rows — dropping the field removes that hazard.
    """
    max_length = 512
    return tokenizer(
        examples["text"],
        truncation=True,
        max_length=max_length,
    )

# Tokenize the full dataset in batches; drop the raw "text" column so only
# model inputs remain in the mapped dataset.
tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=dataset.column_names)

# LoRA adapter configuration: moderate rank/alpha plus dropout to curb
# overfitting on a small dataset; adapters attached to every attention and
# MLP projection matrix.
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    r=16,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=[
        "q_proj", "v_proj", "k_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)

# Wrap the base model with the LoRA adapters and report how many
# parameters are actually trainable.
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()

# Training hyperparameters. Regularization (dropout, weight decay, cosine
# schedule, grad clipping) is tuned to limit overfitting on a small dataset.
training_args = TrainingArguments(
    output_dir=output_dir,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,   # effective batch size = 2 * 4 = 8
    learning_rate=2e-4,              # common LoRA fine-tuning learning rate
    num_train_epochs=10,
    logging_dir="./logs",
    logging_steps=10,
    save_steps=100,
    eval_steps=100,                  # inert: no eval_dataset is passed to the Trainer
    # FIX: the model is loaded with torch_dtype=torch.bfloat16, so train in
    # bf16. The original fp16=True would run the fp16 autocast/GradScaler
    # path against bf16 weights, which mismatches the model dtype.
    bf16=True,
    warmup_steps=10,
    lr_scheduler_type="cosine",
    report_to="none",
    weight_decay=0.01,
    max_grad_norm=1.0,
)

# Causal-LM data collator (mlm=False): dynamically pads each batch and
# builds language-modeling labels from the input ids.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# Wire model, hyperparameters, dataset, and collator into the Trainer.
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=tokenized_dataset,
)

# Run training (prints "starting training...").
print("开始训练...")
trainer.train()

# Persist the trained model (LoRA adapter weights) and the tokenizer so the
# output directory is self-contained for later loading.
print("保存模型...")
trainer.save_model()
tokenizer.save_pretrained(output_dir)
print(f"训练完成，模型已保存到: {output_dir}")

# Sanity check: confirm the output directory exists and list what was saved.
print("\n验证保存的模型文件:")
if not os.path.exists(output_dir):
    print(f"错误: 目录 {output_dir} 不存在")
else:
    print(f"目录 {output_dir} 中的文件:")
    for entry in os.listdir(output_dir):
        print(f"  - {entry}")