import json
import torch
import os
import re
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    GenerationConfig
)
from peft import LoraConfig, get_peft_model, TaskType, PeftModel
from datasets import Dataset
import transformers

# Select device: prefer GPU index 1 when CUDA is available, otherwise CPU.
# NOTE(review): the model below is loaded with device_map="auto", which may
# shard/place it on a different GPU than this `device` — confirm they agree.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# # Alternative: use GPU 0 (kept for reference):
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# print(f"Using device: {device}")

# Base model path and directory where the fine-tuned adapter will be saved.
model_name = "/home/liuzhongzhong/data/models/Qwen/Qwen3-0___6B"
output_dir = "/home/liuzhongzhong/data/save_modles"

# Make sure the save directory exists.
os.makedirs(output_dir, exist_ok=True)

# Load tokenizer and base model.
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the pad token

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # load weights in bfloat16
    device_map="auto"
)

# Load the raw training data. Expected format (from format_conversation below):
# a JSON list of conversations, each a list of {"role", "content"} turns.
with open("/home/liuzhongzhong/data/data/test.json", "r", encoding="utf-8") as f:
    data = json.load(f)

# Convert one conversation into ChatML-style training text.
def format_conversation(conversation):
    """Render a conversation as ChatML-style training text.

    Each turn becomes ``<|im_start|>{role}\\n{content}<|im_end|>\\n``.

    Args:
        conversation: list of dicts with ``"role"`` (e.g. "user",
            "assistant", "system") and ``"content"`` keys.

    Returns:
        The concatenated ChatML text; empty string for an empty conversation.
    """
    # Use each turn's own role tag instead of collapsing every non-"user"
    # role to "assistant" — the original mislabeled e.g. "system" turns.
    # Output is unchanged for user/assistant data.
    return "".join(
        f"<|im_start|>{turn['role']}\n{turn['content']}<|im_end|>\n"
        for turn in conversation
    )

# Render every conversation to plain text for causal-LM training.
formatted_data = [format_conversation(conv) for conv in data]

# Wrap the formatted texts in a HuggingFace Dataset with a single "text" column.
dataset = Dataset.from_dict({"text": formatted_data})

# Tokenize a batch of examples for causal-LM training.
def tokenize_function(examples):
    """Tokenize a batch of texts, truncating/padding to 512 tokens.

    Labels are a copy of the input ids (standard causal-LM setup).
    NOTE(review): DataCollatorForLanguageModeling(mlm=False) rebuilds
    labels from input_ids at collation time — confirm whether this
    pre-computed copy is actually needed.
    """
    batch = tokenizer(
        examples["text"],
        truncation=True,
        padding=True,
        max_length=512,
        return_offsets_mapping=False,
    )
    batch["labels"] = batch["input_ids"].copy()
    return batch

# Tokenize the whole dataset in batches, dropping the raw "text" column.
tokenized_dataset = dataset.map(
    tokenize_function,
    batched=True,
    remove_columns=dataset.column_names
)

# LoRA configuration — capacity deliberately increased to encourage
# overfitting/memorization on this small dataset.
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    r=32,  # higher rank -> more trainable capacity
    lora_alpha=64,  # scaling factor (alpha / r = 2)
    lora_dropout=0.05,  # low dropout -> less regularization
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
)

# Wrap the base model with LoRA adapters and report trainable parameter count.
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

# Training hyperparameters — deliberately tuned to overfit the small dataset
# (constant LR, no weight decay, many epochs, short warmup).
training_args = TrainingArguments(
    output_dir=output_dir,
    per_device_train_batch_size=1,  # small per-device batch
    gradient_accumulation_steps=8,  # effective batch size of 8
    learning_rate=5e-4,  # high LR to speed up memorization
    num_train_epochs=50,  # many epochs to encourage overfitting
    logging_dir="./logs",
    logging_steps=5,
    save_steps=100,
    eval_steps=100,  # NOTE(review): no eval dataset is passed to Trainer, so this is inert
    # FIX: the model is loaded with torch_dtype=torch.bfloat16; fp16 autocast
    # over bf16 weights is inconsistent and can fail or degrade training.
    # Use bf16 mixed precision to match the model dtype.
    bf16=True,
    warmup_steps=5,  # short warmup
    lr_scheduler_type="constant",  # constant learning rate
    report_to="none",
    weight_decay=0.0,  # no weight decay
    max_grad_norm=1.0,  # standard gradient clipping
)

# Collator for causal LM: pads batches and builds labels
# (mlm=False selects next-token prediction, not masked-LM).
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False
)

# Assemble the Trainer (training only — no eval dataset is provided).
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator
)

# Run training.
print("开始训练...")
trainer.train()

# Save the LoRA adapter weights and the tokenizer to output_dir.
print("保存模型...")
trainer.save_model()
tokenizer.save_pretrained(output_dir)
print(f"训练完成，模型已保存到: {output_dir}")

# Sanity check: list the files written to the save directory.
print("\n验证保存的模型文件:")
if os.path.exists(output_dir):
    files = os.listdir(output_dir)
    print(f"目录 {output_dir} 中的文件:")
    for file in files:
        print(f"  - {file}")
else:
    print(f"错误: 目录 {output_dir} 不存在")

# Example: reload the fine-tuned model and run a quick inference smoke test.
print("\n加载训练好的模型的示例:")
try:
    # Reload the base model.
    base_model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16,
        device_map="auto"
    )

    # Attach the trained LoRA adapter saved in output_dir.
    trained_model = PeftModel.from_pretrained(base_model, output_dir)
    print("模型加载成功!")

    # Generate an assistant reply for a ChatML-formatted prompt.
    def generate_response(prompt):
        """Generate and extract the assistant reply for *prompt*.

        Appends the assistant header if missing, generates greedily, and
        returns the text between "<|im_start|>assistant\\n" and "<|im_end|>"
        (or the whole decoded output, stripped of special tokens, as a
        fallback).
        """
        # Make sure the prompt ends at the start of the assistant turn.
        if not prompt.endswith("<|im_start|>assistant\n"):
            prompt = prompt + "<|im_start|>assistant\n"

        # NOTE(review): the model was loaded with device_map="auto"; moving
        # inputs to the globally chosen `device` (cuda:1) assumes the first
        # model shard lives there — confirm on multi-GPU hosts.
        inputs = tokenizer(prompt, return_tensors="pt").to(device)

        # Deterministic, short generation so the memorized answer comes back.
        generation_config = GenerationConfig(
            max_new_tokens=20,  # keep replies short
            temperature=0.1,  # ignored when do_sample=False (greedy search)
            do_sample=False,  # greedy decoding for reproducible output
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.convert_tokens_to_ids(["<|im_end|>"])[0],  # stop at end-of-turn
            repetition_penalty=1.5,  # discourage loops
        )

        with torch.no_grad():
            outputs = trained_model.generate(
                **inputs,
                generation_config=generation_config
            )

        # FIX: generate() returns a single sequence here (batch size 1), so
        # decode outputs[0]; the original indexed outputs[1], which raises
        # IndexError at runtime.
        full_response = tokenizer.decode(outputs[0], skip_special_tokens=False)

        # Slice out the assistant reply between its start tag and <|im_end|>.
        assistant_start = "<|im_start|>assistant\n"
        assistant_end = "<|im_end|>"

        start_idx = full_response.find(assistant_start)
        if start_idx != -1:
            start_idx += len(assistant_start)
            end_idx = full_response.find(assistant_end, start_idx)
            if end_idx != -1:
                return full_response[start_idx:end_idx].strip()

        # Fallback: return the whole decoded output without special tokens.
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Smoke-test prompts.
    test_cases = [
        "通道更新一下"
    ]

    for test_case in test_cases:
        test_prompt = f"<|im_start|>user\n{test_case}<|im_end|>\n<|im_start|>assistant\n"
        response = generate_response(test_prompt)
        print(f"用户: {test_case}")
        print(f"助理: {response}")
        print("-" * 50)

except Exception as e:
    print(f"加载模型时出错: {e}")
    import traceback
    traceback.print_exc()