import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    GenerationConfig
)
from peft import PeftModel
import os

# Select the GPU if one is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Paths: the pretrained base model, and the directory holding the
# fine-tuned artifacts (LoRA adapter + tokenizer saved after training).
model_name = "/home/liuzhongzhong/data/models/Qwen/Qwen3-0___6B"
output_dir = "/home/liuzhongzhong/data/save_modles"

# Load the tokenizer that was saved with the fine-tuned model so any
# special tokens stay consistent with what was used during training.
tokenizer = AutoTokenizer.from_pretrained(output_dir)
# Reuse EOS as the pad token (the tokenizer may not define one).
tokenizer.pad_token = tokenizer.eos_token

# 加载训练好的模型的示例代码
print("\n加载训练好的模型...")
try:
    # 加载基础模型
    base_model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16,
        device_map={"": device.index if device.type == "cuda" else "cpu"}  # 明确指定设备
    )

    # 加载LoRA适配器
    trained_model = PeftModel.from_pretrained(base_model, output_dir)
    print("模型加载成功!")

    # Inference helper: wraps prompt completion, generation, and reply
    # extraction for the fine-tuned chat model.
    def generate_response(prompt):
        """Generate an assistant reply for a ChatML-formatted prompt.

        Args:
            prompt: Conversation text in ChatML format. If it does not
                already end with the assistant turn header, one is appended
                so the model continues as the assistant.

        Returns:
            The generated assistant reply as a plain string, with the
            ChatML end-of-turn marker stripped.
        """
        assistant_header = "<|im_start|>assistant\n"
        end_marker = "<|im_end|>"

        # Make sure the prompt ends at the start of an assistant turn.
        if not prompt.endswith(assistant_header):
            prompt = prompt + assistant_header

        # Tokenize and move the input tensors onto the model's device.
        inputs = tokenizer(prompt, return_tensors="pt").to(trained_model.device)

        # Generation settings: bounded output length, nucleus sampling for
        # diversity, and <|im_end|> as the stop token for the turn.
        generation_config = GenerationConfig(
            max_new_tokens=50,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
            # Single-token form; no need to wrap in a list and index [0].
            eos_token_id=tokenizer.convert_tokens_to_ids(end_marker),
            repetition_penalty=1.2,
        )

        with torch.no_grad():
            outputs = trained_model.generate(
                **inputs,
                generation_config=generation_config,
            )

        # Decode only the newly generated tokens. Slicing by the prompt
        # length is more robust than searching the decoded text for the
        # assistant header (which could also appear inside the user
        # message), and it fixes the old fallback path that returned the
        # full output — prompt included — when extraction failed.
        prompt_len = inputs["input_ids"].shape[1]
        generated = outputs[0][prompt_len:]
        response = tokenizer.decode(generated, skip_special_tokens=False)

        # Strip the end-of-turn marker (and anything after it) if present.
        end_idx = response.find(end_marker)
        if end_idx != -1:
            return response[:end_idx].strip()

        # No end marker was generated: return a clean decode of just the
        # generated tokens, without special tokens.
        return tokenizer.decode(generated, skip_special_tokens=True).strip()

    # 测试多个示例
    test_cases = [
        "通道更新一下"
    ]

    for test_case in test_cases:
        test_prompt = f"<|im_start|>user\n{test_case}<|im_end|>\n<|im_start|>assistant\n"
        response = generate_response(test_prompt)
        print(f"用户: {test_case}")
        print(f"助理: {response}")
        print("-" * 50)

except Exception as e:
    print(f"加载模型时出错: {e}")
    import traceback
    traceback.print_exc()