import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# --- 1. Load the base model and our fine-tuned adapter ---
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model_name = "Qwen/Qwen2-1.5B-Instruct"
adapter_path = "./sichuan_travel_bot_finetuned"  # path to the trained LoRA adapter

print("正在加载基础模型...")
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # let accelerate decide device placement (may differ from `device`)
)

tokenizer = AutoTokenizer.from_pretrained(base_model_name)

print("正在加载并合并LoRA适配器...")
# Attach the LoRA adapter weights on top of the frozen base model.
# NOTE(review): despite the message, this only loads the adapter — it does
# not merge weights; call merge_and_unload() if a merged model is needed.
model = PeftModel.from_pretrained(base_model, adapter_path)
print("模型准备就绪！")

def answer_with_sichuan_bot(question):
    """Answer a question using the fine-tuned Sichuan travel bot.

    Args:
        question: The user's question as a plain string.

    Returns:
        The generated answer text, with the prompt and special tokens stripped.
    """
    # NOTE: this system prompt must match the one used during fine-tuning,
    # otherwise the adapter sees a prompt distribution it was not trained on.
    messages = [
        {"role": "system", "content": "你是一个精通四川旅游的智能助手，你的回答专业、热情且详细。"},
        {"role": "user", "content": question}
    ]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Move inputs to the model's actual device rather than the module-level
    # `device` string: with device_map="auto" the model's placement is chosen
    # by accelerate and may not match that string.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    outputs = model.generate(
        **inputs,
        max_new_tokens=512,  # allow a fairly long answer
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        # Explicit pad token: silences the "pad_token_id not set" warning;
        # has no effect on a single, unpadded sequence.
        pad_token_id=tokenizer.eos_token_id,
    )

    # Slice off the prompt tokens and decode only the newly generated ones.
    response_ids = outputs[0][inputs.input_ids.shape[1]:]
    answer = tokenizer.decode(response_ids, skip_special_tokens=True)
    return answer


# # --- Chat with your personal bot (example interactive loop) ---
# print("\n" + "=" * 20 + " 四川旅游问答机器人 " + "=" * 20)
# while True:
#     user_question = input("您好！请输入您关于四川旅游的问题 (输入 '退出' 结束对话): \n> ")
#     if user_question.lower() in ["退出", "exit", "quit"]:
#         print("感谢您的使用，再见！")
#         break
#
#     answer = answer_with_sichuan_bot(user_question)
#     print(f"\n🤖 机器人回答:\n{answer}\n" + "-" * 60)