import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Configuration
MODEL_PATH = "../qwen_finetuned/checkpoint-928"  # fine-tuned checkpoint directory
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MAX_HISTORY = 6  # number of retained conversation messages (including system)
SYSTEM_PROMPT = "你是一名软件工程课程的AI助教"  # must match the system prompt used at training time

# Load the fine-tuned causal LM in inference (eval) mode.
# NOTE(review): attn_implementation="flash_attention_2" requires the
# flash-attn package and a compatible GPU — confirm the deployment
# environment provides both, otherwise loading will fail.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    device_map=DEVICE,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    attn_implementation="flash_attention_2",
).eval()

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)

# Sampling parameters passed to model.generate (kept consistent with the
# training-time logits processing, per the original author's note).
GENERATION_CONFIG = {
    "max_new_tokens": 512,
    "temperature": 0.7,
    "top_p": 0.9,
    "do_sample": True,
    "repetition_penalty": 1.1,
    "eos_token_id": tokenizer.eos_token_id,  # stop generation at end-of-sequence
}


def build_prompt(history):
    """Render the chat history into the model's training-time prompt template.

    Keeps the system message plus at most ``MAX_HISTORY - 1`` of the most
    recent history entries.

    Args:
        history: list of ``{"role": ..., "content": ...}`` dicts, oldest first.

    Returns:
        The formatted prompt string (untokenized), with the generation
        prompt appended so the model continues as the assistant.
    """
    recent = history[-(MAX_HISTORY - 1):]
    # Truncation can cut a user/assistant pair in half, leaving an orphaned
    # assistant message at the front; drop such leaders so the conversation
    # always resumes on a user turn, matching the training format.
    while recent and recent[0]["role"] == "assistant":
        recent = recent[1:]
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    messages.extend({"role": h["role"], "content": h["content"]} for h in recent)
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )


def chat():
    """Interactive REPL chat loop.

    Reads user input from stdin, maintains the running conversation
    history, generates a reply with the fine-tuned model, and prints it.
    Typing 'reset' clears the history; 'exit' leaves the loop.
    """
    history = []
    print(f"System: {SYSTEM_PROMPT}\n输入'reset'清空历史，'exit'退出")

    while True:
        user_input = input("\n用户：")
        if user_input.lower() == "exit":
            break
        if user_input.lower() == "reset":
            history = []
            print("历史已重置")
            continue

        # Record the user turn before building the prompt.
        history.append({"role": "user", "content": user_input})

        # Build a prompt in the same format used during training.
        formatted_prompt = build_prompt(history)
        inputs = tokenizer(formatted_prompt, return_tensors="pt").to(DEVICE)
        # BUGFIX: len(inputs[0]) integer-indexes the BatchEncoding, which
        # only works with fast tokenizers (and raises with slow ones).
        # Read the prompt length from the input_ids tensor instead.
        prompt_len = inputs["input_ids"].shape[-1]

        # Generate the assistant reply (no gradients needed at inference).
        with torch.no_grad():
            outputs = model.generate(**inputs, **GENERATION_CONFIG)

        # Decode only the newly generated tokens, not the echoed prompt.
        full_response = tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        )

        # Truncate at any leaked role marker or end-of-text token.
        stop_sequences = ["\n用户:", "\nSystem:", "<|endoftext|>"]
        for seq in stop_sequences:
            if seq in full_response:
                full_response = full_response.split(seq)[0]

        # Strip once and reuse for both the history record and the printout.
        reply = full_response.strip()
        history.append({"role": "assistant", "content": reply})
        print(f"\n助手：{reply}")


# Script entry point: start the interactive chat REPL.
if __name__ == "__main__":
    chat()
