import torch
from transformers import BertTokenizerFast, GPT2LMHeadModel, GPT2Config
from config import ParameterConfig

params = ParameterConfig()
# Path configuration: hard-coded checkpoint path plus vocab/config paths
# taken from the project-level ParameterConfig.
checkpoint_path = r"E:\Project\other_code\Gpt2_ChatBot\gpt2_chatbot\save_model\checkpoint_epoch2.pt"
vocab_path = params.vocab_path
config_path = params.config_json
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 1. Load the tokenizer from a plain vocab file.
tokenizer = BertTokenizerFast(vocab_file=vocab_path)
print("✅ Tokenizer loaded.")

# 2. Build the model skeleton from the JSON config; weights are filled in
#    from the checkpoint below (the config must match the trained model).
config = GPT2Config.from_json_file(config_path)
model = GPT2LMHeadModel(config)
model.to(device)

# 3. Load the checkpoint and restore the model weights.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoints (consider weights_only=True on recent torch versions).
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
print("✅ Model loaded from checkpoint.")

# 4. Interactive chat loop
def chat(model, tokenizer, device, max_len=100):
    """Run an interactive console chat loop until the user types exit/quit/q.

    Args:
        model: GPT2LMHeadModel already moved to ``device`` and in eval mode.
        tokenizer: BertTokenizerFast used to encode prompts and decode replies.
        device: torch.device the model lives on.
        max_len: maximum number of new tokens to generate per reply.
    """
    while True:
        input_text = input("你：")
        if input_text.strip().lower() in ['exit', 'quit', 'q']:
            break
        # Robustness: skip empty/whitespace-only input instead of sampling
        # from a blank prompt, which would only produce noise.
        if not input_text.strip():
            continue

        # Encode the user turn (the tokenizer adds its special tokens).
        input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)
        input_len = input_ids.shape[1]

        # Inference only: no_grad avoids building an autograd graph on every
        # turn, which would otherwise waste memory for no benefit.
        with torch.no_grad():
            output_ids = model.generate(
                input_ids=input_ids,
                max_length=input_len + max_len,
                do_sample=True,  # sampling decode
                top_k=50,
                top_p=0.95,
                temperature=0.7,
                eos_token_id=tokenizer.sep_token_id,  # optional: stop at SEP
                pad_token_id=tokenizer.pad_token_id,  # avoids a generate() warning
            )

        # Keep only the newly generated tokens (drop the echoed prompt).
        generated_ids = output_ids[0][input_len:]
        output_text = tokenizer.decode(generated_ids, skip_special_tokens=True)
        print("🤖：", output_text)

# 5. Start the chat loop — guarded so importing this module for its
#    loaded `model`/`tokenizer` does not drop into the interactive REPL.
if __name__ == "__main__":
    chat(model, tokenizer, device)
