import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config

def load_model(model_path='output'):
    """Load a locally saved GPT-2 model and tokenizer for inference.

    Parameters
    ----------
    model_path : str
        Directory holding the saved config, tokenizer files, and weights.
        Only local files are used (no hub download).

    Returns
    -------
    tuple
        ``(model, tokenizer)`` — the model is already switched to eval mode.
    """
    print(f"🔄 Loading model from {model_path}")
    cfg = GPT2Config.from_pretrained(model_path, local_files_only=True)
    tok = GPT2Tokenizer.from_pretrained(model_path, local_files_only=True)
    net = GPT2LMHeadModel.from_pretrained(model_path, config=cfg, local_files_only=True)
    # Disable dropout etc. — this script only ever does inference.
    net.eval()
    return net, tok

def generate_text(model, tokenizer, prompt, max_length=100, temperature=1.0, top_p=0.95):
    """Sample a continuation of ``prompt`` from ``model``.

    Parameters
    ----------
    model : GPT2LMHeadModel
        Model exposing a HF-style ``generate`` method.
    tokenizer : GPT2Tokenizer
        Tokenizer used both to encode the prompt and decode the output.
    prompt : str
        Text to continue.
    max_length : int
        Total sequence length cap (prompt tokens included), per HF semantics.
    temperature : float
        Softmax temperature for sampling.
    top_p : float
        Nucleus-sampling probability mass.

    Returns
    -------
    str
        The decoded sequence (prompt plus continuation), special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors='pt')
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs['input_ids'],
            # Fix: forward the attention mask. pad_token_id is set to the EOS
            # id below, and without an explicit mask generate() cannot tell
            # padding from a genuine EOS token — HF warns about this and
            # sampling can be skewed for padded inputs.
            attention_mask=inputs.get('attention_mask'),
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

if __name__ == "__main__":
    # Path to the locally trained GPT-2 checkpoint to serve.
    model_path = "./outputs/gpt2/checkpoint-250000/"
    model, tokenizer = load_model(model_path)

    print("🧪 请输入 prompt（按回车开始生成）：")
    # Minimal interactive REPL: read a prompt, sample a continuation,
    # repeat until the user types 'exit' or 'quit'.
    while True:
        prompt = input(">> ")
        if prompt.strip().lower() in ('exit', 'quit'):
            break
        print("📝 生成结果：")
        print(generate_text(model, tokenizer, prompt))
        print("\n===============================\n")
