import torch
from model import CodeTransformer
import tiktoken
import argparse

def generate():
    """Generate code from a trained CodeTransformer checkpoint.

    Command-line arguments:
        --model_path: path to the saved state dict (default "model.pth").
        --prompt: text prompt to condition generation on.
        --max_length: maximum number of new tokens to sample.
        --temperature: sampling temperature passed to model.generate.

    Prints the decoded generation to stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default="model.pth")
    parser.add_argument("--prompt", default="def binary_search")
    parser.add_argument("--max_length", type=int, default=500)
    parser.add_argument("--temperature", type=float, default=0.8)
    args = parser.parse_args()

    # Initialize model and tokenizer.
    enc = tiktoken.get_encoding("gpt2")
    model = CodeTransformer()
    # map_location="cpu" lets a checkpoint saved on GPU load on a CPU-only
    # machine; without it torch.load tries to restore tensors to the original
    # (possibly unavailable) device and raises.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources (or pass weights_only=True on
    # torch >= 1.13).
    model.load_state_dict(torch.load(args.model_path, map_location="cpu"))
    model.eval()

    # Encode the prompt into a (1, seq_len) batch of token ids.
    input_ids = enc.encode(args.prompt)
    input_ids = torch.tensor([input_ids], dtype=torch.long)

    # Sample new tokens; no_grad avoids building the autograd graph.
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids,
            max_new_tokens=args.max_length,
            temperature=args.temperature
        )

    # Decode token ids back to text and print.
    generated_code = enc.decode(generated_ids[0].tolist())
    print("\nGenerated Code:")
    print("="*40)
    print(generated_code)
    print("="*40)

# Script entry point: run generation only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    generate()