#!/usr/bin/env python3
"""
文本生成演示脚本
用法: python scripts/generate.py
"""
import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import torch
from src.model import JiaboForCausalLM, JiaboModelConfig
from src.tokenizer import JiaboTokenizer
from src.utils import load_json_config


def main() -> None:
    """Interactive text-generation demo for Jiabo-0.5B-R1.

    Loads the model config, builds the model and tokenizer, restores the
    final checkpoint if one exists, then runs a greedy-decoding REPL until
    the user types 'quit'/'exit' or stdin is exhausted.
    """
    print("🚀 Jiabo-0.5B-R1 文本生成演示")

    # Load model configuration from JSON.
    model_cfg = load_json_config("configs/model_config.json")
    config = JiaboModelConfig(**model_cfg)

    # Pick the best available device instead of hard-coding "mps",
    # which raises on non-Apple hardware.
    if torch.backends.mps.is_available():
        device = torch.device("mps")
    elif torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    model = JiaboForCausalLM(config).to(device)
    tokenizer = JiaboTokenizer("data/vocab.json")

    # Restore trained weights if a checkpoint exists.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources (consider weights_only=True if the
    # checkpoint contains only tensors/primitives).
    checkpoint_path = "checkpoints/final_model.pt"
    if Path(checkpoint_path).exists():
        print(f"📦 加载Checkpoint: {checkpoint_path}")
        state = torch.load(checkpoint_path, map_location=device)
        model.load_state_dict(state["model_state_dict"])

    model.eval()

    # Interactive greedy-decoding loop.
    while True:
        try:
            prompt = input("\n📝 输入提示词 (或 'quit' 退出): ")
        except EOFError:
            # stdin closed (e.g. piped input exhausted) — exit cleanly
            # instead of crashing with a traceback.
            break
        if prompt.lower() in ["quit", "exit"]:
            break
        if not prompt.strip():
            # Nothing to condition on; ask again instead of encoding "".
            continue

        generated_ids = torch.tensor([tokenizer.encode(prompt)]).to(device)

        # Autoregressive greedy decoding. The original code emitted only a
        # single token; here we append up to `max_new_tokens` tokens, one
        # argmax pick per forward pass.
        max_new_tokens = 50
        with torch.no_grad():
            for _ in range(max_new_tokens):
                hidden = model.model(generated_ids)
                logits = model.lm_head(hidden)
                next_token = torch.argmax(logits[0, -1], dim=-1)
                generated_ids = torch.cat(
                    [generated_ids, next_token.view(1, 1)], dim=1
                )

        generated_text = tokenizer.decode(generated_ids[0].tolist())
        print(f"🤖 生成结果: {generated_text}")


# Script entry point: run the interactive demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
