#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：tiny-llm 
@File    ：api.py
@IDE     ：PyCharm 
@Author  ：XJ
@Date    ：2025/3/20 16:55 
'''
from fastapi import FastAPI
from model import TinyLLM, GPT2Tokenizer
import torch

# Module-level singletons shared by all request handlers.
app = FastAPI()
# NOTE(review): GPT2Tokenizer is imported from the local `model` module, not
# HuggingFace `transformers` — confirm `from_pretrained("gpt2")` resolves the
# vocab files as intended (it may download or read from a local cache).
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# Model is constructed with default (random) weights here — presumably a
# checkpoint load is expected before serving; verify against deployment setup.
model = TinyLLM()

@app.post("/generate")
async def generate(text: str, max_length: int = 50):
    """Greedily generate a continuation of `text` with the TinyLLM model.

    Fixes two defects in the original implementation:
    - `max_length` was accepted but never used;
    - a single forward pass + argmax over every position decoded the model's
      next-token predictions *for the prompt tokens* rather than producing any
      new text. This version runs a proper autoregressive loop, appending one
      greedily-chosen token per step, up to `max_length` new tokens.

    Args:
        text: Prompt string to condition on.
        max_length: Maximum number of new tokens to generate (default 50).

    Returns:
        {"result": <prompt + generated continuation, decoded to a string>}
    """
    input_ids = tokenizer(text, return_tensors="pt")["input_ids"]
    generated = input_ids
    with torch.no_grad():
        for _ in range(max_length):
            # NOTE(review): assumes model returns logits shaped
            # (batch, seq, vocab), consistent with the original's
            # argmax(outputs, dim=-1) + [0] batch indexing — confirm.
            logits = model(generated)
            # Greedy decode: take the argmax of the last position only.
            next_id = torch.argmax(logits[:, -1, :], dim=-1, keepdim=True)
            generated = torch.cat([generated, next_id], dim=1)
    return {"result": tokenizer.decode(generated[0])}