from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Initialize the FastAPI application.
app = FastAPI()

try:
    # Load the tokenizer and model from the local checkpoint directory.
    # trust_remote_code=True executes code shipped with the checkpoint --
    # acceptable only because "./ChinaWisdomAI" is a local, trusted artifact.
    tokenizer = AutoTokenizer.from_pretrained("./ChinaWisdomAI", trust_remote_code=True)

    # Pick the device at startup instead of calling .cuda() unconditionally,
    # which crashed on CPU-only hosts. Half precision is used only on GPU;
    # CPU inference stays in float32 (fp16 kernels are poorly supported there).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    model = AutoModelForCausalLM.from_pretrained(
        "./ChinaWisdomAI", trust_remote_code=True, torch_dtype=dtype
    ).to(device)

    # Inference only: disable dropout / training-mode layers.
    model.eval()
except Exception as e:
    # Fail fast: re-raise so the process does not start half-initialized.
    print(f"模型加载出错: {e}")
    raise

@app.post("/generate")
def generate_text(prompt: str, max_length: int = 100):
    """Generate a continuation of *prompt* with the loaded causal LM.

    Deliberately a plain ``def`` (not ``async def``): ``model.generate`` is
    blocking CPU/GPU work, and FastAPI runs sync endpoints in a worker
    threadpool, so one long generation no longer stalls the event loop and
    every other request with it.

    Args:
        prompt: Text to continue (sent as a query parameter).
        max_length: Total token budget *including* the prompt tokens.
            NOTE(review): ``max_new_tokens`` may be the intended semantics
            here -- confirm with API consumers before changing it.

    Returns:
        ``{"generated_text": ...}`` on success, or ``{"error": ...}`` with
        HTTP 200 on failure (shape preserved for existing callers).
    """
    try:
        # Tokenize and move the input tensors onto the model's device.
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        # No gradients needed for inference; saves memory and time.
        with torch.no_grad():
            outputs = model.generate(**inputs, max_length=max_length)

        # Decode the generated token ids back into text.
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        return {"generated_text": generated_text}
    except Exception as e:
        # Broad catch kept on purpose: one bad request must not take down
        # the worker; the failure is surfaced in the response body instead.
        return {"error": str(e)}

if __name__ == "__main__":
    # Development entry point: start the ASGI server directly when this
    # module is executed as a script.
    import uvicorn

    # Bind to all interfaces so the service is reachable from other hosts.
    bind_host, bind_port = "0.0.0.0", 8000
    uvicorn.run(app, host=bind_host, port=bind_port)