"""FastAPI service exposing a text-generation endpoint backed by a
Hugging Face `transformers` pipeline, plus a static frontend.

Endpoints:
    GET /infer_t5?input=...  -> {"output": <generated text>}
    GET /                    -> static index.html
"""

from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from transformers import pipeline

app = FastAPI()

# Text-generation pipeline (GPT-style causal LM).
# trust_remote_code is required because this model ships custom code —
# NOTE(review): this executes arbitrary code from the model repo; only use
# with a trusted model source.
pipe_gpt = pipeline(
    "text-generation",
    model="FreedomIntelligence/Apollo-1.8B",
    trust_remote_code=True,
)


@app.get("/infer_t5")
def infer_gpt(input: str):
    """Generate text from the query parameter `input`.

    The parameter is named `input` (shadowing the builtin) because it is
    the public query-string key; renaming it would break existing callers.
    `max_length=50` caps the total generated sequence length.
    """
    output = pipe_gpt(input, max_length=50)
    return {"output": output[0]["generated_text"]}


@app.get("/")
def index() -> FileResponse:
    """Serve the static frontend entry page."""
    return FileResponse(path="/app/static/index.html", media_type="text/html")


# Mount static files LAST: Starlette matches in registration order, and a
# mount at "/" would otherwise intercept every request and make the routes
# defined above unreachable.
app.mount("/", StaticFiles(directory="static", html=True), name="static")