from llama_cpp.server.app import create_app, Settings
from fastapi.responses import HTMLResponse
import os

# Log how many CPU cores are available; n_threads below uses all of them
print("os.cpu_count()", os.cpu_count())

app = create_app(
    Settings(
        n_threads=os.cpu_count(),
        model="model/ggmlv3-model.bin",
        embedding=False,
    )
)

# Read the content of index.html once and store it in memory
with open("index.html", "r") as f:
    content = f.read()


# Serve the cached index.html at the root path
@app.get("/", response_class=HTMLResponse)
async def read_items():
    return content


if __name__ == "__main__":
    import uvicorn

    # HOST and PORT must be set in the environment, or this raises KeyError
    uvicorn.run(app, host=os.environ["HOST"], port=int(os.environ["PORT"]))
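
# A minimal sketch of how this script might be launched. The script name
# "server.py" and the address values are assumptions, not from the source:
#
#   HOST=0.0.0.0 PORT=8000 python server.py
#
# Once running, create_app also exposes llama-cpp-python's OpenAI-compatible
# endpoints alongside the index page, so a completion request could look
# roughly like this (prompt and max_tokens are illustrative):
#
#   curl -X POST http://localhost:8000/v1/completions \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello", "max_tokens": 16}'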