# fastapi-chat3 / main.py
import time
from fastapi import FastAPI, Request, HTTPException
from pydantic import BaseModel
from g4f.client import Client
import uvicorn

app = FastAPI()
API_PREFIX = "/"


# Middleware for logging request time
@app.middleware("http")
async def log_process_time(request: Request, call_next):
    start_time = time.time()
    response = await call_next(request)
    process_time = time.time() - start_time
    print(f"{request.method} {response.status_code} {request.url.path} {process_time*1000:.2f} ms")
    return response


# Request body model
class ChatCompletionRequest(BaseModel):
    model: str
    messages: list[dict]
@app.get("/")
async def root():
return {"message": "API server running"}
@app.get("/ping")
async def ping():
return {"message": "pong"}
@app.get(f"{API_PREFIX}v1/models")
async def get_models():
return {
"object": "list",
"data": [
{"id": "gpt-4o-mini", "object": "model", "owned_by": "ddg"},
{"id": "claude-3-haiku", "object": "model", "owned_by": "ddg"},
{"id": "llama-3.1-70b", "object": "model", "owned_by": "ddg"},
{"id": "mixtral-8x7b", "object": "model", "owned_by": "ddg"},
{"id": "o3-mini", "object": "model", "owned_by": "ddg"},
],
}
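
# A minimal usage sketch (assuming the server is running locally on port 7860),
# fetching the model list defined above:
#
#   curl http://localhost:7860/v1/models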
@app.post(f"{API_PREFIX}v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
try:
# Only using DuckAI directly
content = " ".join([msg.get("content", "") for msg in request.messages])
client = Client()
response = client.chat.completions.create(
model=request.model,
messages=[{"role": "user", "content": content}],
web_search=False
)
return response
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    # This file is main.py, so the app import string must be "main:app"
    uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=True)
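
# A minimal usage sketch, assuming the server is running locally on port 7860.
# The endpoint path and request fields come from the handler above; the exact
# response fields depend on what the g4f client returns:
#
#   curl http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hello"}]}'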