# NOTE: header residue from the hosting page; last known change:
# commit f5c3d9c — "refactor: improve code readability and structure in
# OpenAI integration tests and services, update requirements for consistency"
"""FastAPI application entry point: app factory, middleware, and routing."""

import logging
import os
from contextlib import asynccontextmanager

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware

from .core.config import settings
from .api.v1.router import api_router

# Configure logging once at import time; modules below share this config.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(app: "FastAPI"):
    """
    Async context manager handling application startup and shutdown.

    FastAPI's ``lifespan=`` parameter requires an async context manager;
    the bare async generator function (the ``@asynccontextmanager``
    decorator was imported but never applied) is rejected by FastAPI,
    so the decorator is restored here.
    """
    # Startup
    logger.info("Starting FastAPI application...")
    yield
    # Shutdown
    logger.info("Shutting down FastAPI application...")
def create_app() -> FastAPI:
    """
    Application factory: build and configure the FastAPI instance.

    Returns:
        A FastAPI app with lifespan handling, CORS, and trusted-host
        middleware installed.
    """
    app = FastAPI(
        title=settings.PROJECT_NAME,
        description="A scalable FastAPI application",
        version="1.0.0",
        lifespan=lifespan,
    )

    # CORS: Starlette matches allow_origins entries by exact string, so
    # wildcard entries such as "https://*.hf.space" never match any real
    # origin — subdomain wildcards must go through allow_origin_regex.
    # "*" is kept for the Hugging Face deployment; with
    # allow_credentials=True Starlette echoes the request's origin back
    # instead of sending a literal "*".
    app.add_middleware(
        CORSMiddleware,
        allow_origins=[
            "http://localhost:3000",
            "http://127.0.0.1:3000",
            "http://localhost:8000",
            "http://127.0.0.1:8000",
            "*",  # Allow all origins for Hugging Face deployment
        ],
        allow_origin_regex=r"https://.*\.(hf\.space|huggingface\.co)",
        allow_credentials=True,
        allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
        allow_headers=["*"],
    )

    # NOTE(review): "*" in allowed_hosts disables Host-header checking
    # entirely — tighten before exposing outside a trusted proxy.
    app.add_middleware(
        TrustedHostMiddleware,
        allowed_hosts=["localhost", "127.0.0.1", "0.0.0.0", "*"],
    )
    return app
# Module-level ASGI application object (the target ASGI servers import).
app = create_app()
# Health check endpoint
# NOTE(review): no @app.get(...) decorator is visible on this handler —
# confirm the route is registered elsewhere (an extraction artifact may
# have dropped the decorator line; the commented-out siblings below all
# use @app.get).
async def health_check():
    """Report process liveness for container/platform health probes."""
    payload = {"status": "healthy", "message": "Backend is running"}
    return payload
# OpenAI connection test endpoint
# @app.get("/api/test-openai")
# async def test_openai():
#     """Test OpenAI connection without making actual API calls"""
#     try:
#         from .core.config import settings
#         if not settings.OPENAI_API_KEY:
#             return {"status": "error", "message": "OPENAI_API_KEY not configured"}
#         api_key_preview = (
#             settings.OPENAI_API_KEY[:10] + "..."
#             if len(settings.OPENAI_API_KEY) > 10
#             else "Too short"
#         )
#         return {
#             "status": "ok",
#             "message": "OpenAI API key is configured",
#             "api_key_preview": api_key_preview,
#             "environment": (
#                 "production" if os.getenv("NODE_ENV") == "production" else "development"
#             ),
#         }
#     except Exception as e:
#         return {"status": "error", "message": f"OpenAI configuration error: {str(e)}"}

# Test chat completions endpoint
# @app.get("/api/test-chat")
# async def test_chat():
#     """Test if OpenAI chat completions work on Hugging Face"""
#     try:
#         from .core.config import settings
#         from openai import OpenAI
#         if not settings.OPENAI_API_KEY:
#             return {"status": "error", "message": "OPENAI_API_KEY not configured"}
#         client = OpenAI(api_key=settings.OPENAI_API_KEY, timeout=30.0)
#         # Test a simple chat completion
#         response = client.chat.completions.create(
#             model="gpt-3.5-turbo",
#             messages=[
#                 {
#                     "role": "user",
#                     "content": "Say 'Hello from Hugging Face!' in exactly those words.",
#                 }
#             ],
#             max_tokens=20,
#         )
#         return {
#             "status": "success",
#             "message": "Chat completions work!",
#             "response": response.choices[0].message.content,
#             "model": response.model,
#         }
#     except Exception as e:
#         return {
#             "status": "error",
#             "message": f"Chat completion failed: {str(e)}",
#             "error_type": type(e).__name__,
#         }
# Include API router: mounts all versioned endpoints under the
# settings.API_V1_STR prefix.
app.include_router(api_router, prefix=settings.API_V1_STR)