"""FastAPI application entry point."""
from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
import logging
import os
from .core.config import settings
from .api.v1.router import api_router
# Configure logging.
# NOTE: basicConfig is a no-op if the root logger already has handlers,
# so this only takes effect when this module is the first to configure logging.
logging.basicConfig(level=logging.INFO)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Handle application startup and shutdown events.

    Code before ``yield`` runs at startup; code after runs at shutdown.
    The ``try/finally`` guarantees the shutdown message is logged even
    when the application is torn down by an exception.
    """
    # Startup
    logger.info("Starting FastAPI application...")
    try:
        yield
    finally:
        # Shutdown — always logged, even on abnormal termination.
        logger.info("Shutting down FastAPI application...")
def create_app() -> FastAPI:
    """
    Application factory pattern for creating FastAPI instance.

    Returns:
        A configured FastAPI app with CORS and trusted-host middleware
        installed and the lifespan handler attached.
    """
    app = FastAPI(
        title=settings.PROJECT_NAME,
        description="A scalable FastAPI application",
        version="1.0.0",
        lifespan=lifespan,
    )
    # Add CORS middleware - allow frontend to access API.
    # Starlette matches allow_origins entries literally, so glob patterns
    # like "https://*.hf.space" never match any Origin header; wildcard
    # subdomains must be expressed via allow_origin_regex instead.
    # NOTE(review): "*" combined with allow_credentials=True is an awkward
    # pair — browsers reject "Access-Control-Allow-Origin: *" on
    # credentialed requests; kept here to preserve the intended allow-all
    # behavior for the Hugging Face deployment — confirm and tighten.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=[
            "http://localhost:3000",
            "http://127.0.0.1:3000",
            "http://localhost:8000",
            "http://127.0.0.1:8000",
            "*",  # Allow all origins for Hugging Face deployment
        ],
        # Matches Hugging Face hosted subdomains, e.g. https://foo.hf.space.
        allow_origin_regex=r"https://.*\.(hf\.space|huggingface\.co)",
        allow_credentials=True,
        allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
        allow_headers=["*"],
    )
    # Reject requests with unexpected Host headers; "*" currently disables
    # the check — tighten for production.
    app.add_middleware(
        TrustedHostMiddleware,
        allowed_hosts=["localhost", "127.0.0.1", "0.0.0.0", "*"],
    )
    return app
app = create_app()


# Liveness endpoint: cheap, dependency-free confirmation the server is up.
@app.get("/health")
async def health_check():
    """Report service liveness."""
    return dict(status="healthy", message="Backend is running")
# OpenAI connection test endpoint (dead code: kept commented out for HF debugging — delete once deployment is verified)
# @app.get("/api/test-openai")
# async def test_openai():
# """Test OpenAI connection without making actual API calls"""
# try:
# from .core.config import settings
# if not settings.OPENAI_API_KEY:
# return {"status": "error", "message": "OPENAI_API_KEY not configured"}
# api_key_preview = (
# settings.OPENAI_API_KEY[:10] + "..."
# if len(settings.OPENAI_API_KEY) > 10
# else "Too short"
# )
# return {
# "status": "ok",
# "message": "OpenAI API key is configured",
# "api_key_preview": api_key_preview,
# "environment": (
# "production" if os.getenv("NODE_ENV") == "production" else "development"
# ),
# }
# except Exception as e:
# return {"status": "error", "message": f"OpenAI configuration error: {str(e)}"}
# Test chat completions endpoint
# @app.get("/api/test-chat")
# async def test_chat():
# """Test if OpenAI chat completions work on Hugging Face"""
# try:
# from .core.config import settings
# from openai import OpenAI
# if not settings.OPENAI_API_KEY:
# return {"status": "error", "message": "OPENAI_API_KEY not configured"}
# client = OpenAI(api_key=settings.OPENAI_API_KEY, timeout=30.0)
# # Test a simple chat completion
# response = client.chat.completions.create(
# model="gpt-3.5-turbo",
# messages=[
# {
# "role": "user",
# "content": "Say 'Hello from Hugging Face!' in exactly those words.",
# }
# ],
# max_tokens=20,
# )
# return {
# "status": "success",
# "message": "Chat completions work!",
# "response": response.choices[0].message.content,
# "model": response.model,
# }
# except Exception as e:
# return {
# "status": "error",
# "message": f"Chat completion failed: {str(e)}",
# "error_type": type(e).__name__,
# }
# Include API router: mounts all v1 endpoints under the configured
# prefix (presumably something like "/api/v1" — defined in settings).
app.include_router(api_router, prefix=settings.API_V1_STR)