import logging
import os

import uvicorn
from fastapi import FastAPI, HTTPException, Request, Response
from fastapi.responses import StreamingResponse

from config import settings
from proxy import ProxyService

# Configure root logging; level comes from application settings.
logging.basicConfig(
    level=settings.LOG_LEVEL,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Shared proxy-service instance; maps Ollama model names to cloud LLM backends.
proxy_service = ProxyService()

# Create the FastAPI application.
app = FastAPI(title="Ollama Bridge", description="A bridge service to connect Ollama clients with cloud LLMs")

@app.get("/api/tags")
async def list_models():
    """Return the names of every Ollama model this bridge can serve.

    NOTE(review): Ollama's /api/tags normally returns a list of objects
    (each with a "name" key), not bare strings — confirm clients accept
    this shape before changing it.
    """
    model_names = proxy_service.model_mapper.list_ollama_models().keys()
    return {"models": list(model_names)}

@app.post("/api/generate")
async def generate_text(request: Request):
    """Text-generation endpoint supporting streaming and non-streaming modes.

    Expects a JSON body containing at least:
        model  (str): Ollama model name to map onto the cloud provider.
        stream (bool, optional): stream response chunks when true
            (defaults to False, preserving the original behavior).

    Raises:
        HTTPException: 400 when no model name is supplied.
    """
    body = await request.json()

    ollama_model_name = body.get("model")
    if not ollama_model_name:
        # Fix: HTTPException was previously referenced without being
        # imported, so this path raised NameError (a 500) instead of
        # the intended 400 response. The import is now at file top.
        raise HTTPException(status_code=400, detail="Model name is required")

    stream = body.get("stream", False)

    if stream:
        # Forward chunks as they arrive from the upstream provider.
        # NOTE(review): Ollama clients typically stream NDJSON;
        # "application/x-ndjson" may be the more accurate media type —
        # confirm client expectations before changing.
        return StreamingResponse(
            proxy_service.stream_proxy(request, ollama_model_name),
            media_type="application/json",
        )
    # Buffer the full upstream response and return it as a single payload.
    return await proxy_service.non_stream_proxy(request, ollama_model_name)

@app.get("/healthz")
async def health_check():
    """Liveness-probe endpoint; always reports the service as healthy."""
    status_payload = {"status": "healthy"}
    return status_payload

def start_server():
    """Launch the FastAPI app under uvicorn, configured from settings."""
    supported_models = list(proxy_service.model_mapper.list_ollama_models().keys())
    logger.info(f"Starting server on {settings.OLLAMA_HOST}:{settings.OLLAMA_PORT}")
    logger.info(f"Cloud model provider: {settings.CLOUD_MODEL_PROVIDER}")
    logger.info(f"Supported models: {supported_models}")

    # Run via import string ("main:app") so uvicorn can re-import the
    # application in its worker process.
    uvicorn.run(
        "main:app",
        host=settings.OLLAMA_HOST,
        port=settings.OLLAMA_PORT,
        reload=False,
        log_level=settings.LOG_LEVEL.lower(),
    )

# Start the server only when run as a script — not when this module is
# imported (e.g. by the uvicorn worker resolving "main:app").
if __name__ == "__main__":
    start_server()