from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import requests
import asyncio

# Application instance; routes are registered on it below.
app = FastAPI()

# Request body model for the /chat endpoint
class ChatRequest(BaseModel):
    """Request body for ``POST /chat``: a single user message."""
    message: str

# Configured backend API services, tried in order by the /chat endpoint.
# Each entry holds a bearer token and a base URL (trailing slash expected,
# since "chat/completions" is appended directly).
# NOTE(review): the API key is hard-coded in source — should be moved to an
# environment variable / secrets store before this leaves a trusted network.
API_KEYS = [
    {"key": "fastgpt-o4xhIPNCJnxOCIjdhXszuqW8E8NZm00BeSxEe6269jOr74jKF2BGq3x1Egnd", "base_url": "http://192.168.100.159:3080/api/v1/"},
    # {"key": "xxxxxxxxxxx", "base_url": "https://new-service.com/api/v1/"}  # additional API service (template)
]

# Cap on simultaneous upstream requests; call_openai_api acquires this
# semaphore before issuing an HTTP call, so at most this many are in flight.
MAX_CONCURRENT_REQUESTS = 2
semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)

async def call_openai_api(api_key, base_url, message):
    """Call an OpenAI-compatible chat-completions endpoint and return the reply.

    Args:
        api_key: Bearer token for the target service.
        base_url: Service base URL; must end with a slash, since
            "chat/completions" is appended directly.
        message: User message sent as a single-turn conversation.

    Returns:
        The assistant reply text (``choices[0].message.content``).

    Raises:
        requests.HTTPError: if the upstream responds with a 4xx/5xx status.
        requests.Timeout: if the upstream does not answer within the timeout.
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    data = {
        "model": "deepseek-r1:32b",
        "messages": [{"role": "user", "content": message}],
    }
    async with semaphore:  # limit concurrent upstream requests
        # BUG FIX: requests.post is blocking; calling it directly inside a
        # coroutine froze the whole event loop (every other request, not just
        # this one) for the duration of the HTTP call. Run it in a worker
        # thread instead. An explicit timeout is added so a hung upstream
        # cannot hold a semaphore slot forever.
        response = await asyncio.to_thread(
            requests.post,
            f"{base_url}chat/completions",
            headers=headers,
            json=data,
            timeout=120,
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

@app.post("/chat")
async def chat(request: ChatRequest):
    """Try each configured backend in order and return the first reply.

    Per-backend failures are logged and the next backend is tried. A timeout
    is reported to the client as a queueing message; if every backend fails,
    a generic error payload is returned (HTTP 200 in all cases, matching the
    original contract).
    """
    for api in API_KEYS:
        try:
            reply = await call_openai_api(api["key"], api["base_url"], request.message)
            return {"reply": reply}
        except asyncio.TimeoutError:
            # BUG FIX: this handler previously sat outside the loop, where it
            # was unreachable — the inner `except Exception` swallowed
            # TimeoutError (a subclass of Exception) before it could
            # propagate. Handle it here, before the generic handler.
            return {"error": "后台处理排队中，请等待"}
        except Exception as e:
            # Best-effort failover: log and try the next configured backend.
            print(f"Error calling API: {e}")
    return {"error": "所有服务均未响应"}

if __name__ == "__main__":
    # Launch a development server when this module is executed directly.
    import uvicorn

    uvicorn.run(app=app, host="0.0.0.0", port=8001)