from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List
import requests
import time
import threading

app = FastAPI()

# Request body schema for the /chat endpoint.
class ChatRequest(BaseModel):
    # The user's chat message, forwarded verbatim to the upstream model.
    message: str

# Upstream OpenAI-compatible API services to fan out to. Each entry holds a
# bearer key and a base URL; the base URL must end with a trailing slash,
# because the path "chat/completions" is appended to it directly.
# SECURITY NOTE(review): this API key is hard-coded in source — move it to an
# environment variable or secrets store before sharing/deploying this file.
API_KEYS = [
    {"key": "fastgpt-o4xhIPNCJnxOCIjdhXszuqW8E8NZm00BeSxEe6269jOr74jKF2BGq3x1Egnd", "base_url": "http://192.168.100.159:3080/api/v1/"},
    # {"key": "xxxxxxxxxxx", "base_url": "https://new-service.com/api/v1/"}  # example: an additional API service
]

# Cap on concurrently processed /chat requests; also sizes the worker pool.
MAX_THREADS = 1
executor = ThreadPoolExecutor(max_workers=MAX_THREADS)

# Number of /chat requests currently in flight. Read and written only while
# holding processing_requests_lock; used by the handler for admission control.
processing_requests = 0
processing_requests_lock = threading.Lock()

def call_openai_api(api_key, base_url, message, timeout=60, model="deepseek-r1:32b"):
    """Call an OpenAI-compatible chat-completions endpoint and return the reply text.

    Args:
        api_key: Bearer token for the upstream service.
        base_url: Service base URL; must end with a trailing slash, since
            "chat/completions" is appended to it directly.
        message: The user message, sent as a single-turn conversation.
        timeout: Seconds to wait for connect/read before aborting. The
            original call had no timeout, so a hung upstream would block a
            worker thread forever (and with MAX_THREADS=1, wedge the service).
        model: Model identifier passed to the upstream service.

    Returns:
        The assistant's reply text (``choices[0].message.content``).

    Raises:
        requests.HTTPError: If the upstream responds with a 4xx/5xx status.
        requests.RequestException: On connection failure or timeout.
        KeyError, IndexError: If the response JSON lacks the expected shape.
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    data = {
        "model": model,
        "messages": [{"role": "user", "content": message}]
    }
    # timeout= is the bug fix: requests has no default timeout of its own.
    response = requests.post(
        f"{base_url}chat/completions", headers=headers, json=data, timeout=timeout
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]

@app.post("/chat")
def chat(request: ChatRequest):
    """Forward the user's message to the configured upstream API(s).

    Deliberately a plain ``def`` rather than ``async def``: the body does
    only blocking work (lock acquisition, ``as_completed`` waiting on
    synchronous HTTP calls). FastAPI runs sync endpoints in its own
    threadpool; the original ``async def`` version blocked the event loop
    for the entire upstream round-trip.

    Returns:
        ``{"reply": ...}`` with the first successful upstream answer, or an
        ``{"error": ...}`` payload when the service is saturated or every
        upstream fails.
    """
    global processing_requests
    # Admission control: reject immediately rather than queueing when all
    # worker slots are already busy.
    with processing_requests_lock:
        if processing_requests >= MAX_THREADS:
            return {"error": "后台处理排队中，请等待"}
        processing_requests += 1

    try:
        # Fan out to every configured upstream and take the first success.
        future_to_api = {
            executor.submit(call_openai_api, api["key"], api["base_url"], request.message): api
            for api in API_KEYS
        }
        try:
            for future in as_completed(future_to_api):
                try:
                    return {"reply": future.result()}
                except Exception as e:
                    # Log and fall through to the next provider, if any.
                    print(f"Error calling API: {e}")
        finally:
            # Once we have an answer (or gave up), cancel any still-queued
            # futures so they don't occupy pool slots. No-op for futures
            # that already completed or started running.
            for future in future_to_api:
                future.cancel()
    finally:
        # Always release the admission slot, on success or failure.
        with processing_requests_lock:
            processing_requests -= 1

    return {"error": "所有服务均未响应"}

if __name__ == "__main__":
    import uvicorn
    # Development entry point: serve on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)