from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from typing import List
import asyncio
import aiohttp
import time
import openai

# Backend server endpoints that incoming requests are fanned out to
SERVERS = ["http://192.168.100.156:3080/api/v1/"]
# Maximum number of requests processed concurrently (enforced by the semaphore below)
MAX_CONCURRENT_REQUESTS = 2

# Create the FastAPI application
app = FastAPI()

# Semaphore limiting how many fan-outs may run at the same time
semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)

# Counter of requests currently being processed; read by the /process
# handler for its fast-fail 429 check
processing_requests = 0

async def send_request_to_server(session, server_url, data):
    """POST *data* as JSON to *server_url* and return the decoded response.

    Never raises: a non-200 status or any transport error is reported as
    an ``{"error": ...}`` dict, so results can be gathered safely.
    """
    try:
        async with session.post(server_url, json=data) as response:
            if response.status != 200:
                return {"error": f"Server returned status {response.status}"}
            return await response.json()
    except Exception as exc:
        # Best-effort: surface the failure to the caller instead of raising.
        return {"error": str(exc)}

async def distribute_request(data):
    """
    Fan *data* out to every server in SERVERS and collect the results.

    Acquires the global semaphore so at most MAX_CONCURRENT_REQUESTS
    fan-outs run at once, and keeps the ``processing_requests`` counter
    accurate even when a request fails.

    Returns a list with one result dict per entry in SERVERS (each either
    the server's JSON response or an ``{"error": ...}`` dict).
    """
    global processing_requests
    async with semaphore:
        processing_requests += 1
        try:
            async with aiohttp.ClientSession() as session:
                tasks = [
                    send_request_to_server(session, server_url, data)
                    for server_url in SERVERS
                ]
                results = await asyncio.gather(*tasks)
        finally:
            # Decrement even if session creation or gather raises; otherwise
            # the counter drifts upward and the /process fast-fail check
            # would eventually reject every request with 429.
            processing_requests -= 1
        return results


@app.post("/process")
async def process_request(request: Request):
    """
    Entry point for external requests.

    Fast-fails with 429 when the number of in-flight requests has already
    reached MAX_CONCURRENT_REQUESTS, and with 400 when the request body is
    not valid JSON; otherwise distributes the payload to the backend
    servers and returns their combined results.
    """
    # Read-only check of the module-level counter; no `global` needed.
    if processing_requests >= MAX_CONCURRENT_REQUESTS:
        raise HTTPException(status_code=429, detail="Too many requests. Please try again later.")

    try:
        data = await request.json()
    except ValueError:
        # request.json() raises json.JSONDecodeError (a ValueError) on a
        # malformed body; without this the client would see a 500.
        raise HTTPException(status_code=400, detail="Request body must be valid JSON.")

    results = await distribute_request(data)
    return JSONResponse(content={"results": results}, status_code=200)

if __name__ == "__main__":
    import uvicorn
    # Direct dev-style run: serve on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)

