#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import httpx
import asyncio
from typing import List

app = FastAPI()

# Backend service configuration list (round-robin load-balancing pool).
# NOTE(review): the API key is hardcoded in source — consider moving it to an
# environment variable or config file before sharing/deploying this code.
BACKENDS = [
    {  # Original service
        "base_url": "http://192.168.100.159:3080/api/v1/",
        "api_key": "fastgpt-o4xhIPNCJnxOCIjdhXszuqW8E8NZm00BeSxEe6269jOr74jKF2BGq3x1Egnd"
    },
    # {  # Additional service (template)
    #     "base_url": "http://<new-service-address>/api/v1/",  # replace with the real address
    #     "api_key": "xxxxxxxxxxx"  # replace with the real API key
    # }
]

# Admission-control configuration: at most MAX_CONCURRENT requests are
# processed at once; excess requests get an immediate "queued" reply.
MAX_CONCURRENT = 2
current_requests = 0          # number of requests currently in flight
request_lock = asyncio.Lock() # guards current_requests

# Load-balancing state: index of the next backend to hand out.
current_backend_index = 0
backend_lock = asyncio.Lock() # guards current_backend_index


class ChatRequest(BaseModel):
    """Request body accepted by the chat proxy endpoint."""

    # The user's chat message; forwarded to the backend model.
    message: str


def get_next_backend():
    """Return the next backend config dict in round-robin order.

    Advances the module-level ``current_backend_index`` cursor modulo the
    pool size. Callers are expected to hold ``backend_lock`` while calling,
    since the cursor update is not atomic on its own.
    """
    global current_backend_index
    chosen = current_backend_index
    current_backend_index = (chosen + 1) % len(BACKENDS)
    return BACKENDS[chosen]


async def call_backend(backend, message, apiKey, path):
    """POST a chat-completion request to one backend and return the reply text.

    Args:
        backend: Backend config dict; only ``base_url`` is read here.
        message: The user's message; a length-limit instruction is appended.
        apiKey: Bearer token placed in the ``Authorization`` header.
        path: Path suffix appended to the backend base URL
            (e.g. ``chat/completions``).

    Returns:
        The ``content`` of the first choice's message in the backend's
        JSON response.

    Raises:
        HTTPException: status 500 wrapping any transport error, non-2xx
            status, or unexpected response shape from the backend.
    """
    url = backend["base_url"].strip() + path
    headers = {
        "Authorization": f"Bearer {apiKey}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "deepseek-r1:32b",
        "messages": [{
            "role": "user",
            # Instruction appended to every prompt: "keep all answers
            # within 100 characters" (must stay byte-identical).
            "content": message + "所有的回答在100字以内"
        }],
    }

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(
                url,
                headers=headers,
                json=payload,
                timeout=100
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]
        # Broad on purpose: transport errors, bad status codes, and
        # malformed JSON (KeyError/IndexError) all become one 500 reply.
        except Exception as e:
            # FIX: chain with `from e` so the original traceback is kept
            # for logging instead of being flattened into a string only.
            raise HTTPException(
                status_code=500,
                detail=f"Backend service error: {str(e)}"
            ) from e


@app.post("/api/v1/{path:path}")
async def chat_handler(chat_request: ChatRequest, path: str):
    """Proxy a chat request to the next backend with simple admission control.

    If MAX_CONCURRENT requests are already in flight, replies immediately
    with a "queued, please wait" message instead of forwarding. Otherwise
    picks a backend round-robin and forwards the message.

    Args:
        chat_request: Parsed request body containing the user message.
        path: Remainder of the URL path, forwarded to the backend.

    Returns:
        ``{"reply": <text>}`` with either the backend's answer or the
        queue-full notice.
    """
    global current_requests

    # Admission control: reject early (before incrementing) when full,
    # so the queue-full path never touches the finally-decrement below.
    async with request_lock:
        if current_requests >= MAX_CONCURRENT:
            return {"reply": "后台处理排队中，请等待"}
        current_requests += 1

    try:
        # Round-robin backend selection under its own lock.
        async with backend_lock:
            backend = get_next_backend()

        # BUG FIX: the original passed ``chat_request.api_key``, but
        # ChatRequest declares only ``message``, so every admitted request
        # crashed with AttributeError. The credential configured per
        # backend is the intended one.
        result = await call_backend(
            backend, chat_request.message, backend["api_key"], path
        )
        return {"reply": result}
    finally:
        # Always release the slot, whether the backend call succeeded
        # or raised.
        async with request_lock:
            current_requests -= 1


if __name__ == "__main__":
    import uvicorn

    # Serve on all interfaces, port 8002 (direct script invocation only).
    uvicorn.run(app, host="0.0.0.0", port=8002)