import asyncio
import logging
import threading
import uuid
from queue import PriorityQueue
from typing import Optional

import requests
from fastapi import FastAPI, Request, HTTPException, Header, Depends
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel

app = FastAPI()

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 大模型服务地址
MODEL_SERVICE_URL = "http://18.0.44.11:9982/v1/chat/completions"

# 最大并发数
MAX_CONCURRENT_REQUESTS = 5

# 当前并发请求数
current_requests = 0
request_lock = threading.Lock()

# 请求队列
request_queue = PriorityQueue()
queue_lock = threading.Lock()

# 存储正在进行的请求以方便取消
active_requests = {}
active_requests_lock = threading.Lock()

class ChatRequest(BaseModel):
    model: str
    temperature: float
    max_tokens: int
    messages: list
    stream: bool = False
    priority: int = 1

def require_api_key(api_key: str = Header(...)):
    if not api_key or api_key != 'Bearer YOUR_API_KEY':
        raise HTTPException(status_code=401, detail="Unauthorized")
    return api_key

@app.post("/v1/chat/completions", dependencies=[Depends(require_api_key)])
async def chat_completions(request: Request, chat_request: ChatRequest):
    global current_requests

    data = chat_request.dict(exclude_unset=True)  # 获取所有设置的字段
    priority = data.pop('priority', 1)  # 移除优先级字段

    # 生成唯一的请求ID
    request_id = uuid.uuid4().int & (1<<31)-1  # 生成一个32位整数作为请求ID

    with queue_lock:
        request_queue.put((priority, request_id, data))

    response_data = None

    while True:
        with queue_lock:
            if not request_queue.empty():
                _, req_id, data = request_queue.queue[0]
                if req_id == request_id:
                    request_queue.get()  # 移除当前请求
                    break

        logger.info("Waiting for higher priority request to complete.")
        request_queue.task_done()

    with request_lock:
        if current_requests >= MAX_CONCURRENT_REQUESTS:
            logger.warning("Max concurrent requests reached. Waiting for a slot.")
            while current_requests >= MAX_CONCURRENT_REQUESTS:
                pass

        current_requests += 1

    try:
        headers = {
            'Content-Type': 'application/json',
            'Authorization': request.headers.get('Authorization'),
            'X-Request-ID': str(request_id)  # 添加请求ID到头部
        }

        if data['stream']:
            async def generate():
                nonlocal response_data
                session = requests.Session()
                with active_requests_lock:
                    active_requests[request_id] = session
                try:
                    with session.post(MODEL_SERVICE_URL, json=data, headers=headers, stream=True) as resp:
                        resp.raise_for_status()
                        for line in resp.iter_lines(decode_unicode=True):
                            if line.startswith('data:'):
                                yield f"data: {line}\n\n"
                except requests.exceptions.RequestException as e:
                    logger.error(f"Error calling model service: {e}")
                    yield f"data: {{'error': '{str(e)}'}}\n\n"
                finally:
                    with active_requests_lock:
                        if request_id in active_requests:
                            del active_requests[request_id]

            return StreamingResponse(generate(), media_type='text/event-stream')
        else:
            response = requests.post(MODEL_SERVICE_URL, json=data, headers=headers)
            response.raise_for_status()
            response_data = response.json()
            response_headers = {'X-Request-ID': str(request_id)}
            return response_data, response_headers
    except requests.exceptions.HTTPError as http_err:
        logger.error(f"HTTP error occurred: {http_err.response.status_code} - {http_err.response.text}")
        response_data = {"error": f"{http_err.response.status_code} - {http_err.response.text}"}
        response_headers = {'X-Request-ID': str(request_id)}
        return response_data, response_headers
    except requests.exceptions.RequestException as e:
        logger.error(f"Error calling model service: {e}")
        response_data = {"error": str(e)}
        response_headers = {'X-Request-ID': str(request_id)}
        return response_data, response_headers
    finally:
        with request_lock:
            current_requests -= 1

@app.post("/cancel_request", dependencies=[Depends(require_api_key)])
async def cancel_request(request_id: int):
    with active_requests_lock:
        if request_id in active_requests:
            session = active_requests.pop(request_id)
            session.close()
            logger.info(f"Cancelled request with ID: {request_id}")
            return {"status": "cancelled"}
        else:
            logger.info(f"No active request found with ID: {request_id}")
            return {"status": "not_found"}

@app.on_event("shutdown")
async def shutdown_event():
    with active_requests_lock:
        for session in active_requests.values():
            session.close()
        active_requests.clear()

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)



