#!/usr/bin/env python3
"""
OpenAI 兼容代理，清洗 vLLM 响应格式以确保与 Dify 兼容
使用 aiohttp 实现，避免添加新的依赖
"""

import asyncio
import aiohttp
from aiohttp import web, ClientSession
import json

# Backend (vLLM) service address
VLLM_URL = "http://127.0.0.1:8001"

def clean_model_response(vllm_response):
    """Reduce a vLLM model-list payload to the OpenAI-standard fields.

    Any input that is not a dict carrying a "data" key collapses to an
    empty OpenAI-style model list.
    """
    if not (isinstance(vllm_response, dict) and "data" in vllm_response):
        return {"object": "list", "data": []}

    # Keep only the four fields the OpenAI /v1/models schema defines;
    # vLLM-specific extras (permissions, config, ...) are dropped.
    sanitized = [
        {
            "id": entry.get("id"),
            "object": "model",
            "created": entry.get("created", 0),
            "owned_by": entry.get("owned_by", "vllm"),
        }
        for entry in vllm_response.get("data", [])
    ]

    return {"object": "list", "data": sanitized}

async def models_handler(request):
    """Serve /v1/models: fetch the list from vLLM and normalize it.

    On a 200 from the backend the payload is passed through
    clean_model_response(); any non-200 status is relayed verbatim.
    Transport or parsing failures become a 500 JSON error.
    """
    async with ClientSession() as session:
        try:
            async with session.get(f"{VLLM_URL}/v1/models") as upstream:
                if upstream.status != 200:
                    # Relay backend errors untouched (body and status).
                    error_body = await upstream.text()
                    return web.Response(text=error_body, status=upstream.status)
                payload = await upstream.json()
                return web.json_response(clean_model_response(payload))
        except Exception as e:
            return web.json_response({"error": f"Proxy error: {str(e)}"}, status=500)

async def proxy_handler(request):
    """Transparently forward any other request to the vLLM backend.

    Fix over the original: hop-by-hop headers (RFC 9110 section 7.6.1)
    are stripped in both directions. The original forwarded the client's
    Host/Content-Length upstream and copied the backend's framing headers
    (Transfer-Encoding, Connection, stale Content-Length) onto a fully
    buffered web.Response, which can corrupt or break the response —
    aiohttp must compute framing for the buffered body itself.
    """
    hop_by_hop = {
        "connection", "keep-alive", "proxy-authenticate",
        "proxy-authorization", "te", "trailers",
        "transfer-encoding", "upgrade",
    }
    path = request.match_info['path']
    url = f"{VLLM_URL}/{path}"

    # Forward request headers minus hop-by-hop ones, Host (the backend
    # should see its own host) and Content-Length (aiohttp recomputes it
    # for the body we pass).
    fwd_headers = {
        k: v for k, v in request.headers.items()
        if k.lower() not in hop_by_hop and k.lower() not in ("host", "content-length")
    }

    async with ClientSession() as session:
        try:
            # Read the request body, if any, so it can be replayed upstream.
            body = None
            if request.body_exists and request.can_read_body:
                body = await request.read()

            # Forward the request with method, query string and body intact.
            async with session.request(
                method=request.method,
                url=url,
                headers=fwd_headers,
                data=body,
                params=request.query
            ) as resp:
                response_data = await resp.read()
                # Drop framing/hop-by-hop headers from the backend response:
                # the body is fully buffered here, so a stale
                # "Transfer-Encoding: chunked" or Content-Length would be wrong.
                resp_headers = {
                    k: v for k, v in resp.headers.items()
                    if k.lower() not in hop_by_hop and k.lower() != "content-length"
                }
                return web.Response(
                    body=response_data,
                    status=resp.status,
                    headers=resp_headers
                )
        except Exception as e:
            # Surface transport/backend failures as a JSON error.
            return web.json_response({"error": f"Proxy error: {str(e)}"}, status=500)

async def health_handler(request):
    """Liveness probe: the proxy always reports itself as healthy."""
    status_payload = {"status": "ok"}
    return web.json_response(status_payload)

def create_app():
    """Build the aiohttp application with routes in precedence order.

    Fix over the original: the catch-all proxy route is registered LAST.
    aiohttp resolves routes in registration order, so registering
    "/{path:.*}" before "/health" shadowed the health check and sent
    /health requests to the backend instead.
    """
    app = web.Application()

    # Specific routes first.
    app.router.add_get("/v1/models", models_handler)
    app.router.add_get("/health", health_handler)
    # Catch-all proxy must come last so it cannot shadow the routes above.
    app.router.add_route("*", "/{path:.*}", proxy_handler)

    return app

if __name__ == "__main__":
    print("Starting OpenAI Compatible Proxy...")
    print("Proxy will be available at http://0.0.0.0:8003")
    print("Press Ctrl+C to stop")
    
    app = create_app()
    web.run_app(app, host="0.0.0.0", port=8003)