"""
OpenAI 兼容API的路由文件
提供符合OpenAI API格式的接口，用于与支持OpenAI接口的客户端兼容
"""
import json
import asyncio
from typing import List, Optional, Union, Dict, Any
from fastapi import APIRouter, HTTPException, Body, Depends, Query, Response, Request, Header
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel, Field
import time
import logging

from .config import DEFAULT_MODEL, DEFAULT_TEMPERATURE, client_gemini, openai_compat, GEMINI_BASE_URL, GEMMA_BASE_URL
from .config import get_all_models, select_model_instance, create_client

# 设置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("openai_compat")

# 创建路由器 - 重要：不要添加前缀，让主应用决定前缀
router = APIRouter()

# 添加API Key验证依赖
async def verify_api_key(authorization: Optional[str] = Header(None), api_key: Optional[str] = Header(None, alias="x-api-key")):
    """
    Dependency that extracts an API key from the request.

    Two authentication styles are accepted:
    1. ``Authorization: Bearer YOUR_API_KEY``
    2. ``x-api-key: YOUR_API_KEY``

    Returns:
        The extracted API-key string.

    Raises:
        HTTPException: 401 when neither header provides a key. Any
        non-empty value is accepted — no real validation happens here.
    """
    # Log only which auth headers are present — never key material,
    # even truncated, so secrets don't end up in log files.
    logger.info(
        "收到API认证请求 - Authorization: %s, x-api-key: %s",
        "present" if authorization else "None",
        "present" if api_key else "None",
    )

    api_key_value = None

    # Prefer the Authorization header. Slice off the "Bearer " prefix
    # instead of str.replace(), which would also mangle keys that happen
    # to contain the substring "Bearer " anywhere inside them.
    if authorization and authorization.startswith("Bearer "):
        api_key_value = authorization[len("Bearer "):].strip()

    # Fall back to the x-api-key header.
    if not api_key_value and api_key:
        api_key_value = api_key

    # No key supplied via either mechanism -> reject the request.
    if not api_key_value:
        raise HTTPException(
            status_code=401,
            detail="Missing API key. Please provide it via 'Authorization: Bearer YOUR_API_KEY' header or 'x-api-key' header.",
        )

    # In a real deployment the key would be checked against a store; this
    # mock implementation accepts any non-empty value.
    return api_key_value

# 模型定义
class ModelObject(BaseModel):
    """One model entry in the OpenAI ``/models`` list schema."""
    id: str  # model identifier, e.g. "gemma-3-27b-it"
    object: str = "model"  # constant object tag required by the OpenAI schema
    created: int = 1677610602  # static placeholder timestamp, not a real creation time
    owned_by: str = "google"  # all models served here are Google-hosted

class ModelsResponse(BaseModel):
    """Envelope for the OpenAI ``/models`` response: a list of ModelObject."""
    object: str = "list"  # constant tag for list responses
    data: List[ModelObject]  # the available models

# 消息定义
class Message(BaseModel):
    """A single chat message as embedded in completion responses."""
    role: str  # e.g. "user", "assistant", "system"
    content: str  # the message text

# 定义请求体模型
class ChatMessage(BaseModel):
    """A single incoming chat message (request side)."""
    role: str = Field(..., description="消息角色，例如'user'、'assistant'、'system'")
    content: str = Field(..., description="消息内容")

class ChatCompletionRequest(BaseModel):
    """Request body schema for ``/chat/completions``.

    NOTE(review): the endpoint currently parses the raw JSON body itself,
    so this model documents the expected shape but is not enforced.
    """
    model: str = Field(..., description="要使用的模型名称")
    messages: List[ChatMessage] = Field(..., description="消息列表")
    temperature: Optional[float] = Field(0.7, description="温度参数，控制输出的随机性")
    max_tokens: Optional[int] = Field(2000, description="生成的最大令牌数")
    stream: Optional[bool] = Field(False, description="是否使用流式响应")

# 聊天响应定义
class ChatCompletionChoice(BaseModel):
    """One candidate completion in a non-streaming response."""
    index: int  # position of this choice in the choices array
    message: Message  # the assistant's reply
    finish_reason: str = "stop"  # why generation ended; this server always reports "stop"

class ChatCompletionUsage(BaseModel):
    """Token accounting block; zeros when the backend reports no usage."""
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0

class ChatCompletionResponse(BaseModel):
    """OpenAI-format non-streaming chat completion response."""
    id: str  # e.g. "chatcmpl-<random id>"
    object: str = "chat.completion"  # constant object tag
    created: int  # Unix timestamp of response creation
    model: str  # model that produced the completion
    choices: List[ChatCompletionChoice]
    usage: ChatCompletionUsage

# 流式响应定义
class DeltaMessage(BaseModel):
    """Incremental message fragment within a streaming chunk; a field is None when unchanged."""
    role: Optional[str] = None
    content: Optional[str] = None

class ChatCompletionChunkChoice(BaseModel):
    """One choice within a streaming chunk."""
    index: int
    delta: DeltaMessage  # the incremental content carried by this chunk
    finish_reason: Optional[str] = None  # None until the terminating chunk

class ChatCompletionChunk(BaseModel):
    """OpenAI-format streaming response chunk (``chat.completion.chunk``)."""
    id: str
    object: str = "chat.completion.chunk"
    created: int
    model: str
    choices: List[ChatCompletionChunkChoice]

# 修改OpenAI兼容的聊天补全API，添加API Key验证
@router.post("/chat/completions", dependencies=[Depends(verify_api_key)])
async def create_chat_completion(request: Request):
    """
    OpenAI-compatible chat completion endpoint (streaming and non-streaming).

    Requires API-key authentication via the ``verify_api_key`` dependency.
    The body is parsed manually (instead of through a Pydantic model) so
    clients that send extra fields are tolerated.

    Returns:
        ``StreamingResponse`` with ``text/event-stream`` SSE when ``stream``
        is true, otherwise an OpenAI-format ``chat.completion`` JSON object.
        Errors come back as HTTP 500 with an ``{"error": {...}}`` payload.
    """
    try:
        # Raw JSON body of the request.
        data = await request.json()

        # Extract generation parameters with OpenAI-style defaults.
        model = data.get("model", "gemma-3-27b-it")
        messages = data.get("messages", [])
        temperature = data.get("temperature", 0.7)
        max_tokens = data.get("max_tokens", 2000)
        stream = data.get("stream", False)

        logger.info(f"处理请求 - 模型: {model}, 消息数量: {len(messages)}")

        # Adapt the message list to the target model family (Gemma models
        # reject "system" messages, so those get folded into user content).
        formatted_messages = format_messages_for_model(model, messages)
        logger.info(f"格式化后的消息数量: {len(formatted_messages)}")

        # Build a client pointed at the backend that serves this model.
        client = create_client(model)

        try:
            # NOTE(review): this SDK call is synchronous and blocks the
            # event loop while the model responds — consider offloading to
            # a worker thread if throughput matters.
            response = client.chat.completions.create(
                model=model,
                messages=formatted_messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=stream
            )

            if stream:
                # Forward model chunks to the client as server-sent events.
                return StreamingResponse(
                    process_streaming_response(response, model),
                    media_type="text/event-stream"
                )

            # Non-streaming: unwrap the first choice (empty string if none).
            content = response.choices[0].message.content if response.choices else ""

            # Usage metadata is optional on some backends; default to zeros.
            usage_obj = getattr(response, "usage", None)
            usage = {
                "prompt_tokens": getattr(usage_obj, "prompt_tokens", 0),
                "completion_tokens": getattr(usage_obj, "completion_tokens", 0),
                "total_tokens": getattr(usage_obj, "total_tokens", 0)
            }

            # Repackage the backend response in OpenAI's response format.
            return {
                "id": f"chatcmpl-{generate_random_id()}",
                "object": "chat.completion",
                "created": int(time.time()),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": content
                        },
                        "finish_reason": "stop"
                    }
                ],
                "usage": usage
            }

        except Exception as e:
            logger.error(f"调用模型API时出错: {str(e)}")
            return JSONResponse(
                status_code=500,
                content={"error": {"message": str(e), "type": "model_error"}}
            )

    except Exception as e:
        logger.error(f"处理请求时发生错误: {str(e)}", exc_info=True)
        return JSONResponse(
            status_code=500,
            content={"error": {"message": str(e), "type": "server_error"}}
        )

# 根据模型类型格式化消息
def format_messages_for_model(model: str, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Adapt a chat message list to the conventions of the target model.

    Gemma-family models do not accept "system" messages, so for any model
    whose name starts with "gemma" the system content is collected and
    prepended to the first user message (or emitted as a standalone user
    message when no other messages exist). All other models receive the
    list untouched.

    Args:
        model: Target model name.
        messages: Original OpenAI-style message dicts.

    Returns:
        The (possibly rewritten) message list.
    """
    # Non-Gemma models take the messages exactly as provided.
    if not model.startswith("gemma"):
        return messages

    # Gather every system message's content, newline-terminated.
    system_content = "".join(
        msg.get("content", "") + "\n"
        for msg in messages
        if msg.get("role") == "system"
    )

    result: List[Dict[str, Any]] = []
    for msg in messages:
        role = msg.get("role")
        if role == "system":
            continue  # system messages are folded in, never passed through
        if role == "user" and system_content and not result:
            # First surviving message is a user turn: prefix the system text.
            result.append({
                "role": "user",
                "content": f"{system_content}\n{msg.get('content', '')}"
            })
        else:
            result.append(msg)

    # Only system messages were present: surface them as one user message.
    if not result and system_content:
        result.append({"role": "user", "content": system_content})

    return result

# 同样为模型列表API添加验证
@router.get("/models", dependencies=[Depends(verify_api_key)])
async def list_models():
    """
    List every available model in OpenAI-compatible ``/models`` format.
    Requires API-key authentication.
    """
    data = []
    for model_id in get_all_models():
        data.append({
            "id": model_id,
            "object": "model",
            "created": int(time.time()),
            "owned_by": "google",
        })
    return {"object": "list", "data": data}

# 为旧版引擎API添加验证
@router.get("/engines", dependencies=[Depends(verify_api_key)])
async def list_engines():
    """
    Legacy compatibility endpoint: list models in the old "engines" shape.
    Requires API-key authentication.
    """
    engines = []
    for engine_id in get_all_models():
        engines.append({
            "id": engine_id,
            "object": "engine",
            "created": int(time.time()),
            "owner": "google",
        })
    return {"object": "list", "data": engines}

# 生成随机ID的辅助函数
def generate_random_id(length=24):
    """Return a pseudo-random ID of lowercase letters and digits.

    Not cryptographically secure — intended only for OpenAI-style
    response identifiers such as the "chatcmpl-..." suffix.
    """
    import random
    import string
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choices(alphabet, k=length))

# 处理流式响应的辅助函数
async def process_streaming_response(response, model):
    """
    Translate a model SDK streaming response into OpenAI-compatible
    server-sent events.

    Fixes over the previous implementation:
    - Payloads are serialized with ``json.dumps`` so content containing
      quotes, newlines, or backslashes stays valid JSON (the old
      hand-concatenated JSON broke on such characters).
    - Each event is a single ``data: <json>`` line followed by a blank
      line, as SSE framing requires (previously only the first line of a
      multi-line event carried the ``data:`` prefix).
    - One completion id is generated for the whole stream, matching
      OpenAI's behavior of reusing the id across chunks.
    """
    import random
    import string

    # One id and timestamp for the entire stream.
    completion_id = "chatcmpl-" + "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(24)
    )
    created = int(time.time())

    def sse_event(delta, finish_reason):
        # Serialize one chunk as a single-line SSE "data:" event.
        payload = {
            "id": completion_id,
            "object": "chat.completion.chunk",
            "created": created,
            "model": model,
            "choices": [
                {"index": 0, "delta": delta, "finish_reason": finish_reason}
            ],
        }
        return f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"

    for chunk in response:
        if hasattr(chunk, 'choices') and chunk.choices:
            delta = chunk.choices[0].delta
            content = delta.content if hasattr(delta, 'content') and delta.content else ""
            yield sse_event({"content": content}, None)

    # Terminating chunk with finish_reason "stop", then the [DONE] sentinel.
    yield sse_event({}, "stop")
    yield "data: [DONE]\n\n"

@router.get("/url-test")
async def url_test(request: Request):
    """
    Diagnostic endpoint: echo the request's base URL, full URL, path and
    headers so routing/prefix problems can be inspected from the client.
    """
    details = {}
    details["base_url"] = str(request.base_url)
    details["url"] = str(request.url)
    details["path"] = request.url.path
    details["headers"] = dict(request.headers)
    return details

@router.post("/chat/completions-direct")
async def create_chat_completion_direct(request: Request):
    """
    Unauthenticated chat-completion stub for connectivity testing only —
    always answers with a fixed assistant message and dummy token usage.
    """
    try:
        body = await request.json()
        canned_message = {
            "role": "assistant",
            "content": "这是一个测试响应，表明API服务可以正常访问。",
        }
        return {
            "id": "chatcmpl-test",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": body.get("model", "gemma-3-27b-it"),
            "choices": [
                {"index": 0, "message": canned_message, "finish_reason": "stop"}
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 10,
                "total_tokens": 20,
            },
        }
    except Exception as exc:
        return JSONResponse(
            status_code=500,
            content={"error": {"message": str(exc)}},
        )