import json
import os
import secrets
import time
import uuid
from typing import Any, Dict, List, Optional, Union

from dotenv import load_dotenv
from fastapi import APIRouter, Depends, HTTPException, Request, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field

# Import config from the current package
from . import config

# Load environment variables from a local .env file, if present.
load_dotenv()

# API key that clients must present as a Bearer token; falls back to a
# placeholder when CUSTOM_COMPAT_API_KEY is unset (dev convenience).
API_KEY = os.getenv("CUSTOM_COMPAT_API_KEY", "dummy-key")

# HTTP Bearer security scheme used as a dependency by every endpoint below.
security = HTTPBearer()

# Router exposing OpenAI-compatible endpoints under the /v1 prefix.
router = APIRouter(prefix="/v1", tags=["OpenAI Compatible API"])

# Pydantic request/response models (OpenAI chat-completion wire format)
class Message(BaseModel):
    """A single chat message in OpenAI format."""
    role: str  # e.g. "user" / "assistant"; no validation beyond str here
    content: str

class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions."""
    model: str
    messages: List[Message]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    # NOTE(review): stream is accepted here but the endpoint never streams —
    # a plain JSON response is returned even when stream=True.
    stream: Optional[bool] = False
    
class ChatCompletionChoice(BaseModel):
    """One generated completion choice within a response."""
    index: int
    message: Message
    finish_reason: str  # this server always reports "stop"

class Usage(BaseModel):
    """Token accounting for a completion (OpenAI `usage` object)."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int

class ChatCompletionResponse(BaseModel):
    """Response body for POST /v1/chat/completions (non-streaming)."""
    id: str       # "chatcmpl-<uuid4>"
    object: str   # always "chat.completion"
    created: int  # unix timestamp (seconds)
    model: str
    choices: List[ChatCompletionChoice]
    usage: Usage

class ModelData(BaseModel):
    """A single entry in the /v1/models listing."""
    id: str
    object: str = "model"
    owned_by: str = "organization"
    # Pydantic deep-copies mutable field defaults per instance, so the
    # empty-list default is safe here.
    permission: List[Any] = []

class ModelsResponse(BaseModel):
    """Response body for GET /v1/models."""
    object: str = "list"
    data: List[ModelData]

# API-key verification dependency
async def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)) -> str:
    """Validate the Bearer token against the configured API key.

    Uses ``secrets.compare_digest`` instead of ``!=`` so the comparison runs
    in constant time and does not leak key material via timing side channels.

    Raises:
        HTTPException: 401 when the presented key does not match.

    Returns:
        The validated API key string.
    """
    if not secrets.compare_digest(credentials.credentials, API_KEY):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
        )
    return credentials.credentials

# List configured models
@router.get("/models", response_model=ModelsResponse)
async def get_models(api_key: str = Depends(verify_api_key)):
    """Return every configured model without probing availability.

    The original handler wrapped this in a try/except whose except branch
    re-ran the exact same two statements — if they failed once they would
    fail again and propagate anyway — so the dead handler was removed.
    """
    models = [ModelData(id=model_id) for model_id in config.model_configs]
    return ModelsResponse(data=models)

# Chat completion endpoint
@router.post("/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(
    request: ChatCompletionRequest,
    api_key: str = Depends(verify_api_key)
):
    """OpenAI-compatible chat completion endpoint (non-streaming).

    Resolves the requested model through ``config``, forwards the messages
    to the upstream client, and repackages the first choice into an
    OpenAI-shaped response.

    Raises:
        HTTPException: 400 for an unknown model or bad parameters,
            500 for any other upstream/processing failure.
    """
    try:
        # Reject models that are not present in the configuration.
        if request.model not in config.model_configs:
            available_models_str = ", ".join(config.get_available_models())
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"模型 '{request.model}' 不存在于配置中。可用模型: {available_models_str}"
            )

        # Convert pydantic messages to plain dicts for the upstream client.
        messages_dict = [{"role": msg.role, "content": msg.content} for msg in request.messages]

        # Resolve the upstream client and its model identifier.
        client, model_name = config.get_client(request.model)

        params = {
            "model": model_name,
            "messages": messages_dict,
            "temperature": request.temperature,
            # Bug fix: top_p is declared on ChatCompletionRequest but was
            # silently dropped — it is now forwarded upstream.
            "top_p": request.top_p,
        }
        if request.max_tokens is not None:
            params["max_tokens"] = request.max_tokens

        # NOTE(review): request.stream is accepted but ignored — a plain
        # JSON response is returned even when stream=True.
        response = client.chat.completions.create(**params)

        # Extract the generated text from the first choice.
        content = response.choices[0].message.content

        # Usage may be absent or partially populated depending on the
        # upstream provider; default every counter to 0 (the `or 0` also
        # guards against explicit None values).
        usage = getattr(response, "usage", None)

        return ChatCompletionResponse(
            id=f"chatcmpl-{uuid.uuid4()}",
            object="chat.completion",
            created=int(time.time()),
            model=request.model,
            choices=[
                ChatCompletionChoice(
                    index=0,
                    message=Message(role="assistant", content=content),
                    finish_reason="stop"
                )
            ],
            usage=Usage(
                prompt_tokens=getattr(usage, "prompt_tokens", 0) or 0,
                completion_tokens=getattr(usage, "completion_tokens", 0) or 0,
                total_tokens=getattr(usage, "total_tokens", 0) or 0,
            )
        )
    except HTTPException:
        # Re-raise HTTP errors untouched (e.g. the 400 above).
        raise
    except ValueError as e:
        # Value errors usually indicate a configuration/parameter problem.
        available_models_str = ", ".join(config.get_available_models())
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"参数错误: {str(e)}。可用模型: {available_models_str}"
        )
    except Exception as e:
        # Catch-all boundary: surface the failure as a 500 with context.
        available_models_str = ", ".join(config.get_available_models())
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"处理请求时发生错误: {str(e)}。请尝试使用其他可用模型: {available_models_str}"
        )

