from fastapi import APIRouter, Depends, HTTPException
from typing import List, Dict, Optional, Any, Union
from pydantic import BaseModel, Field

# OpenAI-compatible API router instance (mounted by the main application elsewhere).
openai_compatible_router = APIRouter()

# OpenAI-compatible model descriptor (one entry of the /models listing).
class OpenAIModel(BaseModel):
    """A single model entry in the OpenAI-compatible ``/models`` response."""
    id: str  # model identifier exposed to clients
    object: str = "model"  # fixed object tag per the OpenAI schema
    created: int  # unix timestamp, seconds
    owned_by: str = "organization-owner"
    permissions: List[Dict]  # NOTE(review): OpenAI's schema calls this field "permission" — kept as "permissions" for existing callers; confirm client compatibility

class ModelList(BaseModel):
    """OpenAI-compatible list envelope returned by ``/models``."""
    object: str = "list"  # fixed tag per the OpenAI schema
    data: List[OpenAIModel]

# Chat-completion request model (OpenAI-compatible).
class ChatCompletionRequest(BaseModel):
    """Request body for ``/chat/completions``, mirroring the OpenAI API."""
    model: str  # name of the model to generate with
    messages: List[Dict[str, str]]  # each dict is expected to carry "role" and "content" keys
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1  # accepted for compatibility; only one choice is produced below
    stream: Optional[bool] = False
    stop: Optional[Union[str, List[str]]] = None
    max_tokens: Optional[int] = 1024
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    logit_bias: Optional[Dict[str, float]] = None
    user: Optional[str] = None

# Chat-completion response models (OpenAI-compatible).
class Choice(BaseModel):
    """One generated completion choice within a chat-completion response."""
    index: int  # position of this choice in the choices list
    message: Dict[str, str]  # {"role": ..., "content": ...}
    finish_reason: str  # e.g. "stop"

class ChatCompletionResponse(BaseModel):
    """Response body for ``/chat/completions``, mirroring the OpenAI API."""
    id: str  # "chatcmpl-..." identifier
    object: str = "chat.completion"  # fixed tag per the OpenAI schema
    created: int  # unix timestamp, seconds
    model: str  # echoes the requested model name
    choices: List[Choice]
    usage: Dict[str, int]  # prompt_tokens / completion_tokens / total_tokens

# List-models endpoint (OpenAI-compatible).
@openai_compatible_router.get("/models", summary="列出可用模型（OpenAI兼容）")
def list_models_openai_compatible():
    """Return all available models in OpenAI ``/models`` list format.

    Falls back to the manager's default model name when no models are
    available, so the endpoint never returns an empty list.

    Returns:
        ModelList: OpenAI-style ``{"object": "list", "data": [...]}`` payload.
    """
    import time
    from models.model_manager import ModelManager

    manager = ModelManager()
    available_models = manager.get_available_models()

    # Never expose an empty catalogue: fall back to the default model.
    if not available_models:
        available_models = [manager.get_default_model_name()]

    # All entries share one creation timestamp; permissions are left empty
    # because this backend does not track per-model permissions.
    current_time = int(time.time())
    models = [
        OpenAIModel(id=model_name, created=current_time, permissions=[])
        for model_name in available_models
    ]

    return ModelList(data=models)

# Chat-completion endpoint (OpenAI-compatible).
def _build_prompt_from_messages(messages: List[Dict[str, str]]) -> str:
    """Flatten an OpenAI-style message list into a single prompt string.

    Messages with the known roles "system"/"user"/"assistant" are prefixed
    with their labels; messages with any other role are skipped. A message
    missing "role" or "content" raises KeyError, which the endpoint maps
    to an HTTP 500.
    """
    role_prefixes = {
        "system": "系统提示",
        "user": "用户",
        "assistant": "助手",
    }
    parts = []
    for message in messages:
        prefix = role_prefixes.get(message["role"])
        if prefix is not None:
            parts.append(f"{prefix}: {message['content']}\n")
    return "".join(parts)

@openai_compatible_router.post("/chat/completions", summary="聊天完成（OpenAI兼容）")
def create_chat_completion(request: ChatCompletionRequest):
    """Create a chat completion (OpenAI API format).

    Builds a single prompt from the message list, generates text through
    ``ModelManager``, and wraps the result in an OpenAI-style response with
    a rough character-based token estimate.

    Raises:
        HTTPException: 500 with a generation-failure detail on any error.
    """
    import time
    import uuid
    from models.model_manager import ModelManager

    try:
        prompt = _build_prompt_from_messages(request.messages)

        # NOTE(review): request.stream is forwarded to the manager, but the
        # response below is always a single non-streamed payload — confirm
        # whether streaming clients are actually supported upstream.
        manager = ModelManager()
        result = manager.generate_text(
            prompt=prompt,
            model_name=request.model,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
            stream=request.stream
        )

        current_time = int(time.time())
        # OpenAI-style id: "chatcmpl-" plus 28 characters of a UUID4.
        response_id = f"chatcmpl-{str(uuid.uuid4())[:28]}"

        # Rough token estimate (~4 characters per token); swap in a real
        # tokenizer if accurate usage accounting is ever required.
        prompt_tokens = len(prompt) // 4
        completion_tokens = len(result["text"]) // 4

        return ChatCompletionResponse(
            id=response_id,
            created=current_time,
            model=request.model,
            choices=[
                Choice(
                    index=0,
                    message={
                        "role": "assistant",
                        "content": result["text"]
                    },
                    finish_reason="stop"
                )
            ],
            usage={
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens
            }
        )

    except HTTPException:
        # Don't re-wrap deliberate HTTP errors (e.g. raised by the manager)
        # as generic 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"生成失败: {str(e)}")