"""
大模型服务相关的路由
提供调用大模型API的功能
"""

from typing import List, Optional, Dict, Any
from fastapi import APIRouter, Depends, HTTPException, status, Body
from sqlalchemy.orm import Session
from pydantic import BaseModel, Field

from dao.postgresql_connect import get_db
from models import LLMModel, ModelModality, ModelStatus
from routers.auth import get_current_user
from models.user import User

# Router for all LLM-service endpoints; every route below is mounted
# under the /api/llm-service prefix.
llm_service_router = APIRouter(
    prefix="/api/llm-service",
    tags=["llm-service"],
    responses={404: {"description": "Not found"}},
)

# Request / response schemas (Pydantic models)
class ChatMessage(BaseModel):
    """A single message in a chat conversation: a role/content pair."""
    # Expected roles per the field description: 'user', 'assistant', 'system'.
    role: str = Field(..., description="消息角色，如 'user', 'assistant', 'system'")
    content: str = Field(..., description="消息内容")

class ChatRequest(BaseModel):
    """Request payload for the /chat endpoint.

    Sampling parameters are range-validated so obviously invalid values
    (negative temperature, non-positive token budget) are rejected with a
    422 before any provider API would be called.
    """
    model_id: str = Field(..., description="模型ID")
    messages: List[ChatMessage] = Field(..., description="聊天消息列表")
    # Providers commonly accept temperatures in [0, 2]; clamp via validation.
    temperature: Optional[float] = Field(0.7, ge=0.0, le=2.0, description="温度参数，控制随机性")
    # A non-positive token budget would make every completion empty.
    max_tokens: Optional[int] = Field(1000, gt=0, description="最大生成token数")
    # NOTE(review): stream is accepted but the /chat handler currently ignores it.
    stream: Optional[bool] = Field(False, description="是否使用流式响应")
    additional_params: Optional[Dict[str, Any]] = Field(None, description="额外的模型参数")

class ChatResponse(BaseModel):
    """Response returned by the /chat and /models/{model_id}/test endpoints."""
    model_id: str = Field(..., description="使用的模型ID")
    response: str = Field(..., description="模型响应内容")
    # Token accounting, e.g. {"prompt_tokens": n, "completion_tokens": n, "total_tokens": n}.
    usage: Dict[str, int] = Field(..., description="token使用情况")
    finish_reason: Optional[str] = Field(None, description="结束原因")

class ModelInfoResponse(BaseModel):
    """Public metadata for a registered LLM, as returned by the model-listing endpoints."""
    model_id: str = Field(..., description="模型ID")
    name: str = Field(..., description="模型名称")
    provider: str = Field(..., description="模型提供商")
    modality: ModelModality = Field(..., description="模型模态类型")
    capabilities: Optional[Dict[str, Any]] = Field(None, description="模型能力详情")
    config: Optional[Dict[str, Any]] = Field(None, description="模型配置参数")

# API endpoints
@llm_service_router.post("/chat", response_model=ChatResponse)
async def chat(
    request: ChatRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Run a chat completion against the requested model.

    Args:
        request: Chat request (model id, message history, sampling params).
        db: Database session.
        current_user: Authenticated caller (used only as an auth guard here).

    Returns:
        ChatResponse: The model's reply plus token-usage accounting.

    Raises:
        HTTPException: 404 if the model id is unknown; 400 if the model is
            not ACTIVE or the message list is empty.
    """
    # The schema only guarantees `messages` is a list, not that it has
    # content — reject an empty conversation before touching the database.
    if not request.messages:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="消息列表不能为空"
        )

    # Look up the requested model.
    model = db.query(LLMModel).filter(LLMModel.model_id == request.model_id).first()

    if not model:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"模型ID '{request.model_id}' 不存在"
        )

    # Only ACTIVE models may serve traffic.
    if model.status != ModelStatus.ACTIVE:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"模型 '{model.name}' 当前状态为 {model.status.value}，无法使用"
        )

    # TODO: dispatch to the real provider API based on model.provider and
    # model.config. Note that request.stream is currently accepted but
    # ignored. The payload below is a canned stub.
    return ChatResponse(
        model_id=model.model_id,
        response="这是一个模拟的模型响应。在实际实现中，这里应该是从模型API获取的真实响应。",
        usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
        finish_reason="stop"
    )

@llm_service_router.get("/models", response_model=List[ModelInfoResponse])
async def list_available_models(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Return every model whose status is ACTIVE.

    Args:
        db: Database session.
        current_user: Authenticated caller (used only as an auth guard).

    Returns:
        List[ModelInfoResponse]: One entry per active model.
    """
    # Inactive / retired models are filtered out at the query level.
    active_models = db.query(LLMModel).filter(LLMModel.status == ModelStatus.ACTIVE).all()

    result: List[ModelInfoResponse] = []
    for record in active_models:
        result.append(
            ModelInfoResponse(
                model_id=record.model_id,
                name=record.name,
                provider=record.provider,
                modality=record.modality,
                capabilities=record.capabilities,
                config=record.config
            )
        )
    return result

@llm_service_router.get("/models/{model_id}", response_model=ModelInfoResponse)
async def get_model_info(
    model_id: str,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Fetch the detail record for a single model by its public id.

    Args:
        model_id: Public model identifier (path parameter).
        db: Database session.
        current_user: Authenticated caller (used only as an auth guard).

    Returns:
        ModelInfoResponse: The model's metadata.

    Raises:
        HTTPException: 404 when no model carries this id.
    """
    record = (
        db.query(LLMModel)
        .filter(LLMModel.model_id == model_id)
        .first()
    )

    # .first() yields None when no row matches.
    if record is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"模型ID '{model_id}' 不存在"
        )

    return ModelInfoResponse(
        model_id=record.model_id,
        name=record.name,
        provider=record.provider,
        modality=record.modality,
        capabilities=record.capabilities,
        config=record.config
    )

@llm_service_router.post("/models/{model_id}/test", response_model=ChatResponse)
async def test_model(
    model_id: str,
    prompt: str = Body(..., description="测试提示词"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Send a single test prompt to a model and return its (stubbed) reply.

    NOTE(review): unlike /chat, this endpoint performs no ACTIVE-status
    check — presumably so models in any status can be probed; confirm
    that this asymmetry is intentional.

    Args:
        model_id: Public model identifier (path parameter).
        prompt: Test prompt text, sent as the request body.
        db: Database session.
        current_user: Authenticated caller (used only as an auth guard).

    Returns:
        ChatResponse: The test response.

    Raises:
        HTTPException: 404 when no model carries this id.
    """
    found = db.query(LLMModel).filter(LLMModel.model_id == model_id).first()

    if found is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"模型ID '{model_id}' 不存在"
        )

    # TODO: invoke the real provider API here; the payload below is a
    # canned stub echoing the prompt.
    return ChatResponse(
        model_id=found.model_id,
        response=f"这是对提示词 '{prompt}' 的模拟响应。在实际实现中，这里应该是从模型API获取的真实响应。",
        usage={"prompt_tokens": 5, "completion_tokens": 10, "total_tokens": 15},
        finish_reason="stop"
    )