# -*- coding: utf-8 -*-
"""
大模型API路由
提供DeepSeek大模型对话接口
"""

from typing import List, Optional, Dict, Any
from fastapi import APIRouter, HTTPException, status
from pydantic import BaseModel, Field
from app.services.llm_service import llm_service
import logging


logger = logging.getLogger(__name__)
router = APIRouter()


class ChatMessage(BaseModel):
    """
    A single chat message in the OpenAI-style conversation format.
    """
    # Author of the message: "system", "user" or "assistant".
    role: str = Field(..., description="消息角色：system/user/assistant")
    # Text content of the message.
    content: str = Field(..., description="消息内容")


class SimpleChatRequest(BaseModel):
    """
    Request body for the single-message chat endpoint.
    """
    # The user's message (1-2000 characters, enforced by validation).
    message: str = Field(..., description="用户消息", min_length=1, max_length=2000)
    # Optional system prompt that steers the model's behavior.
    system_prompt: Optional[str] = Field(None, description="系统提示词", max_length=1000)
    # Sampling temperature in [0.0, 2.0]; higher is more random.
    # NOTE(review): declared here but not forwarded by the /chat/simple
    # handler — confirm whether the LLM service should receive it.
    temperature: Optional[float] = Field(0.7, description="温度参数", ge=0.0, le=2.0)
    # Upper bound on generated tokens (1-4000).
    max_tokens: Optional[int] = Field(1000, description="最大token数", ge=1, le=4000)


class MultiTurnChatRequest(BaseModel):
    """
    Request body for the multi-turn chat endpoint.
    """
    # Prior conversation turns, oldest first.
    conversation_history: List[ChatMessage] = Field(..., description="历史对话记录")
    # The new user message to append to the conversation (1-2000 chars).
    new_message: str = Field(..., description="新消息", min_length=1, max_length=2000)
    # Sampling temperature in [0.0, 2.0].
    # NOTE(review): declared here but not forwarded by the /chat/multi-turn
    # handler — confirm whether the LLM service should receive it.
    temperature: Optional[float] = Field(0.7, description="温度参数", ge=0.0, le=2.0)
    # Upper bound on generated tokens (1-4000).
    max_tokens: Optional[int] = Field(1000, description="最大token数", ge=1, le=4000)


class ChatCompletionRequest(BaseModel):
    """
    Request body for the raw chat-completion endpoint.
    """
    # Full message list to send to the model, including any system message.
    messages: List[ChatMessage] = Field(..., description="消息列表")
    # Sampling temperature in [0.0, 2.0]; forwarded to the LLM service.
    temperature: Optional[float] = Field(0.7, description="温度参数", ge=0.0, le=2.0)
    # Upper bound on generated tokens (1-4000); forwarded to the LLM service.
    max_tokens: Optional[int] = Field(1000, description="最大token数", ge=1, le=4000)


class ChatResponse(BaseModel):
    """
    Uniform response envelope returned by all chat endpoints.

    Built directly from the dict returned by the LLM service
    (``ChatResponse(**result)``), so field names must match the
    service's result keys.
    """
    # Whether the LLM call succeeded.
    success: bool = Field(..., description="请求是否成功")
    # The AI's reply text (None on failure).
    message: Optional[str] = Field(None, description="AI回复内容")
    # Token accounting as reported by the provider (e.g. prompt/completion totals).
    usage: Optional[Dict[str, int]] = Field(None, description="Token使用情况")
    # Identifier of the model that produced the reply.
    model: Optional[str] = Field(None, description="使用的模型")
    # Provider-reported finish reason (e.g. stop / length).
    finish_reason: Optional[str] = Field(None, description="完成原因")
    # Error description when success is False.
    error: Optional[str] = Field(None, description="错误信息")


@router.post("/chat/simple", response_model=ChatResponse, summary="简单对话")
async def simple_chat(request: SimpleChatRequest) -> ChatResponse:
    """
    Simple single-message chat endpoint.

    Sends one user message (plus an optional system prompt) to the
    DeepSeek LLM service and returns the model's reply.

    Args:
        request: Validated simple chat request body.

    Returns:
        ChatResponse: AI reply, token usage and model metadata.

    Raises:
        HTTPException: 500 when the LLM service call fails.
    """
    try:
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info("收到简单对话请求，消息长度: %d", len(request.message))

        # NOTE(review): request.temperature and request.max_tokens are
        # accepted by SimpleChatRequest but not forwarded here, so they are
        # silently ignored — confirm whether llm_service.simple_chat should
        # receive them (the /chat/completion handler does forward both).
        result = await llm_service.simple_chat(
            user_message=request.message,
            system_prompt=request.system_prompt
        )

        return ChatResponse(**result)

    except Exception as e:
        # logger.exception records the full traceback for diagnosis,
        # which plain logger.error with a formatted string would lose.
        logger.exception("简单对话处理失败: %s", e)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"对话处理失败: {str(e)}"
        )


@router.post("/chat/multi-turn", response_model=ChatResponse, summary="多轮对话")
async def multi_turn_chat(request: MultiTurnChatRequest) -> ChatResponse:
    """
    Multi-turn chat endpoint.

    Appends a new user message to the supplied conversation history and
    asks the DeepSeek LLM service for the next reply.

    Args:
        request: Validated multi-turn chat request body.

    Returns:
        ChatResponse: AI reply, token usage and model metadata.

    Raises:
        HTTPException: 500 when the LLM service call fails.
    """
    try:
        # Lazy %-style args: only formatted when INFO logging is enabled.
        logger.info("收到多轮对话请求，历史消息数: %d", len(request.conversation_history))

        # Convert Pydantic message models to plain dicts for the service layer.
        history = [msg.model_dump() for msg in request.conversation_history]

        # NOTE(review): request.temperature and request.max_tokens are
        # accepted by MultiTurnChatRequest but not forwarded here, so they
        # are silently ignored — confirm whether llm_service.multi_turn_chat
        # should receive them (the /chat/completion handler forwards both).
        result = await llm_service.multi_turn_chat(
            conversation_history=history,
            new_message=request.new_message
        )

        return ChatResponse(**result)

    except Exception as e:
        # logger.exception records the full traceback for diagnosis.
        logger.exception("多轮对话处理失败: %s", e)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"多轮对话处理失败: {str(e)}"
        )


@router.post("/chat/completion", response_model=ChatResponse, summary="聊天完成")
async def chat_completion(request: ChatCompletionRequest) -> ChatResponse:
    """
    Raw chat-completion endpoint.

    Forwards the caller-supplied message list, temperature and max_tokens
    directly to the DeepSeek LLM service and returns the model's reply.

    Args:
        request: Validated chat-completion request body.

    Returns:
        ChatResponse: AI reply, token usage and model metadata.

    Raises:
        HTTPException: 500 when the LLM service call fails.
    """
    try:
        # Lazy %-style args: only formatted when INFO logging is enabled.
        logger.info("收到聊天完成请求，消息数: %d", len(request.messages))

        # Convert Pydantic message models to plain dicts for the service layer.
        messages = [msg.model_dump() for msg in request.messages]

        result = await llm_service.chat_completion(
            messages=messages,
            temperature=request.temperature,
            max_tokens=request.max_tokens
        )

        return ChatResponse(**result)

    except Exception as e:
        # logger.exception records the full traceback for diagnosis,
        # which plain logger.error with a formatted string would lose.
        logger.exception("聊天完成处理失败: %s", e)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"聊天完成处理失败: {str(e)}"
        )


@router.get("/health", summary="健康检查")
async def health_check() -> Dict[str, Any]:
    """
    Health check for the LLM service.

    Sends a tiny round-trip test message to DeepSeek and reports whether
    the service answered successfully.

    Returns:
        Dict: Service status payload — "healthy"/"unhealthy" plus model
        info on success, or the error message on failure.
    """
    try:
        # Minimal round-trip probe; the prompt asks for a trivial reply to
        # keep token usage negligible.
        test_result = await llm_service.simple_chat(
            user_message="Hello",
            system_prompt="Please respond with 'OK' only."
        )

        # .get() with a False default: a malformed service result (missing
        # "success") reports "unhealthy" instead of raising KeyError and
        # being misreported through the generic exception branch below.
        ok = test_result.get("success", False)
        return {
            "status": "healthy" if ok else "unhealthy",
            "service": "DeepSeek LLM",
            "model": llm_service.model,
            "test_success": ok
        }

    except Exception as e:
        # logger.exception records the full traceback for diagnosis.
        logger.exception("健康检查失败: %s", e)
        return {
            "status": "unhealthy",
            "service": "DeepSeek LLM",
            "error": str(e)
        }