# -*- coding: utf-8 -*-
"""
大模型服务模块
封装DeepSeek API调用逻辑
"""

import asyncio
import logging
from typing import Any, Dict, List, Optional

from openai import OpenAI

from app.core.config import settings


logger = logging.getLogger(__name__)


class LLMService:
    """
    LLM service wrapper around the DeepSeek API.

    Uses the OpenAI SDK (DeepSeek exposes an OpenAI-compatible endpoint).
    All public methods are coroutines; the synchronous SDK call is executed
    in a worker thread via ``run_in_executor`` so it never blocks the
    running event loop.
    """

    def __init__(self) -> None:
        """Initialize the DeepSeek client from application settings."""
        self.client = OpenAI(
            api_key=settings.DEEPSEEK_API_KEY,
            base_url=settings.DEEPSEEK_BASE_URL
        )
        # Default generation parameters; overridable per call.
        self.model = settings.DEEPSEEK_MODEL
        self.max_tokens = settings.DEEPSEEK_MAX_TOKENS
        self.temperature = settings.DEEPSEEK_TEMPERATURE

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Call the DeepSeek chat-completion API.

        Args:
            messages: Conversation messages in the form
                ``[{"role": "user", "content": "..."}]``.
            temperature: Sampling temperature; falls back to the configured
                default when ``None``.
            max_tokens: Maximum completion tokens; falls back to the
                configured default when ``None``.
            **kwargs: Extra keyword arguments forwarded to the SDK call.

        Returns:
            Dict: On success ``{"success": True, "message", "usage",
            "model", "finish_reason"}``; on failure ``{"success": False,
            "error", "message": None}``. Failures are reported in the
            return value — no exception escapes this method.
        """
        try:
            # Fall back to configured defaults when not overridden.
            temp = temperature if temperature is not None else self.temperature
            max_tok = max_tokens if max_tokens is not None else self.max_tokens

            logger.info(f"调用DeepSeek API，消息数量: {len(messages)}")

            # The OpenAI client here is synchronous; run the blocking
            # network call in a worker thread so this coroutine does not
            # stall the event loop for the whole round-trip.
            loop = asyncio.get_running_loop()
            response = await loop.run_in_executor(
                None,
                lambda: self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    temperature=temp,
                    max_tokens=max_tok,
                    **kwargs
                )
            )

            # `usage` may be absent on some responses — guard so a
            # successful completion is not misreported as an error.
            usage = response.usage
            result = {
                "success": True,
                "message": response.choices[0].message.content,
                "usage": {
                    "prompt_tokens": usage.prompt_tokens if usage else 0,
                    "completion_tokens": usage.completion_tokens if usage else 0,
                    "total_tokens": usage.total_tokens if usage else 0
                },
                "model": response.model,
                "finish_reason": response.choices[0].finish_reason
            }

            logger.info(f"DeepSeek API调用成功，消耗tokens: {result['usage']['total_tokens']}")
            return result

        except Exception as e:
            # Service boundary: convert any SDK/network failure into a
            # structured error result; log with traceback for diagnosis.
            logger.exception(f"DeepSeek API调用失败: {str(e)}")
            return {
                "success": False,
                "error": str(e),
                "message": None
            }

    async def simple_chat(self, user_message: str, system_prompt: Optional[str] = None) -> Dict[str, Any]:
        """
        Single-turn chat helper.

        Args:
            user_message: The user's message text.
            system_prompt: Optional system prompt prepended to the
                conversation.

        Returns:
            Dict: Result of ``chat_completion()``.
        """
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": user_message})
        return await self.chat_completion(messages)

    async def multi_turn_chat(
        self,
        conversation_history: List[Dict[str, str]],
        new_message: str
    ) -> Dict[str, Any]:
        """
        Multi-turn chat helper.

        Args:
            conversation_history: Prior messages; not mutated by this call.
            new_message: The new user message to append.

        Returns:
            Dict: Result of ``chat_completion()``.
        """
        # Copy so the caller's history list is not mutated.
        messages = list(conversation_history)
        messages.append({"role": "user", "content": new_message})
        return await self.chat_completion(messages)


# Module-level singleton LLM service instance, shared by importers of this module.
# NOTE: constructed at import time, so importing this module requires valid settings.
llm_service = LLMService()