from typing import Any, Dict

from dotenv import load_dotenv
from langchain_deepseek import ChatDeepSeek

from .BaseLLM import BaseLLM

# Load environment variables from a .env file into os.environ
# (presumably supplies the DeepSeek API key — confirm against deployment setup)
load_dotenv()


class DeepSeekChat(BaseLLM):
    """
    DeepSeek Chat model implementation.

    Wraps DeepSeek's OpenAI-compatible API through langchain's
    ``ChatDeepSeek`` client.
    """

    def __init__(self, **kwargs):
        """
        Initialize the wrapper with optional overrides.

        Args:
            **kwargs: forwarded to ``BaseLLM``; recognized keys here are
                ``api_base``, ``temperature`` and ``max_tokens``.
        """
        super().__init__(model_name="deepseek-chat", **kwargs)
        # Instance-level defaults; each can still be overridden per call
        # via create_llm(**kwargs).
        self.api_base = kwargs.get('api_base', 'https://api.deepseek.com')
        self.temperature = kwargs.get('temperature', 0.7)
        self.max_tokens = kwargs.get('max_tokens', 4096)

    def create_llm(self, streaming: bool = False, callbacks=None, **kwargs):
        """
        Create a DeepSeek Chat LLM instance.

        Args:
            streaming: whether to enable streaming output.
            callbacks: optional list of callback handlers.
            **kwargs: extra configuration forwarded to ``ChatDeepSeek``
                (e.g. ``temperature``, ``max_tokens``, ``max_retries``,
                ``model``, ``base_url``).

        Returns:
            ChatDeepSeek: configured LLM instance.
        """
        # Start from the caller's extra kwargs, then fill in instance
        # defaults only where the caller did not supply a value.
        # NOTE: the previous implementation built the config first and then
        # ran config.update(kwargs), which (a) re-applied three keys it had
        # already read and (b) let a stray 'streaming' or 'callbacks' key in
        # **kwargs silently override the explicit parameters. Explicit
        # parameters now always win.
        config = dict(kwargs)
        config.setdefault('model', self.model_name)
        config.setdefault('base_url', self.api_base)
        config.setdefault('max_retries', 2)
        config.setdefault('temperature', self.temperature)
        config.setdefault('max_tokens', self.max_tokens)
        config['streaming'] = streaming
        if callbacks:
            config['callbacks'] = callbacks
        return ChatDeepSeek(**config)

    def get_model_info(self) -> Dict[str, Any]:
        """
        Return static metadata describing the DeepSeek Chat model.

        Returns:
            Dict[str, Any]: model name, provider, endpoint and default
            sampling parameters.
        """
        return {
            'model_name': self.model_name,
            'provider': 'DeepSeek',
            'type': 'chat',
            'api_base': self.api_base,
            'temperature': self.temperature,
            'max_tokens': self.max_tokens,
            'supports_streaming': True,
            'supports_async': True,
            'description': 'DeepSeek Chat是一个强大的对话模型，支持多轮对话和复杂推理'
        }
