from typing import Any, List, Optional, Iterator

from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.outputs import ChatGenerationChunk, ChatResult
from langchain_openai import ChatOpenAI

from app.config.settings import settings
from app.config.logging_config import get_logger

# Logger dedicated to LLM interactions
logger = get_logger(__name__, "llm")


class DeepseekChatModel(BaseChatModel):
    """LangChain chat-model interface for the Deepseek Chat API.

    Thin wrapper that delegates generation to a ``ChatOpenAI`` client
    pointed at the Deepseek OpenAI-compatible endpoint configured in
    application settings, adding verbose request/response logging.
    """

    # Connection/generation configuration, defaulted from app settings.
    model_name: str = settings.DEEPSEEK_MODEL
    api_key: str = settings.DEEPSEEK_API_KEY
    api_base: str = settings.DEEPSEEK_API_BASE
    streaming: bool = False
    temperature: float = 0.7
    max_tokens: Optional[int] = None  # None lets the API use its own limit

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        logger.info(f"🔧 初始化DeepSeek模型")
        logger.info(f"📋 模型配置: model={self.model_name}, temperature={self.temperature}, max_tokens={self.max_tokens}")
        logger.info(f"🌐 API配置: base_url={self.api_base}")

        # Create the underlying ChatOpenAI client.
        # FIX: api_key was previously not forwarded, so ChatOpenAI silently
        # fell back to the OPENAI_API_KEY environment variable and failed
        # when only DEEPSEEK_API_KEY was configured. Pass it explicitly.
        self._chat_model = ChatOpenAI(
            model=self.model_name,
            api_key=self.api_key,
            base_url=self.api_base,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            streaming=self.streaming,
            verbose=True
        )

    @property
    def _llm_type(self) -> str:
        """Identifier used by LangChain for serialization/telemetry."""
        return "deepseek-chat"

    def _generate(
            self,
            messages: List[Any],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> ChatResult:
        """Synchronously generate a chat completion.

        Logs the incoming messages (truncated to 200 chars each) and the
        resulting generations, then delegates to ``ChatOpenAI._generate``.
        Re-raises any exception after logging it with a full traceback.
        """
        logger.info(f"🚀 开始同步生成聊天完成")
        logger.info(f"📥 输入消息数量: {len(messages)}")
        for i, msg in enumerate(messages):
            logger.info(f"📝 消息 {i+1}: {type(msg).__name__} - {getattr(msg, 'content', str(msg))[:200]}...")
        logger.info(f"⏹️ 停止词: {stop}")
        logger.info(f"🔧 额外参数: {kwargs}")

        try:
            # Delegate directly to the wrapped ChatOpenAI implementation.
            result = self._chat_model._generate(
                messages=messages,
                stop=stop,
                run_manager=run_manager,
                **kwargs
            )
            logger.info(f"✅ 同步生成完成")
            logger.info(f"📤 生成结果: {len(result.generations)} 个生成项")
            for i, gen in enumerate(result.generations):
                logger.info(f"📄 生成项 {i+1}: {gen.text[:200] if hasattr(gen, 'text') else str(gen)[:200]}...")
            return result
        except Exception as e:
            logger.error(f"❌ 同步生成出错: {str(e)}", exc_info=True)
            raise

    def _stream(
            self,
            messages: List[Any],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream a chat completion chunk by chunk.

        Delegates directly to ``ChatOpenAI._stream`` without any extra
        per-chunk processing; streaming post-processing lives in the
        caller (``agent.py``'s ``astream_run``).
        """
        return self._chat_model._stream(
            messages=messages,
            stop=stop,
            run_manager=run_manager,
            **kwargs
        )
