"""
硅基流动大模型集成模块
提供与硅基流动API的集成，支持LangChain框架
"""

import logging
from typing import Any, List, Mapping, Optional, Dict
import requests

from langchain_core.language_models.llms import LLM
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from pydantic import Field

logger = logging.getLogger(__name__)


class SiliconFlowLLM(LLM):
    """
    硅基流动大模型LangChain封装
    兼容OpenAI API格式
    """
    
    # 模型配置参数
    api_key: str = Field(description="硅基流动API密钥")
    base_url: str = Field(default="https://api.siliconflow.cn/v1", description="API基础URL")
    model: str = Field(default="Qwen/Qwen2.5-7B-Instruct", description="使用的模型名称")
    temperature: float = Field(default=0.7, description="温度参数，控制随机性")
    max_tokens: int = Field(default=4000, description="最大生成token数")
    top_p: float = Field(default=0.9, description="核采样参数")
    
    @property
    def _llm_type(self) -> str:
        """返回LLM类型标识"""
        return "siliconflow"
    
    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """返回标识参数"""
        return {
            "model": self.model,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
            "top_p": self.top_p,
        }
    
    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """
        调用硅基流动API生成文本
        
        Args:
            prompt: 输入提示词
            stop: 停止词列表
            run_manager: 回调管理器
            **kwargs: 其他参数
            
        Returns:
            生成的文本
        """
        logger.info(f"调用硅基流动API，模型: {self.model}")
        logger.debug(f"提示词长度: {len(prompt)} 字符")
        
        try:
            # 构建请求
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }
            
            # 构建消息
            messages = [
                {"role": "user", "content": prompt}
            ]
            
            # 请求体
            payload = {
                "model": self.model,
                "messages": messages,
                "temperature": kwargs.get("temperature", self.temperature),
                "max_tokens": kwargs.get("max_tokens", self.max_tokens),
                "top_p": kwargs.get("top_p", self.top_p),
                "stream": False
            }
            
            # 添加停止词
            if stop:
                payload["stop"] = stop
            
            # 发送请求
            api_url = f"{self.base_url}/chat/completions"
            logger.debug(f"请求URL: {api_url}")
            
            response = requests.post(
                api_url,
                headers=headers,
                json=payload,
                timeout=60
            )
            
            # 检查响应
            response.raise_for_status()
            result = response.json()
            
            # 提取生成的文本
            if "choices" in result and len(result["choices"]) > 0:
                generated_text = result["choices"][0]["message"]["content"]
                
                # 记录token使用情况
                if "usage" in result:
                    usage = result["usage"]
                    logger.info(
                        f"Token使用: 输入={usage.get('prompt_tokens', 0)}, "
                        f"输出={usage.get('completion_tokens', 0)}, "
                        f"总计={usage.get('total_tokens', 0)}"
                    )
                
                logger.info(f"生成文本长度: {len(generated_text)} 字符")
                return generated_text
            else:
                error_msg = "API返回格式错误，未找到生成内容"
                logger.error(error_msg)
                return f"错误: {error_msg}"
                
        except requests.exceptions.RequestException as e:
            error_msg = f"API请求失败: {str(e)}"
            logger.error(error_msg)
            return f"错误: {error_msg}"
        except Exception as e:
            error_msg = f"调用大模型时发生异常: {str(e)}"
            logger.error(error_msg)
            return f"错误: {error_msg}"


class SiliconFlowChatLLM:
    """
    硅基流动聊天模型（支持对话历史）
    """
    
    def __init__(
        self,
        api_key: str,
        base_url: str = "https://api.siliconflow.cn/v1",
        model: str = "Qwen/Qwen2.5-7B-Instruct",
        temperature: float = 0.7,
        max_tokens: int = 4000,
        top_p: float = 0.9
    ):
        """
        初始化聊天模型
        
        Args:
            api_key: API密钥
            base_url: API基础URL
            model: 模型名称
            temperature: 温度参数
            max_tokens: 最大token数
            top_p: 核采样参数
        """
        self.api_key = api_key
        self.base_url = base_url
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        
        logger.info(f"SiliconFlowChatLLM初始化成功，模型: {model}")
    
    def generate(
        self,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> str:
        """
        生成回复
        
        Args:
            messages: 消息列表，格式: [{"role": "user", "content": "..."}, ...]
            **kwargs: 其他参数
            
        Returns:
            生成的回复内容
        """
        try:
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }
            
            payload = {
                "model": self.model,
                "messages": messages,
                "temperature": kwargs.get("temperature", self.temperature),
                "max_tokens": kwargs.get("max_tokens", self.max_tokens),
                "top_p": kwargs.get("top_p", self.top_p),
                "stream": False
            }
            
            api_url = f"{self.base_url}/chat/completions"
            
            response = requests.post(
                api_url,
                headers=headers,
                json=payload,
                timeout=60
            )
            
            response.raise_for_status()
            result = response.json()
            
            if "choices" in result and len(result["choices"]) > 0:
                return result["choices"][0]["message"]["content"]
            else:
                return "错误: API返回格式错误"
                
        except Exception as e:
            logger.error(f"生成回复失败: {e}")
            return f"错误: {str(e)}"


def create_siliconflow_llm(
    api_key: str,
    base_url: str = "https://api.siliconflow.cn/v1",
    model: str = "Qwen/Qwen2.5-7B-Instruct",
    temperature: float = 0.7,
    max_tokens: int = 4000,
    **kwargs
) -> SiliconFlowLLM:
    """
    Factory helper that builds a configured SiliconFlowLLM instance.

    Args:
        api_key: SiliconFlow API key.
        base_url: Base URL of the API endpoint.
        model: Model identifier to use.
        temperature: Sampling temperature.
        max_tokens: Maximum number of tokens to generate.
        **kwargs: Extra keyword arguments forwarded to SiliconFlowLLM.

    Returns:
        A ready-to-use SiliconFlowLLM instance.
    """
    base_params = {
        "api_key": api_key,
        "base_url": base_url,
        "model": model,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    # Double-unpacking raises TypeError on duplicate keys, matching the
    # behavior of passing the named arguments alongside **kwargs directly.
    return SiliconFlowLLM(**base_params, **kwargs)

