"""
LLM配置管理模块
提供大语言模型的统一配置管理
支持OpenAI、Anthropic、DeepSeek、Kimi、Doubao等多个提供商
"""
import sys
import os
from typing import Dict, Any, Optional, List
from pydantic import BaseModel, Field
from pathlib import Path
from dotenv import load_dotenv

# 加载环境变量
load_dotenv()

# 添加backend目录到Python路径
sys.path.insert(0, str(Path(__file__).parent.parent))


class LLMProviderConfig(BaseModel):
    """Validated configuration for one LLM provider: endpoint,
    credentials, and default generation parameters."""
    name: str = Field(..., description="提供商名称")
    api_base: str = Field(..., description="API基础URL")
    api_key: str = Field(..., description="API密钥")
    llm_model_id: str = Field(..., description="模型名称")
    # Generation defaults; individual requests may override these.
    max_tokens: int = Field(default=4000, description="最大token数")
    temperature: float = Field(default=0.7, description="温度参数")
    timeout: int = Field(default=30, description="超时时间(秒)")


class LLMSettings(BaseModel):
    """Unified LLM settings: defaults, global limits, streaming/retry/cache
    knobs, and the catalogue of supported providers and their models."""

    # pydantic v2-style model configuration; empty dict keeps library defaults.
    model_config = {}

    # Defaults used when no provider/model is explicitly requested
    DEFAULT_PROVIDER: str = Field(default="openai", description="默认提供商")
    DEFAULT_MODEL: str = Field(default="gpt-3.5-turbo", description="默认模型")

    # Global token limits
    MAX_TOKENS: int = Field(default=4000, description="最大token限制")
    MAX_CONVERSATION_TOKENS: int = Field(default=8000, description="对话最大token限制")

    # Streaming response settings
    STREAM_CHUNK_SIZE: int = Field(default=1024, description="流式响应块大小")
    STREAM_TIMEOUT: int = Field(default=60, description="流式响应超时时间")

    # Retry settings
    MAX_RETRIES: int = Field(default=3, description="最大重试次数")
    RETRY_DELAY: float = Field(default=1.0, description="重试延迟(秒)")

    # Cache settings
    CACHE_TTL: int = Field(default=3600, description="缓存过期时间(秒)")
    CACHE_SIZE_LIMIT: int = Field(default=1000, description="缓存最大记录数")

    # Static catalogue of supported providers: model names, token ceiling,
    # allowed temperature range, and default API base URL for each.
    PROVIDERS: Dict[str, Dict[str, Any]] = Field(
        default_factory=lambda: {
            "openai": {
                "models": ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo-preview"],
                "max_tokens": 4000,
                "temperature_range": [0, 2],
                "api_base": "https://api.openai.com/v1"
            },
            "anthropic": {
                "models": ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"],
                "max_tokens": 4000,
                "temperature_range": [0, 1],
                "api_base": "https://api.anthropic.com"
            },
            "deepseek": {
                "models": ["deepseek-chat", "deepseek-coder"],
                "max_tokens": 4000,
                "temperature_range": [0, 2],
                "api_base": "https://api.deepseek.com/v1"
            },
            "kimi": {
                "models": ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"],
                "max_tokens": 8000,
                "temperature_range": [0, 1],
                "api_base": "https://api.moonshot.cn/v1"
            },
            "doubao": {
                "models": ["doubao-lite-4k", "doubao-pro-4k", "doubao-pro-32k"],
                "max_tokens": 4000,
                "temperature_range": [0, 1],
                "api_base": "https://ark.cn-beijing.volces.com/api/v3"
            }
        },
        description="支持的提供商和模型"
    )


class LLMConfig:
    """LLM configuration manager.

    Reads provider credentials and model choices from environment
    variables (refreshed from .env via python-dotenv) and builds
    LLMProviderConfig objects on demand. Built configs are cached, and
    the cache is invalidated whenever the environment is reloaded so
    callers never see stale credentials.
    """

    # Providers this manager knows how to configure, in canonical order.
    SUPPORTED_PROVIDERS = ("openai", "anthropic", "deepseek", "kimi", "doubao")

    def __init__(self, settings=None):
        """
        Args:
            settings: optional LLMSettings instance; a default one is
                created when omitted.
        """
        self.settings = settings or LLMSettings()
        self._providers_cache: Dict[str, LLMProviderConfig] = {}
        self._load_config()

    def _load_config(self):
        """Reload provider credentials/endpoints from the environment.

        Also drops any cached provider configs so subsequent
        get_provider() calls reflect the freshly loaded values.
        (Previously the cache could keep serving stale configs after a
        reload, defeating the purpose of calling this method again.)
        """
        # Re-read .env, letting it override already-set variables.
        load_dotenv(override=True)

        self.openai_key = os.getenv("OPENAI_API_KEY")
        self.openai_base = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
        self.openai_model = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")

        self.anthropic_key = os.getenv("ANTHROPIC_API_KEY")
        self.anthropic_model = os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307")

        self.deepseek_key = os.getenv("DEEPSEEK_API_KEY")
        self.deepseek_model = os.getenv("DEEPSEEK_MODEL", "deepseek-chat")

        self.kimi_key = os.getenv("KIMI_API_KEY")
        self.kimi_model = os.getenv("KIMI_MODEL", "moonshot-v1-8k")

        self.doubao_key = os.getenv("DOUBAO_API_KEY")
        self.doubao_endpoint = os.getenv("DOUBAO_ENDPOINT", "https://ark.cn-beijing.volces.com/api/v3")
        self.doubao_model = os.getenv("DOUBAO_MODEL", "doubao-lite-4k")

        # Credentials may have changed -> cached configs are stale.
        self._providers_cache.clear()

    def _provider_params(self, provider_name: str):
        """Return (api_key, api_base, model, env_prefix, default_max_tokens)
        for a known provider, or None for an unknown one."""
        table = {
            "openai": (self.openai_key, self.openai_base, self.openai_model, "OPENAI", "4000"),
            "anthropic": (self.anthropic_key, "https://api.anthropic.com", self.anthropic_model, "ANTHROPIC", "4000"),
            "deepseek": (self.deepseek_key, "https://api.deepseek.com/v1", self.deepseek_model, "DEEPSEEK", "4000"),
            "kimi": (self.kimi_key, "https://api.moonshot.cn/v1", self.kimi_model, "KIMI", "8000"),
            "doubao": (self.doubao_key, self.doubao_endpoint, self.doubao_model, "DOUBAO", "4000"),
        }
        return table.get(provider_name)

    def get_provider(self, provider_name: str) -> Optional[LLMProviderConfig]:
        """Build (or fetch from cache) the config for one provider.

        Returns None when the provider is unknown or its API key is not
        present in the environment.
        """
        cached = self._providers_cache.get(provider_name)
        if cached is not None:
            return cached

        params = self._provider_params(provider_name)
        if params is None:
            return None
        api_key, api_base, model, prefix, default_max_tokens = params
        if not api_key:
            return None

        config = LLMProviderConfig(
            name=provider_name,
            api_base=api_base,
            api_key=api_key,
            llm_model_id=model,
            # Per-provider overrides, e.g. OPENAI_MAX_TOKENS / OPENAI_TEMPERATURE.
            max_tokens=int(os.getenv(f"{prefix}_MAX_TOKENS", default_max_tokens)),
            temperature=float(os.getenv(f"{prefix}_TEMPERATURE", "0.7")),
            timeout=30,
        )
        self._providers_cache[provider_name] = config
        return config

    def get_all_providers(self) -> Dict[str, LLMProviderConfig]:
        """Return configs for every provider whose API key is set."""
        providers = {}
        for provider_name in self.SUPPORTED_PROVIDERS:
            config = self.get_provider(provider_name)
            if config:
                providers[provider_name] = config
        return providers

    def get_active_providers(self) -> Dict[str, LLMProviderConfig]:
        """Reload the environment, then return all configured providers.

        Unlike get_all_providers(), this always re-reads .env first
        (and invalidates the cache) so newly added or changed keys are
        picked up.
        """
        self._load_config()

        active_providers = {}
        for provider_name in self.SUPPORTED_PROVIDERS:
            config = self.get_provider(provider_name)
            if config:
                active_providers[provider_name] = config
                print(f"✅ Loaded {provider_name} provider config")

        return active_providers

    def get_models(self, provider_name: str) -> List[str]:
        """Return the model names supported by a provider ([] if unknown)."""
        provider = self.settings.PROVIDERS.get(provider_name)
        return provider["models"] if provider else []

    def get_model_config(self, provider_name: str, model_name: str) -> Optional[Dict[str, Any]]:
        """Return token limit and temperature range for a known
        provider/model pair, or None when either is unknown."""
        provider_config = self.settings.PROVIDERS.get(provider_name)
        if provider_config and model_name in provider_config["models"]:
            return {
                "max_tokens": provider_config["max_tokens"],
                "temperature_range": provider_config["temperature_range"]
            }
        return None

    def validate_provider(self, provider_name: str) -> bool:
        """Return True when the provider config can be built and carries a
        non-empty API key."""
        try:
            config = self.get_provider(provider_name)
            return config is not None and bool(config.api_key)
        except Exception:  # narrowed from bare `except:` (which also ate SystemExit)
            return False

    def get_default_config(self) -> Optional[LLMProviderConfig]:
        """Return the default provider's config, falling back to the first
        available provider when the default is not configured."""
        # Prefer an explicitly configured default provider.
        default_provider = os.getenv("DEFAULT_LLM_PROVIDER", self.settings.DEFAULT_PROVIDER)
        config = self.get_provider(default_provider)

        # Fallback: first configured provider, DeepSeek preferred.
        if not config:
            for provider in ("deepseek", "openai", "anthropic", "kimi", "doubao"):
                config = self.get_provider(provider)
                if config:
                    print(f"Using {provider} as default provider")
                    break

        return config


# Module-level singleton configuration instance shared by importers.
llm_config = LLMConfig()
