"""
模型配置管理模块
提供模型类型、参数、部署配置的集中管理
"""

import os
from dataclasses import dataclass, field
from typing import Dict, Any, Optional
from enum import Enum

from utils.logger import logger


class ModelBackend(Enum):
    """Supported model-serving backend types."""
    VLLM = "vllm"
    LLAMA_CPP = "llama_cpp"
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    FALLBACK = "fallback"
    OLLAMA = "ollama"


class ModelSize(Enum):
    """Rough model-size bucket (by parameter count)."""
    SMALL = "small"      # 1B-3B parameters
    MEDIUM = "medium"    # 7B-13B parameters
    LARGE = "large"      # 30B+ parameters

@dataclass
class ModelConfig:
    """Base model configuration.

    Holds the sampling/generation parameters shared by every backend, plus
    two free-form dicts for backend-specific and deployment-specific settings.
    """
    name: str                        # registry key; must be unique per config
    backend: ModelBackend            # which backend serves this model
    model_size: ModelSize            # rough size bucket of the model
    max_tokens: int = 2048           # generation length cap
    temperature: float = 0.7         # sampling temperature
    top_p: float = 0.9               # nucleus-sampling threshold
    top_k: int = 50                  # top-k sampling cutoff
    repetition_penalty: float = 1.1  # penalty against repeated tokens
    timeout: int = 30                # request timeout (presumably seconds — confirm with callers)
    retry_count: int = 3             # retries on failure
    
    # Backend-specific settings (model path, quantization, threads, ...)
    backend_config: Dict[str, Any] = field(default_factory=dict)
    
    # Deployment settings (service URL, health checks, resource limits)
    deployment_config: Dict[str, Any] = field(default_factory=dict)


class VllmConfig(ModelConfig):
    """Preset configuration for the vLLM backend.

    Note: intentionally NOT decorated with ``@dataclass`` — this class
    declares no new fields and defines its own ``__init__``, so the decorator
    was inert (dataclasses skip ``__init__`` generation when the class already
    defines one) and only misleadingly suggested field-style construction.
    """

    def __init__(self, name: str = "vllm-7b", model_size: ModelSize = ModelSize.MEDIUM):
        """Build the vLLM preset.

        Args:
            name: Registry name for this config.
            model_size: Rough size bucket; defaults to MEDIUM (7B-13B).
        """
        super().__init__(
            name=name,
            backend=ModelBackend.VLLM,
            model_size=model_size,
            max_tokens=2048,
            temperature=0.7,
            backend_config={
                # Model location is overridable via environment variable
                "model_path": os.getenv("VLLM_MODEL_PATH", "./models/vllm-7b"),
                "gpu_memory_utilization": 0.9,
                "tensor_parallel_size": 1,
                "pipeline_parallel_size": 1,
                "max_num_seqs": 256,
                "max_model_len": 4096,
                "dtype": "float16",
                "trust_remote_code": True,
                "quantization": None  # no quantization by default
            },
            deployment_config={
                "service_url": os.getenv("VLLM_SERVICE_URL", "http://localhost:8000"),
                "health_check_interval": 30,  # presumably seconds — confirm with deployer
                "auto_restart": True,
                "resource_limits": {
                    "memory": "8Gi",
                    "cpu": "4",
                    "gpu": "1"
                }
            }
        )


class LlamaCppConfig(ModelConfig):
    """Preset configuration for the llama.cpp backend.

    Note: intentionally NOT decorated with ``@dataclass`` — this class
    declares no new fields and defines its own ``__init__``, so the decorator
    was inert and only misleadingly suggested field-style construction.
    """

    def __init__(self, name: str = "llama-cpp-7b", model_size: ModelSize = ModelSize.MEDIUM):
        """Build the llama.cpp preset.

        Args:
            name: Registry name for this config.
            model_size: Rough size bucket; defaults to MEDIUM (7B-13B).
        """
        super().__init__(
            name=name,
            backend=ModelBackend.LLAMA_CPP,
            model_size=model_size,
            max_tokens=1024,
            temperature=0.8,
            backend_config={
                # GGUF model path is overridable via environment variable
                "model_path": os.getenv("LLAMA_MODEL_PATH", "./models/llama-7b-q4_0.gguf"),
                "n_gpu_layers": 35,  # number of layers offloaded to GPU
                "n_batch": 512,
                "n_threads": 4,
                "use_mmap": True,
                "use_mlock": False,
                "flash_attention": False
            },
            deployment_config={
                "service_url": os.getenv("LLAMA_SERVICE_URL", "http://localhost:8080"),
                "health_check_interval": 60,  # presumably seconds — confirm with deployer
                "auto_restart": True,
                "resource_limits": {
                    "memory": "4Gi",
                    "cpu": "2",
                    "gpu": "0"  # CPU mode
                }
            }
        )


class OllamaConfig(ModelConfig):
    """Preset configuration for the Ollama backend (lightweight model).

    Note: intentionally NOT decorated with ``@dataclass`` — this class
    declares no new fields and defines its own ``__init__``, so the decorator
    was inert and only misleadingly suggested field-style construction.
    """

    def __init__(self, name: str = "ollama-phi3", model_size: ModelSize = ModelSize.SMALL):
        """Build the Ollama preset.

        Args:
            name: Registry name for this config.
            model_size: Rough size bucket; defaults to SMALL (1B-3B).
        """
        super().__init__(
            name=name,
            backend=ModelBackend.OLLAMA,
            model_size=model_size,
            max_tokens=1024,
            temperature=0.7,
            backend_config={
                # Ollama model tag is overridable via environment variable
                "model": os.getenv("OLLAMA_MODEL", "phi3:3.8b-instruct-q4_K_M"),
                "system_prompt": "你是一个有用的AI助手",
                "keep_alive": "5m",
                "stream": False
            },
            deployment_config={
                "service_url": os.getenv("OLLAMA_SERVICE_URL", "http://localhost:11434"),
                "health_check_interval": 30,  # presumably seconds — confirm with deployer
                "auto_restart": True,
                "resource_limits": {
                    "memory": "8Gi",
                    "cpu": "4",
                    "gpu": "0"  # CPU mode supported
                }
            }
        )
    
    def get(self, key: str, default: Any = None) -> Any:
        """Dict-style lookup for compatibility with existing callers.

        Resolution order: ``backend_config`` first, then
        ``deployment_config``, then instance attributes.

        Args:
            key: Key to look up.
            default: Value returned when the key is not found anywhere.

        Returns:
            The first matching value, or *default*.
        """
        if key in self.backend_config:
            return self.backend_config[key]
        elif key in self.deployment_config:
            return self.deployment_config[key]
        elif hasattr(self, key):
            return getattr(self, key)
        return default


class FallbackConfig(ModelConfig):
    """Preset for the template-based fallback (no external model service).

    Note: intentionally NOT decorated with ``@dataclass`` — this class
    declares no new fields and defines its own ``__init__``, so the decorator
    was inert and only misleadingly suggested field-style construction.
    """

    def __init__(self, name: str = "fallback"):
        """Build the fallback preset.

        Args:
            name: Registry name for this config.
        """
        super().__init__(
            name=name,
            backend=ModelBackend.FALLBACK,
            model_size=ModelSize.SMALL,
            max_tokens=512,
            temperature=0.5,
            backend_config={
                # Template filled with retrieved contexts and the user query
                "response_template": "根据相关信息：{contexts}。总结回答：{query}",
                "max_context_length": 1000,
                "enable_fallback": True
            },
            deployment_config={
                # Runs in-process: no service URL, no health checks
                "service_url": "local",
                "health_check_interval": 0,
                "auto_restart": False,
                "resource_limits": {}
            }
        )


class ModelConfigManager:
    """Registry of named ModelConfig presets with a selectable "current" one.

    All lookups funnel through :meth:`get_config`; passing ``name=None``
    resolves to the currently selected config.
    """

    def __init__(self):
        # name -> config mapping, plus the name of the active config
        self._configs: Dict[str, ModelConfig] = {}
        self._current_config: Optional[str] = None
        self._load_default_configs()

    def _load_default_configs(self):
        """Register the built-in presets and select the default one."""
        for preset in (
            VllmConfig(),      # vLLM preset
            OllamaConfig(),    # Ollama preset (lightweight model)
            LlamaCppConfig(),  # llama.cpp preset
            FallbackConfig(),  # template-based fallback
        ):
            self.register_config(preset)
        # The lightweight Ollama model is the default
        self.set_current_config("ollama-phi3")

    def register_config(self, config: ModelConfig):
        """Register *config* under its own name, replacing any existing entry."""
        self._configs[config.name] = config
        logger.info(f"Registered model config: {config.name} ({config.backend.value})")

    def get_config(self, name: Optional[str] = None) -> ModelConfig:
        """Return the config for *name* (current config when ``name`` is None).

        Raises:
            ValueError: if no config is registered under the resolved name.
        """
        key = name if name is not None else self._current_config
        if key not in self._configs:
            raise ValueError(f"Model config '{key}' not found")
        return self._configs[key]

    def set_current_config(self, name: str):
        """Make *name* the current config (must already be registered)."""
        self.get_config(name)  # validates; raises ValueError for unknown names
        self._current_config = name
        logger.info(f"Set current model config: {name}")

    def list_configs(self) -> Dict[str, Dict[str, Any]]:
        """Return a short per-config summary (backend, size, sampling basics)."""
        summary: Dict[str, Dict[str, Any]] = {}
        for cfg_name, cfg in self._configs.items():
            summary[cfg_name] = {
                "backend": cfg.backend.value,
                "model_size": cfg.model_size.value,
                "max_tokens": cfg.max_tokens,
                "temperature": cfg.temperature,
            }
        return summary

    def update_config(self, name: str, **kwargs):
        """Overwrite existing attributes of a registered config.

        Unknown keys are logged and skipped rather than raising.
        """
        cfg = self.get_config(name)
        for attr, new_value in kwargs.items():
            if hasattr(cfg, attr):
                setattr(cfg, attr, new_value)
            else:
                logger.warning(f"Unknown config key: {attr}")
        logger.info(f"Updated model config: {name}")

    def get_deployment_config(self, name: Optional[str] = None) -> Dict[str, Any]:
        """Deployment settings of the resolved config."""
        return self.get_config(name).deployment_config

    def get_backend_config(self, name: Optional[str] = None) -> Dict[str, Any]:
        """Backend settings of the resolved config."""
        return self.get_config(name).backend_config


# Global config-manager singleton, created at import time
model_config_manager = ModelConfigManager()

# Public API of this module
__all__ = [
    'ModelBackend', 'ModelSize', 'ModelConfig', 
    'VllmConfig', 'OllamaConfig', 'LlamaCppConfig', 'FallbackConfig',
    'ModelConfigManager', 'model_config_manager'
]