import os
import yaml
from typing import Dict, Any, Optional, Union, List, Callable
from pydantic_settings import BaseSettings, SettingsConfigDict
from pydantic import Field, ValidationInfo, field_validator, AliasChoices
import logging


def env_field(
    *,
    default: Any = None,
    default_factory: Optional[Callable[[], Any]] = None,
    env: Optional[str] = None,
) -> Any:
    """Build a pydantic Field that can also be populated from an env variable.

    When *env* is given, the field validates from both the exact variable
    name and its lower-cased form, and serializes under the exact name.
    A *default_factory*, when supplied, takes precedence over *default*.
    """
    kwargs: Dict[str, Any] = (
        {"default_factory": default_factory}
        if default_factory is not None
        else {"default": default}
    )

    if env:
        lowered = env.lower()
        kwargs["alias"] = env
        kwargs["serialization_alias"] = env
        # Accept the lower-cased spelling too, unless it is identical.
        kwargs["validation_alias"] = (
            AliasChoices(env, lowered) if lowered != env else env
        )

    return Field(**kwargs)

# Module-level logger shared by every class in this configuration module.
logger = logging.getLogger('config')


class ConfigBase(BaseSettings):
    """Base configuration shared by all settings collections.

    Subclasses inherit this model_config, so every settings group reads
    values from the process environment and an optional ``.env`` file.
    """
    model_config = SettingsConfigDict(
        extra='ignore',            # silently drop unknown keys instead of erroring
        env_file='.env',
        env_file_encoding='utf-8',
        env_ignore_empty=True,     # treat empty environment variables as unset
        populate_by_name=True,     # allow field names in addition to env aliases
    )

class DatabaseConfig(ConfigBase):
    """Database connection settings.

    A full DSN string (``dsn``) takes precedence; the individual
    host/port/user fields are used when no DSN is supplied.
    """
    # Preferred: a complete DSN connection string.
    dsn: str = env_field(default="sqlite:///./data/aichat.db", env="DATABASE_URL")

    # Individual connection settings (used when not connecting via DSN).
    host: str = env_field(default="localhost", env="DB_HOST")
    port: int = env_field(default=5432, env="DB_PORT")
    dbname: str = env_field(default="aichat", env="DB_NAME")
    user: str = env_field(default="postgres", env="DB_USER")
    password: str = env_field(default="", env="DB_PASS")
    sslmode: str = env_field(default="disable", env="DB_SSLMODE")
    pool_size: int = Field(default=10)
    max_overflow: int = Field(default=20)
    vector_dimension: int = Field(default=1536)  # must match the embedding model (1536 fits bge-m3)
    embedding_model: str = env_field(default="", env="EMBEDDING_MODEL")

    @field_validator('password')
    def validate_password(cls, v: str) -> str:
        """Warn when the database password is empty; the value is passed through unchanged.

        The previous warning text was mojibake (GBK bytes decoded as UTF-8)
        and unreadable in the logs; restored as a plain English message.
        """
        if not v:
            logger.warning(
                "Database password is empty; trust authentication or an "
                "in-memory database may be in use"
            )
        return v

class ModelConfig(ConfigBase):
    """Language model runtime configuration."""
    name: str = env_field(default="Qwen2.5-7B-Instruct", env="MODEL_NAME")
    # Quantization: whether enabled, the method (e.g. gptq), and bit width.
    quantized: bool = env_field(default=True, env="MODEL_QUANTIZED")
    quantization_type: str = env_field(default="gptq", env="MODEL_QUANTIZATION_TYPE")
    quantization_bits: int = env_field(default=4, env="MODEL_QUANTIZATION_BITS")
    use_streaming: bool = env_field(default=True, env="MODEL_USE_STREAMING")
    # Generation defaults.
    max_new_tokens: int = env_field(default=4096, env="MODEL_MAX_NEW_TOKENS")
    temperature: float = env_field(default=0.7, env="MODEL_TEMPERATURE")
    top_p: float = env_field(default=0.95, env="MODEL_TOP_P")
    device_map: str = env_field(default="auto", env="MODEL_DEVICE_MAP")
    cache_dir: str = env_field(default="./models", env="MODEL_CACHE_DIR")
    # Empty string means "derive from the model name / default location".
    tokenizer_path: str = env_field(default="", env="MODEL_TOKENIZER_PATH")
    model_path: str = env_field(default="", env="MODEL_PATH")
    use_bitsandbytes: bool = env_field(default=True, env="MODEL_USE_BITSANDBYTES")
    # Ordered list of provider names to try; empty list means no override.
    provider_priority: List[str] = env_field(default_factory=list, env="MODEL_PROVIDER_PRIORITY")

    # LORA configuration (flattened from the YAML 'model.lora' section by ConfigManager).
    lora_enabled: bool = env_field(default=False, env="MODEL_LORA_ENABLED")
    lora_adapter_name: str = env_field(default="", env="MODEL_LORA_ADAPTER_NAME")
    lora_adapter_path: str = env_field(default="./models/lora_adapters", env="MODEL_LORA_ADAPTER_PATH")

class APIConfig(ConfigBase):
    """HTTP API server configuration."""
    host: str = env_field(default="127.0.0.1", env="API_HOST")
    port: int = env_field(default=8001, env="SERVER_PORT")  # keep in sync with the actual serving port
    workers: int = env_field(default=1, env="API_WORKERS")
    cors_origins: Union[List[str], str] = env_field(
        # Production deployments should use an explicit whitelist, never a wildcard.
        default=["http://localhost:8001", "http://127.0.0.1:8001", "http://localhost:5173", "http://127.0.0.1:5173"],
        env="API_CORS_ORIGINS",
    )
    debug: bool = env_field(default=False, env="API_DEBUG")
    version: str = env_field(default="v1", env="API_VERSION")

    @field_validator('cors_origins', mode='before')
    def parse_cors_origins(cls, value: Union[str, List[str]]) -> List[str]:
        """Normalize CORS origin configuration values.

        Accepts either a ready-made list or a comma-separated string.
        Blank entries (empty string, stray or trailing commas) are dropped
        so they cannot reach the CORS middleware as invalid origins.
        """
        if isinstance(value, str):
            return [origin.strip() for origin in value.split(',') if origin.strip()]
        return value

class RAGConfig(ConfigBase):
    """Retrieval augmented generation configuration."""
    enabled: bool = env_field(default=True, env="RAG_ENABLED")
    # Retrieval: number of candidates and minimum similarity score.
    top_k: int = env_field(default=8, env="RAG_TOP_K")
    score_threshold: float = env_field(default=0.4, env="RAG_SCORE_THRESHOLD")
    # Chunking parameters used when indexing documents.
    chunk_size: int = env_field(default=800, env="RAG_CHUNK_SIZE")
    chunk_overlap: int = env_field(default=100, env="RAG_CHUNK_OVERLAP")
    retrieval_batch_size: int = env_field(default=10, env="RAG_RETRIEVAL_BATCH_SIZE")
    docs_dir: str = env_field(default="./docs", env="RAG_DOCS_DIR")

class ToolsConfig(ConfigBase):
    """Tool integration configuration."""
    enabled: bool = env_field(default=True, env="TOOLS_ENABLED")
    # Names of tools the assistant is allowed to invoke.
    allowed_tools: List[str] = env_field(default_factory=lambda: ["web_search", "database_query"], env="TOOLS_ALLOWED")
    # Web search provider settings (flattened from YAML 'tools.web_search' by ConfigManager).
    web_search_enabled: bool = env_field(default=True, env="WEB_SEARCH_ENABLED")
    web_search_api_key: str = env_field(default="", env="GOOGLE_API_KEY")
    web_search_engine: str = env_field(default="google", env="SEARCH_ENGINE")
    web_search_cx: str = env_field(default="", env="GOOGLE_CX")
    bing_api_key: str = env_field(default="", env="BING_API_KEY")

class LearningConfig(ConfigBase):
    """Learning and fine-tuning configuration."""
    enabled: bool = env_field(default=False, env="LEARNING_ENABLED")
    max_training_samples: int = Field(default=10000)

    # QLoRA configuration (flattened from YAML 'learning.qlora_config' by ConfigManager).
    qlora_r: int = Field(default=8)
    qlora_alpha: int = Field(default=32)
    qlora_dropout: float = Field(default=0.1)
    qlora_target_modules: list = Field(default=["q_proj", "k_proj", "v_proj", "o_proj"])
    qlora_bias: str = Field(default="none")

    # Training arguments (flattened from YAML 'learning.training_args' by ConfigManager).
    per_device_train_batch_size: int = Field(default=4)
    gradient_accumulation_steps: int = Field(default=4)
    learning_rate: float = Field(default=2e-4)
    num_train_epochs: int = Field(default=3)
    save_strategy: str = Field(default="epoch")
    save_total_limit: int = Field(default=3)

class OpenAIConfig(ConfigBase):
    """OpenAI provider configuration."""
    enabled: bool = env_field(default=False, env="OPENAI_ENABLED")
    api_key: str = env_field(default="", env="OPENAI_API_KEY")
    # Name of the env var that holds the key (indirection for deployments).
    api_key_env: str = env_field(default="OPENAI_API_KEY", env="OPENAI_API_KEY_ENV")
    # Defaults to the local Ollama address so the OpenAI-compatible endpoint
    # works even when OpenAI itself is not configured.
    api_base: str = env_field(default="http://127.0.0.1:11434/v1", env="OPENAI_API_BASE")
    model: str = env_field(default="gpt-3.5-turbo", env="OPENAI_MODEL")
    timeout: int = env_field(default=60, env="OPENAI_TIMEOUT")

class OllamaConfig(ConfigBase):
    """Ollama provider configuration."""
    enabled: bool = env_field(default=True, env="OLLAMA_ENABLED")
    base_url: str = env_field(default="http://127.0.0.1:11434", env="OLLAMA_BASE")
    model: str = env_field(default="llama3", env="OLLAMA_MODEL")
    # Seconds; generous default since local generation can be slow.
    timeout: int = env_field(default=300, env="OLLAMA_TIMEOUT")

class VLLMConfig(ConfigBase):
    """vLLM provider configuration."""
    enabled: bool = env_field(default=False, env="VLLM_ENABLED")
    # OpenAI-compatible endpoint exposed by the vLLM server.
    api_base: str = env_field(default="http://localhost:8000/v1", env="VLLM_API_BASE")
    model: str = env_field(default="meta-llama/Llama-2-7b-chat-hf", env="VLLM_MODEL")
    timeout: int = env_field(default=300, env="VLLM_TIMEOUT")

class LLMProvidersConfig(ConfigBase):
    """Aggregate provider configuration.

    Bundles per-provider settings; ``active_provider`` selects which one
    is used at runtime (see ``get_current_model_name``).
    """
    openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
    ollama: OllamaConfig = Field(default_factory=OllamaConfig)
    vllm: VLLMConfig = Field(default_factory=VLLMConfig)
    active_provider: str = env_field(default="ollama", env="ACTIVE_LLM_PROVIDER")

class MonitoringConfig(ConfigBase):
    """Monitoring and metrics configuration."""
    enabled: bool = env_field(default=True, env="MONITORING_ENABLED")
    prometheus_port: int = env_field(default=9090, env="PROMETHEUS_PORT")
    metrics_path: str = env_field(default="/metrics", env="METRICS_PATH")

class StartupConfig(ConfigBase):
    """Startup feature toggles.

    Each flag skips one subsystem during application startup; RAG, tools
    and learning are skipped by default.
    """
    skip_database: bool = env_field(default=False, env="SKIP_DATABASE")
    skip_model_preload: bool = env_field(default=False, env="SKIP_MODEL_PRELOAD")
    skip_rag: bool = env_field(default=True, env="SKIP_RAG")
    skip_tools: bool = env_field(default=True, env="SKIP_TOOLS")
    skip_learning: bool = env_field(default=True, env="SKIP_LEARNING")

class LoggingConfig(ConfigBase):
    """Logging output configuration."""
    level: str = env_field(default="INFO", env="LOG_LEVEL")
    log_file: str = env_field(default="./logs/aichat.log", env="LOG_FILE")
    # Human-readable size/duration strings, presumably consumed by a
    # loguru-style logger — TODO confirm against the logging setup code.
    rotation: str = env_field(default="10 MB", env="LOG_ROTATION")
    retention: str = env_field(default="30 days", env="LOG_RETENTION")

class AuthConfig(ConfigBase):
    """Authentication and authorization configuration."""
    feature_auth: bool = env_field(default=True, env="FEATURE_AUTH")
    allow_register: bool = env_field(default=False, env="ALLOW_REGISTER")
    # Placeholder default — MUST be overridden in any real deployment.
    jwt_secret: str = env_field(default="change-me", env="JWT_SECRET")
    jwt_algorithm: str = env_field(default="HS256", env="JWT_ALGORITHM")
    jwt_expire_minutes: int = env_field(default=30, env="JWT_EXPIRE_MINUTES")
    jwt_refresh_expire_days: int = env_field(default=7, env="JWT_REFRESH_EXPIRE_DAYS")

class ConfigManager:
    """Configuration manager that aggregates settings from files and environment variables.

    Singleton: repeated construction returns the same initialized instance.
    Precedence is YAML base file, then local YAML overrides; environment
    variables are applied on top by the pydantic settings classes themselves.
    """

    _instance = None  # cached singleton instance

    def __new__(cls):
        # Classic singleton: build and initialize only on first construction.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialize()
        return cls._instance

    def _initialize(self) -> None:
        """Initialize configuration sources and models."""
        # Resolve the 'configs' directory three levels above this file
        # (i.e. relative to the project root — verify against repo layout).
        self.config_dir = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
            'configs',
        )
        self._load_config_files()
        self._initialize_config_models()

    def _load_config_files(self) -> None:
        """Load the base configuration file and optional local overrides.

        Populates ``self.raw_config``; load failures are logged but never
        raised, so a broken YAML file degrades to defaults instead of
        aborting startup.
        """
        main_config_path = os.path.join(self.config_dir, 'config.yaml')
        local_config_path = os.path.join(self.config_dir, 'config.local.yaml')

        self.raw_config: Dict[str, Any] = {}

        if os.path.exists(main_config_path):
            try:
                with open(main_config_path, 'r', encoding='utf-8') as handle:
                    # safe_load returns None for an empty file; fall back to {}.
                    self.raw_config = yaml.safe_load(handle) or {}
                logger.info("Loaded configuration file: %s", main_config_path)
            except Exception as exc:
                logger.error("Failed to load configuration file %s: %s", main_config_path, exc)
        else:
            logger.warning("Configuration file not found: %s", main_config_path)

        if os.path.exists(local_config_path):
            try:
                with open(local_config_path, 'r', encoding='utf-8') as handle:
                    local_config = yaml.safe_load(handle) or {}
                # Local overrides win over the base configuration.
                self.raw_config = self._merge_configs(self.raw_config, local_config)
                logger.info("Loaded local configuration overrides: %s", local_config_path)
            except Exception as exc:
                logger.error("Failed to load local configuration file %s: %s", local_config_path, exc)

    def _merge_configs(self, base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
        """Merge two configuration dictionaries recursively.

        Nested dicts are merged key-by-key; any other value in *override*
        replaces the corresponding value in *base*. *base* is not mutated.
        """
        result = base.copy()
        for key, value in override.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                result[key] = self._merge_configs(result[key], value)
            else:
                result[key] = value
        return result

    def _initialize_config_models(self) -> None:
        """Initialize strongly typed configuration models.

        Pulls each top-level YAML section, flattens a few nested sections
        into the flat field names the settings classes declare, and builds
        one pydantic model per section (env vars still take effect through
        each model's settings sources).
        """
        model_config_data = self.raw_config.get('model', {})
        db_config_data = self.raw_config.get('database', {})
        api_config_data = self.raw_config.get('api', {})
        rag_config_data = self.raw_config.get('rag', {})
        tools_config_data = self.raw_config.get('tools', {})
        learning_config_data = self.raw_config.get('learning', {})
        logging_config_data = self.raw_config.get('logging', {})
        llm_providers_data = self.raw_config.get('llm_providers', {})
        monitoring_config_data = self.raw_config.get('monitoring', {})
        startup_config_data = self.raw_config.get('startup', {})
        auth_config_data = self.raw_config.get('auth', {})

        self.db = DatabaseConfig(**db_config_data)

        # Flatten the nested 'lora' section into ModelConfig's lora_* fields.
        model_data = model_config_data.copy()
        lora_config = model_data.pop('lora', {})
        if lora_config:
            model_data['lora_enabled'] = lora_config.get('enabled', False)
            model_data['lora_adapter_name'] = lora_config.get('adapter_name', '')
            model_data['lora_adapter_path'] = lora_config.get('adapter_path', './models/lora_adapters')
        self.model = ModelConfig(**model_data)

        self.api = APIConfig(**api_config_data)
        self.rag = RAGConfig(**rag_config_data)

        # Flatten 'web_search' into ToolsConfig's web_search_* fields and
        # drop nested tool sections that have no flat counterpart.
        tools_data = tools_config_data.copy()
        web_search_config = tools_data.pop('web_search', {})
        if web_search_config:
            tools_data['web_search_enabled'] = web_search_config.get('enabled', True)
            tools_data['web_search_api_key'] = web_search_config.get('api_key', '')
            tools_data['web_search_engine'] = web_search_config.get('search_engine', 'google')
            tools_data['web_search_cx'] = web_search_config.get('cx', '')
        tools_data.pop('database_query', None)
        tools_data.pop('file_operation', None)
        self.tools = ToolsConfig(**tools_data)

        # Flatten 'qlora_config' and 'training_args' into LearningConfig fields.
        learning_data = learning_config_data.copy()
        qlora_config = learning_data.pop('qlora_config', {})
        if qlora_config:
            learning_data['qlora_r'] = qlora_config.get('r', 8)
            learning_data['qlora_alpha'] = qlora_config.get('lora_alpha', 32)
            learning_data['qlora_dropout'] = qlora_config.get('lora_dropout', 0.1)
            learning_data['qlora_target_modules'] = qlora_config.get('target_modules', ["q_proj", "k_proj", "v_proj", "o_proj"])
            learning_data['qlora_bias'] = qlora_config.get('bias', "none")
        training_args = learning_data.pop('training_args', {})
        if training_args:
            learning_data['per_device_train_batch_size'] = training_args.get('per_device_train_batch_size', 4)
            learning_data['gradient_accumulation_steps'] = training_args.get('gradient_accumulation_steps', 4)
            learning_data['learning_rate'] = training_args.get('learning_rate', 2e-4)
            learning_data['num_train_epochs'] = training_args.get('num_train_epochs', 3)
            learning_data['save_strategy'] = training_args.get('save_strategy', "epoch")
            learning_data['save_total_limit'] = training_args.get('save_total_limit', 3)
        self.learning = LearningConfig(**learning_data)

        self.logging = LoggingConfig(**logging_config_data)
        self.llm_providers = LLMProvidersConfig(**llm_providers_data)
        self.monitoring = MonitoringConfig(**monitoring_config_data)
        self.startup = StartupConfig(**startup_config_data)
        self.auth = AuthConfig(**auth_config_data)

    def get_server_config(self) -> Dict[str, Any]:
        """Return the FastAPI server configuration payload."""
        return {
            "host": self.api.host,
            "port": self.api.port,
            "workers": self.api.workers,
            "debug": self.api.debug,
            # APIConfig declares no 'reload' field, so this normally yields False.
            "reload": getattr(self.api, 'reload', False),
            "log_level": self.logging.level,
        }

    def get(self, key: str, default: Any = None) -> Any:
        """Retrieve a configuration value by dotted path.

        Example: ``get('api.port')`` walks ``self.api.port``. Returns
        *default* when any segment of the path is missing.
        """
        try:
            value: Any = self
            for part in key.split('.'):
                value = getattr(value, part)
            return value
        except (AttributeError, KeyError):
            return default
# Eagerly instantiate the singleton at import time so the module-level
# aliases below can read from it.
config_manager = ConfigManager()

def get_current_model_name() -> str:
    """Return the real model name configured for the active backend provider.

    Intended for prompts and similar display purposes. Returns ``"unknown"``
    when the provider is unset, unrecognized, or any lookup fails.
    """
    try:
        active = (config_manager.llm_providers.active_provider or "").lower()
    except Exception:
        return "unknown"

    # Defer attribute access into the lambdas so lookup errors are caught below.
    model_getters = {
        "ollama": lambda: config_manager.llm_providers.ollama.model,
        "openai": lambda: config_manager.llm_providers.openai.model,
        "vllm": lambda: config_manager.llm_providers.vllm.model,
    }

    try:
        getter = model_getters.get(active)
        if getter is not None:
            return getter()
    except Exception:
        return "unknown"

    return "unknown"

# ---------------------------------------------------------------------------
# Legacy module-level aliases.
# Older code imports these flat names directly; new code should read from
# config_manager instead. Note these capture the values at import time.
# ---------------------------------------------------------------------------

# Database configuration (legacy aliases)
db_host = config_manager.db.host
db_port = config_manager.db.port
db_name = config_manager.db.dbname
db_user = config_manager.db.user
db_pass = config_manager.db.password
db_sslmode = config_manager.db.sslmode
db_vector_dimension = config_manager.db.vector_dimension

# API configuration
HOST = config_manager.api.host
PORT = config_manager.api.port
CORS_ORIGINS = config_manager.api.cors_origins
API_VERSION = config_manager.api.version

# Model configuration
MODEL_NAME = config_manager.model.name
USE_STREAMING = config_manager.model.use_streaming
MAX_NEW_TOKENS = config_manager.model.max_new_tokens
TEMPERATURE = config_manager.model.temperature
TOP_P = config_manager.model.top_p

# RAG configuration
RAG_TOP_K = config_manager.rag.top_k
RAG_SCORE_THRESHOLD = config_manager.rag.score_threshold
CHUNK_SIZE = config_manager.rag.chunk_size
CHUNK_OVERLAP = config_manager.rag.chunk_overlap
DOCS_DIR = config_manager.rag.docs_dir

# Tool configuration
GOOGLE_API_KEY = config_manager.tools.web_search_api_key
GOOGLE_CX = config_manager.tools.web_search_cx
BING_API_KEY = config_manager.tools.bing_api_key
DEFAULT_SEARCH_ENGINE = config_manager.tools.web_search_engine

# Auth configuration
FEATURE_AUTH = config_manager.auth.feature_auth
ALLOW_REGISTER = config_manager.auth.allow_register
JWT_SECRET = config_manager.auth.jwt_secret
JWT_ALGORITHM = config_manager.auth.jwt_algorithm
JWT_EXPIRE_MINUTES = config_manager.auth.jwt_expire_minutes
JWT_REFRESH_EXPIRE_DAYS = config_manager.auth.jwt_refresh_expire_days

# Backwards compatibility: expose config manager instance
config = config_manager
