"""
统一配置模型

此模块定义了应用的所有配置项，使用 Pydantic Settings 进行管理。
它负责从 .env 文件、环境变量和默认值中加载配置，并提供类型安全的访问。
"""
from __future__ import annotations
import ast
import logging
import os
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any

from pydantic import Field, BaseModel, model_validator, computed_field, Json
from pydantic_settings import BaseSettings, SettingsConfigDict

from axiom_boot.ai.impl.langchain.settings_impl import LangchainSettings
from axiom_boot.conf.filesystem_settings import FileSystemSettings

logger = logging.getLogger(__name__)


# =========================================================================
# 模块化配置模型 (Modular Configuration Models)
# =========================================================================

class EmbeddingProfileConfig(BaseModel):
    """Schema of a single embedding-model configuration profile.

    Profiles are collected under ``EmbeddingSettings.profiles`` and populated
    from nested environment variables such as
    ``EMBEDDING__PROFILES__<NAME>__API_KEY``.
    """
    provider: str = Field("dashscope", description="Embedding 服务提供商, 例如: dashscope")
    provider_display_name: Optional[str] = Field(None, description="提供商的显示名称, 例如: 通义千问")
    model_name: str = Field("text-embedding-v2", description="使用的具体模型名称")
    model_display_name: Optional[str] = Field(None, description="模型的显示名称, 例如: v2通用向量模型")
    # Output dimensionality; must agree with the vector store's embed_dim
    # (see PGVectorStoreSettings.embed_dim, default 1536 as well).
    dim: int = Field(1536, description="模型输出的向量维度")
    api_key: Optional[str] = Field(None, description="对应服务提供商的 API Key")
    # Env values arrive as strings; Settings._pre_process_settings parses this
    # field into a list via ast.literal_eval before validation.
    usage: Optional[List[str]] = Field(None, description="模型的推荐用途, 例如 ['indexing', 'querying']")
    compatibility_group: str = Field("", description="用于标识模型兼容性的组名 (例如 'bge_v1.5_family')")


class EmbeddingSettings(BaseModel):
    """
    Top-level configuration of the embedding module, holding every available
    model profile.

    The nested ``profiles`` dict lets pydantic-settings resolve environment
    variables of the form ``EMBEDDING__PROFILES__DEFAULT__API_KEY``.
    """
    profiles: Dict[str, EmbeddingProfileConfig] = Field(default_factory=dict, description="所有可用的 Embedding 模型配置档案")


class PGVectorStoreSettings(BaseModel):
    """Connection and pooling options for the PostgreSQL (pgvector) store."""
    user: Optional[str] = Field(None, description="PostgreSQL 用户名")
    password: Optional[str] = Field(None, description="PostgreSQL 密码")
    host: Optional[str] = Field(None, description="PostgreSQL 服务器地址")
    port: int = Field(5432, description="PostgreSQL 端口")
    database: Optional[str] = Field(None, description="PostgreSQL 数据库名称")
    pool_size: int = Field(5, description="连接池大小")
    max_overflow: int = Field(10, description="连接池最大溢出数")
    # Must match the output dimensionality of the active embedding model
    # (see EmbeddingProfileConfig.dim).
    embed_dim: int = Field(1536, description="向量维度，必须与 Embedding 模型输出的维度一致")


class VectorStoreProviderSettings(BaseModel):
    """Selects which vector-store backend is active."""
    provider: str = Field("pgvector", description="活动的 VectorStore 提供商, 例如: pgvector, chroma")


class RerankerProfileConfig(BaseModel):
    """Schema of a single reranker-model configuration profile."""
    provider: str = Field("sentence-transformer-rerank", description="Reranker 服务提供商")
    provider_display_name: Optional[str] = Field(None, description="提供商的显示名称, 例如: 智源")
    model_name: str = Field("BAAI/bge-reranker-base", description="要使用的 Cross-Encoder 模型名称")
    model_display_name: Optional[str] = Field(None, description="模型的显示名称, 例如: BGE 中文重排模型")
    top_n: int = Field(3, description="重排序后返回的前 N 个结果")


class PostprocessorProfile(BaseModel):
    """Schema of a single retrieval post-processor configuration profile."""
    provider: str
    provider_display_name: Optional[str] = None
    model_name: Optional[str] = None
    model_display_name: Optional[str] = None
    top_n: Optional[int] = None
    # Free-form options dict (added for ContextualExpansionPostprocessor);
    # defaults to an empty dict rather than None.
    config: Optional[Dict[str, Any]] = Field(default_factory=dict)

class PostprocessorSettings(BaseModel):
    """Container of named post-processor profiles."""
    # Typed as Dict[str, PostprocessorProfile] so dynamic profile keys coming
    # from env vars are captured; default_factory=dict guarantees `profiles`
    # is an empty dict (never None) when .env defines no profiles.
    profiles: Dict[str, PostprocessorProfile] = Field(default_factory=dict)


class VectorSettings(BaseModel):
    """Top-level configuration of the vector-store module."""
    enabled: bool = Field(False, description="是否启用向量存储模块")
    store: VectorStoreProviderSettings = Field(default_factory=VectorStoreProviderSettings, description="向量存储 (VectorStore) 提供商配置")
    pgvector: PGVectorStoreSettings = Field(default_factory=PGVectorStoreSettings, description="PostgreSQL 向量存储的特定配置")
    postprocessor: PostprocessorSettings = Field(default_factory=PostprocessorSettings, description="后处理器 (Postprocessor) 配置")
    # chroma: ChromaStoreSettings = Field(default_factory=ChromaStoreSettings, description="ChromaDB 向量存储的特定配置") # placeholder for future backends


class TaskSettings(BaseModel):
    """Task-queue (ARQ) configuration."""
    enabled: bool = Field(False, description="是否启用任务队列功能")
    scan_packages: List[str] = Field(default=[], description="自动发现 @task 装饰器的包路径列表")
    # ARQ's own Redis connection; if not provided, settings.redis_url is
    # reused (per the original note — the fallback happens at the consumer).
    redis_url: Optional[str] = Field(None, description="ARQ Redis 连接 URL")


class ScraperSettings(BaseModel):
    """Scraper module configuration (downloaders, middleware, concurrency)."""
    default_downloader: str = Field("httpx", description="默认使用的下载器名称 (例如 'httpx', 'playwright')")
    default_user_agent: str = Field(
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        description="爬虫默认使用的 User-Agent"
    )
    logging_enabled: bool = Field(False, description="是否为爬虫引擎启用详细的日志记录中间件")
    
    # Retry-policy settings
    retry_enabled: bool = Field(True, description="是否启用重试中间件")
    retry_times: int = Field(3, description="最大重试次数")
    retry_on_status: List[int] = Field(
        default=[500, 502, 503, 504, 522, 524, 408, 429],
        description="需要触发重试的 HTTP 状态码列表"
    )

    # Rate-limiting settings
    rate_limit_enabled: bool = Field(True, description="是否启用请求频率控制中间件")
    rate_limit_min_delay: float = Field(0.0, description="每个请求之间的最小随机延时（秒）")
    rate_limit_max_delay: float = Field(1.0, description="每个请求之间的最大随机延时（秒）")

    # Playwright downloader settings
    playwright_enabled: bool = Field(False, description="是否启用 Playwright 下载器服务")
    playwright_browser_endpoint: Optional[str] = Field(None, description="预先启动的 Playwright 浏览器的连接端点 (例如 'ws://127.0.0.1:9222')")
    playwright_headless: bool = Field(True, description="是否以无头模式运行 Playwright 浏览器 (仅当未提供 endpoint 时生效)")
    playwright_timeout: int = Field(30000, description="Playwright 页面加载超时时间 (毫秒)")

    # Proxy settings
    proxy_enabled: bool = Field(False, description="是否为所有请求启用代理中间件")
    proxy_mode: str = Field("fixed_list", description="代理模式: 'fixed_list' (使用静态列表) 或 'api' (从URL获取)")
    proxy_list: List[str] = Field(
        default=[],
        description="代理服务器的静态列表 (仅在 mode='fixed_list' 时使用), 例如 ['http://user:pass@host1:port', 'http://host2:port']"
    )
    proxy_api_url: Optional[str] = Field(None, description="用于获取单个代理IP的API地址 (仅在 mode='api' 时使用)")

    # Concurrency control
    concurrency_limit: int = Field(5, description="爬虫引擎的最大并发请求数")

    # More options (proxy pools, richer retry policies, ...) may be added later.
    # playwright_endpoint: Optional[str] = Field(None, description="Playwright 浏览器的连接端点 (例如 'ws://localhost:3000')")

class Settings(BaseSettings):
    """
    Core application configuration model.

    Pydantic-Settings loads values with the following precedence:
    1. Environment variables (highest priority).
    2. Variables defined in the .env file.
    3. Defaults declared on the model (lowest priority).
    """
    # =========================================================================
    # Core Application Settings
    # =========================================================================
    app_name: str = Field("Axiom Application", description="应用名称")
    app_version: str = Field("0.1.0", description="应用版本")
    debug: bool = Field(False, description="是否开启调试模式")
    reload: bool = Field(False, description="是否开启热重载，仅建议在开发时使用")
    host: str = Field("127.0.0.1", description="服务器监听地址")
    port: int = Field(8000, description="服务器监听端口")
    workers: int = Field(1, description="工作进程数 (仅在 gunicorn/uvicorn 模式下生效)")
    global_proxy: Optional[str] = Field(None, description="全局 HTTP/HTTPS 代理, 例如 'http://127.0.0.1:7890'")


    # =========================================================================
    # Component scanning & framework behaviour
    # =========================================================================
    scan_base_dirs: List[str] = Field(default_factory=list, description="需要进行组件扫描的包路径列表，例如 ['src', 'app']")

    # =========================================================================
    # Distributed ID generator settings
    # =========================================================================
    datacenter_id: int = Field(1, description="数据中心ID (0-31)", ge=0, le=31)
    worker_id: int = Field(1, description="工作节点ID (0-31)", ge=0, le=31)

    # -------------------------------------------------------------------------
    # Logging
    # -------------------------------------------------------------------------
    log_level: str = Field("INFO", description="日志级别 (e.g., DEBUG, INFO, WARNING)")
    log_file: Optional[str] = Field(None, description="日志文件路径 (如果设置)")
    log_third_party_levels: dict[str, str] = Field(
        default={"uvicorn": "WARNING", "sqlalchemy": "WARNING"},
        description="第三方库的日志级别"
    )

    # -------------------------------------------------------------------------
    # Generic database settings
    # -------------------------------------------------------------------------
    db_type: Optional[str] = Field(None, description="主数据库类型 (例如 'mysql', 'postgresql')")

    # -------------------------------------------------------------------------
    # MySQL (effective only when db_type == 'mysql' or mysql_url is given)
    # -------------------------------------------------------------------------
    mysql_url: Optional[str] = Field(None, description="MySQL 异步连接字符串 (例如 'mysql+aiomysql://user:pass@host:port/db')")
    mysql_pool_size: int = Field(10, description="MySQL 连接池大小")
    mysql_max_overflow: int = Field(20, description="MySQL 连接池最大溢出数")
    mysql_pool_pre_ping: bool = Field(True, description="在从池中获取连接前进行 'ping' 检查")
    mysql_echo: bool = Field(False, description="是否打印所有执行的 SQL 语句，用于调试")

    # -------------------------------------------------------------------------
    # PostgreSQL (effective only when db_type == 'postgresql' or postgresql_url is given)
    # -------------------------------------------------------------------------
    postgresql_url: Optional[str] = Field(None, description="PostgreSQL 异步连接字符串 (例如 'postgresql+asyncpg://user:pass@host:port/db')")
    postgresql_pool_size: int = Field(10, description="PostgreSQL 连接池大小")
    postgresql_max_overflow: int = Field(20, description="PostgreSQL 连接池最大溢出数")
    postgresql_pool_pre_ping: bool = Field(True, description="在从池中获取连接前进行 'ping' 检查")
    postgresql_echo: bool = Field(False, description="是否打印所有执行的 SQL 语句，用于调试")

    # -------------------------------------------------------------------------
    # MongoDB (effective only when mongodb_url is given)
    # -------------------------------------------------------------------------
    mongodb_url: Optional[str] = Field(None, description="MongoDB 连接 URL")
    mongodb_database: Optional[str] = Field(None, description="MongoDB 数据库名称")

    # -------------------------------------------------------------------------
    # Redis (effective only when redis_url is given)
    # -------------------------------------------------------------------------
    redis_url: Optional[str] = Field(None, description="Redis 连接 URL")

    # -------------------------------------------------------------------------
    # Security & authentication
    # -------------------------------------------------------------------------
    oauth_enabled: bool = Field(False, description="是否启用 OAuth 2.0 认证")
    oauth_secret_key: str = Field("default_secret_key", description="用于签名 JWT 的密钥")
    oauth_algorithm: str = Field("HS256", description="JWT 签名算法")
    oauth_access_token_expire_minutes: int = Field(30, description="访问令牌的过期时间（分钟）")

    # Raw comma-separated whitelist string read from .env; callers should use
    # the computed property `oauth_whitelist` instead of this field.
    oauth_whitelist_str: str = Field(default='', alias='OAUTH_WHITELIST', description="从 .env 读取的原始白名单字符串, 应为逗号分隔")

    @computed_field
    @property
    def oauth_whitelist(self) -> list[str]:
        """
        Computed property: parse the comma-separated whitelist string loaded
        from .env and merge it with the framework's mandatory default paths
        to produce the final authentication whitelist.
        """
        # Simple, robust parsing: split on commas and drop empty fragments.
        parsed_whitelist: list[str] = []
        if self.oauth_whitelist_str:
            parsed_whitelist = [path.strip() for path in self.oauth_whitelist_str.split(',') if path.strip()]

        # Paths the framework always needs reachable without authentication.
        default_whitelist = ["/docs", "/openapi.json", "/favicon.ico", "/oauth/token", "/docs/oauth2-redirect", "/redoc", "/oauth/refresh", "/oauth/logout"]

        # Merge and deduplicate. dict.fromkeys preserves insertion order, so
        # the result is deterministic (the former list(set(...)) was not).
        final_whitelist = list(dict.fromkeys(default_whitelist + parsed_whitelist))
        # Diagnostics moved from print() to debug logging so the property no
        # longer writes to stdout on every access.
        logger.debug("OAuth whitelist (raw=%r) resolved to %s", self.oauth_whitelist_str, final_whitelist)
        return final_whitelist

    # -------------------------------------------------------------------------
    # Framework behaviour switches
    # -------------------------------------------------------------------------
    run_mode: str = Field("web", description="[内部] 应用的运行模式 ('web' 或 'worker')")
    database_enabled: bool = Field(False, description="是否启用数据库连接功能")
    cache_enabled: bool = Field(False, description="是否启用 Redis 连接功能")
    enable_root_route: bool = Field(True, description="是否启用根路径 '/' 欢迎路由")
    enable_health_check: bool = Field(True, description="是否启用 '/health' 健康检查路由")
    middlewares: List[Dict[str, Any]] = Field(
        default=[],
        description="要加载的中间件列表，按顺序执行"
    )

    # =========================================================================
    # Modular sub-configurations
    # =========================================================================
    filesystem: FileSystemSettings = Field(default_factory=FileSystemSettings, description="文件系统模块配置")
    langchain: LangchainSettings = Field(default_factory=LangchainSettings, description="LangChain 模块配置")
    vector: VectorSettings = Field(default_factory=VectorSettings, description="向量存储模块配置")
    embedding: EmbeddingSettings = Field(default_factory=EmbeddingSettings, description="向量生成 (Embedding) 模块配置")
    scraper: ScraperSettings = Field(default_factory=ScraperSettings, description="爬虫模块配置")

    # =========================================================================
    # Task Queue Settings
    # =========================================================================
    task: TaskSettings = Field(default_factory=TaskSettings, description="任务队列 (ARQ) 配置")

    # Loader configuration: read `.env` (overridable via the ENV_FILE env
    # var), resolve nested models through the `__` delimiter, and silently
    # ignore unknown keys.
    model_config = SettingsConfigDict(
        env_file=os.environ.get("ENV_FILE", ".env"),
        env_file_encoding='utf-8',
        case_sensitive=False,
        env_nested_delimiter='__',
        extra='ignore'
    )

    def __init__(self, **values: Any):
        super().__init__(**values)
        # Apply the global proxy immediately after all settings are loaded so
        # subsequently created HTTP clients pick it up from the environment.
        if self.global_proxy:
            logger.info(f"正在应用全局代理: {self.global_proxy}")
            os.environ['HTTP_PROXY'] = self.global_proxy
            os.environ['HTTPS_PROXY'] = self.global_proxy

    @model_validator(mode='before')
    @classmethod
    def _pre_process_settings(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """
        Pre-process raw values before validation.

        Environment variables arrive as plain strings, so fields expecting
        Python lists/dicts are parsed here with ast.literal_eval.
        """
        keys_to_parse = ['scan_base_dirs', 'middlewares']
        for key in keys_to_parse:
            if key in values and isinstance(values[key], str):
                try:
                    values[key] = ast.literal_eval(values[key])
                except (ValueError, SyntaxError):
                    # Leave the raw string in place; pydantic will raise a
                    # clear validation error if it is unusable.
                    pass

        # Parse the USAGE field of nested EMBEDDING__PROFILES entries.
        # Guard every level with isinstance checks: depending on how the env
        # was written, intermediate nodes may not be dicts yet, and the old
        # unguarded access would crash (or do a substring `in` check).
        embedding = values.get('embedding')
        if isinstance(embedding, dict) and isinstance(embedding.get('profiles'), dict):
            for profile in embedding['profiles'].values():
                if isinstance(profile, dict) and isinstance(profile.get('usage'), str):
                    try:
                        profile['usage'] = ast.literal_eval(profile['usage'])
                    except (ValueError, SyntaxError):
                        logger.warning(f"无法将 embedding profile 的 usage '{profile['usage']}' 解析为列表。")
                        profile['usage'] = None

        return values

    def get_logging_config(self) -> Dict[str, Any]:
        """
        Build a logging.config.dictConfig-compatible dictionary from the
        current settings.

        When `log_file` is set, per-level rotating file handlers are added
        under a dated sub-directory, each restricted to exactly one severity
        via LevelFilter; otherwise only the console handler is configured.
        """
        log_level = self.log_level.upper()

        config: Dict[str, Any] = {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "default": {
                    "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
                },
            },
            "handlers": {
                "console": {
                    "class": "logging.StreamHandler",
                    "formatter": "default",
                    "stream": "ext://sys.stdout",
                },
            },
            "root": {
                "level": log_level,
                "handlers": ["console"],
            },
        }

        # ---------------------------------------------------------------------
        # Split log files by date and level, e.g. <log_dir>/2024-01-01/info.log
        # ---------------------------------------------------------------------
        if self.log_file:
            log_dir = Path(self.log_file).parent
            date_str = datetime.now().strftime("%Y-%m-%d")
            date_log_dir = log_dir / date_str
            date_log_dir.mkdir(exist_ok=True, parents=True)

            levels_to_log = ["debug", "info", "warning", "error"]

            # 1. One filter per level, each matching that level exactly.
            config["filters"] = {
                f"{level}_filter": {"()": "axiom_boot.conf.manager.LevelFilter", "level": level.upper()}
                for level in levels_to_log
            }

            # 2. One dedicated, filtered rotating-file handler per level.
            for level in levels_to_log:
                handler_name = f"file_{level}"
                config["handlers"][handler_name] = {
                    "class": "concurrent_log_handler.ConcurrentRotatingFileHandler",
                    "formatter": "default",
                    "level": level.upper(),
                    "filename": str(date_log_dir / f"{level}.log"),
                    "maxBytes": 1024 * 1024 * 10,  # 10 MB per file before rotation
                    "backupCount": 5,
                    "encoding": "utf-8",
                    "filters": [f"{level}_filter"],  # restrict to exactly this level
                }
                config["root"]["handlers"].append(handler_name)

        # Third-party loggers get explicit levels, reuse the root handlers and
        # do not propagate (avoids duplicate log lines).
        for logger_name, level in self.log_third_party_levels.items():
            config.setdefault("loggers", {})[logger_name] = {
                "level": level.upper(),
                "handlers": config["root"]["handlers"],
                "propagate": False,
            }

        return config


# =========================================================================
# 日志级别过滤器 (为实现分级日志而新增)
# =========================================================================
import logging

class LevelFilter(logging.Filter):
    """Admit only log records whose severity matches one exact level.

    A handler's ``level`` setting lets that level *and everything above it*
    through; attaching this filter narrows a handler down to a single
    severity, which is what per-level log files rely on.
    """

    def __init__(self, level: str):
        super().__init__()
        # Resolve the level name (e.g. "INFO") to its numeric constant.
        self._target_levelno: int = getattr(logging, level.upper())

    def filter(self, record: logging.LogRecord) -> bool:
        """Return True only for records at exactly the configured level."""
        return self._target_levelno == record.levelno


# Instantiate the configuration object once at import time so other modules
# can simply do `from axiom_boot.conf import settings`.
# (The CWD diagnostic print and the redundant duplicate `import os` that
# previously ran here were removed; `os` is imported at the top of the file.)
settings = Settings()
