"""
主配置管理模块

统一的配置管理入口，整合所有子模块配置。
"""

from typing import Dict, Any

from .enums import Mode, ChunkStrategy
from .model_config import ModelConfig
from .chunking_config import ChunkingConfig
from .file_config import FileConfig


class Config:
    """
    Top-level configuration facade supporting local and cloud modes.

    Aggregates the model, chunking, and file sub-configurations behind a
    single flat interface so callers never need to know which sub-module
    owns a given setting.
    """

    def __init__(self):
        """Create the three sub-configuration objects and the lazy-validation flag."""
        self._model_config = ModelConfig()
        self._chunking_config = ChunkingConfig()
        self._file_config = FileConfig()
        # Flipped to True by validate_all(); lets ensure_valid() run only once.
        self._validated = False

    # --- mode ---------------------------------------------------------
    @property
    def mode(self) -> Mode:
        """Current run mode (local or cloud)."""
        return self._model_config.mode

    @property
    def is_local_mode(self) -> bool:
        """True when running in local mode."""
        return self._model_config.is_local_mode

    @property
    def is_cloud_mode(self) -> bool:
        """True when running in cloud mode."""
        return self._model_config.is_cloud_mode

    # --- model settings -------------------------------------------------
    @property
    def device(self) -> str:
        """Compute device identifier."""
        return self._model_config.device

    @property
    def local_model_path(self) -> str:
        """Filesystem path of the local embedding model."""
        return self._model_config.local_model_path

    @property
    def ollama_model(self) -> str:
        """Name of the Ollama model to use."""
        return self._model_config.ollama_model

    @property
    def ollama_timeout(self) -> float:
        """Request timeout (seconds) for Ollama calls."""
        return self._model_config.ollama_timeout

    @property
    def embed_batch_size(self) -> int:
        """Batch size used when computing embeddings."""
        return self._model_config.embed_batch_size

    @property
    def openai_api_key(self) -> str:
        """API key for the OpenAI-compatible endpoint."""
        return self._model_config.openai_api_key

    @property
    def openai_base_url(self) -> str:
        """Base URL of the OpenAI-compatible endpoint."""
        return self._model_config.openai_base_url

    @property
    def openai_embedding_model(self) -> str:
        """Cloud embedding model name."""
        return self._model_config.openai_embedding_model

    @property
    def openai_chat_model(self) -> str:
        """Cloud chat model name."""
        return self._model_config.openai_chat_model

    # --- chunking settings -----------------------------------------------
    @property
    def chunk_strategy(self) -> ChunkStrategy:
        """Active chunking strategy."""
        return self._chunking_config.strategy

    @property
    def chunk_size(self) -> int:
        """Target size of each chunk."""
        return self._chunking_config.chunk_size

    @property
    def chunk_overlap(self) -> int:
        """Overlap between consecutive chunks."""
        return self._chunking_config.chunk_overlap

    @property
    def min_chunk_size(self) -> int:
        """Lower bound on chunk size."""
        return self._chunking_config.min_chunk_size

    @property
    def max_chunk_size(self) -> int:
        """Upper bound on chunk size."""
        return self._chunking_config.max_chunk_size

    @property
    def semantic_chunk_threshold(self) -> float:
        """Similarity threshold used by semantic chunking."""
        return self._chunking_config.semantic_chunk_threshold

    @property
    def semantic_chunk_similarity_window_size(self) -> int:
        """Window size for semantic-chunking similarity comparisons."""
        return self._chunking_config.semantic_chunk_similarity_window_size

    @property
    def max_concurrent_chunking(self) -> int:
        """Maximum number of concurrent chunking tasks."""
        return self._chunking_config.max_concurrent_chunking

    # --- file & performance settings ---------------------------------------
    @property
    def data_dir(self) -> str:
        """Path of the data directory."""
        return self._file_config.data_dir

    @property
    def index_dir(self) -> str:
        """Path of the index storage directory."""
        return self._file_config.index_dir

    @property
    def pdf_inline_images(self) -> bool:
        """Whether to keep inline images when parsing PDFs."""
        return self._file_config.pdf_inline_images

    @property
    def pdf_preserve_whitespace(self) -> bool:
        """Whether to preserve whitespace when parsing PDFs."""
        return self._file_config.pdf_preserve_whitespace

    @property
    def word_preserve_formatting(self) -> bool:
        """Whether to preserve formatting in Word documents."""
        return self._file_config.word_preserve_formatting

    @property
    def word_extract_tables(self) -> bool:
        """Whether to extract tables from Word documents."""
        return self._file_config.word_extract_tables

    @property
    def md_preserve_links(self) -> bool:
        """Whether to preserve links in Markdown."""
        return self._file_config.md_preserve_links

    @property
    def md_code_block_chunking(self) -> bool:
        """Whether Markdown code blocks are chunked separately."""
        return self._file_config.md_code_block_chunking

    @property
    def max_concurrent_embedding(self) -> int:
        """Maximum number of concurrent embedding tasks."""
        return self._file_config.max_concurrent_embedding

    @property
    def enable_embedding_cache(self) -> bool:
        """Whether the embedding cache is enabled."""
        return self._file_config.enable_embedding_cache

    @property
    def embedding_cache_ttl(self) -> int:
        """Time-to-live of embedding cache entries, in seconds."""
        return self._file_config.embedding_cache_ttl

    @property
    def max_memory_usage(self) -> int:
        """Memory usage cap, in MB."""
        return self._file_config.max_memory_usage

    # --- aggregated config dicts -----------------------------------------
    def get_embedding_config(self) -> Dict[str, Any]:
        """Return the embedding-model configuration dict."""
        return self._model_config.get_embedding_config()

    def get_llm_config(self) -> Dict[str, Any]:
        """Return the LLM configuration dict."""
        return self._model_config.get_llm_config()

    def get_chunking_config(self) -> Dict[str, Any]:
        """Return the chunking configuration dict."""
        return self._chunking_config.get_config()

    def get_file_type_config(self) -> Dict[str, Any]:
        """Return the per-file-type configuration dict."""
        return self._file_config.get_file_type_config()

    def get_performance_config(self) -> Dict[str, Any]:
        """Return the performance configuration dict."""
        return self._file_config.get_performance_config()

    # --- validation ---------------------------------------------------------
    def validate_paths(self) -> None:
        """Validate path settings of the model and file sub-configs."""
        self._model_config.validate_paths()
        self._file_config.validate_paths()

    def validate_all(self) -> None:
        """Run every validation step and mark the config as validated."""
        self.validate_paths()
        self._validated = True

    def ensure_valid(self) -> None:
        """Validate lazily, at most once, so importing this module never crashes."""
        if self._validated:
            return
        self.validate_all()

    def __str__(self) -> str:
        """Return a human-readable summary of the active configuration."""
        summary = f"""
=== 高级RAG系统配置 ===
运行模式: {self.mode.value}
数据目录: {self.data_dir}
索引目录: {self.index_dir}
计算设备: {self.device}
分块策略: {self.chunk_strategy.value}
分块大小: {self.chunk_size}

==============================
本地模型配置:
- 嵌入模型: {self.local_model_path}
- LLM模型: {self.ollama_model}
- 批次大小: {self.embed_batch_size}

云端模型配置:
- API地址: {self.openai_base_url}
- 嵌入模型: {self.openai_embedding_model}
- 聊天模型: {self.openai_chat_model}

==============================
性能配置:
- 最大并发分块: {self.max_concurrent_chunking}
- 最大并发嵌入: {self.max_concurrent_embedding}
- 启用缓存: {self.enable_embedding_cache}
- 内存限制: {self.max_memory_usage}MB
        """.strip()
        return summary


# Module-level singleton: constructed at import time (validation is deferred
# to ensure_valid(), so importing this module stays side-effect-light).
config = Config()