"""K8s资源处理器模块"""

import os
from typing import Dict, Any, Optional, List
import logging
from tqdm import tqdm

from src.extractors.factory import ExtractorFactory
from src.writers.factory import WriterFactory
from src.core.parser import ResourceParser
from src.core.resource_detector import ResourceDetector
from src.core.file_manager import FileManager
from src.config.manager import ConfigManager
from src.utils.cache_manager import CacheManager
from src.utils.parallel_processor import ParallelProcessor
from src.utils.performance_monitor import PerformanceMonitor

class FileProcessor:
    """
    K8s resource file processor.

    Coordinates the parse -> detect -> extract -> write pipeline that turns
    Kubernetes resource files (YAML/JSON) into Markdown documents, with
    optional caching and parallel execution.
    """

    def __init__(
        self,
        source_dir: str,
        output_dir: str,
        config_manager: Optional[ConfigManager] = None,
        logger: Optional[logging.Logger] = None
    ):
        """
        Initialize the file processor.

        Args:
            source_dir: Directory containing the source files.
            output_dir: Directory the generated Markdown is written to.
            config_manager: Optional configuration manager.
            logger: Optional logger; a module-level logger is used if omitted.
        """
        self.source_dir = source_dir
        self.output_dir = output_dir
        self.config_manager = config_manager
        self.logger = logger or logging.getLogger(__name__)

        # Resource-kind filtering defaults (overridden from config below).
        self.include_kinds: List[str] = []
        self.exclude_kinds: List[str] = []
        self.validate_resources = True

        # Performance-tuning defaults (overridden from config below).
        self.enable_cache = True
        self.enable_parallel = True
        self.max_workers = 0  # 0 delegates the worker count to ParallelProcessor
        self.batch_size = 10
        self.show_progress = True

        if self.config_manager:
            # Basic extraction configuration.
            self.include_kinds = self.config_manager.get('extraction', 'include_kinds', default=[])
            self.exclude_kinds = self.config_manager.get('extraction', 'exclude_kinds', default=[])
            self.validate_resources = self.config_manager.get('extraction', 'validate_resources', default=True)

            # Performance-optimization configuration.
            perf_config = self.config_manager.get('performance', default={})
            self.enable_cache = perf_config.get('enable_cache', True)
            self.enable_parallel = perf_config.get('enable_parallel', True)
            self.max_workers = perf_config.get('max_workers', 0)
            self.batch_size = perf_config.get('batch_size', 10)
            self.show_progress = perf_config.get('show_progress', True)

        # Core pipeline components.
        self.parser = ResourceParser(logger=self.logger)
        self.detector = ResourceDetector(logger=self.logger)
        self.file_manager = FileManager(
            source_dir=source_dir,
            output_dir=output_dir,
            config_manager=config_manager,
            logger=self.logger
        )

        # Optional performance components. The attributes are always defined
        # (as None when disabled) so later accesses can never raise
        # AttributeError.
        self.cache_manager: Optional[CacheManager] = None
        if self.enable_cache:
            cache_dir = self.config_manager.get('paths', 'cache_dir', default='./temp/cache') if self.config_manager else './temp/cache'
            self.cache_manager = CacheManager(cache_dir=cache_dir)

        self.parallel_processor: Optional[ParallelProcessor] = None
        if self.enable_parallel:
            self.parallel_processor = ParallelProcessor(max_workers=self.max_workers)

        self.performance_monitor = PerformanceMonitor()

        # Register configured resource kinds with the detector.
        if self.config_manager:
            self._load_supported_resources_from_config()

    def _load_supported_resources_from_config(self) -> None:
        """Register the configured `include_kinds` with the resource detector."""
        try:
            if self.include_kinds:
                for resource_type in self.include_kinds:
                    if resource_type:
                        self.detector.add_supported_resource(resource_type)
                        self.logger.debug(f"从配置添加支持的资源类型: {resource_type}")
        except Exception as e:
            # Best effort: a bad config entry must not abort initialization.
            self.logger.warning(f"从配置加载支持的资源类型失败: {str(e)}")

    def _should_process_resource(self, resource_type: str) -> bool:
        """
        Decide whether a resource kind passes the include/exclude filters.

        Args:
            resource_type: The detected resource kind.

        Returns:
            True if the kind should be processed, False otherwise.
        """
        # With a non-empty include list, only listed kinds are processed.
        if self.include_kinds and resource_type not in self.include_kinds:
            self.logger.debug(f"跳过资源类型 {resource_type}，因为它不在包含列表中")
            return False

        # The exclude list always wins.
        if resource_type in self.exclude_kinds:
            self.logger.debug(f"跳过资源类型 {resource_type}，因为它在排除列表中")
            return False

        return True

    def process_directory(self) -> None:
        """
        Process every YAML/JSON file found in the source directory.

        A performance report is exported to the output directory even when
        processing fails part-way through.
        """
        self.logger.info(f"开始处理目录: {self.source_dir}")

        # Prepare the output directory.
        self.file_manager.prepare_output_directory()

        # Discover all YAML/JSON files.
        yaml_files = self.file_manager.find_yaml_files()
        total_files = len(yaml_files)

        if total_files == 0:
            self.logger.warning("未找到任何YAML/JSON文件")
            return

        self.logger.info(f"找到 {total_files} 个文件待处理")

        # Start performance monitoring.
        self.performance_monitor.start_monitoring(total_items=total_files)

        try:
            if self.enable_parallel:
                # Parallel path: the wrapper re-raises per-file failures,
                # which the processor is expected to collect.
                results = self.parallel_processor.process_with_progress(
                    items=yaml_files,
                    process_func=self._process_file_wrapper,
                    progress_callback=self._update_progress
                )

                # Tally outcomes: True = processed, False = skipped,
                # anything else (e.g. an exception placeholder) = failure.
                success_count = sum(1 for r in results if r)
                skip_count = sum(1 for r in results if r is False)
                fail_count = total_files - success_count - skip_count
            else:
                # Serial path.
                success_count = 0
                skip_count = 0
                fail_count = 0

                # Show a tqdm progress bar when configured.
                iterator = tqdm(yaml_files, desc="处理文件") if self.show_progress else yaml_files

                for file_path in iterator:
                    try:
                        processed = self.process_file(file_path)
                        if processed:
                            success_count += 1
                        else:
                            skip_count += 1
                    except Exception as e:
                        self.logger.error(f"处理文件失败: {file_path}, 原因: {str(e)}")
                        fail_count += 1
                    # Bug fix: include fail_count on every update so the
                    # progress counter keeps advancing monotonically after a
                    # failure (previously the success path omitted it).
                    self._update_progress(success_count + skip_count + fail_count, total_files)

            self.logger.info(f"处理完成。成功: {success_count}, 跳过: {skip_count}, 失败: {fail_count}")

        finally:
            # Always stop monitoring and export the report.
            self.performance_monitor.stop_monitoring()
            report_path = os.path.join(self.output_dir, 'performance_report.txt')
            self.performance_monitor.export_metrics(report_path)
            self.logger.info(f"性能报告已导出到: {report_path}")

    def _process_file_wrapper(self, file_path: str) -> bool:
        """
        Wrap `process_file` for parallel execution: log the failure here
        (worker-side), then re-raise so the pool can record it.
        """
        try:
            return self.process_file(file_path)
        except Exception as e:
            self.logger.error(f"处理文件失败: {file_path}, 原因: {str(e)}")
            raise

    def _update_progress(self, current: int, total: int) -> None:
        """
        Forward the current item count to the performance monitor.

        `total` is accepted to satisfy the callback signature; the monitor
        already knows the total from `start_monitoring`.
        """
        self.performance_monitor.update_progress(current)

    def process_file(self, file_path: str) -> bool:
        """
        Process a single YAML/JSON file.

        Args:
            file_path: Path of the file to process.

        Returns:
            True on success; False when the file was skipped (filtered kind
            or failed validation).
        """
        if self.enable_cache:
            # Bug fix: key the cache on the file's mtime as well as its path
            # so a modified file is re-parsed instead of served stale content.
            try:
                mtime = os.path.getmtime(file_path)
            except OSError:
                mtime = 0
            cache_key = f"file_content_{file_path}_{mtime}"
            content = self.cache_manager.get(cache_key)
            if content is None:
                content = self.parser.parse_file(file_path)
                self.cache_manager.set(cache_key, content)
        else:
            content = self.parser.parse_file(file_path)

        # Detect the resource kind.
        resource_type = self.detector.detect_resource_type(content)
        self.logger.info(f"检测到资源类型: {resource_type}")

        # Apply include/exclude filters.
        if not self._should_process_resource(resource_type):
            self.logger.info(f"跳过处理资源类型: {resource_type}")
            return False

        # Optionally validate the resource's basic structure.
        if self.validate_resources and not self._validate_resource(content, resource_type):
            self.logger.warning(f"资源验证失败，跳过处理: {file_path}")
            return False

        # Extract information; fall back to the generic extractor when no
        # dedicated one exists for this kind.
        try:
            extractor = ExtractorFactory.create_extractor(resource_type)
            extracted_data = extractor.extract(content, file_path)
            self.logger.debug(f"成功提取资源信息: {resource_type}")
        except ValueError as e:
            self.logger.warning(f"创建提取器失败: {str(e)}, 将使用默认提取器")
            extracted_data = self._default_extract(content, resource_type)

        # Render Markdown; fall back to the generic writer when no dedicated
        # one exists for this kind.
        try:
            writer = WriterFactory.create_writer(resource_type, config_manager=self.config_manager, logger=self.logger)

            # Compute the output path.
            output_path = self.file_manager.get_output_path(file_path, resource_type)

            # Write the Markdown file.
            writer.write(extracted_data, output_path)
            self.logger.info(f"成功写入Markdown: {output_path}")
            return True
        except ValueError as e:
            self.logger.warning(f"创建写入器失败: {str(e)}, 将使用默认写入器")
            output_path = self.file_manager.get_output_path(file_path, resource_type)
            self._default_write(extracted_data, output_path, resource_type)
            return True

    def _validate_resource(self, content: Dict[str, Any], resource_type: str) -> bool:
        """
        Validate the basic structure of a resource.

        Checks that `apiVersion`, `kind`, `metadata` and `metadata.name`
        are all present.

        Args:
            content: Parsed resource content.
            resource_type: The detected resource kind (logged context only).

        Returns:
            True if the resource has the mandatory fields.
        """
        if "apiVersion" not in content:
            self.logger.warning("资源缺少apiVersion字段")
            return False

        if "kind" not in content:
            self.logger.warning("资源缺少kind字段")
            return False

        if "metadata" not in content:
            self.logger.warning("资源缺少metadata字段")
            return False

        if "name" not in content.get("metadata", {}):
            self.logger.warning("资源metadata缺少name字段")
            return False

        return True

    def _default_extract(self, content: Dict[str, Any], resource_type: str) -> Dict[str, Any]:
        """
        Fallback extraction used when no dedicated extractor exists.

        Args:
            content: K8s resource content.
            resource_type: The detected resource kind.

        Returns:
            A dict with kind, name, namespace and the raw content.
        """
        self.logger.info(f"使用默认提取器处理资源类型: {resource_type}")
        result = {
            "kind": resource_type,
            "name": content.get("metadata", {}).get("name", "unknown"),
            "namespace": content.get("metadata", {}).get("namespace", "default"),
            "raw_content": content  # keep the original document for optional dumping
        }
        return result

    def _default_write(self, data: Dict[str, Any], output_path: str, resource_type: str) -> None:
        """
        Fallback Markdown rendering used when no dedicated writer exists.

        Args:
            data: Extracted resource information.
            output_path: Destination file path.
            resource_type: The detected resource kind.
        """
        self.logger.info(f"使用默认写入器处理资源类型: {resource_type}")

        # Read Markdown rendering options from config.
        include_raw_content = False
        if self.config_manager:
            include_raw_content = self.config_manager.get('markdown', 'include_raw_content', default=False)

        # Build a minimal Markdown document.
        content = [
            f"# {resource_type}: {data.get('name', 'unknown')}",
            "",
            f"**命名空间:** {data.get('namespace', 'default')}",
            "",
            "## 基本信息",
            ""
        ]

        # Append any extra extracted fields.
        for key, value in data.items():
            if key not in ["kind", "name", "namespace", "raw_content"]:
                content.append(f"**{key}:** {value}")
                content.append("")

        # Optionally append the raw document as a YAML code block.
        if include_raw_content and "raw_content" in data:
            content.append("## 原始内容")
            content.append("")
            content.append("```yaml")
            import yaml  # local import: only needed when raw content is emitted
            yaml_content = yaml.dump(data["raw_content"], default_flow_style=False)
            content.append(yaml_content)
            content.append("```")

        # Persist the document.
        self.file_manager.write_markdown_file(output_path, "\n".join(content))