"""
备份管理器

实现ES数据备份功能，包括增量备份、全量备份、备份策略等。
"""

import json
import time
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any, Union, Callable

from ..utils.logger import get_module_logger
from ..utils.es_client import ESClient
from ..utils.time_utils import TimeFieldDetector, TimeRangeProcessor
from ..utils.data_stream import DataStreamProcessor, StreamConfig
from ..utils.file_utils import FileManager, format_size
from ..utils.exceptions import BackupError, TimeFieldError, create_error_from_exception
from ..config.settings import get_settings
from .storage import StorageManager, BackupStorage

logger = get_module_logger(__name__)


@dataclass
class BackupStrategy:
    """Configuration describing how a single backup should be performed.

    Covers the backup type (full / incremental / differential), an optional
    time window used by time-based strategies, and tuning knobs for
    compression, chunking and parallelism.
    """
    strategy_type: str = "full"  # full, incremental, differential
    time_field: Optional[str] = None  # document field used for time filtering / sorting
    time_range_start: Optional[datetime] = None  # lower bound of the backup window
    time_range_end: Optional[datetime] = None  # upper bound of the backup window
    include_mappings: bool = True  # also back up the index mapping
    include_settings: bool = True  # also back up the index settings
    compression: str = "gzip"  # chunk-file compression: "gzip" or "none"
    chunk_size: str = "100MB"  # max uncompressed payload per chunk file (human-readable size)
    batch_size: int = 1000  # documents fetched per streaming batch
    parallel_workers: int = 4  # concurrency hint for the stream processor
    overlap_buffer: str = "1h"  # extra window overlap to avoid missing late-arriving documents
    sort_by_time: bool = True  # stream documents ordered by time_field when one is set
    validate_data: bool = True  # verify chunk files after the backup completes


@dataclass
class BackupMetadata:
    """Runtime record of a single backup job.

    Tracks source/destination, timing, progress counters and the list of
    chunk files produced.  Instances are serialized to ``metadata.json``
    inside the backup directory via :func:`dataclasses.asdict`.
    """
    backup_id: str  # unique identifier, also used as the backup directory name
    strategy: BackupStrategy  # strategy the job was started with
    source_index: str  # ES index being backed up
    backup_path: str  # destination directory on disk
    start_time: datetime
    end_time: Optional[datetime] = None
    status: str = "running"  # running, completed, failed, cancelled
    total_documents: int = 0  # estimated document count at plan time
    backed_up_documents: int = 0  # documents written so far
    total_size_bytes: int = 0  # cumulative uncompressed payload size
    compressed_size_bytes: int = 0
    error_message: Optional[str] = None
    # default_factory gives each instance its own list; the previous
    # ``List[str] = None`` default contradicted the annotation and relied
    # entirely on __post_init__ to repair it.
    chunks: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Backward compatibility: callers that explicitly pass chunks=None
        # still end up with an empty list.
        if self.chunks is None:
            self.chunks = []


class BackupStrategyManager:
    """Backup strategy manager.

    Creates, validates and optimizes :class:`BackupStrategy` instances and
    builds executable backup plans with size / duration / resource estimates.
    """

    # Names of the built-in strategy presets.
    _PRESET_NAMES = ("full", "incremental_daily", "incremental_hourly", "time_range")

    def __init__(self):
        """Initialize the backup strategy manager."""
        self.time_detector = TimeFieldDetector()
        self.time_processor = TimeRangeProcessor()

        # Kept for backward compatibility with code that inspects this
        # mapping.  NOTE: the time windows stored here are frozen at
        # construction time; create_strategy() rebuilds presets on demand so
        # that incremental windows are always relative to the current time.
        self.predefined_strategies = {
            name: self._build_predefined(name) for name in self._PRESET_NAMES
        }

    @staticmethod
    def _build_predefined(strategy_type: str) -> Optional[BackupStrategy]:
        """Build a predefined strategy with a fresh time window.

        Args:
            strategy_type: Preset name.

        Returns:
            A new strategy instance, or None for unknown presets.
        """
        now = datetime.now(timezone.utc)
        if strategy_type == "full":
            return BackupStrategy(
                strategy_type="full",
                include_mappings=True,
                include_settings=True
            )
        if strategy_type == "incremental_daily":
            return BackupStrategy(
                strategy_type="incremental",
                time_range_start=now - timedelta(days=1),
                time_range_end=now,
                overlap_buffer="1h"
            )
        if strategy_type == "incremental_hourly":
            return BackupStrategy(
                strategy_type="incremental",
                time_range_start=now - timedelta(hours=1),
                time_range_end=now,
                overlap_buffer="5m"
            )
        if strategy_type == "time_range":
            return BackupStrategy(
                strategy_type="time_range",
                sort_by_time=True
            )
        return None

    def create_strategy(
        self,
        strategy_type: str = "full",
        **kwargs
    ) -> BackupStrategy:
        """Create a backup strategy.

        Preset names ("full", "incremental_daily", "incremental_hourly",
        "time_range") are rebuilt on every call so their time windows are
        relative to "now" rather than to manager construction time.

        Args:
            strategy_type: Strategy type or preset name.
            **kwargs: Field overrides applied on top of the preset.

        Returns:
            The backup strategy object.
        """
        base_strategy = self._build_predefined(strategy_type)
        if base_strategy is not None:
            strategy_dict = asdict(base_strategy)
        else:
            strategy_dict = {"strategy_type": strategy_type}

        # Caller-supplied parameters win over preset defaults.
        strategy_dict.update(kwargs)

        return BackupStrategy(**strategy_dict)

    def validate_strategy(
        self,
        strategy: BackupStrategy,
        es_client: ESClient,
        index: str
    ) -> Dict[str, Any]:
        """Validate a backup strategy against a live index.

        Args:
            strategy: Backup strategy.
            es_client: ES client.
            index: Index name.

        Returns:
            Validation result with "valid", "warnings", "errors" and
            "suggestions" keys.
        """
        validation_result = {
            "valid": True,
            "warnings": [],
            "errors": [],
            "suggestions": []
        }

        try:
            # The index must exist; everything else is moot otherwise.
            if not es_client.index_exists(index):
                validation_result["errors"].append(f"索引不存在: {index}")
                validation_result["valid"] = False
                return validation_result

            # Fetch the mapping and unwrap the actual mapping structure.
            mapping_response = es_client.get_mapping(index)
            mapping = mapping_response.get(index, {}).get('mappings', {})

            # Validate the configured time field, or try to suggest one for
            # time-based strategies.
            if strategy.time_field:
                try:
                    self.time_detector.validate_time_field(strategy.time_field, mapping)
                except TimeFieldError as e:
                    validation_result["errors"].append(str(e))
                    validation_result["valid"] = False
            elif strategy.strategy_type in ["incremental", "time_range"]:
                suggested_field = self.time_detector.suggest_time_field(mapping)
                if suggested_field:
                    validation_result["suggestions"].append(
                        f"建议使用时间字段: {suggested_field}"
                    )
                else:
                    validation_result["warnings"].append(
                        "未找到合适的时间字段，可能影响增量备份"
                    )

            # Sanity-check the time range.
            if strategy.time_range_start and strategy.time_range_end:
                if strategy.time_range_start > strategy.time_range_end:
                    validation_result["errors"].append("开始时间不能晚于结束时间")
                    validation_result["valid"] = False

                # Very wide windows are allowed but flagged.
                time_span = strategy.time_range_end - strategy.time_range_start
                if time_span.days > 365:
                    validation_result["warnings"].append(
                        f"时间范围过大 ({time_span.days} 天)，可能影响性能"
                    )

            # Tuning-parameter sanity checks.
            if strategy.parallel_workers > 16:
                validation_result["warnings"].append(
                    "并行工作线程数过多，可能影响ES集群性能"
                )

            if strategy.chunk_size:
                try:
                    from ..utils.file_utils import parse_size_string
                    chunk_bytes = parse_size_string(strategy.chunk_size)
                    if chunk_bytes > 1024 * 1024 * 1024:  # 1GB
                        validation_result["warnings"].append(
                            "分片大小过大，可能影响内存使用"
                        )
                except ValueError as e:
                    validation_result["errors"].append(f"无效的分片大小格式: {e}")
                    validation_result["valid"] = False

        except Exception as e:
            validation_result["errors"].append(f"验证过程中出错: {e}")
            validation_result["valid"] = False

        return validation_result

    def optimize_strategy(
        self,
        strategy: BackupStrategy,
        es_client: ESClient,
        index: str
    ) -> BackupStrategy:
        """Return an optimized copy of a backup strategy.

        Optimization is best-effort: any failure is logged and the
        (copied) original strategy is returned unchanged.

        Args:
            strategy: Original strategy.
            es_client: ES client.
            index: Index name.

        Returns:
            The optimized strategy (always a new instance).
        """
        optimized = BackupStrategy(**asdict(strategy))

        try:
            # Fetch the mapping and unwrap the actual mapping structure.
            mapping_response = es_client.get_mapping(index)
            mapping = mapping_response.get(index, {}).get('mappings', {})

            # Auto-select a time field for time-based strategies.
            if not optimized.time_field and optimized.strategy_type in ["incremental", "time_range"]:
                suggested_field = self.time_detector.suggest_time_field(mapping)
                if suggested_field:
                    optimized.time_field = suggested_field
                    logger.info(f"自动选择时间字段: {suggested_field}")

            # Scale parallelism with cluster size, capped at 8 workers.
            cluster_info = es_client.get_cluster_info()
            if cluster_info:
                node_count = len(cluster_info.get('nodes', {}))
                if node_count > 0:
                    optimal_workers = min(node_count * 2, 8)
                    if optimized.parallel_workers != optimal_workers:
                        optimized.parallel_workers = optimal_workers
                        logger.info(f"优化并行工作线程数: {optimal_workers}")

            # Chunk sizing: full backups favor larger chunks, incremental
            # backups smaller ones.
            if optimized.strategy_type == "full":
                optimized.chunk_size = "200MB"
            elif optimized.strategy_type == "incremental":
                optimized.chunk_size = "50MB"

        except Exception as e:
            logger.warning(f"策略优化失败: {e}")

        return optimized

    def estimate_backup_size(
        self,
        strategy: BackupStrategy,
        es_client: ESClient,
        index: str
    ) -> Dict[str, Any]:
        """Estimate the size of a backup.

        Uses the count API for the document total and a small sample of
        documents to derive an average document size.

        Args:
            strategy: Backup strategy.
            es_client: ES client.
            index: Index name.

        Returns:
            Size estimation info (documents, raw/compressed bytes, method).
        """
        estimation = {
            "estimated_documents": 0,
            "estimated_size_bytes": 0,
            "estimated_compressed_size_bytes": 0,
            "compression_ratio": 0.3,  # default gzip compression ratio assumption
            "estimation_method": "unknown"
        }

        try:
            # Build the same query the backup itself would run.
            if strategy.strategy_type == "full":
                query = {"match_all": {}}
            else:
                if strategy.time_field and (strategy.time_range_start or strategy.time_range_end):
                    from ..utils.query_builder import QueryBuilder
                    query_builder = QueryBuilder()
                    query = query_builder.build_time_range_query(
                        strategy.time_field,
                        strategy.time_range_start,
                        strategy.time_range_end
                    )
                else:
                    query = {"match_all": {}}

            # Exact document count via the count API.
            count_result = es_client.client.count(index=index, body={"query": query})
            estimation["estimated_documents"] = count_result["count"]
            estimation["estimation_method"] = "count_api"

            # Estimate total bytes from the average size of a small sample.
            if estimation["estimated_documents"] > 0:
                sample_query = {
                    "size": min(100, estimation["estimated_documents"]),
                    "query": query,
                    "_source": True
                }

                sample_result = es_client.search(index=index, body=sample_query)
                if sample_result["hits"]["hits"]:
                    total_sample_size = 0
                    for hit in sample_result["hits"]["hits"]:
                        doc_size = len(json.dumps(hit["_source"], ensure_ascii=False).encode('utf-8'))
                        total_sample_size += doc_size

                    avg_doc_size = total_sample_size / len(sample_result["hits"]["hits"])
                    estimation["estimated_size_bytes"] = int(avg_doc_size * estimation["estimated_documents"])
                    estimation["estimated_compressed_size_bytes"] = int(
                        estimation["estimated_size_bytes"] * estimation["compression_ratio"]
                    )
                    estimation["estimation_method"] = "sample_based"

        except Exception as e:
            logger.warning(f"备份大小估算失败: {e}")

        return estimation

    def create_backup_plan(
        self,
        strategy: BackupStrategy,
        es_client: ESClient,
        index: str,
        output_dir: str,
        backup_name: Optional[str] = None
    ) -> Dict[str, Any]:
        """Create an executable backup plan.

        Validates and optimizes the strategy, estimates sizes, and derives
        the backup id and output path.

        Args:
            strategy: Backup strategy.
            es_client: ES client.
            index: Index name.
            output_dir: Output directory.
            backup_name: Custom backup name (optional).

        Returns:
            The backup plan.

        Raises:
            BackupError: If strategy validation fails.
        """
        # Fail fast on an invalid strategy.
        validation = self.validate_strategy(strategy, es_client, index)
        if not validation["valid"]:
            raise BackupError(f"备份策略验证失败: {validation['errors']}")

        optimized_strategy = self.optimize_strategy(strategy, es_client, index)

        size_estimation = self.estimate_backup_size(optimized_strategy, es_client, index)

        # Derive the backup id: either the caller's name or an
        # index/strategy/timestamp composite (Beijing time, UTC+8).
        if backup_name:
            backup_id = backup_name
        else:
            beijing_tz = timezone(timedelta(hours=8))
            beijing_time = datetime.now(beijing_tz)
            timestamp = beijing_time.strftime("%Y%m%d_%H%M%S")
            backup_id = f"{index}-{optimized_strategy.strategy_type}-{timestamp}"

        plan = {
            "backup_id": backup_id,
            "strategy": optimized_strategy,
            "validation": validation,
            "size_estimation": size_estimation,
            "output_path": str(Path(output_dir) / backup_id),
            "estimated_duration": self._estimate_duration(size_estimation),
            "resource_requirements": self._estimate_resources(optimized_strategy, size_estimation)
        }

        return plan

    def _estimate_duration(self, size_estimation: Dict[str, Any]) -> Dict[str, Any]:
        """Estimate the backup duration.

        Based on an empirical throughput assumption of 10 MB/sec.

        Args:
            size_estimation: Size estimation.

        Returns:
            Duration estimation.
        """
        processing_rate_mb_per_sec = 10
        estimated_mb = size_estimation["estimated_size_bytes"] / (1024 * 1024)
        estimated_seconds = estimated_mb / processing_rate_mb_per_sec

        return {
            "estimated_seconds": int(estimated_seconds),
            "estimated_minutes": int(estimated_seconds / 60),
            "processing_rate_assumption": f"{processing_rate_mb_per_sec} MB/sec"
        }

    def _estimate_resources(
        self,
        strategy: BackupStrategy,
        size_estimation: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Estimate resource requirements.

        Args:
            strategy: Backup strategy.
            size_estimation: Size estimation.

        Returns:
            Resource requirement estimation.
        """
        from ..utils.file_utils import parse_size_string

        # Memory: worst case is one full chunk buffered per worker.
        chunk_size_bytes = parse_size_string(strategy.chunk_size)
        estimated_memory_mb = (chunk_size_bytes * strategy.parallel_workers) / (1024 * 1024)

        # Disk: compressed output size.
        estimated_disk_mb = size_estimation["estimated_compressed_size_bytes"] / (1024 * 1024)

        return {
            "estimated_memory_mb": int(estimated_memory_mb),
            "estimated_disk_mb": int(estimated_disk_mb),
            "parallel_workers": strategy.parallel_workers,
            "network_intensive": True,
            "cpu_intensive": strategy.compression != "none"
        }


class BackupManager:
    """Backup manager.

    Orchestrates the actual backup of an ES index: plans the job via
    :class:`BackupStrategyManager`, streams documents into (optionally
    gzip-compressed) chunk files, saves index metadata, and validates the
    result.
    """

    def __init__(self, es_client: Optional[ESClient] = None, config: Optional[Dict[str, Any]] = None):
        """Initialize the backup manager.

        Args:
            es_client: ES client; a default one is created when omitted.
            config: Configuration; falls back to the global backup settings.
        """
        self.es_client = es_client or ESClient()
        self.config = config or get_settings().config.backup.dict()

        self.strategy_manager = BackupStrategyManager()
        self.file_manager = FileManager()
        self.active_backups = {}  # backup_id -> BackupMetadata of running jobs

        logger.info("备份管理器初始化完成")

    def backup_index(
        self,
        index: str,
        output_dir: str,
        strategy: Optional[Union[BackupStrategy, str, Dict[str, Any]]] = None,
        backup_name: Optional[str] = None,
        batch_size: Optional[int] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
        **kwargs
    ) -> BackupMetadata:
        """Back up an index.

        Args:
            index: Index name.
            output_dir: Directory that will contain the backup folder.
            strategy: Backup strategy - a :class:`BackupStrategy`, a preset
                name (str), a dict of strategy fields, or None for a full
                backup.
            backup_name: Custom backup id; auto-generated when omitted.
            batch_size: Overrides the strategy's streaming batch size.
            progress_callback: Called periodically with progress info.
            **kwargs: Extra strategy parameters (used when *strategy* is a
                preset name or None).

        Returns:
            The backup metadata.

        Raises:
            BackupError: When planning or execution fails.
        """
        logger.info(f"开始备份索引: {index}")

        try:
            # Normalize the strategy argument into a BackupStrategy.
            if isinstance(strategy, str):
                backup_strategy = self.strategy_manager.create_strategy(strategy, **kwargs)
            elif isinstance(strategy, dict):
                backup_strategy = BackupStrategy(**strategy)
            elif isinstance(strategy, BackupStrategy):
                backup_strategy = strategy
            else:
                backup_strategy = self.strategy_manager.create_strategy("full", **kwargs)

            # Validate, optimize and estimate before touching any data.
            plan = self.strategy_manager.create_backup_plan(
                backup_strategy, self.es_client, index, output_dir, backup_name
            )

            metadata = BackupMetadata(
                backup_id=plan["backup_id"],
                strategy=plan["strategy"],
                source_index=index,
                backup_path=plan["output_path"],
                start_time=datetime.now(timezone.utc),
                total_documents=plan["size_estimation"]["estimated_documents"]
            )

            # Register the job so it is visible to status queries.
            self.active_backups[metadata.backup_id] = metadata

            self._execute_backup(metadata, plan, batch_size, progress_callback)

            metadata.end_time = datetime.now(timezone.utc)
            metadata.status = "completed"

            # Persist the final metadata state.
            self._save_metadata(metadata, Path(metadata.backup_path) / "metadata.json")

            logger.info(f"备份完成: {metadata.backup_id}")
            return metadata

        except Exception as e:
            logger.error(f"备份失败: {index}, {e}")
            if 'metadata' in locals():
                metadata.status = "failed"
                metadata.error_message = str(e)
                metadata.end_time = datetime.now(timezone.utc)
                # Persist the failed state, but never let a secondary I/O
                # error mask the original failure re-raised below.
                try:
                    self._save_metadata(metadata, Path(metadata.backup_path) / "metadata.json")
                except Exception as save_error:
                    logger.warning(f"保存失败状态元数据失败: {save_error}")
            raise BackupError(f"备份失败: {index}", index, e)

        finally:
            # Drop the job from the active registry regardless of outcome.
            if 'metadata' in locals() and metadata.backup_id in self.active_backups:
                del self.active_backups[metadata.backup_id]

    def _execute_backup(
        self,
        metadata: BackupMetadata,
        plan: Dict[str, Any],
        batch_size: Optional[int] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None
    ) -> None:
        """Execute the backup steps for a planned job.

        Args:
            metadata: Backup metadata (updated in place).
            plan: Backup plan.
            batch_size: Overrides the strategy's streaming batch size.
            progress_callback: Called periodically with progress info.
        """
        strategy = metadata.strategy

        # Ensure the output directory exists.
        backup_path = Path(metadata.backup_path)
        backup_path.mkdir(parents=True, exist_ok=True)

        # Write an initial metadata snapshot.
        self._save_metadata(metadata, backup_path / "metadata.json")

        # Back up mappings/settings when requested.
        if strategy.include_mappings or strategy.include_settings:
            self._backup_index_metadata(metadata, backup_path)

        # Stream the documents to chunk files.
        self._backup_data(metadata, backup_path, batch_size, progress_callback)

        # Verify chunk files when requested.
        if strategy.validate_data:
            self._validate_backup(metadata, backup_path)

    def _backup_index_metadata(self, metadata: BackupMetadata, backup_path: Path) -> None:
        """Back up index metadata (mappings and settings).

        Args:
            metadata: Backup metadata.
            backup_path: Backup directory.
        """
        logger.info("备份索引元数据")

        try:
            index_metadata = {}

            if metadata.strategy.include_mappings:
                mappings = self.es_client.get_mapping(metadata.source_index)
                index_metadata["mappings"] = mappings

            if metadata.strategy.include_settings:
                settings = self.es_client.get_settings(metadata.source_index)
                index_metadata["settings"] = settings

            # Save alongside the data chunks.
            metadata_file = backup_path / "index_metadata.json"
            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(index_metadata, f, indent=2, ensure_ascii=False, default=str)

            logger.info(f"索引元数据已保存: {metadata_file}")

        except Exception as e:
            logger.error(f"备份索引元数据失败: {e}")
            raise

    @staticmethod
    def _report_progress(
        metadata: BackupMetadata,
        chunks_created: int,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]]
    ) -> None:
        """Invoke the progress callback with a standard payload, if set.

        Args:
            metadata: Backup metadata with current counters.
            chunks_created: Number of chunk files written so far.
            progress_callback: Callback to invoke; no-op when None.
        """
        if not progress_callback:
            return
        progress = (
            min(metadata.backed_up_documents / metadata.total_documents, 1.0)
            if metadata.total_documents > 0 else 0
        )
        progress_callback({
            "step": "备份数据",
            "documents_processed": metadata.backed_up_documents,
            "total_documents": metadata.total_documents,
            "progress": progress,
            "chunks_created": chunks_created,
            "status": "running"
        })

    def _backup_data(
        self,
        metadata: BackupMetadata,
        backup_path: Path,
        batch_size: Optional[int] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None
    ) -> None:
        """Stream documents from ES into chunk files on disk.

        Documents accumulate in memory until either the configured chunk
        byte size or a 10 000-document cap is reached, then are flushed as
        one chunk file; metadata is checkpointed after every flush.

        Args:
            metadata: Backup metadata (updated in place with progress).
            backup_path: Backup directory.
            batch_size: Overrides the strategy's streaming batch size.
            progress_callback: Called periodically with progress info.
        """
        logger.info("开始备份数据")

        try:
            query = self._build_backup_query(metadata.strategy)

            # Use the caller's batch size, falling back to the strategy's.
            effective_batch_size = batch_size or getattr(metadata.strategy, 'batch_size', 1000)
            stream_config = StreamConfig(
                batch_size=effective_batch_size,
                scroll_timeout="5m",
                max_concurrent_requests=metadata.strategy.parallel_workers
            )

            with DataStreamProcessor(self.es_client, stream_config) as processor:
                chunk_index = 0
                current_chunk_docs = []
                current_chunk_size = 0

                from ..utils.file_utils import parse_size_string
                max_chunk_size = parse_size_string(metadata.strategy.chunk_size)

                # Sort by the time field only when the strategy asks for it.
                sort_fields = (
                    [{metadata.strategy.time_field: "asc"}]
                    if metadata.strategy.time_field and metadata.strategy.sort_by_time
                    else None
                )

                for document in processor.stream_documents(
                    metadata.source_index,
                    query,
                    sort_fields=sort_fields
                ):
                    current_chunk_docs.append(document)

                    # Estimate the serialized size for chunking/statistics.
                    doc_size = len(json.dumps(document, ensure_ascii=False).encode('utf-8'))
                    current_chunk_size += doc_size
                    metadata.total_size_bytes += doc_size
                    metadata.backed_up_documents += 1

                    # Report progress every 1000 documents.
                    if metadata.backed_up_documents % 1000 == 0:
                        self._report_progress(metadata, chunk_index, progress_callback)

                    # Flush when the chunk reaches the size or count cap.
                    if current_chunk_size >= max_chunk_size or len(current_chunk_docs) >= 10000:
                        chunk_file = self._write_chunk(
                            current_chunk_docs,
                            backup_path,
                            chunk_index,
                            metadata.strategy.compression
                        )
                        metadata.chunks.append(str(chunk_file))

                        # Start a fresh chunk.
                        current_chunk_docs = []
                        current_chunk_size = 0
                        chunk_index += 1

                        # Checkpoint metadata so progress survives a crash.
                        self._save_metadata(metadata, backup_path / "metadata.json")

                        if chunk_index % 10 == 0:
                            logger.info(f"已备份 {metadata.backed_up_documents} 个文档，{chunk_index} 个分片")

                        # Report chunk completion.
                        self._report_progress(metadata, chunk_index, progress_callback)

                # Flush the trailing partial chunk.
                if current_chunk_docs:
                    chunk_file = self._write_chunk(
                        current_chunk_docs,
                        backup_path,
                        chunk_index,
                        metadata.strategy.compression
                    )
                    metadata.chunks.append(str(chunk_file))

            logger.info(f"数据备份完成，共 {metadata.backed_up_documents} 个文档，{len(metadata.chunks)} 个分片")

        except Exception as e:
            logger.error(f"备份数据失败: {e}")
            raise

    def _build_backup_query(self, strategy: BackupStrategy) -> Dict[str, Any]:
        """Build the ES query for a backup.

        Args:
            strategy: Backup strategy.

        Returns:
            Query dict (match_all, or a time-range query for time-based
            strategies with a configured time field and window).
        """
        if strategy.strategy_type == "full":
            return {"match_all": {}}

        if strategy.time_field and (strategy.time_range_start or strategy.time_range_end):
            from ..utils.query_builder import QueryBuilder
            query_builder = QueryBuilder()
            return query_builder.build_time_range_query(
                strategy.time_field,
                strategy.time_range_start,
                strategy.time_range_end
            )

        return {"match_all": {}}

    def _write_chunk(
        self,
        documents: List[Dict[str, Any]],
        backup_path: Path,
        chunk_index: int,
        compression: str
    ) -> Path:
        """Write one data chunk to disk.

        Args:
            documents: Document list.
            backup_path: Backup directory.
            chunk_index: Chunk index (used for the file name).
            compression: Compression mode ("gzip" or anything else for none).

        Returns:
            Path of the chunk file.
        """
        chunk_filename = f"chunk_{chunk_index:06d}.json"
        if compression == "gzip":
            chunk_filename += ".gz"

        chunk_path = backup_path / chunk_filename

        # Self-describing chunk payload.
        chunk_data = {
            "chunk_index": chunk_index,
            "document_count": len(documents),
            "documents": documents,
            "created_at": datetime.now(timezone.utc).isoformat()
        }

        if compression == "gzip":
            import gzip
            with gzip.open(chunk_path, 'wt', encoding='utf-8') as f:
                json.dump(chunk_data, f, ensure_ascii=False, default=str)
        else:
            with open(chunk_path, 'w', encoding='utf-8') as f:
                json.dump(chunk_data, f, ensure_ascii=False, default=str)

        logger.debug(f"分片已写入: {chunk_path}, {len(documents)} 个文档")
        return chunk_path

    def _save_metadata(self, metadata: BackupMetadata, metadata_path: Path) -> None:
        """Save backup metadata as JSON.

        Args:
            metadata: Backup metadata.
            metadata_path: Metadata file path.
        """
        metadata_dict = asdict(metadata)

        # Recursively convert datetimes; default=str below is a last-resort
        # fallback for anything else that is not JSON-serializable.
        metadata_dict = self._convert_datetime_to_string(metadata_dict)

        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata_dict, f, indent=2, ensure_ascii=False, default=str)

    def _convert_datetime_to_string(self, obj):
        """Recursively convert datetime objects to ISO-8601 strings.

        Args:
            obj: Object to convert (dicts/lists are walked recursively).

        Returns:
            The converted object; non-datetime leaves are returned as-is.
        """
        if isinstance(obj, datetime):
            return obj.isoformat()
        elif isinstance(obj, dict):
            return {key: self._convert_datetime_to_string(value) for key, value in obj.items()}
        elif isinstance(obj, list):
            return [self._convert_datetime_to_string(item) for item in obj]
        else:
            return obj

    def _validate_backup(self, metadata: BackupMetadata, backup_path: Path) -> None:
        """Validate backup integrity.

        Checks that every chunk file exists and parses, and that the total
        document count across chunks matches the metadata counters (a
        mismatch is only a warning).

        Args:
            metadata: Backup metadata.
            backup_path: Backup directory.

        Raises:
            BackupError: When chunk files are missing or corrupt.
        """
        logger.info("验证备份完整性")

        try:
            missing_chunks = []
            total_docs_in_chunks = 0

            for chunk_file in metadata.chunks:
                chunk_path = Path(chunk_file)
                if not chunk_path.exists():
                    missing_chunks.append(str(chunk_path))
                else:
                    # A chunk that exists but does not parse counts as missing.
                    try:
                        if chunk_path.suffix == '.gz':
                            import gzip
                            with gzip.open(chunk_path, 'rt', encoding='utf-8') as f:
                                chunk_data = json.load(f)
                        else:
                            with open(chunk_path, 'r', encoding='utf-8') as f:
                                chunk_data = json.load(f)

                        total_docs_in_chunks += chunk_data.get('document_count', 0)
                    except Exception as e:
                        logger.warning(f"分片文件损坏: {chunk_path}, {e}")
                        missing_chunks.append(str(chunk_path))

            if missing_chunks:
                raise BackupError(f"备份验证失败，缺失分片: {missing_chunks}")

            if total_docs_in_chunks != metadata.backed_up_documents:
                logger.warning(
                    f"文档数量不匹配: 元数据显示 {metadata.backed_up_documents}, "
                    f"分片中实际 {total_docs_in_chunks}"
                )

            logger.info("备份验证通过")

        except Exception as e:
            logger.error(f"备份验证失败: {e}")
            raise

    def get_backup_status(self, backup_id: str) -> Optional[BackupMetadata]:
        """Get the status of an active backup.

        Args:
            backup_id: Backup id.

        Returns:
            The backup metadata, or None when no such active job exists.
        """
        return self.active_backups.get(backup_id)

    def list_active_backups(self) -> List[BackupMetadata]:
        """List active backup jobs.

        Returns:
            List of active backup metadata.
        """
        return list(self.active_backups.values())

    def cancel_backup(self, backup_id: str) -> bool:
        """Cancel a backup job.

        NOTE(review): this only marks the metadata and deregisters the job;
        the streaming loop does not observe cancellation mid-run.

        Args:
            backup_id: Backup id.

        Returns:
            Whether the cancellation succeeded.
        """
        if backup_id in self.active_backups:
            metadata = self.active_backups[backup_id]
            metadata.status = "cancelled"
            metadata.end_time = datetime.now(timezone.utc)
            del self.active_backups[backup_id]
            logger.info(f"备份任务已取消: {backup_id}")
            return True
        return False

class BackupExecutionEngine:
    """Backup execution engine.

    Performs the actual backup work: streaming documents into chunk
    files, progress reporting, cooperative cancellation, and metadata /
    report generation.
    """

    def __init__(self, es_client: ESClient, config: Optional[Dict[str, Any]] = None):
        """Initialize the backup execution engine.

        Args:
            es_client: Elasticsearch client used for all index reads.
            config: Optional configuration dict; falls back to the global
                backup settings when omitted.
        """
        self.es_client = es_client
        self.config = config or get_settings().config.backup.dict()

        self.storage_manager = StorageManager()
        # Callbacks invoked with a progress dict (see _report_progress).
        self.progress_callbacks: List[Callable[[Dict[str, Any]], None]] = []
        # Cooperative cancellation flag, checked between steps and documents.
        self.is_cancelled = False

        logger.info("备份执行引擎初始化完成")

    def execute_backup(
        self,
        metadata: BackupMetadata,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
        batch_size: Optional[int] = None
    ) -> None:
        """Execute a backup task.

        Args:
            metadata: Backup metadata describing the task; mutated in place
                with progress, status and result information.
            progress_callback: Optional callback registered for the duration
                of this call only.
            batch_size: Optional scroll batch-size override.

        Raises:
            Exception: Any failure is re-raised after the metadata has been
                marked as failed.
        """
        if progress_callback:
            self.progress_callbacks.append(progress_callback)

        try:
            logger.info(f"开始执行备份: {metadata.backup_id}")

            # Create the backup storage rooted at the backup path.
            backup_storage = self.storage_manager.create_backup_storage(
                Path(metadata.backup_path)
            )

            # Run the backup pipeline.
            self._execute_backup_steps(metadata, backup_storage, batch_size)

            logger.info(f"备份执行完成: {metadata.backup_id}")

        except Exception as e:
            logger.error(f"备份执行失败: {e}")
            metadata.status = "failed"
            metadata.error_message = str(e)
            raise

        finally:
            # BUGFIX: remove only the callback registered for this call.
            # The previous clear() also wiped callbacks registered via
            # add_progress_callback(), silently breaking persistent observers.
            if progress_callback and progress_callback in self.progress_callbacks:
                self.progress_callbacks.remove(progress_callback)

    def _execute_backup_steps(
        self,
        metadata: BackupMetadata,
        backup_storage: "BackupStorage",
        batch_size: Optional[int] = None
    ) -> None:
        """Run the ordered backup pipeline, reporting progress per step.

        Args:
            metadata: Backup metadata (mutated in place).
            backup_storage: Storage handle for the backup.
            batch_size: Optional scroll batch-size override.
        """
        steps = [
            ("准备备份", lambda m, bs: self._prepare_backup(m, bs)),
            ("备份索引元数据", lambda m, bs: self._backup_index_metadata(m, bs)),
            ("备份数据", lambda m, bs: self._backup_data(m, bs, batch_size)),
            ("验证备份", lambda m, bs: self._validate_backup(m, bs)),
            ("完成备份", lambda m, bs: self._finalize_backup(m, bs))
        ]

        total_steps = len(steps)

        for step_index, (step_name, step_func) in enumerate(steps):
            # Honour cancellation between steps.
            if self.is_cancelled:
                metadata.status = "cancelled"
                return

            logger.info(f"执行步骤 {step_index + 1}/{total_steps}: {step_name}")

            # Report progress before running the step.
            self._report_progress({
                "step": step_name,
                "step_index": step_index,
                "total_steps": total_steps,
                "progress": step_index / total_steps,
                "status": "running"
            })

            step_func(metadata, backup_storage)

        # Final progress report once every step succeeded.
        self._report_progress({
            "step": "完成",
            "step_index": total_steps,
            "total_steps": total_steps,
            "progress": 1.0,
            "status": "completed"
        })

    def _prepare_backup(
        self,
        metadata: BackupMetadata,
        backup_storage: "BackupStorage"
    ) -> None:
        """Create the backup directory and persist the initial metadata."""
        backup_path = Path(metadata.backup_path)
        backup_path.mkdir(parents=True, exist_ok=True)

        # Checkpoint the initial metadata so a crash leaves a record.
        self._save_metadata(metadata, backup_path / "metadata.json")

        logger.debug("备份准备完成")

    def _backup_index_metadata(
        self,
        metadata: BackupMetadata,
        backup_storage: "BackupStorage"
    ) -> None:
        """Save the source index's mappings and/or settings as JSON."""
        # Skip entirely when the strategy requests neither.
        if not (metadata.strategy.include_mappings or metadata.strategy.include_settings):
            return

        logger.info("备份索引元数据")

        try:
            index_metadata = {}

            if metadata.strategy.include_mappings:
                mappings = self.es_client.get_mapping(metadata.source_index)
                index_metadata["mappings"] = mappings

            if metadata.strategy.include_settings:
                settings = self.es_client.get_settings(metadata.source_index)
                index_metadata["settings"] = settings

            # Persist next to the data chunks; default=str covers any
            # non-JSON-native values in the ES response.
            metadata_file = Path(metadata.backup_path) / "index_metadata.json"
            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(index_metadata, f, indent=2, ensure_ascii=False, default=str)

            logger.info(f"索引元数据已保存: {metadata_file}")

        except Exception as e:
            logger.error(f"备份索引元数据失败: {e}")
            raise

    def _backup_data(
        self,
        metadata: BackupMetadata,
        backup_storage: "BackupStorage",
        batch_size: Optional[int] = None
    ) -> None:
        """Stream documents from the source index into chunk files.

        Args:
            metadata: Backup metadata (mutated with counts and chunk paths).
            backup_storage: Storage providing the chunk writer.
            batch_size: Optional scroll batch-size override; falls back to
                the strategy's batch_size, then 1000.
        """
        logger.info("开始备份数据")

        try:
            # Build the document query from the backup strategy.
            query = self._build_backup_query(metadata.strategy)

            # StreamConfig and DataStreamProcessor are already imported at
            # module level; the former redundant local imports were removed.
            effective_batch_size = batch_size or getattr(metadata.strategy, 'batch_size', 1000)
            stream_config = StreamConfig(
                batch_size=effective_batch_size,
                scroll_timeout="5m",
                max_concurrent_requests=metadata.strategy.parallel_workers
            )

            # Writer that splits the document stream into chunk files.
            chunk_writer = backup_storage.create_chunk_writer("chunk")

            with DataStreamProcessor(self.es_client, stream_config) as processor:
                doc_count = 0

                # Sort by the time field only when the strategy both names a
                # time field and asks for time ordering.
                sort_fields = (
                    [{metadata.strategy.time_field: "asc"}]
                    if metadata.strategy.time_field and metadata.strategy.sort_by_time
                    else None
                )

                for document in processor.stream_documents(
                    metadata.source_index,
                    query,
                    sort_fields=sort_fields
                ):
                    # Honour cancellation between documents.
                    if self.is_cancelled:
                        return

                    # A non-None return means a chunk file was just closed.
                    chunk_info = chunk_writer.add_document(document)
                    if chunk_info:
                        metadata.chunks.append(chunk_info.file_path)

                    doc_count += 1
                    metadata.backed_up_documents = doc_count

                    # Periodic progress report plus metadata checkpoint.
                    if doc_count % 1000 == 0:
                        progress = min(doc_count / metadata.total_documents, 1.0) if metadata.total_documents > 0 else 0
                        self._report_progress({
                            "step": "备份数据",
                            "documents_processed": doc_count,
                            "total_documents": metadata.total_documents,
                            "progress": progress,
                            "status": "running"
                        })

                        self._save_metadata(metadata, Path(metadata.backup_path) / "metadata.json")

                # Flush the final (possibly partial) chunk.
                chunk_writer.finalize()

                # Rebuild the authoritative chunk list from the writer; this
                # supersedes the incremental appends made during the loop
                # (the old append of the final chunk's path was a dead store,
                # immediately overwritten by this reassignment).
                metadata.chunks = [info.file_path for info in chunk_writer.get_chunks_info()]

            logger.info(f"数据备份完成，共 {metadata.backed_up_documents} 个文档，{len(metadata.chunks)} 个分片")

        except Exception as e:
            logger.error(f"备份数据失败: {e}")
            raise

    def _validate_backup(
        self,
        metadata: BackupMetadata,
        backup_storage: "BackupStorage"
    ) -> None:
        """Verify chunk files exist and are readable; warn on count mismatch.

        Raises:
            BackupError: When any chunk file is missing or unreadable.
        """
        # Validation is optional per strategy.
        if not metadata.strategy.validate_data:
            return

        logger.info("验证备份完整性")

        try:
            missing_chunks = []
            total_docs_in_chunks = 0

            for chunk_file in metadata.chunks:
                chunk_path = Path(chunk_file)
                if not chunk_path.exists():
                    missing_chunks.append(str(chunk_path))
                else:
                    try:
                        # Read the chunk back to count documents; an
                        # unreadable chunk is treated the same as a missing one.
                        documents = self.storage_manager.chunker.read_chunk(chunk_path)
                        total_docs_in_chunks += len(documents)
                    except Exception as e:
                        logger.warning(f"分片文件损坏: {chunk_path}, {e}")
                        missing_chunks.append(str(chunk_path))

            if missing_chunks:
                raise BackupError(f"备份验证失败，缺失或损坏的分片: {missing_chunks}")

            # Count mismatch is logged, not fatal, matching prior behavior.
            if total_docs_in_chunks != metadata.backed_up_documents:
                logger.warning(
                    f"文档数量不匹配: 元数据显示 {metadata.backed_up_documents}, "
                    f"分片中实际 {total_docs_in_chunks}"
                )

            logger.info("备份验证通过")

        except Exception as e:
            logger.error(f"备份验证失败: {e}")
            raise

    def _finalize_backup(
        self,
        metadata: BackupMetadata,
        backup_storage: "BackupStorage"
    ) -> None:
        """Mark the backup completed, record sizes, save metadata and report."""
        metadata.end_time = datetime.now(timezone.utc)
        metadata.status = "completed"

        # Sum on-disk sizes of all chunk files for the compressed total.
        total_compressed_size = 0
        for chunk_file in metadata.chunks:
            chunk_path = Path(chunk_file)
            if chunk_path.exists():
                total_compressed_size += chunk_path.stat().st_size

        metadata.compressed_size_bytes = total_compressed_size

        # Persist the final metadata.
        self._save_metadata(metadata, Path(metadata.backup_path) / "metadata.json")

        # Emit a human/machine-readable summary report.
        self._generate_backup_report(metadata)

        logger.info("备份完成")

    def _build_backup_query(self, strategy: BackupStrategy) -> Dict[str, Any]:
        """Build the ES query for the given strategy.

        Returns:
            match_all for full backups or when no usable time range is
            configured; otherwise a time-range query on the strategy's
            time field.
        """
        if strategy.strategy_type == "full":
            return {"match_all": {}}

        if strategy.time_field and (strategy.time_range_start or strategy.time_range_end):
            # Local import keeps the query-builder dependency lazy.
            from ..utils.query_builder import QueryBuilder
            query_builder = QueryBuilder()
            return query_builder.build_time_range_query(
                strategy.time_field,
                strategy.time_range_start,
                strategy.time_range_end
            )

        return {"match_all": {}}

    def _save_metadata(self, metadata: BackupMetadata, metadata_path: Path) -> None:
        """Serialize backup metadata to JSON at the given path."""
        metadata_dict = asdict(metadata)

        # Datetimes are converted to ISO strings so the JSON round-trips.
        metadata_dict = self._convert_datetime_to_string(metadata_dict)

        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata_dict, f, indent=2, ensure_ascii=False, default=str)

    def _convert_datetime_to_string(self, obj):
        """Recursively replace datetime values with their ISO-8601 strings.

        Args:
            obj: Any object; dicts and lists are walked recursively.

        Returns:
            A copy with every datetime converted; other values unchanged.
        """
        if isinstance(obj, datetime):
            return obj.isoformat()
        elif isinstance(obj, dict):
            return {key: self._convert_datetime_to_string(value) for key, value in obj.items()}
        elif isinstance(obj, list):
            return [self._convert_datetime_to_string(item) for item in obj]
        else:
            return obj

    def _generate_backup_report(self, metadata: BackupMetadata) -> None:
        """Write a JSON summary report next to the backup data."""
        report = {
            "backup_id": metadata.backup_id,
            "source_index": metadata.source_index,
            "strategy": metadata.strategy.strategy_type,
            "start_time": metadata.start_time.isoformat(),
            "end_time": metadata.end_time.isoformat() if metadata.end_time else None,
            "duration_seconds": (metadata.end_time - metadata.start_time).total_seconds() if metadata.end_time else None,
            "total_documents": metadata.total_documents,
            "backed_up_documents": metadata.backed_up_documents,
            "total_chunks": len(metadata.chunks),
            "total_size_bytes": metadata.total_size_bytes,
            "compressed_size_bytes": metadata.compressed_size_bytes,
            # Guard the ratio against a zero raw size.
            "compression_ratio": metadata.compressed_size_bytes / metadata.total_size_bytes if metadata.total_size_bytes > 0 else 0,
            "status": metadata.status
        }

        report_path = Path(metadata.backup_path) / "backup_report.json"
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        logger.info(f"备份报告已生成: {report_path}")

    def _report_progress(self, progress_info: Dict[str, Any]) -> None:
        """Invoke every registered progress callback, tolerating failures."""
        # Iterate a snapshot so a callback may safely (un)register callbacks.
        for callback in list(self.progress_callbacks):
            try:
                callback(progress_info)
            except Exception as e:
                # A broken observer must not abort the backup itself.
                logger.warning(f"进度回调失败: {e}")

    def cancel(self) -> None:
        """Request cooperative cancellation of the running backup."""
        self.is_cancelled = True
        logger.info("备份已被取消")

    def add_progress_callback(self, callback: Callable[[Dict[str, Any]], None]) -> None:
        """Register a persistent progress callback (survives across runs)."""
        self.progress_callbacks.append(callback)
