"""
恢复管理器

实现ES数据恢复功能，包括策略管理、索引重建、数据恢复等。
"""

import json
import re
import time
from collections import deque
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional, Any, Union, Callable, Iterator

from ..utils.logger import get_module_logger
from ..utils.es_client import ESClient
from ..utils.time_utils import TimeRangeProcessor, TimezoneHandler
from ..utils.mapping_analyzer import MappingAnalyzer
from ..utils.exceptions import RestoreError, ESIndexNotFoundError, create_error_from_exception
from ..config.settings import get_settings
from .storage import StorageManager
from .metadata import MetadataManager

logger = get_module_logger(__name__)


class CircuitBreakerHandler:
    """断路器异常处理器

    专门处理 Elasticsearch 断路器异常，提供自适应批量大小调整。
    """

    def __init__(self):
        """初始化断路器处理器"""
        self.circuit_breaker_errors = [
            'circuit_breaking_exception',
            'circuit_breaker_exception',
            'parent_circuit_breaker_exception'
        ]

    def is_circuit_breaker_error(self, error: Exception) -> bool:
        """判断是否为断路器异常

        Args:
            error: 异常对象

        Returns:
            是否为断路器异常
        """
        error_str = str(error).lower()
        return any(cb_error in error_str for cb_error in self.circuit_breaker_errors)

    def extract_memory_info(self, error: Exception) -> Dict[str, Any]:
        """从断路器异常中提取内存信息

        Args:
            error: 断路器异常

        Returns:
            内存信息字典
        """
        error_str = str(error)
        memory_info = {
            'current_usage': None,
            'limit': None,
            'requested': None,
            'available': None
        }

        try:
            # 解析错误信息中的内存数据
            # 例如: "data for [<http_request>] would be [1040403676/992.2mb], which is larger than the limit of [1011774259/964.9mb]"
            import re

            # 提取请求的内存大小
            requested_match = re.search(r'would be \[(\d+)/([0-9.]+)(mb|gb|kb)\]', error_str, re.IGNORECASE)
            if requested_match:
                memory_info['requested'] = self._parse_memory_size(requested_match.group(2), requested_match.group(3))

            # 提取内存限制
            limit_match = re.search(r'limit of \[(\d+)/([0-9.]+)(mb|gb|kb)\]', error_str, re.IGNORECASE)
            if limit_match:
                memory_info['limit'] = self._parse_memory_size(limit_match.group(2), limit_match.group(3))

            # 提取当前使用量
            usage_match = re.search(r'real usage: \[(\d+)/([0-9.]+)(mb|gb|kb)\]', error_str, re.IGNORECASE)
            if usage_match:
                memory_info['current_usage'] = self._parse_memory_size(usage_match.group(2), usage_match.group(3))

            # 计算可用内存
            if memory_info['limit'] and memory_info['current_usage']:
                memory_info['available'] = memory_info['limit'] - memory_info['current_usage']

        except Exception as e:
            logger.warning(f"解析内存信息失败: {e}")

        return memory_info

    def _parse_memory_size(self, size_str: str, unit: str) -> float:
        """解析内存大小字符串为MB

        Args:
            size_str: 大小字符串
            unit: 单位 (kb, mb, gb)

        Returns:
            以MB为单位的大小
        """
        size = float(size_str)
        unit = unit.lower()

        if unit == 'kb':
            return size / 1024
        elif unit == 'mb':
            return size
        elif unit == 'gb':
            return size * 1024
        else:
            return size

    def calculate_safe_batch_size(
        self,
        current_batch_size: int,
        memory_info: Dict[str, Any],
        document_avg_size: Optional[float] = None
    ) -> int:
        """计算安全的批量大小

        Args:
            current_batch_size: 当前批量大小
            memory_info: 内存信息
            document_avg_size: 文档平均大小（KB）

        Returns:
            建议的批量大小
        """
        # 如果没有可用内存信息，保守地减少批量大小
        if not memory_info.get('available') or not memory_info.get('requested'):
            return max(50, current_batch_size // 2)

        available_mb = memory_info['available']
        requested_mb = memory_info['requested']

        # 计算安全系数（保留20%的缓冲）
        safety_factor = 0.8
        safe_memory_mb = available_mb * safety_factor

        # 如果有文档平均大小信息，使用更精确的计算
        if document_avg_size:
            # 文档平均大小转换为MB
            doc_size_mb = document_avg_size / 1024
            # 计算可以安全处理的文档数量
            safe_batch_size = int(safe_memory_mb / doc_size_mb)
        else:
            # 基于当前请求和可用内存的比例计算
            ratio = safe_memory_mb / requested_mb
            safe_batch_size = int(current_batch_size * ratio)

        # 确保批量大小在合理范围内
        return max(10, min(safe_batch_size, current_batch_size // 2))


class AdaptiveBatchManager:
    """Adaptive bulk-size manager.

    Dynamically tunes the bulk batch size based on successes and errors
    (especially circuit-breaker rejections) reported by the caller.
    """

    def __init__(self, initial_batch_size: int = 500):
        """Initialize the manager.

        Args:
            initial_batch_size: starting batch size
        """
        self.initial_batch_size = initial_batch_size
        self.current_batch_size = initial_batch_size
        self.min_batch_size = 10
        self.max_batch_size = 2000

        self.circuit_breaker_handler = CircuitBreakerHandler()
        self.success_count = 0
        self.error_count = 0
        # Rolling window of recent document sizes (KB); deque trims itself
        # to the newest 1000 entries in O(1) instead of re-slicing a list.
        self.document_sizes = deque(maxlen=1000)

    def get_current_batch_size(self) -> int:
        """Return the batch size to use for the next bulk request."""
        return self.current_batch_size

    def record_success(self, batch_size: int, processing_time: Optional[float] = None) -> None:
        """Record a successful bulk operation.

        Every 5 consecutive successes the batch size is grown conservatively
        (by ~10%, capped at +50 and at max_batch_size).

        Args:
            batch_size: size of the successful batch (informational)
            processing_time: optional duration of the operation in seconds
        """
        self.success_count += 1

        # success_count is always > 0 after the increment, so only the
        # streak-of-5 check matters.
        if self.success_count % 5 == 0 and self.current_batch_size < self.max_batch_size:
            increase = min(50, self.current_batch_size // 10)
            self.current_batch_size = min(self.max_batch_size, self.current_batch_size + increase)
            logger.info(f"批量大小增加到: {self.current_batch_size}")

    def record_error(self, error: Exception, batch_size: int) -> int:
        """Record a failed bulk operation and shrink the batch size.

        Circuit-breaker errors get a memory-aware adjustment; any other
        error simply halves the batch size. The success streak is reset.

        Args:
            error: exception raised by the bulk operation
            batch_size: size of the batch that failed

        Returns:
            The adjusted batch size.
        """
        self.error_count += 1
        self.success_count = 0  # a failure breaks the success streak

        if self.circuit_breaker_handler.is_circuit_breaker_error(error):
            # Memory-aware adjustment based on the breaker's own numbers.
            memory_info = self.circuit_breaker_handler.extract_memory_info(error)
            avg_doc_size = self._calculate_average_document_size()

            suggested = self.circuit_breaker_handler.calculate_safe_batch_size(
                batch_size, memory_info, avg_doc_size
            )
            self.current_batch_size = max(self.min_batch_size, suggested)
            logger.warning(f"检测到断路器异常，批量大小调整为: {self.current_batch_size}")
        else:
            # Unknown failure: conservative halving.
            self.current_batch_size = max(self.min_batch_size, self.current_batch_size // 2)
            logger.warning(f"检测到错误，批量大小减少到: {self.current_batch_size}")

        return self.current_batch_size

    def record_document_size(self, doc_size_kb: float) -> None:
        """Record one document's size in KB.

        The underlying deque keeps only the newest 1000 samples.

        Args:
            doc_size_kb: document size in KB
        """
        self.document_sizes.append(doc_size_kb)

    def _calculate_average_document_size(self) -> Optional[float]:
        """Return the average recorded document size in KB, or None if empty."""
        if not self.document_sizes:
            return None
        return sum(self.document_sizes) / len(self.document_sizes)

    def get_statistics(self) -> Dict[str, Any]:
        """Return a snapshot of the manager's counters and derived metrics.

        Returns:
            Statistics dict (batch sizes, counts, average document size).
        """
        return {
            'current_batch_size': self.current_batch_size,
            'initial_batch_size': self.initial_batch_size,
            'success_count': self.success_count,
            'error_count': self.error_count,
            'average_document_size_kb': self._calculate_average_document_size(),
            'total_documents_analyzed': len(self.document_sizes)
        }


@dataclass
class RestoreStrategy:
    """Restore strategy configuration.

    Controls how a backup is restored: target index handling, batching,
    filtering, timestamp/timezone treatment, and memory/retry behaviour.
    """
    strategy_type: str = "full"  # one of: full, selective, time_range
    target_index: Optional[str] = None
    create_index: bool = True
    overwrite_existing: bool = False
    batch_size: int = 500  # lowered default to reduce memory pressure on ES
    parallel_workers: int = 2
    validate_data: bool = True
    preserve_timestamps: bool = True
    adjust_timezone: bool = False
    target_timezone: Optional[str] = None
    field_mapping: Optional[Dict[str, str]] = None
    document_filter: Optional[Dict[str, Any]] = None
    time_range_start: Optional[datetime] = None
    time_range_end: Optional[datetime] = None
    backup_path: Optional[str] = None  # backup directory path, used for validation
    # Memory-management related settings:
    max_memory_usage: str = "500MB"  # max memory for a single bulk operation
    adaptive_batch_size: bool = True  # enable adaptive batch sizing
    circuit_breaker_retry: bool = True  # retry when a circuit breaker trips
    max_retries: int = 3  # maximum retry attempts
    retry_delay: float = 2.0  # delay between retries (seconds)


@dataclass
class RestoreMetadata:
    """Runtime record of a single restore operation (mutated as it proceeds)."""
    restore_id: str
    strategy: RestoreStrategy
    source_backup_id: str
    target_index: str
    start_time: datetime
    end_time: Optional[datetime] = None
    status: str = "running"  # one of: running, completed, failed, cancelled
    total_documents: int = 0
    restored_documents: int = 0
    skipped_documents: int = 0
    error_count: int = 0
    error_message: Optional[str] = None


class RestoreStrategyManager:
    """Restore strategy manager.

    Creates, validates and optimizes restore strategies, and builds
    executable restore plans from them.
    """

    def __init__(self):
        """Initialize the manager with time helpers and predefined templates."""
        self.time_processor = TimeRangeProcessor()
        self.timezone_handler = TimezoneHandler()

        # Predefined strategy templates keyed by strategy type.
        self.predefined_strategies = {
            "full": RestoreStrategy(
                strategy_type="full",
                create_index=True,
                validate_data=True
            ),
            "selective": RestoreStrategy(
                strategy_type="selective",
                create_index=False,
                validate_data=True
            ),
            "time_range": RestoreStrategy(
                strategy_type="time_range",
                create_index=True,
                preserve_timestamps=True
            )
        }

    def create_strategy(
        self,
        strategy_type: str = "full",
        **kwargs
    ) -> RestoreStrategy:
        """Create a restore strategy.

        Starts from the predefined template for ``strategy_type`` (when one
        exists) and applies ``kwargs`` as field overrides.

        Args:
            strategy_type: strategy type name
            **kwargs: strategy field overrides

        Returns:
            A new RestoreStrategy instance.
        """
        if strategy_type in self.predefined_strategies:
            # asdict() yields an independent copy, so templates are never mutated.
            strategy_dict = asdict(self.predefined_strategies[strategy_type])
        else:
            strategy_dict = {"strategy_type": strategy_type}

        strategy_dict.update(kwargs)
        return RestoreStrategy(**strategy_dict)

    def validate_strategy(
        self,
        strategy: RestoreStrategy,
        es_client: ESClient,
        backup_metadata: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate a restore strategy against the cluster and backup metadata.

        Args:
            strategy: restore strategy to validate
            es_client: ES client used for index/mapping checks
            backup_metadata: metadata of the source backup

        Returns:
            Dict with a ``valid`` flag plus ``warnings``/``errors``/``suggestions`` lists.
        """
        validation_result = {
            "valid": True,
            "warnings": [],
            "errors": [],
            "suggestions": []
        }

        try:
            # A target index is mandatory; bail out early without one.
            if not strategy.target_index:
                validation_result["errors"].append("必须指定目标索引")
                validation_result["valid"] = False
                return validation_result

            target_exists = es_client.index_exists(strategy.target_index)

            if target_exists and not strategy.overwrite_existing:
                validation_result["warnings"].append(
                    f"目标索引已存在: {strategy.target_index}，数据将追加到现有索引"
                )
            elif target_exists and strategy.overwrite_existing:
                validation_result["warnings"].append(
                    f"目标索引将被覆盖: {strategy.target_index}"
                )
            elif not target_exists and not strategy.create_index:
                validation_result["errors"].append(
                    f"目标索引不存在且未启用自动创建: {strategy.target_index}"
                )
                validation_result["valid"] = False

            # Mapping compatibility is best-effort: failures only warn.
            if target_exists:
                try:
                    analyzer = MappingAnalyzer(es_client)
                    source_index = backup_metadata.get("source_index")
                    if source_index:
                        compatibility = analyzer.check_mapping_compatibility(
                            source_index, strategy.target_index
                        )
                        if not compatibility["compatible"]:
                            validation_result["warnings"].append(
                                "源索引和目标索引映射不兼容，可能导致数据丢失"
                            )
                except Exception as e:
                    validation_result["warnings"].append(f"无法检查映射兼容性: {e}")

            # Time-range sanity check.
            if strategy.time_range_start and strategy.time_range_end:
                if strategy.time_range_start > strategy.time_range_end:
                    validation_result["errors"].append("开始时间不能晚于结束时间")
                    validation_result["valid"] = False

            # Timezone must be resolvable when adjustment is enabled.
            if strategy.adjust_timezone and strategy.target_timezone:
                try:
                    self.timezone_handler.normalize_timezone(strategy.target_timezone)
                except Exception as e:
                    validation_result["errors"].append(f"无效的目标时区: {e}")
                    validation_result["valid"] = False

            # Batch size: a non-positive value would make the restore loop
            # never flush a batch, so it is a hard error (previously only a
            # warning); oversized values remain a warning.
            if strategy.batch_size <= 0:
                validation_result["errors"].append("批量大小必须为正数")
                validation_result["valid"] = False
            elif strategy.batch_size > 10000:
                validation_result["warnings"].append("批量大小建议在1-10000之间")

            if strategy.parallel_workers > 8:
                validation_result["warnings"].append("并行工作线程数过多，可能影响ES集群性能")

        except Exception as e:
            validation_result["errors"].append(f"验证过程中出错: {e}")
            validation_result["valid"] = False

        return validation_result

    def optimize_strategy(
        self,
        strategy: RestoreStrategy,
        es_client: ESClient,
        backup_metadata: Dict[str, Any]
    ) -> RestoreStrategy:
        """Return an optimized copy of the strategy (the input is not mutated).

        Tunes batch size to the backup's document count, caps parallel workers
        by cluster size, and auto-generates a target index name when missing.

        Args:
            strategy: original strategy
            es_client: ES client used for cluster inspection
            backup_metadata: metadata of the source backup

        Returns:
            Optimized RestoreStrategy copy.
        """
        optimized = RestoreStrategy(**asdict(strategy))

        try:
            # Scale batch size with the amount of data to restore.
            total_docs = backup_metadata.get("backed_up_documents", 0)
            if total_docs > 0:
                if total_docs < 10000:
                    optimized.batch_size = min(1000, total_docs)
                elif total_docs < 100000:
                    optimized.batch_size = 2000
                else:
                    optimized.batch_size = 5000

            # Cap parallelism at min(node count, 4) to protect the cluster.
            cluster_info = es_client.get_cluster_info()
            if cluster_info:
                node_count = len(cluster_info.get('nodes', {}))
                if node_count > 0:
                    optimal_workers = min(node_count, 4)
                    if optimized.parallel_workers != optimal_workers:
                        optimized.parallel_workers = optimal_workers
                        logger.info(f"优化并行工作线程数: {optimal_workers}")

            # Derive a target index name from the source index when absent.
            if not optimized.target_index:
                source_index = backup_metadata.get("source_index", "unknown")
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                optimized.target_index = f"{source_index}_restored_{timestamp}"
                logger.info(f"自动生成目标索引名: {optimized.target_index}")

        except Exception as e:
            # Optimization is best-effort; fall back to the unoptimized copy.
            logger.warning(f"策略优化失败: {e}")

        return optimized

    def estimate_restore_time(
        self,
        strategy: RestoreStrategy,
        backup_metadata: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Estimate restore duration from the backup's document count.

        Args:
            strategy: restore strategy (parallel_workers scales the rate)
            backup_metadata: metadata of the source backup

        Returns:
            Dict with estimated documents, seconds, minutes and rate assumption.
        """
        estimation = {
            "estimated_documents": backup_metadata.get("backed_up_documents", 0),
            "estimated_seconds": 0,
            "estimated_minutes": 0,
            "processing_rate_assumption": "500 docs/sec"
        }

        # Empirical rate: 500 docs/sec per worker. Guard against a
        # non-positive worker count to avoid division by zero.
        processing_rate = 500 * max(1, strategy.parallel_workers)

        if estimation["estimated_documents"] > 0:
            estimated_seconds = estimation["estimated_documents"] / processing_rate
            estimation["estimated_seconds"] = int(estimated_seconds)
            estimation["estimated_minutes"] = int(estimated_seconds / 60)
            estimation["processing_rate_assumption"] = f"{processing_rate} docs/sec"

        return estimation

    def create_restore_plan(
        self,
        strategy: RestoreStrategy,
        es_client: ESClient,
        backup_metadata: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate, optimize and package a strategy into a restore plan.

        Args:
            strategy: restore strategy
            es_client: ES client
            backup_metadata: metadata of the source backup

        Returns:
            Restore plan dict (id, strategy, validation, estimation, steps).

        Raises:
            RestoreError: if the strategy fails validation.
        """
        validation = self.validate_strategy(strategy, es_client, backup_metadata)
        if not validation["valid"]:
            raise RestoreError(f"恢复策略验证失败: {validation['errors']}")

        optimized_strategy = self.optimize_strategy(strategy, es_client, backup_metadata)
        time_estimation = self.estimate_restore_time(optimized_strategy, backup_metadata)

        # Unique id: target index plus epoch seconds.
        restore_id = f"restore_{optimized_strategy.target_index}_{int(time.time())}"

        return {
            "restore_id": restore_id,
            "strategy": optimized_strategy,
            "validation": validation,
            "time_estimation": time_estimation,
            "backup_metadata": backup_metadata,
            "steps": self._generate_restore_steps(optimized_strategy, backup_metadata)
        }

    def _generate_restore_steps(
        self,
        strategy: RestoreStrategy,
        backup_metadata: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Build the ordered step list (name / description / estimated seconds).

        Args:
            strategy: restore strategy
            backup_metadata: metadata of the source backup

        Returns:
            Ordered list of step dicts.
        """
        # Step 1: preparation.
        steps = [{
            "step_name": "准备恢复",
            "description": "验证备份文件，准备恢复环境",
            "estimated_duration": 30
        }]

        # Step 2: target index handling (only when auto-creation is enabled).
        if strategy.create_index:
            steps.append({
                "step_name": "创建目标索引",
                "description": f"创建或准备目标索引: {strategy.target_index}",
                "estimated_duration": 60
            })

        # Step 3: data restore, using the same 500 docs/sec/worker assumption
        # (guarded against non-positive worker counts).
        total_docs = backup_metadata.get("backed_up_documents", 0)
        workers = max(1, strategy.parallel_workers)
        estimated_restore_time = total_docs / (500 * workers) if total_docs > 0 else 60

        steps.append({
            "step_name": "恢复数据",
            "description": f"恢复 {total_docs} 个文档到目标索引",
            "estimated_duration": int(estimated_restore_time)
        })

        # Step 4: optional validation.
        if strategy.validate_data:
            steps.append({
                "step_name": "验证数据",
                "description": "验证恢复数据的完整性和正确性",
                "estimated_duration": 120
            })

        # Step 5: finalization.
        steps.append({
            "step_name": "完成恢复",
            "description": "生成恢复报告，清理临时资源",
            "estimated_duration": 30
        })

        return steps


class RestoreManager:
    """恢复管理器

    执行实际的恢复操作。
    """

    def __init__(self, es_client: Optional[ESClient] = None, config: Optional[Dict[str, Any]] = None):
        """Set up the restore manager and its collaborators.

        Args:
            es_client: Elasticsearch client; a default one is created when omitted.
            config: configuration mapping; falls back to the global restore settings.
        """
        # Fall back to defaults when dependencies are not injected.
        if not es_client:
            es_client = ESClient()
        self.es_client = es_client

        if not config:
            config = get_settings().config.restore.dict()
        self.config = config

        # Collaborating managers.
        self.strategy_manager = RestoreStrategyManager()
        self.storage_manager = StorageManager()
        self.metadata_manager = MetadataManager()

        # Maps restore_id -> RestoreMetadata for in-flight restores.
        self.active_restores = {}

        logger.info("恢复管理器初始化完成")

    def restore_from_backup(
        self,
        backup_id: str,
        strategy: Optional[Union[RestoreStrategy, str, Dict[str, Any]]] = None,
        **kwargs
    ) -> RestoreMetadata:
        """Restore data from a backup.

        Args:
            backup_id: identifier of the backup to restore
            strategy: restore strategy (RestoreStrategy, strategy-type name,
                field dict, or None for the "full" default)
            **kwargs: extra strategy overrides (used when strategy is a name/None)

        Returns:
            RestoreMetadata describing the completed restore.

        Raises:
            RestoreError: when the backup is missing or the restore fails.
        """
        logger.info(f"开始从备份恢复数据: {backup_id}")

        # Sentinel instead of the fragile "'metadata' in locals()" check.
        metadata: Optional[RestoreMetadata] = None

        try:
            # Normalize the strategy argument FIRST, so that a dict/str
            # strategy carrying a backup_path is honored too (the old code
            # only checked attribute access before normalization and silently
            # ignored dict strategies).
            if isinstance(strategy, str):
                restore_strategy = self.strategy_manager.create_strategy(strategy, **kwargs)
            elif isinstance(strategy, dict):
                restore_strategy = RestoreStrategy(**strategy)
            elif isinstance(strategy, RestoreStrategy):
                restore_strategy = strategy
            else:
                restore_strategy = self.strategy_manager.create_strategy("full", **kwargs)

            backup_info = None
            backup_metadata = {}

            # Prefer an explicit backup path on the strategy: read the
            # metadata file straight from the backup directory.
            if restore_strategy.backup_path:
                backup_path = Path(restore_strategy.backup_path)
                if backup_path.exists():
                    metadata_file = backup_path / "metadata.json"
                    if metadata_file.exists():
                        # json is imported at module level; no local import needed.
                        with open(metadata_file, 'r', encoding='utf-8') as f:
                            backup_metadata = json.load(f)
                        backup_info = {'backup_path': str(backup_path), 'full_metadata': backup_metadata}

            # Fall back to the metadata manager.
            if not backup_info:
                backup_info = self.metadata_manager.get_backup_info(backup_id)
                if backup_info:
                    backup_metadata = backup_info.get("full_metadata", {})

            if not backup_info:
                raise RestoreError(f"备份不存在: {backup_id}")

            # Propagate the resolved backup path onto the strategy.
            if 'backup_path' in backup_info:
                restore_strategy.backup_path = backup_info['backup_path']

            # Build the (validated + optimized) restore plan.
            plan = self.strategy_manager.create_restore_plan(
                restore_strategy, self.es_client, backup_metadata
            )

            metadata = RestoreMetadata(
                restore_id=plan["restore_id"],
                strategy=plan["strategy"],
                source_backup_id=backup_id,
                target_index=plan["strategy"].target_index,
                start_time=datetime.now(timezone.utc),
                total_documents=backup_metadata.get("backed_up_documents", 0)
            )

            # Track the in-flight restore.
            self.active_restores[metadata.restore_id] = metadata

            self._execute_restore(metadata, plan, backup_metadata)

            metadata.end_time = datetime.now(timezone.utc)
            metadata.status = "completed"

            logger.info(f"恢复完成: {metadata.restore_id}")
            return metadata

        except Exception as e:
            logger.error(f"恢复失败: {backup_id}, {e}")
            if metadata is not None:
                metadata.status = "failed"
                metadata.error_message = str(e)
                metadata.end_time = datetime.now(timezone.utc)
            raise RestoreError(f"恢复失败: {backup_id}", backup_id, e)

        finally:
            # Deregister the restore whether it succeeded or failed.
            if metadata is not None and metadata.restore_id in self.active_restores:
                del self.active_restores[metadata.restore_id]

    def _execute_restore(
        self,
        metadata: RestoreMetadata,
        plan: Dict[str, Any],
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Run every restore step in order.

        Args:
            metadata: restore metadata (mutated by the step handlers)
            plan: restore plan (currently informational)
            backup_metadata: metadata of the source backup
        """
        # Ordered pipeline of (display name, handler) pairs. The unused
        # local `strategy` from the old version has been removed.
        steps = [
            ("准备恢复", self._prepare_restore),
            ("处理目标索引", self._prepare_target_index),
            ("恢复数据", self._restore_data),
            ("验证数据", self._validate_restore),
            ("完成恢复", self._finalize_restore)
        ]

        total_steps = len(steps)

        for step_index, (step_name, step_func) in enumerate(steps, start=1):
            logger.info(f"执行步骤 {step_index}/{total_steps}: {step_name}")
            step_func(metadata, backup_metadata)

    def _prepare_restore(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Verify backup integrity (when enabled) before restoring.

        Raises:
            RestoreError: if the integrity check reports issues.
        """
        logger.info("准备恢复环境")

        # Guard clause: integrity verification can be switched off entirely.
        if not metadata.strategy.validate_data:
            logger.info("跳过备份完整性验证")
            logger.debug("恢复准备完成")
            return

        backup_id = metadata.source_backup_id

        # Forward an explicit backup path to the integrity check when present.
        backup_path = None
        strategy_path = getattr(metadata.strategy, 'backup_path', None)
        if strategy_path:
            backup_path = strategy_path

        integrity_result = self.metadata_manager.verify_backup_integrity(backup_id, backup_path)
        if not integrity_result["valid"]:
            raise RestoreError(f"备份完整性验证失败: {integrity_result['issues']}")
        logger.info(f"备份完整性验证通过: {backup_id}")

        logger.debug("恢复准备完成")

    def _prepare_target_index(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Prepare the target index: delete and/or create it per the strategy.

        Args:
            metadata: restore metadata (provides the strategy/target index)
            backup_metadata: backup metadata (provides backup path/source index)

        Raises:
            RestoreError: when index creation fails.
        """
        strategy = metadata.strategy
        target_index = strategy.target_index

        logger.info(f"准备目标索引: {target_index}")

        try:
            index_exists = self.es_client.index_exists(target_index)

            if index_exists and strategy.overwrite_existing:
                # Drop the existing index so it is recreated from the backup.
                logger.warning(f"删除现有索引: {target_index}")
                self.es_client.client.indices.delete(index=target_index)
                index_exists = False

            if not index_exists and strategy.create_index:
                logger.info(f"创建新索引: {target_index}")

                # Prefer the path recorded in backup metadata; fall back to the
                # strategy's explicit backup_path. The old code indexed
                # backup_metadata["backup_path"] directly and raised KeyError
                # when the metadata dict lacked the key.
                backup_path = backup_metadata.get("backup_path") or strategy.backup_path

                index_config: Dict[str, Any] = {}
                if backup_path:
                    index_metadata_file = Path(backup_path) / "index_metadata.json"
                    if index_metadata_file.exists():
                        with open(index_metadata_file, 'r', encoding='utf-8') as f:
                            index_metadata = json.load(f)
                        index_config = self._extract_index_config(
                            index_metadata, backup_metadata["source_index"]
                        )

                # With no metadata available the index is created with defaults.
                success = self.es_client.create_index(target_index, index_config)
                if not success:
                    raise RestoreError(f"创建索引失败: {target_index}")

            logger.info(f"目标索引准备完成: {target_index}")

        except Exception as e:
            logger.error(f"准备目标索引失败: {e}")
            raise

    @staticmethod
    def _extract_index_config(index_metadata: Dict[str, Any], source_index: str) -> Dict[str, Any]:
        """Extract the creation config (mappings + whitelisted settings) for *source_index*.

        Only replayable index settings are kept; system/read-only settings
        (uuid, version, creation_date, ...) are dropped.

        Args:
            index_metadata: parsed index_metadata.json content
            source_index: name of the backed-up source index

        Returns:
            Config dict suitable for index creation (possibly empty).
        """
        index_config: Dict[str, Any] = {}

        mappings = index_metadata.get("mappings", {})
        if source_index in mappings:
            index_config["mappings"] = mappings[source_index]["mappings"]

        all_settings = index_metadata.get("settings", {})
        if source_index in all_settings:
            index_settings = all_settings[source_index]["settings"].get("index", {})
            allowed_settings = (
                "number_of_shards", "number_of_replicas",
                "refresh_interval", "max_result_window"
            )
            filtered = {name: index_settings[name]
                        for name in allowed_settings if name in index_settings}
            if filtered:
                index_config["settings"] = {"index": filtered}

        return index_config

    def _restore_data(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """恢复数据"""
        logger.info("开始恢复数据")

        try:
            strategy = metadata.strategy
            backup_path = backup_metadata["backup_path"]

            # 创建自适应批量管理器
            batch_manager = None
            if strategy.adaptive_batch_size:
                batch_manager = AdaptiveBatchManager(strategy.batch_size)
                logger.info(f"启用自适应批量大小管理，初始批量大小: {strategy.batch_size}")

            # 创建备份存储读取器
            backup_storage = self.storage_manager.create_backup_storage(Path(backup_path))

            # 批量恢复数据
            batch = []
            doc_count = 0
            current_batch_size = strategy.batch_size

            for document in backup_storage.read_all_documents():
                # 应用文档过滤
                if not self._should_restore_document(document, strategy):
                    metadata.skipped_documents += 1
                    continue

                # 处理文档
                processed_doc = self._process_document(document, strategy)
                if processed_doc:
                    batch.append(processed_doc)
                    doc_count += 1

                # 动态调整批量大小
                if batch_manager:
                    current_batch_size = batch_manager.get_current_batch_size()

                # 批量写入
                if len(batch) >= current_batch_size:
                    try:
                        self._write_batch(batch, metadata.target_index, batch_manager)
                        metadata.restored_documents += doc_count
                        batch = []
                        doc_count = 0

                        # 记录进度
                        if metadata.restored_documents % 10000 == 0:
                            logger.info(f"已恢复 {metadata.restored_documents} 个文档")
                            if batch_manager:
                                stats = batch_manager.get_statistics()
                                logger.info(f"当前批量大小: {stats['current_batch_size']}, "
                                          f"成功: {stats['success_count']}, 错误: {stats['error_count']}")

                    except Exception as e:
                        # 如果启用了断路器重试
                        if strategy.circuit_breaker_retry and batch_manager:
                            circuit_handler = CircuitBreakerHandler()
                            if circuit_handler.is_circuit_breaker_error(e):
                                logger.warning(f"遇到断路器异常，调整批量大小后继续: {e}")
                                # 批量管理器已经在 _write_batch 中调整了批量大小
                                # 这里只需要重置当前批次并继续
                                batch = []
                                doc_count = 0
                                continue

                        # 其他错误或未启用重试，直接抛出
                        raise

            # 处理最后一批
            if batch:
                try:
                    self._write_batch(batch, metadata.target_index, batch_manager)
                    metadata.restored_documents += doc_count
                except Exception as e:
                    if strategy.circuit_breaker_retry and batch_manager:
                        circuit_handler = CircuitBreakerHandler()
                        if circuit_handler.is_circuit_breaker_error(e):
                            logger.warning(f"最后一批遇到断路器异常: {e}")
                            # 尝试分割最后一批
                            if len(batch) > 1:
                                mid = len(batch) // 2
                                try:
                                    self._write_batch(batch[:mid], metadata.target_index, batch_manager)
                                    metadata.restored_documents += mid
                                    self._write_batch(batch[mid:], metadata.target_index, batch_manager)
                                    metadata.restored_documents += (len(batch) - mid)
                                except Exception as split_error:
                                    logger.error(f"分割批次后仍然失败: {split_error}")
                                    raise
                            else:
                                logger.error(f"单个文档也无法写入: {e}")
                                raise
                    else:
                        raise

            # 输出最终统计信息
            if batch_manager:
                final_stats = batch_manager.get_statistics()
                logger.info(f"恢复完成统计 - 最终批量大小: {final_stats['current_batch_size']}, "
                          f"总成功: {final_stats['success_count']}, 总错误: {final_stats['error_count']}, "
                          f"平均文档大小: {final_stats['average_document_size_kb']:.2f}KB")

            logger.info(f"数据恢复完成，共恢复 {metadata.restored_documents} 个文档")

        except Exception as e:
            logger.error(f"恢复数据失败: {e}")
            raise

    def _should_restore_document(self, document: Dict[str, Any], strategy: RestoreStrategy) -> bool:
        """Decide whether a document passes the strategy's restore filters.

        Args:
            document: Raw document as read from the backup.
            strategy: Restore strategy carrying the filter configuration.

        Returns:
            True if the document should be restored, False otherwise.
        """
        # Field-equality filter: every configured field must match exactly.
        if strategy.document_filter:
            source = document.get("_source", {})
            if not all(source.get(name) == wanted
                       for name, wanted in strategy.document_filter.items()):
                return False

        # Time-range filtering is declared but not implemented here; the
        # time field would have to come from the backup metadata.
        if strategy.time_range_start or strategy.time_range_end:
            pass

        return True

    def _process_document(self, document: Dict[str, Any], strategy: RestoreStrategy) -> Optional[Dict[str, Any]]:
        """处理文档

        Args:
            document: 原始文档
            strategy: 恢复策略

        Returns:
            处理后的文档
        """
        try:
            # 构建ES批量操作格式
            action = {
                "index": {
                    "_index": strategy.target_index,
                    "_id": document.get("_id")
                }
            }

            source = document.get("_source", {})

            # 应用字段映射
            if strategy.field_mapping:
                mapped_source = {}
                for old_field, new_field in strategy.field_mapping.items():
                    if old_field in source:
                        mapped_source[new_field] = source[old_field]
                    else:
                        mapped_source[old_field] = source.get(old_field)
                source = mapped_source

            # 处理时区调整
            if strategy.adjust_timezone and strategy.target_timezone:
                source = self._adjust_document_timezone(source, strategy.target_timezone)

            return [action, source]

        except Exception as e:
            logger.warning(f"处理文档失败: {e}")
            return None

    def _adjust_document_timezone(self, source: Dict[str, Any], target_timezone: str) -> Dict[str, Any]:
        """调整文档时区

        Args:
            source: 文档源数据
            target_timezone: 目标时区

        Returns:
            调整后的文档
        """
        # 简化实现，实际应该根据时间字段配置进行调整
        # 这里只是示例代码
        return source

    def _write_batch(self, batch: List[Dict[str, Any]], target_index: str, batch_manager: Optional[AdaptiveBatchManager] = None) -> None:
        """Bulk-write one batch of documents, retrying with backoff on failure.

        Each batch item is expected to be an ``[action, source]`` pair (as
        produced by ``_process_document``); anything else is appended to the
        bulk body as-is.

        Args:
            batch: Batch of documents to write.
            target_index: Target index name (informational only here — each
                per-document action already carries its own ``_index``).
            batch_manager: Optional adaptive batch manager that records
                document sizes, successes and errors to tune the batch size.

        Raises:
            Exception: re-raised after ``max_retries`` failed attempts, or a
                synthesized ``TransportError`` when circuit-breaker errors
                persist through all retries.
        """
        max_retries = 3
        retry_delay = 2.0

        for attempt in range(max_retries):
            try:
                # Flatten [action, source] pairs into the bulk request body.
                bulk_body = []
                total_size_kb = 0

                for item in batch:
                    if isinstance(item, list) and len(item) == 2:
                        bulk_body.extend(item)
                        # Estimate the document size for adaptive batching.
                        if batch_manager and len(item) == 2:
                            doc_size = len(str(item[1]).encode('utf-8')) / 1024  # KB
                            total_size_kb += doc_size
                            batch_manager.record_document_size(doc_size)
                    else:
                        bulk_body.append(item)

                # Execute the bulk request and time it.
                start_time = time.time()
                result = self.es_client.bulk(bulk_body)
                processing_time = time.time() - start_time

                # Inspect per-item errors reported by the bulk API.
                if result.get("errors"):
                    error_count = sum(1 for item in result["items"]
                                    if any(op.get("error") for op in item.values()))
                    logger.warning(f"批量写入包含 {error_count} 个错误")

                    # Collect circuit-breaker errors specifically.
                    circuit_breaker_errors = []
                    for item in result["items"]:
                        for op_result in item.values():
                            if "error" in op_result:
                                error_info = op_result["error"]
                                if isinstance(error_info, dict):
                                    error_type = error_info.get("type", "")
                                    if "circuit_breaking_exception" in error_type:
                                        circuit_breaker_errors.append(error_info)

                    if circuit_breaker_errors and batch_manager:
                        # Synthesize a circuit-breaker exception so the batch
                        # manager shrinks the batch size accordingly.
                        error_msg = circuit_breaker_errors[0].get("reason", "Circuit breaker exception")
                        from elasticsearch.exceptions import TransportError
                        circuit_error = TransportError(429, 'circuit_breaking_exception', error_msg)
                        batch_manager.record_error(circuit_error, len(batch))

                        if attempt < max_retries - 1:
                            logger.info(f"检测到断路器异常，等待 {retry_delay} 秒后重试...")
                            time.sleep(retry_delay)
                            retry_delay *= 1.5  # exponential backoff
                            continue
                        else:
                            raise TransportError(429, 'circuit_breaking_exception', error_msg)

                # Record success metrics for adaptive batching.
                if batch_manager:
                    batch_manager.record_success(len(batch), processing_time)

                return  # success — leave the retry loop

            except Exception as e:
                logger.error(f"批量写入失败 (尝试 {attempt + 1}/{max_retries}): {e}")

                # Let the batch manager shrink the batch size on error.
                if batch_manager:
                    batch_manager.record_error(e, len(batch))

                # Out of retries: propagate the failure to the caller.
                if attempt == max_retries - 1:
                    raise

                # Back off before the next attempt.
                logger.info(f"等待 {retry_delay} 秒后重试...")
                time.sleep(retry_delay)
                retry_delay *= 1.5  # exponential backoff

    def _validate_restore(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Validate the restored data (document-count check) if enabled."""
        # Validation is opt-in via the strategy.
        if not metadata.strategy.validate_data:
            return

        logger.info("验证恢复数据")

        try:
            expected = metadata.restored_documents
            actual_count = self.es_client.client.count(index=metadata.target_index)["count"]

            # A mismatch is logged but does not fail the restore.
            if actual_count != expected:
                logger.warning(
                    f"文档数量不匹配: 预期 {expected}, "
                    f"实际 {actual_count}"
                )

            logger.info(f"恢复验证完成，目标索引包含 {actual_count} 个文档")

        except Exception as e:
            logger.error(f"恢复验证失败: {e}")
            raise

    def _finalize_restore(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Mark the restore as finished and emit the summary report."""
        # Stamp the terminal state onto the metadata record.
        metadata.status = "completed"
        metadata.end_time = datetime.now(timezone.utc)

        # Produce the human-readable restore report.
        self._generate_restore_report(metadata, backup_metadata)

        logger.info("恢复完成")

    def _generate_restore_report(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Assemble and log a JSON summary of the restore run."""
        # Derive optional end-time fields up front.
        finished = metadata.end_time
        duration = (finished - metadata.start_time).total_seconds() if finished else None

        report = {
            "restore_id": metadata.restore_id,
            "source_backup_id": metadata.source_backup_id,
            "target_index": metadata.target_index,
            "strategy": metadata.strategy.strategy_type,
            "start_time": metadata.start_time.isoformat(),
            "end_time": finished.isoformat() if finished else None,
            "duration_seconds": duration,
            "total_documents": metadata.total_documents,
            "restored_documents": metadata.restored_documents,
            "skipped_documents": metadata.skipped_documents,
            "error_count": metadata.error_count,
            "status": metadata.status
        }

        logger.info(f"恢复报告: {json.dumps(report, indent=2, ensure_ascii=False)}")

    def get_restore_status(self, restore_id: str) -> Optional[RestoreMetadata]:
        """Look up the metadata of an active restore task.

        Args:
            restore_id: Identifier of the restore task.

        Returns:
            The restore metadata, or None when no such task is active.
        """
        try:
            return self.active_restores[restore_id]
        except KeyError:
            return None

    def list_active_restores(self) -> List[RestoreMetadata]:
        """Return a snapshot of all currently active restore tasks.

        Returns:
            A new list of the active restore metadata objects.
        """
        return [*self.active_restores.values()]

    def cancel_restore(self, restore_id: str) -> bool:
        """Cancel an active restore task.

        Args:
            restore_id: Identifier of the restore task.

        Returns:
            True when the task existed and was cancelled, False otherwise.
        """
        # Atomically remove the task; None means it was not active.
        metadata = self.active_restores.pop(restore_id, None)
        if metadata is None:
            return False

        metadata.status = "cancelled"
        metadata.end_time = datetime.now(timezone.utc)
        logger.info(f"恢复任务已取消: {restore_id}")
        return True


class DataImporter:
    """Bulk data importer.

    Dedicated component responsible for bulk-loading documents into an
    Elasticsearch index, either from an in-memory list or a streaming
    iterator, with progress reporting and per-document error capture.
    """

    def __init__(self, es_client: ESClient, config: Optional[Dict[str, Any]] = None):
        """Initialize the data importer.

        Args:
            es_client: ES client wrapper used for bulk requests.
            config: Optional configuration dict (``batch_size``,
                ``max_retries``, ``timeout``).
        """
        self.es_client = es_client
        self.config = config or {}

        # Default tuning values, overridable through the config dict.
        # NOTE(review): max_retries is stored but never referenced anywhere
        # in this class — confirm whether retry handling was intended here.
        self.batch_size = self.config.get("batch_size", 1000)
        self.max_retries = self.config.get("max_retries", 3)
        self.timeout = self.config.get("timeout", 60)

        logger.info("数据导入器初始化完成")

    def import_documents(
        self,
        documents: List[Dict[str, Any]],
        target_index: str,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None
    ) -> Dict[str, Any]:
        """Import a list of documents in batches.

        Args:
            documents: Documents to import (each with ``_id`` / ``_source``).
            target_index: Destination index name.
            progress_callback: Optional callable invoked with a progress dict
                after every batch.

        Returns:
            Import statistics: counts, timings and per-document errors.

        Raises:
            Exception: re-raised after recording the failure in the stats.
        """
        logger.info(f"开始导入 {len(documents)} 个文档到索引: {target_index}")

        stats = {
            "total_documents": len(documents),
            "imported_documents": 0,
            "failed_documents": 0,
            "start_time": datetime.now(timezone.utc),
            "end_time": None,
            "errors": []
        }

        try:
            # Process the documents batch by batch.
            for i in range(0, len(documents), self.batch_size):
                batch = documents[i:i + self.batch_size]
                batch_result = self._import_batch(batch, target_index)

                # Fold the per-batch result into the overall statistics.
                stats["imported_documents"] += batch_result["success_count"]
                stats["failed_documents"] += batch_result["error_count"]
                stats["errors"].extend(batch_result["errors"])

                # Notify the caller of progress.
                if progress_callback:
                    progress = {
                        "processed": stats["imported_documents"] + stats["failed_documents"],
                        "total": stats["total_documents"],
                        "success": stats["imported_documents"],
                        "failed": stats["failed_documents"],
                        "progress_percent": (stats["imported_documents"] + stats["failed_documents"]) / stats["total_documents"] * 100
                    }
                    progress_callback(progress)

                # Log progress every 10 batches' worth of documents.
                if (i + self.batch_size) % (self.batch_size * 10) == 0:
                    logger.info(f"已处理 {stats['imported_documents'] + stats['failed_documents']} / {stats['total_documents']} 个文档")

            stats["end_time"] = datetime.now(timezone.utc)
            stats["duration_seconds"] = (stats["end_time"] - stats["start_time"]).total_seconds()

            logger.info(f"导入完成: 成功 {stats['imported_documents']}, 失败 {stats['failed_documents']}")
            return stats

        except Exception as e:
            # Record the failure before propagating it.
            logger.error(f"导入过程中出错: {e}")
            stats["end_time"] = datetime.now(timezone.utc)
            stats["errors"].append({"error": str(e), "type": "import_error"})
            raise

    def _import_batch(
        self,
        batch: List[Dict[str, Any]],
        target_index: str
    ) -> Dict[str, Any]:
        """Import a single batch via the bulk API.

        Args:
            batch: Documents for this batch.
            target_index: Destination index name.

        Returns:
            Per-batch result with success/error counts and error details.
            A failed bulk call marks the entire batch as failed rather than
            raising.
        """
        result = {
            "success_count": 0,
            "error_count": 0,
            "errors": []
        }

        try:
            # Build the bulk request body: one action line per document,
            # followed by its source.
            bulk_body = []
            for doc in batch:
                # Index action; _id may be None (ES then generates one).
                action = {
                    "index": {
                        "_index": target_index,
                        "_id": doc.get("_id")
                    }
                }

                bulk_body.append(action)
                bulk_body.append(doc.get("_source", doc))

            # Execute the bulk request.
            response = self.es_client.bulk(bulk_body, timeout=f"{self.timeout}s")

            # Tally per-item outcomes from the bulk response.
            if response.get("errors"):
                for item in response["items"]:
                    for operation, details in item.items():
                        if "error" in details:
                            result["error_count"] += 1
                            result["errors"].append({
                                "doc_id": details.get("_id"),
                                "error": details["error"],
                                "operation": operation
                            })
                        else:
                            result["success_count"] += 1
            else:
                result["success_count"] = len(batch)

        except Exception as e:
            # A failed bulk call counts the whole batch as errors.
            logger.error(f"批次导入失败: {e}")
            result["error_count"] = len(batch)
            result["errors"].append({
                "error": str(e),
                "type": "batch_error",
                "batch_size": len(batch)
            })

        return result

    def import_from_iterator(
        self,
        document_iterator: Iterator[Dict[str, Any]],
        target_index: str,
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None
    ) -> Dict[str, Any]:
        """Import documents streamed from an iterator.

        Args:
            document_iterator: Iterator yielding documents.
            target_index: Destination index name.
            progress_callback: Optional callable invoked with a progress dict
                after every full batch.

        Returns:
            Import statistics: counts, timings and per-document errors.

        Raises:
            Exception: re-raised after recording the failure in the stats.
        """
        logger.info(f"开始从迭代器导入文档到索引: {target_index}")

        stats = {
            "total_documents": 0,
            "imported_documents": 0,
            "failed_documents": 0,
            "start_time": datetime.now(timezone.utc),
            "end_time": None,
            "errors": []
        }

        try:
            batch = []

            for document in document_iterator:
                batch.append(document)
                stats["total_documents"] += 1

                # Flush once a full batch has accumulated.
                if len(batch) >= self.batch_size:
                    batch_result = self._import_batch(batch, target_index)

                    # Fold the per-batch result into the overall statistics.
                    stats["imported_documents"] += batch_result["success_count"]
                    stats["failed_documents"] += batch_result["error_count"]
                    stats["errors"].extend(batch_result["errors"])

                    # Notify the caller of progress.
                    if progress_callback:
                        progress = {
                            "processed": stats["imported_documents"] + stats["failed_documents"],
                            "total": stats["total_documents"],
                            "success": stats["imported_documents"],
                            "failed": stats["failed_documents"]
                        }
                        progress_callback(progress)

                    # Start a fresh batch.
                    batch = []

                    # Log progress every 10 batches' worth of documents.
                    if stats["total_documents"] % (self.batch_size * 10) == 0:
                        logger.info(f"已处理 {stats['imported_documents'] + stats['failed_documents']} 个文档")

            # Flush the final partial batch.
            if batch:
                batch_result = self._import_batch(batch, target_index)
                stats["imported_documents"] += batch_result["success_count"]
                stats["failed_documents"] += batch_result["error_count"]
                stats["errors"].extend(batch_result["errors"])

            stats["end_time"] = datetime.now(timezone.utc)
            stats["duration_seconds"] = (stats["end_time"] - stats["start_time"]).total_seconds()

            logger.info(f"迭代器导入完成: 总计 {stats['total_documents']}, 成功 {stats['imported_documents']}, 失败 {stats['failed_documents']}")
            return stats

        except Exception as e:
            # Record the failure before propagating it.
            logger.error(f"迭代器导入过程中出错: {e}")
            stats["end_time"] = datetime.now(timezone.utc)
            stats["errors"].append({"error": str(e), "type": "iterator_error"})
            raise

    def validate_documents(
        self,
        documents: List[Dict[str, Any]],
        target_index: str
    ) -> Dict[str, Any]:
        """Check whether the documents can be imported into *target_index*.

        Args:
            documents: Documents to validate.
            target_index: Destination index name.

        Returns:
            Validation result with per-document error details; ``valid`` is
            False when the index is missing or any document fails.
        """
        validation_result = {
            "valid": True,
            "total_documents": len(documents),
            "valid_documents": 0,
            "invalid_documents": 0,
            "errors": []
        }

        try:
            # The target index must exist before anything else is checked.
            if not self.es_client.index_exists(target_index):
                validation_result["valid"] = False
                validation_result["errors"].append({
                    "type": "index_not_found",
                    "message": f"目标索引不存在: {target_index}"
                })
                return validation_result

            # Fetch the index mapping for the per-document checks.
            mapping = self.es_client.get_mapping(target_index)

            # Validate each document individually.
            for i, doc in enumerate(documents):
                doc_errors = self._validate_single_document(doc, mapping, target_index)

                if doc_errors:
                    validation_result["invalid_documents"] += 1
                    validation_result["errors"].extend([
                        {**error, "document_index": i, "document_id": doc.get("_id")}
                        for error in doc_errors
                    ])
                else:
                    validation_result["valid_documents"] += 1

            # Any invalid document fails the overall validation.
            if validation_result["invalid_documents"] > 0:
                validation_result["valid"] = False

        except Exception as e:
            logger.error(f"文档验证失败: {e}")
            validation_result["valid"] = False
            validation_result["errors"].append({
                "type": "validation_error",
                "message": str(e)
            })

        return validation_result

    def _validate_single_document(
        self,
        document: Dict[str, Any],
        mapping: Dict[str, Any],
        target_index: str
    ) -> List[Dict[str, Any]]:
        """Validate one document's basic structure.

        Args:
            document: Document to validate.
            mapping: Index mapping (not used by the basic checks below).
            target_index: Destination index name (not used by the checks).

        Returns:
            A list of error dicts; empty when the document looks valid.
        """
        errors = []

        try:
            # The document must carry at least one of _source / _id.
            if "_source" not in document and "_id" not in document:
                errors.append({
                    "type": "missing_required_field",
                    "message": "文档缺少_source或_id字段"
                })

            # The source payload must be a dict.
            source = document.get("_source", document)
            if not isinstance(source, dict):
                errors.append({
                    "type": "invalid_document_structure",
                    "message": "文档源数据必须是字典类型"
                })

            # Only basic structural checks are performed here; a complete
            # implementation would validate field types against the mapping.

        except Exception as e:
            errors.append({
                "type": "validation_exception",
                "message": str(e)
            })

        return errors

    def get_import_statistics(self) -> Dict[str, Any]:
        """Return the importer's current configuration as statistics.

        Returns:
            Dict with the configured batch_size, max_retries and timeout.
        """
        # More detailed runtime statistics could be collected here.
        return {
            "batch_size": self.batch_size,
            "max_retries": self.max_retries,
            "timeout": self.timeout
        }


class RestoreExecutionEngine:
    """恢复执行引擎

    负责实际执行恢复操作，支持并发、进度监控、断点续传等功能。
    """

    def __init__(self, es_client: ESClient, config: Optional[Dict[str, Any]] = None):
        """Set up the restore execution engine.

        Args:
            es_client: ES client wrapper.
            config: Optional configuration; when omitted, the restore section
                of the global settings is used instead.
        """
        self.es_client = es_client
        self.config = config or get_settings().config.restore.dict()

        # NOTE(review): the importer receives the raw ``config`` argument,
        # not ``self.config`` — when ``config`` is None it falls back to its
        # own defaults rather than the settings above. Confirm intended.
        self.data_importer = DataImporter(es_client, config)
        self.storage_manager = StorageManager()

        # Per-run progress listeners and the cooperative cancellation flag.
        self.progress_callbacks = []
        self.is_cancelled = False

        logger.info("恢复执行引擎初始化完成")

    def execute_restore(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any],
        progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None
    ) -> None:
        """Run a restore end to end, reporting progress along the way.

        Args:
            metadata: Restore metadata, updated in place with the outcome.
            backup_metadata: Metadata describing the source backup.
            progress_callback: Optional listener invoked with progress dicts.

        Raises:
            Exception: re-raised after marking the metadata as failed.
        """
        if progress_callback:
            self.progress_callbacks.append(progress_callback)

        try:
            logger.info(f"开始执行恢复: {metadata.restore_id}")
            self._execute_restore_steps(metadata, backup_metadata)
            logger.info(f"恢复执行完成: {metadata.restore_id}")
        except Exception as e:
            # Record the failure on the metadata before propagating it.
            metadata.status = "failed"
            metadata.error_message = str(e)
            logger.error(f"恢复执行失败: {e}")
            raise
        finally:
            # Listeners are registered per call; drop them on the way out.
            self.progress_callbacks.clear()

    def _execute_restore_steps(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Drive the fixed restore pipeline, step by step.

        Args:
            metadata: Restore metadata, updated as steps run.
            backup_metadata: Metadata describing the source backup.
        """
        pipeline = [
            ("验证备份", self._verify_backup),
            ("准备目标索引", self._prepare_target_index),
            ("恢复数据", self._restore_data_parallel),
            ("验证恢复", self._verify_restore),
            ("完成恢复", self._finalize_restore),
        ]
        total_steps = len(pipeline)

        for index, (name, run_step) in enumerate(pipeline):
            # Honour cancellation between steps.
            if self.is_cancelled:
                metadata.status = "cancelled"
                return

            logger.info(f"执行步骤 {index + 1}/{total_steps}: {name}")

            # Announce the step before running it.
            self._report_progress({
                "step": name,
                "step_index": index,
                "total_steps": total_steps,
                "progress": index / total_steps,
                "status": "running"
            })

            run_step(metadata, backup_metadata)

        # All steps done: emit the terminal progress event.
        self._report_progress({
            "step": "完成",
            "step_index": total_steps,
            "total_steps": total_steps,
            "progress": 1.0,
            "status": "completed"
        })

    def _verify_backup(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Check that the backup on disk is complete before restoring.

        Raises:
            RestoreError: when the backup directory, its metadata file or any
                recorded chunk file is missing.
        """
        logger.info("验证备份完整性")

        backup_path = backup_metadata["backup_path"]
        root = Path(backup_path)

        # The backup directory itself must be present.
        if not root.exists():
            raise RestoreError(f"备份路径不存在: {backup_path}")

        # The backup's metadata file must be present.
        if not (root / "metadata.json").exists():
            raise RestoreError("备份元数据文件不存在")

        # Every chunk file recorded in the metadata must still exist.
        missing_chunks = [
            chunk for chunk in backup_metadata.get("chunks", [])
            if not Path(chunk).exists()
        ]
        if missing_chunks:
            raise RestoreError(f"缺失分片文件: {len(missing_chunks)} 个")

        logger.info("备份验证通过")

    def _prepare_target_index(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Ensure the restore target index is in the required state."""
        strategy = metadata.strategy
        target_index = strategy.target_index

        logger.info(f"准备目标索引: {target_index}")

        exists = self.es_client.index_exists(target_index)

        # Overwrite mode: drop the index so it can be recreated from backup.
        if exists and strategy.overwrite_existing:
            logger.warning(f"删除现有索引: {target_index}")
            self.es_client.client.indices.delete(index=target_index)
            exists = False

        # Create the index (from backed-up mappings/settings) when allowed.
        if not exists and strategy.create_index:
            self._create_target_index(target_index, backup_metadata)

        logger.info(f"目标索引准备完成: {target_index}")

    def _create_target_index(self, target_index: str, backup_metadata: Dict[str, Any]) -> None:
        """Create the target index, reusing mappings/settings from the backup.

        Raises:
            RestoreError: when index creation fails.
        """
        logger.info(f"创建新索引: {target_index}")

        metadata_path = Path(backup_metadata["backup_path"]) / "index_metadata.json"

        index_config = {}
        if metadata_path.exists():
            with open(metadata_path, 'r', encoding='utf-8') as f:
                index_metadata = json.load(f)

            source_index = backup_metadata["source_index"]

            # Reuse the source index's mappings verbatim.
            mappings = index_metadata.get("mappings", {})
            if source_index in mappings:
                index_config["mappings"] = mappings[source_index]["mappings"]

            # Reuse the settings after stripping read-only keys.
            all_settings = index_metadata.get("settings", {})
            if source_index in all_settings:
                filtered = self._filter_index_settings(all_settings[source_index]["settings"])
                if filtered:
                    index_config["settings"] = {"index": filtered}

        if not self.es_client.create_index(target_index, index_config):
            raise RestoreError(f"创建索引失败: {target_index}")

    def _filter_index_settings(self, settings: Dict[str, Any]) -> Dict[str, Any]:
        """过滤索引设置，移除只读设置"""
        filtered_settings = {}

        if "index" in settings:
            index_settings = settings["index"]
            allowed_settings = [
                "number_of_shards", "number_of_replicas",
                "refresh_interval", "max_result_window",
                "analysis", "mapping"
            ]

            for setting in allowed_settings:
                if setting in index_settings:
                    filtered_settings[setting] = index_settings[setting]

        return filtered_settings

    def _restore_data_parallel(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Stream documents out of the backup and bulk-import them."""
        logger.info("开始并行恢复数据")

        strategy = metadata.strategy
        reader = self.storage_manager.create_backup_storage(
            Path(backup_metadata["backup_path"])
        )

        def filtered_documents():
            # Lazily yield processed documents, honouring cancellation and
            # the strategy's filters; skipped docs are counted on metadata.
            for raw_doc in reader.read_all_documents():
                if self.is_cancelled:
                    break
                if not self._should_restore_document(raw_doc, strategy):
                    metadata.skipped_documents += 1
                    continue
                prepared = self._process_document(raw_doc, strategy)
                if prepared:
                    yield prepared

        # Hand the stream to the importer, mirroring its progress onto the
        # restore metadata via the callback.
        import_stats = self.data_importer.import_from_iterator(
            filtered_documents(),
            metadata.target_index,
            progress_callback=self._create_import_progress_callback(metadata)
        )

        metadata.restored_documents = import_stats["imported_documents"]
        metadata.error_count = import_stats["failed_documents"]

        logger.info(f"并行数据恢复完成，共恢复 {metadata.restored_documents} 个文档")

    def _should_restore_document(self, document: Dict[str, Any], strategy: RestoreStrategy) -> bool:
        """Return True when *document* passes the strategy's restore filters."""
        # Exact-match filter on configured source fields.
        doc_filter = strategy.document_filter
        if doc_filter:
            source = document.get("_source", {})
            if any(source.get(name) != wanted for name, wanted in doc_filter.items()):
                return False

        # Time-range filtering is not implemented in this simplified version.
        if strategy.time_range_start or strategy.time_range_end:
            pass

        return True

    def _process_document(self, document: Dict[str, Any], strategy: RestoreStrategy) -> Optional[Dict[str, Any]]:
        """处理文档"""
        try:
            source = document.get("_source", {})

            # 应用字段映射
            if strategy.field_mapping:
                mapped_source = {}
                for old_field, new_field in strategy.field_mapping.items():
                    if old_field in source:
                        mapped_source[new_field] = source[old_field]
                    else:
                        mapped_source[old_field] = source.get(old_field)
                source = mapped_source

            # 处理时区调整
            if strategy.adjust_timezone and strategy.target_timezone:
                source = self._adjust_document_timezone(source, strategy.target_timezone)

            # 返回处理后的文档
            return {
                "_id": document.get("_id"),
                "_source": source
            }

        except Exception as e:
            logger.warning(f"处理文档失败: {e}")
            return None

    def _adjust_document_timezone(self, source: Dict[str, Any], target_timezone: str) -> Dict[str, Any]:
        """调整文档时区"""
        # 简化实现，实际应该根据时间字段配置进行调整
        return source

    def _create_import_progress_callback(self, metadata: RestoreMetadata) -> Callable[[Dict[str, Any]], None]:
        """创建导入进度回调"""
        def progress_callback(progress: Dict[str, Any]):
            # 更新恢复元数据
            metadata.restored_documents = progress["success"]
            metadata.error_count = progress["failed"]

            # 报告进度
            self._report_progress({
                "step": "恢复数据",
                "documents_processed": progress["processed"],
                "documents_success": progress["success"],
                "documents_failed": progress["failed"],
                "progress_percent": progress.get("progress_percent", 0),
                "status": "running"
            })

        return progress_callback

    def _verify_restore(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """验证恢复结果"""
        if not metadata.strategy.validate_data:
            return

        logger.info("验证恢复数据")

        target_index = metadata.target_index

        # 检查文档数量
        count_result = self.es_client.client.count(index=target_index)
        actual_count = count_result["count"]

        if actual_count != metadata.restored_documents:
            logger.warning(
                f"文档数量不匹配: 预期 {metadata.restored_documents}, "
                f"实际 {actual_count}"
            )

        logger.info(f"恢复验证完成，目标索引包含 {actual_count} 个文档")

    def _finalize_restore(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Mark the restore as finished and emit its summary report."""
        # Stamp the completion time (UTC) and final status onto the metadata.
        metadata.end_time = datetime.now(timezone.utc)
        metadata.status = "completed"

        # Emit the human-readable summary for this restore run.
        self._generate_restore_report(metadata, backup_metadata)

        logger.info("恢复完成")

    def _generate_restore_report(
        self,
        metadata: RestoreMetadata,
        backup_metadata: Dict[str, Any]
    ) -> None:
        """Log a JSON summary of the restore run (ids, timing, counters)."""
        finished = metadata.end_time is not None
        report = {
            "restore_id": metadata.restore_id,
            "source_backup_id": metadata.source_backup_id,
            "target_index": metadata.target_index,
            "strategy": metadata.strategy.strategy_type,
            "start_time": metadata.start_time.isoformat(),
            # Timing fields stay None while the restore has not finished.
            "end_time": metadata.end_time.isoformat() if finished else None,
            "duration_seconds": (metadata.end_time - metadata.start_time).total_seconds() if finished else None,
            "total_documents": metadata.total_documents,
            "restored_documents": metadata.restored_documents,
            "skipped_documents": metadata.skipped_documents,
            "error_count": metadata.error_count,
            "status": metadata.status
        }

        logger.info(f"恢复报告: {json.dumps(report, indent=2, ensure_ascii=False)}")

    def _report_progress(self, progress_info: Dict[str, Any]) -> None:
        """报告进度"""
        for callback in self.progress_callbacks:
            try:
                callback(progress_info)
            except Exception as e:
                logger.warning(f"进度回调失败: {e}")

    def cancel(self) -> None:
        """Request cancellation of the running restore.

        Sets the cooperative cancellation flag; the restore loop is expected
        to check ``is_cancelled`` and stop at the next opportunity.
        """
        self.is_cancelled = True
        logger.info("恢复已被取消")

    def add_progress_callback(self, callback: Callable[[Dict[str, Any]], None]) -> None:
        """Register a listener that receives progress dicts during restore."""
        self.progress_callbacks.append(callback)