# -*- coding: utf-8 -*-
"""
增强的断点续跑系统
提供智能恢复、状态分析和断点管理
"""

import json
import logging
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, Any, Optional, List, Tuple
import shutil

from .checkpoint_manager import CheckpointManager
from .step_base import StepStatus

logger = logging.getLogger(__name__)


class CheckpointMetadata:
    """Read-only view over a saved pipeline checkpoint state dict.

    The wrapped ``data`` dict is expected to follow the layout written by the
    checkpoint manager: top-level keys ``timestamp`` (ISO-8601 string),
    ``metadata`` (pipeline-level info) and ``steps`` (mapping of step name to
    a per-step dict with ``status`` / ``duration`` entries) — presumably
    produced by ``CheckpointManager.save_pipeline_state``; confirm against
    that writer.
    """

    def __init__(self, data: Optional[Dict[str, Any]] = None):
        """
        Wrap a checkpoint state dict.

        Args:
            data: Raw checkpoint state; ``None`` is treated as empty.
        """
        self.data: Dict[str, Any] = data or {}

    def get_creation_time(self) -> Optional[datetime]:
        """Return the checkpoint creation time, or ``None`` if absent or unparsable."""
        timestamp = self.data.get("timestamp")
        if not timestamp:
            return None
        try:
            return datetime.fromisoformat(timestamp)
        except (TypeError, ValueError):
            # A corrupt checkpoint must not crash callers that merely want
            # to display metadata; treat an unparsable timestamp as missing.
            return None

    def get_pipeline_info(self) -> Dict[str, Any]:
        """Return the pipeline-level metadata dict (empty dict if absent)."""
        return self.data.get("metadata", {})

    def get_completion_percentage(self) -> float:
        """Return the percentage (0.0-100.0) of steps marked COMPLETED."""
        steps = self.data.get("steps", {})
        if not steps:
            return 0.0

        completed = sum(
            1 for s in steps.values()
            if s.get("status") == StepStatus.COMPLETED.value
        )
        return (completed / len(steps)) * 100

    def _steps_with_status(self, status: "StepStatus") -> List[str]:
        """Return the names of steps whose recorded status matches *status*."""
        steps = self.data.get("steps", {})
        return [
            name for name, info in steps.items()
            if info.get("status") == status.value
        ]

    def get_failed_steps(self) -> List[str]:
        """Return the names of steps recorded as FAILED."""
        return self._steps_with_status(StepStatus.FAILED)

    def get_skipped_steps(self) -> List[str]:
        """Return the names of steps recorded as SKIPPED."""
        return self._steps_with_status(StepStatus.SKIPPED)

    def get_total_duration(self) -> float:
        """Return the summed recorded duration of all steps (0 when absent)."""
        steps = self.data.get("steps", {})
        return sum(info.get("duration", 0) for info in steps.values())

    def is_recoverable(self) -> bool:
        """Return True if the checkpoint has at least one COMPLETED step.

        Not recoverable when there are no steps at all, or when nothing has
        completed — the latter also covers the "every step failed" case, so
        a separate all-failed check is unnecessary.
        """
        steps = self.data.get("steps", {})
        if not steps:
            return False

        return any(
            s.get("status") == StepStatus.COMPLETED.value
            for s in steps.values()
        )

    def get_recovery_strategy(self) -> str:
        """Return ``unrecoverable``, ``retry_failed`` or ``resume_from_checkpoint``."""
        if not self.is_recoverable():
            return "unrecoverable"
        if self.get_failed_steps():
            return "retry_failed"
        return "resume_from_checkpoint"

    def to_dict(self) -> Dict[str, Any]:
        """Return the underlying state dict (shared reference, not a copy)."""
        return self.data


class RecoveryAnalyzer:
    """Derives a recovery analysis and suggested actions from checkpoint metadata."""

    @staticmethod
    def analyze_checkpoint(checkpoint_metadata: "CheckpointMetadata") -> Dict[str, Any]:
        """
        Analyze a checkpoint and produce a summary with recommendations.

        Args:
            checkpoint_metadata: Metadata wrapper around the checkpoint state.

        Returns:
            Dict with recoverability, completion percentage, failed/skipped
            step names, total duration, recovery strategy and human-readable
            recommendation strings.
        """
        analysis: Dict[str, Any] = {
            "recoverable": checkpoint_metadata.is_recoverable(),
            "completion_percentage": checkpoint_metadata.get_completion_percentage(),
            "failed_steps": checkpoint_metadata.get_failed_steps(),
            "skipped_steps": checkpoint_metadata.get_skipped_steps(),
            "total_duration": checkpoint_metadata.get_total_duration(),
            "recovery_strategy": checkpoint_metadata.get_recovery_strategy(),
            "recommendations": [],
        }

        recommendations = analysis["recommendations"]
        if not analysis["recoverable"]:
            recommendations.append("断点不可恢复，建议重新开始")
        else:
            if analysis["failed_steps"]:
                recommendations.append(
                    f"重试失败的步骤: {', '.join(analysis['failed_steps'])}"
                )
            if analysis["completion_percentage"] < 50:
                recommendations.append("已完成步骤较少，重建断点可能更高效")
            # Accumulated step time above one hour.
            if analysis["total_duration"] > 3600:
                recommendations.append("执行时间较长，建议检查资源使用情况")

        return analysis

    @staticmethod
    def suggest_recovery_actions(
        analysis: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Translate an analysis dict into an ordered list of suggested actions.

        Args:
            analysis: Result of :meth:`analyze_checkpoint`. Keys are read
                defensively so a partial dict (e.g. the minimal
                ``{"exists": False, "recoverable": False, ...}`` shape
                produced when no state could be loaded) does not raise.

        Returns:
            Action dicts with ``action``, ``description``, ``priority`` and,
            where applicable, the affected ``steps``.
        """
        actions: List[Dict[str, Any]] = []

        if not analysis.get("recoverable", False):
            actions.append({
                "action": "clear_checkpoint",
                "description": "清理不可恢复的断点",
                "priority": "high",
            })
            return actions

        failed_steps = analysis.get("failed_steps") or []
        if failed_steps:
            actions.append({
                "action": "retry_steps",
                "steps": failed_steps,
                "description": f"重试 {len(failed_steps)} 个失败步骤",
                "priority": "high",
            })

        # Skipped steps are offered for re-execution at lower priority.
        skipped_steps = analysis.get("skipped_steps") or []
        if skipped_steps:
            actions.append({
                "action": "reexecute_skipped",
                "steps": skipped_steps,
                "description": f"重新执行 {len(skipped_steps)} 个跳过步骤",
                "priority": "medium",
            })

        # A missing percentage defaults to 100 so an incomplete analysis
        # does not spuriously suggest a restart.
        if analysis.get("completion_percentage", 100.0) < 30:
            actions.append({
                "action": "restart",
                "description": "完成度较低，建议重新开始",
                "priority": "medium",
            })

        return actions


class SmartCheckpointManager(CheckpointManager):
    """Checkpoint manager extended with analysis, backup, validation and
    import/export helpers on top of the base ``CheckpointManager``."""

    def __init__(self, checkpoint_dir: Path):
        """
        Initialize the smart checkpoint manager.

        Args:
            checkpoint_dir: Directory that holds the checkpoint files.
        """
        super().__init__(checkpoint_dir)
        # Maximum number of checkpoints to retain.
        # NOTE(review): not referenced by cleanup_old_checkpoints(), which
        # takes its own `keep` argument — confirm which limit should apply.
        self.max_checkpoints = 10

    def create_checkpoint(
        self,
        pipeline_name: str,
        current_step: Optional[str],
        steps_state: Dict[str, Any],
        metadata: Optional[Dict[str, Any]] = None,
    ) -> CheckpointMetadata:
        """
        Persist the current pipeline state and return its metadata.

        Args:
            pipeline_name: Name of the pipeline.
            current_step: Name of the step currently executing, if any.
            steps_state: Per-step state mapping.
            metadata: Optional pipeline-level metadata.

        Returns:
            Metadata wrapper for the freshly saved state.
        """
        # Persist via the base class, then read the state back so the
        # returned metadata reflects exactly what was written to disk.
        self.save_pipeline_state(
            pipeline_name,
            current_step,
            steps_state,
            metadata,
        )
        return CheckpointMetadata(self.load_pipeline_state())

    def analyze_checkpoint(self) -> Dict[str, Any]:
        """
        Analyze the currently stored checkpoint.

        Returns:
            Analysis dict; a minimal ``{"exists": False, ...}`` dict when no
            state can be loaded.
        """
        state = self.load_pipeline_state()
        if not state:
            return {
                "exists": False,
                "recoverable": False,
                "message": "No checkpoint found",
            }

        analysis = RecoveryAnalyzer.analyze_checkpoint(CheckpointMetadata(state))
        return {
            "exists": True,
            "pipeline_name": state.get("pipeline_name"),
            "timestamp": state.get("timestamp"),
            **analysis,
        }

    def suggest_recovery(self) -> Optional[Dict[str, Any]]:
        """
        Build a recovery suggestion for the stored checkpoint.

        Returns:
            Dict with the analysis, all candidate actions and the first
            (highest-priority) suggested action, or ``None`` when no
            checkpoint exists.
        """
        analysis = self.analyze_checkpoint()
        if not analysis["exists"]:
            return None

        actions = RecoveryAnalyzer.suggest_recovery_actions(analysis)
        return {
            "analysis": analysis,
            "actions": actions,
            "suggested_action": actions[0] if actions else None,
        }

    def cleanup_old_checkpoints(self, keep: int = 5):
        """
        Delete old sibling checkpoint directories, keeping the newest ones.

        Only directories named ``checkpoints_*`` next to this checkpoint
        directory are considered; ``checkpoint_backup_*`` directories made
        by :meth:`create_backup` are deliberately not matched.

        Args:
            keep: Number of most recent checkpoint directories to retain.
        """
        checkpoint_dirs = [
            d for d in self.checkpoint_dir.parent.iterdir()
            if d.is_dir() and d.name.startswith("checkpoints_")
        ]
        if len(checkpoint_dirs) <= keep:
            return

        # Newest first; everything past index `keep`-1 is surplus.
        checkpoint_dirs.sort(key=lambda d: d.stat().st_mtime, reverse=True)
        for old_dir in checkpoint_dirs[keep:]:
            try:
                shutil.rmtree(old_dir)
                logger.info(f"Removed old checkpoint: {old_dir}")
            except Exception as e:
                # Best-effort cleanup: log and keep going.
                logger.error(f"Failed to remove checkpoint {old_dir}: {e}")

    def create_backup(self) -> Path:
        """
        Copy the checkpoint directory to a timestamped backup directory.

        Returns:
            Path of the created backup directory.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_dir = self.checkpoint_dir.parent / f"checkpoint_backup_{timestamp}"

        shutil.copytree(self.checkpoint_dir, backup_dir)
        logger.info(f"Checkpoint backed up to: {backup_dir}")
        return backup_dir

    def restore_from_backup(self, backup_dir: Path):
        """
        Replace the current checkpoint with the contents of a backup.

        Args:
            backup_dir: Backup directory created by :meth:`create_backup`.

        Raises:
            FileNotFoundError: If the backup directory does not exist.
        """
        if not backup_dir.exists():
            raise FileNotFoundError(f"Backup not found: {backup_dir}")

        # Clear the current checkpoint, then copy the backup in its place.
        if self.checkpoint_dir.exists():
            shutil.rmtree(self.checkpoint_dir)
        shutil.copytree(backup_dir, self.checkpoint_dir)
        logger.info(f"Checkpoint restored from: {backup_dir}")

    def validate_checkpoint(self) -> Tuple[bool, List[str]]:
        """
        Validate checkpoint integrity.

        Returns:
            Tuple of (is_valid, list of error messages).
        """
        errors: List[str] = []

        # `pipeline_file` and `steps_dir` come from the base
        # CheckpointManager — presumably the main state file and the
        # per-step JSON directory; confirm against the base class.
        if not self.pipeline_file.exists():
            errors.append("Pipeline state file missing")
            return False, errors

        state = self.load_pipeline_state()
        if not state:
            errors.append("Failed to load pipeline state")
            return False, errors

        # Required top-level fields.
        for field in ("pipeline_name", "timestamp", "steps"):
            if field not in state:
                errors.append(f"Missing required field: {field}")

        # Every per-step file must at least parse as JSON.
        if not self.steps_dir.exists():
            errors.append("Steps directory missing")
        else:
            for step_file in self.steps_dir.glob("*.json"):
                try:
                    with open(step_file, "r", encoding="utf-8") as f:
                        json.load(f)
                except json.JSONDecodeError:
                    errors.append(f"Invalid JSON in: {step_file}")
                except Exception as e:
                    errors.append(f"Error reading {step_file}: {e}")

        return len(errors) == 0, errors

    def get_checkpoint_info(self) -> Dict[str, Any]:
        """
        Summarize the stored checkpoint (analysis plus on-disk size).

        Returns:
            Info dict; ``{"exists": False, ...}`` when no checkpoint exists.
        """
        if not self.has_checkpoint():
            return {
                "exists": False,
                "message": "No checkpoint found",
            }

        # analyze_checkpoint() already loads the state and supplies
        # pipeline_name/timestamp, so the extra load_pipeline_state() call
        # previously made here was redundant. The defaults below cover the
        # corner case where the state fails to load.
        info: Dict[str, Any] = {
            "exists": True,
            "pipeline_name": None,
            "timestamp": None,
            "size_mb": self._get_checkpoint_size(),
        }
        info.update(self.analyze_checkpoint())
        return info

    def _get_checkpoint_size(self) -> float:
        """Return the total size of the checkpoint directory in MB."""
        total_size = sum(
            p.stat().st_size
            for p in self.checkpoint_dir.rglob("*")
            if p.is_file()
        )
        return total_size / (1024 * 1024)

    def export_checkpoint(self, export_path: Path):
        """
        Export the checkpoint as a zip archive.

        Args:
            export_path: Target path; ``shutil.make_archive`` appends the
                ``.zip`` suffix to the path's stem (any existing suffix on
                ``export_path`` is dropped first).

        Raises:
            ValueError: If there is no checkpoint to export.
        """
        if not self.has_checkpoint():
            raise ValueError("No checkpoint to export")

        # make_archive returns the real archive path; log that instead of
        # f"{export_path}.zip", which misreported the result (e.g.
        # "foo.zip.zip") whenever export_path already carried a suffix.
        archive_path = shutil.make_archive(
            str(export_path.with_suffix("")),
            "zip",
            self.checkpoint_dir,
        )
        logger.info(f"Checkpoint exported to: {archive_path}")

    def import_checkpoint(self, import_path: Path):
        """
        Replace the current checkpoint with an exported archive's contents.

        Args:
            import_path: Archive previously produced by export_checkpoint.

        Raises:
            FileNotFoundError: If the archive does not exist.
        """
        if not import_path.exists():
            raise FileNotFoundError(f"Import file not found: {import_path}")

        # Clear the current checkpoint, then unpack the archive in place.
        if self.checkpoint_dir.exists():
            shutil.rmtree(self.checkpoint_dir)
        shutil.unpack_archive(str(import_path), str(self.checkpoint_dir))
        logger.info(f"Checkpoint imported from: {import_path}")


class CheckpointPruner:
    """Prunes checkpoint directories by age and by count."""

    @staticmethod
    def prune_checkpoints(
        checkpoint_dirs: List[Path],
        max_age_days: int = 7,
        max_count: int = 5,
    ) -> List[Path]:
        """
        Delete checkpoint directories that are too old or exceed the count limit.

        Note: ``checkpoint_dirs`` is sorted in place (newest first).

        Args:
            checkpoint_dirs: Candidate checkpoint directories.
            max_age_days: Directories older than this many days are deleted.
            max_count: At most this many of the newest directories are kept.

        Returns:
            Directories that were actually deleted.
        """
        deleted: List[Path] = []

        # Newest first, so everything past index max_count-1 is surplus.
        checkpoint_dirs.sort(key=lambda d: d.stat().st_mtime, reverse=True)

        now = datetime.now()
        max_age = timedelta(days=max_age_days)

        # Collect targets in a single pass so a directory that is both
        # expired AND over the count limit appears only once. The previous
        # two-list approach produced duplicates, making the second rmtree
        # attempt fail and log a spurious "Failed to prune" error.
        to_delete: List[Path] = []
        for index, checkpoint_dir in enumerate(checkpoint_dirs):
            mtime = datetime.fromtimestamp(checkpoint_dir.stat().st_mtime)
            if index >= max_count or (now - mtime) > max_age:
                to_delete.append(checkpoint_dir)

        for checkpoint_dir in to_delete:
            try:
                shutil.rmtree(checkpoint_dir)
                deleted.append(checkpoint_dir)
                logger.info(f"Pruned checkpoint: {checkpoint_dir}")
            except Exception as e:
                # Best-effort pruning: log and continue with the rest.
                logger.error(f"Failed to prune {checkpoint_dir}: {e}")

        return deleted