#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
错误恢复和重试机制
提供强大的错误处理、自动重试和故障恢复能力
"""

import os
import time
import json
import traceback
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
from dataclasses import dataclass, field
from enum import Enum
from functools import wraps
from threading import Lock
import pickle
from pathlib import Path

from ..utils_module.logger import ObfuscationLogger


class ErrorType(Enum):
    """Classification of errors, used to select a recovery strategy."""
    TEMPORARY = "temporary"      # Transient error (retryable as-is)
    PERMANENT = "permanent"      # Permanent error (not retryable)
    RESOURCE = "resource"        # Resource exhaustion (wait, then retry)
    NETWORK = "network"          # Network error (retry with delay)
    VALIDATION = "validation"    # Validation error (retry after fixing data)


@dataclass
class RetryPolicy:
    """Retry policy controlling how failed calls are re-attempted."""
    max_retries: int = 3              # retries after the initial attempt
    initial_delay: float = 1.0        # delay before the first retry, seconds
    max_delay: float = 60.0           # cap on the backoff delay, seconds
    exponential_base: float = 2.0     # backoff multiplier per retry
    jitter: bool = True               # randomize delay to avoid thundering herds
    # NOTE: entries are exception *classes* (converted to a tuple for an
    # ``except`` clause), hence ``List[type]`` rather than ``List[Exception]``.
    retry_on: List[type] = field(default_factory=lambda: [Exception])


@dataclass
class ErrorContext:
    """Snapshot of one caught error, kept in history and used for recovery."""
    error_type: ErrorType            # classification driving the recovery strategy
    exception: Exception             # the original exception object
    traceback: str                   # formatted traceback captured at record time
    timestamp: float                 # time.time() when the error was recorded
    retry_count: int = 0             # retries attempted before this record
    recoverable: bool = True         # False only for ErrorType.PERMANENT
    metadata: Dict[str, Any] = field(default_factory=dict)  # caller-supplied context


@dataclass
class RecoveryCheckpoint:
    """A persistable snapshot of application state for later restoration."""
    checkpoint_id: str        # unique identifier, also used as the file name stem
    timestamp: float          # time.time() at creation
    state: Dict[str, Any]     # arbitrary state payload (must be picklable)
    metadata: Dict[str, Any]  # auxiliary information about the checkpoint


# Generic return type preserved by the retry / circuit-breaker wrappers below.
T = TypeVar('T')


class ErrorRecoveryManager:
    """Central error-handling facility: retries, checkpoints and recovery.

    Responsibilities:
      * classify caught exceptions and dispatch a matching recovery strategy,
      * provide a retry decorator with exponential backoff and optional jitter,
      * create/restore recovery checkpoints (pickled to disk),
      * track error history / error patterns and render a textual report.
    """

    def __init__(self, cache_dir: str = None, logger: ObfuscationLogger = None):
        """
        Initialize the error recovery manager.

        Args:
            cache_dir: Cache directory (defaults to ``<cwd>/.recovery``).
            logger: Optional logger for operational messages.
        """
        self.logger = logger
        self.cache_dir = cache_dir or os.path.join(os.getcwd(), '.recovery')
        os.makedirs(self.cache_dir, exist_ok=True)

        # Error history and per-pattern occurrence counters.
        self.error_history: List[Any] = []
        self.error_patterns: Dict[str, int] = {}

        # Checkpoint management: in-memory cache plus on-disk persistence.
        self.checkpoints: Dict[str, RecoveryCheckpoint] = {}
        self.checkpoint_dir = os.path.join(self.cache_dir, 'checkpoints')
        os.makedirs(self.checkpoint_dir, exist_ok=True)

        # Compatibility shim: some callers expect a ``config`` object exposing
        # ``cache_dir``.
        self.config = type('SimpleConfig', (), {'cache_dir': self.cache_dir})()

        # BUG FIX: everything below previously sat after the ``return`` inside
        # ``get_errors()`` and was dead code, so a fresh manager had no
        # strategies, lock, retry policy or stats — ``handle_error``,
        # ``with_retry`` and ``create_checkpoint`` crashed with
        # AttributeError. It now runs as part of ``__init__``.

        # Recovery strategy dispatch table (PERMANENT errors have none).
        self.recovery_strategies: Dict[ErrorType, Callable] = {
            ErrorType.TEMPORARY: self._recovery_temporary,
            ErrorType.RESOURCE: self._recovery_resource,
            ErrorType.NETWORK: self._recovery_network,
            ErrorType.VALIDATION: self._recovery_validation,
        }

        # Guards the in-memory checkpoint map.
        self.lock = Lock()

        # Default retry policy.
        self.retry_policy = RetryPolicy()

        # Operational statistics.
        self.stats = {
            'errors_caught': 0,
            'errors_recovered': 0,
            'retries_attempted': 0,
            'retries_succeeded': 0,
            'checkpoints_created': 0,
            'checkpoints_restored': 0
        }

    def record_error(self, error_type: str, message: str):
        """Record a lightweight, string-based error entry.

        Unlike :meth:`handle_error`, this performs no classification or
        recovery; it simply appends a record with ``error_type``, ``message``
        and ``timestamp`` attributes to the history.
        """
        from datetime import datetime
        from types import SimpleNamespace
        self.error_history.append(SimpleNamespace(
            error_type=error_type,
            message=message,
            timestamp=datetime.now(),
        ))

    def get_errors(self) -> List[Any]:
        """Return the (mutable) list of recorded errors."""
        return self.error_history

    def set_retry_policy(self, policy: RetryPolicy):
        """
        Set the default retry policy.

        Args:
            policy: The new retry policy.
        """
        self.retry_policy = policy
        if self.logger:
            self.logger.log_operation("重试策略", f"已更新，最大重试次数: {policy.max_retries}")

    def with_retry(self, policy: RetryPolicy = None):
        """
        Build a retry decorator with exponential backoff.

        Args:
            policy: Retry policy; a fresh default ``RetryPolicy`` when omitted.

        Returns:
            A decorator retrying the wrapped function on the exception
            classes listed in ``policy.retry_on``.
        """
        if policy is None:
            policy = RetryPolicy()

        def decorator(func: Callable[..., T]) -> Callable[..., T]:
            @wraps(func)
            def wrapper(*args, **kwargs) -> T:
                import random  # hoisted out of the retry loop

                last_exception = None
                delay = policy.initial_delay

                # One initial attempt plus up to ``max_retries`` retries.
                for retry in range(policy.max_retries + 1):
                    try:
                        result = func(*args, **kwargs)

                        if retry > 0:
                            self.stats['retries_succeeded'] += 1
                            if self.logger:
                                self.logger.log_operation(
                                    "重试成功",
                                    f"函数 {func.__name__} 在第 {retry} 次重试后成功"
                                )

                        return result

                    except tuple(policy.retry_on) as e:
                        last_exception = e
                        self.stats['errors_caught'] += 1

                        # Keep a record of every failed attempt.
                        error_context = self._create_error_context(e, retry)
                        self.error_history.append(error_context)

                        if retry < policy.max_retries:
                            self.stats['retries_attempted'] += 1

                            # Jitter spreads retries over [0.5, 1.5) * delay.
                            if policy.jitter:
                                actual_delay = delay * (0.5 + random.random())
                            else:
                                actual_delay = delay

                            if self.logger:
                                self.logger.log_warning(
                                    f"重试 {retry + 1}/{policy.max_retries}: "
                                    f"{e.__class__.__name__} - 等待 {actual_delay:.1f}秒"
                                )

                            time.sleep(actual_delay)

                            # Exponential backoff, capped at ``max_delay``.
                            delay = min(delay * policy.exponential_base, policy.max_delay)
                        else:
                            if self.logger:
                                self.logger.log_error(
                                    f"重试失败: {func.__name__} 在 {policy.max_retries} 次重试后仍然失败"
                                )
                            raise

                # Defensive: normally unreachable (loop returns or re-raises).
                raise last_exception

            return wrapper
        return decorator

    def create_checkpoint(self, checkpoint_id: str, state: Dict[str, Any],
                         metadata: Dict[str, Any] = None) -> bool:
        """
        Create a checkpoint and persist it to disk.

        Args:
            checkpoint_id: Checkpoint identifier (used as the file name stem).
            state: State payload; must be picklable.
            metadata: Optional auxiliary information.

        Returns:
            True on success, False on any failure (errors are logged,
            never raised).
        """
        try:
            checkpoint = RecoveryCheckpoint(
                checkpoint_id=checkpoint_id,
                timestamp=time.time(),
                state=state,
                metadata=metadata or {}
            )

            # Cache in memory (under the lock).
            with self.lock:
                self.checkpoints[checkpoint_id] = checkpoint

            # Persist to disk.
            checkpoint_file = os.path.join(
                self.checkpoint_dir,
                f"{checkpoint_id}.checkpoint"
            )

            with open(checkpoint_file, 'wb') as f:
                pickle.dump(checkpoint, f)

            self.stats['checkpoints_created'] += 1

            if self.logger:
                self.logger.log_operation(
                    "创建检查点",
                    f"检查点 {checkpoint_id} 已创建"
                )

            return True

        except Exception as e:
            if self.logger:
                self.logger.log_error(f"创建检查点失败: {e}")
            return False

    def restore_checkpoint(self, checkpoint_id: str) -> Optional[Dict[str, Any]]:
        """
        Restore a checkpoint from memory or disk.

        Args:
            checkpoint_id: Checkpoint identifier.

        Returns:
            The checkpoint's state dict, or None if it does not exist or
            cannot be loaded.
        """
        try:
            # Prefer the in-memory cache.
            if checkpoint_id in self.checkpoints:
                checkpoint = self.checkpoints[checkpoint_id]
            else:
                # Fall back to the on-disk copy.
                checkpoint_file = os.path.join(
                    self.checkpoint_dir,
                    f"{checkpoint_id}.checkpoint"
                )

                if not os.path.exists(checkpoint_file):
                    return None

                # SECURITY NOTE: pickle.load executes arbitrary code when fed
                # untrusted data — checkpoint files must only come from this
                # process's own ``create_checkpoint`` output.
                with open(checkpoint_file, 'rb') as f:
                    checkpoint = pickle.load(f)

                # Cache for subsequent lookups.
                self.checkpoints[checkpoint_id] = checkpoint

            self.stats['checkpoints_restored'] += 1

            if self.logger:
                self.logger.log_operation(
                    "恢复检查点",
                    f"检查点 {checkpoint_id} 已恢复"
                )

            return checkpoint.state

        except Exception as e:
            if self.logger:
                self.logger.log_error(f"恢复检查点失败: {e}")
            return None

    def handle_error(self, exception: Exception,
                    context: Dict[str, Any] = None) -> bool:
        """
        Record an error and attempt to recover from it.

        Args:
            exception: The caught exception.
            context: Optional metadata attached to the error record.

        Returns:
            True if a recovery strategy reported success, else False.
        """
        self.stats['errors_caught'] += 1

        # Record the error.
        error_context = self._create_error_context(exception, 0, context)
        self.error_history.append(error_context)

        # Count occurrences of this error pattern.
        error_pattern = self._analyze_error_pattern(exception)
        self.error_patterns[error_pattern] = self.error_patterns.get(error_pattern, 0) + 1

        # Try the strategy matching the error's classification.
        if error_context.recoverable:
            recovery_strategy = self.recovery_strategies.get(
                error_context.error_type,
                self._recovery_default
            )

            if recovery_strategy(error_context):
                self.stats['errors_recovered'] += 1
                if self.logger:
                    self.logger.log_operation(
                        "错误恢复",
                        f"成功从 {exception.__class__.__name__} 恢复"
                    )
                return True

        # Recovery failed or error not recoverable.
        if self.logger:
            self.logger.log_error(
                f"无法恢复的错误: {exception.__class__.__name__}: {str(exception)}"
            )

        return False

    def _create_error_context(self, exception: Exception,
                            retry_count: int = 0,
                            metadata: Dict[str, Any] = None) -> ErrorContext:
        """
        Build an :class:`ErrorContext` for a caught exception.

        Args:
            exception: The caught exception.
            retry_count: Retries already attempted.
            metadata: Optional caller-supplied context.

        Returns:
            The populated error context; PERMANENT errors are marked
            non-recoverable.
        """
        error_type = self._classify_error(exception)

        return ErrorContext(
            error_type=error_type,
            exception=exception,
            traceback=traceback.format_exc(),
            timestamp=time.time(),
            retry_count=retry_count,
            recoverable=error_type != ErrorType.PERMANENT,
            metadata=metadata or {}
        )

    def _classify_error(self, exception: Exception) -> ErrorType:
        """
        Classify an exception by keyword heuristics on its message/type.

        Args:
            exception: The caught exception.

        Returns:
            The matching :class:`ErrorType`.
        """
        error_str = str(exception).lower()
        exception_type = type(exception).__name__

        # Network-related keywords.
        if any(keyword in error_str for keyword in ['network', 'connection', 'timeout']):
            return ErrorType.NETWORK

        # Resource-related keywords.
        if any(keyword in error_str for keyword in ['memory', 'disk', 'space', 'resource']):
            return ErrorType.RESOURCE

        # Validation-related keywords.
        if any(keyword in error_str for keyword in ['validation', 'invalid', 'format']):
            return ErrorType.VALIDATION

        # File-system errors are treated as transient.
        if exception_type in ['FileNotFoundError', 'PermissionError', 'IOError']:
            return ErrorType.TEMPORARY

        # Default: programming errors are permanent, everything else transient.
        return ErrorType.TEMPORARY if exception_type not in ['ValueError', 'TypeError'] else ErrorType.PERMANENT

    def _analyze_error_pattern(self, exception: Exception) -> str:
        """
        Derive a pattern key for an exception (type + message prefix).

        Args:
            exception: The caught exception.

        Returns:
            The pattern identifier string.
        """
        return f"{type(exception).__name__}_{str(exception)[:50]}"

    def _recovery_temporary(self, error_context: ErrorContext) -> bool:
        """Transient errors: a plain retry suffices."""
        return True

    def _recovery_resource(self, error_context: ErrorContext) -> bool:
        """Resource errors: wait briefly and force a GC pass, then retry."""
        # Give the system a chance to release resources.
        time.sleep(5)

        import gc
        gc.collect()

        return True

    def _recovery_network(self, error_context: ErrorContext) -> bool:
        """Network errors: exponential backoff (capped at 30s), then retry."""
        delay = min(2 ** error_context.retry_count, 30)
        time.sleep(delay)
        return True

    def _recovery_validation(self, error_context: ErrorContext) -> bool:
        """Validation errors: data must be fixed by the caller — not recovered here."""
        return False

    def _recovery_default(self, error_context: ErrorContext) -> bool:
        """Fallback strategy: allow up to three retries."""
        return error_context.retry_count < 3

    def get_error_report(self) -> str:
        """
        Build a human-readable error/recovery report.

        Returns:
            The report as a multi-line string.
        """
        report = []
        report.append("=== 错误恢复报告 ===\n")

        # Statistics section.
        report.append("统计信息:")
        report.append(f"  - 捕获错误: {self.stats['errors_caught']}")
        report.append(f"  - 成功恢复: {self.stats['errors_recovered']}")
        report.append(f"  - 重试尝试: {self.stats['retries_attempted']}")
        report.append(f"  - 重试成功: {self.stats['retries_succeeded']}")
        report.append(f"  - 创建检查点: {self.stats['checkpoints_created']}")
        report.append(f"  - 恢复检查点: {self.stats['checkpoints_restored']}")

        # Most frequent error patterns.
        if self.error_patterns:
            report.append("\n错误模式Top 5:")
            sorted_patterns = sorted(
                self.error_patterns.items(),
                key=lambda x: x[1],
                reverse=True
            )[:5]

            for pattern, count in sorted_patterns:
                report.append(f"  - {pattern}: {count}次")

        # Most recent errors.
        if self.error_history:
            report.append("\n最近错误:")
            for error in self.error_history[-5:]:
                # BUG FIX: the history holds both full ``ErrorContext``
                # records and the lightweight entries appended by
                # ``record_error`` (string ``error_type``, no ``exception``);
                # the report must not crash on either shape.
                etype = error.error_type
                type_name = etype.value if isinstance(etype, ErrorType) else str(etype)
                exc = getattr(error, 'exception', None)
                if exc is not None:
                    detail = exc.__class__.__name__
                else:
                    detail = getattr(error, 'message', '')
                report.append(f"  - {type_name}: {detail}")

        return "\n".join(report)

    def clear_checkpoints(self, older_than: float = None):
        """
        Delete checkpoints from disk and memory.

        Args:
            older_than: Only remove checkpoints older than this many seconds;
                when None, remove all of them.
        """
        current_time = time.time()
        removed = 0

        for checkpoint_file in Path(self.checkpoint_dir).glob("*.checkpoint"):
            if older_than:
                # Keep files that are still young enough.
                stat = checkpoint_file.stat()
                if current_time - stat.st_mtime < older_than:
                    continue

            checkpoint_file.unlink()
            removed += 1

        # Mirror the deletion in the in-memory cache.
        if older_than:
            self.checkpoints = {
                k: v for k, v in self.checkpoints.items()
                if current_time - v.timestamp < older_than
            }
        else:
            self.checkpoints.clear()

        if self.logger:
            self.logger.log_operation(
                "清理检查点",
                f"已清理 {removed} 个检查点"
            )


class CircuitBreaker:
    """Circuit-breaker pattern: temporarily reject calls to a failing service.

    State machine: "closed" (normal operation) -> "open" (calls rejected
    after too many consecutive failures) -> "half_open" (probing whether the
    service has recovered once the timeout elapses) -> back to "closed".
    """

    def __init__(self, failure_threshold: int = 5,
                 recovery_timeout: float = 60.0,
                 logger: ObfuscationLogger = None):
        """
        Set up the breaker.

        Args:
            failure_threshold: Consecutive failures that trip the breaker.
            recovery_timeout: Seconds to wait before probing again.
            logger: Optional logger for state transitions.
        """
        self.failure_threshold = failure_threshold
        self.recovery_timeout = recovery_timeout
        self.logger = logger

        self.failure_count = 0
        self.last_failure_time = 0
        self.state = "closed"  # one of: closed, open, half_open
        self.lock = Lock()

    def call(self, func: Callable[..., T], *args, **kwargs) -> T:
        """
        Invoke *func* through the breaker.

        Args:
            func: Callable to invoke.
            *args: Positional arguments forwarded to *func*.
            **kwargs: Keyword arguments forwarded to *func*.

        Returns:
            Whatever *func* returns.

        Raises:
            Exception: When the breaker is open, or whatever *func* raises.
        """
        self._gate()

        # The function runs outside the lock so slow calls don't serialize.
        try:
            outcome = func(*args, **kwargs)
        except Exception:
            self._note_failure()
            raise

        self._note_success()
        return outcome

    def _gate(self):
        """Reject the call while open; switch to half-open after the timeout."""
        with self.lock:
            if self.state != "open":
                return
            if time.time() - self.last_failure_time > self.recovery_timeout:
                self.state = "half_open"
                if self.logger:
                    self.logger.log_operation("断路器", "进入半开状态")
            else:
                raise Exception("断路器开启，服务不可用")

    def _note_success(self):
        """A call succeeded: reset the counter; close the breaker if probing."""
        with self.lock:
            if self.state == "half_open":
                self.state = "closed"
                if self.logger:
                    self.logger.log_operation("断路器", "恢复关闭状态")

            self.failure_count = 0

    def _note_failure(self):
        """A call failed: bump the counter and trip the breaker at threshold."""
        with self.lock:
            self.failure_count += 1
            self.last_failure_time = time.time()

            if self.failure_count >= self.failure_threshold:
                self.state = "open"
                if self.logger:
                    self.logger.log_error(f"断路器开启: 连续失败 {self.failure_count} 次")

    def reset(self):
        """Force the breaker back to its initial closed state."""
        with self.lock:
            self.failure_count = 0
            self.state = "closed"
            self.last_failure_time = 0


def with_checkpoint(checkpoint_id: str, recovery_manager: ErrorRecoveryManager):
    """
    Decorator that snapshots a failed call's arguments into a checkpoint.

    On failure, a checkpoint recording the call's args/kwargs and the error
    text is written; on success, any stale checkpoint file for
    *checkpoint_id* is removed. Checkpoint state is never restored
    automatically — the application decides how to use a leftover checkpoint.

    Args:
        checkpoint_id: Checkpoint identifier.
        recovery_manager: Manager that persists checkpoints.

    Returns:
        The decorator.
    """
    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        @wraps(func)
        def wrapper(*args, **kwargs) -> T:
            try:
                outcome = func(*args, **kwargs)
            except Exception as exc:
                # Persist enough context to retry or diagnose later.
                recovery_manager.create_checkpoint(
                    checkpoint_id,
                    {
                        'args': args,
                        'kwargs': kwargs,
                        'error': str(exc)
                    }
                )
                raise

            # Completed successfully: drop any leftover checkpoint file.
            stale = os.path.join(
                recovery_manager.checkpoint_dir,
                f"{checkpoint_id}.checkpoint"
            )
            if os.path.exists(stale):
                os.remove(stale)

            return outcome

        return wrapper
    return decorator