# -*- coding: utf-8 -*-
"""
稳定性增强模块
提供高并发、资源限制、异常恢复等稳定性功能
"""

import time
import threading
import queue
import psutil
import logging
from typing import List, Dict, Any, Optional, Callable
# from concurrent.futures import ThreadPoolExecutor, as_completed  # noqa: F401
from dataclasses import dataclass
import gc
from collections import defaultdict

logger = logging.getLogger(__name__)


@dataclass
class ResourceMonitor:
    """Watches process memory and system CPU from a background daemon thread.

    When memory exceeds ``max_memory_mb`` a garbage collection is forced;
    when CPU exceeds ``max_cpu_percent`` the monitor briefly sleeps to yield.
    """
    max_memory_mb: int = 8192       # process RSS ceiling before forcing gc
    max_cpu_percent: float = 80.0   # system CPU ceiling before backing off
    check_interval: float = 5.0     # seconds between checks

    def __post_init__(self):
        # Internal state, not part of the dataclass fields.
        self._monitoring = False
        self._monitor_thread = None

    def start_monitoring(self):
        """Spawn the monitoring daemon thread."""
        self._monitoring = True
        thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self._monitor_thread = thread
        thread.start()
        logger.info("资源监控已启动")

    def stop_monitoring(self):
        """Ask the monitor loop to exit and wait briefly for it."""
        self._monitoring = False
        thread = self._monitor_thread
        if thread:
            thread.join(timeout=2.0)
        logger.info("资源监控已停止")

    def _check_memory(self):
        """Force a gc pass when process RSS exceeds the configured ceiling."""
        process = psutil.Process()
        memory_mb = process.memory_info().rss / 1024 / 1024
        if memory_mb > self.max_memory_mb:
            logger.warning(f"进程内存使用过高: {memory_mb:.1f}MB > {self.max_memory_mb}MB")
            gc.collect()  # force garbage collection

    def _check_cpu(self):
        """Yield the CPU briefly when system usage exceeds the ceiling."""
        cpu_percent = psutil.cpu_percent(interval=0.1)
        if cpu_percent > self.max_cpu_percent:
            logger.warning(f"CPU使用过高: {cpu_percent:.1f}% > {self.max_cpu_percent}%")
            time.sleep(0.5)  # short pause to let load drop

    def _monitor_loop(self):
        """Main loop: check resources, then sleep, until asked to stop."""
        while self._monitoring:
            try:
                self._check_memory()
                self._check_cpu()
            except Exception as e:
                # Keep the monitor alive no matter what a check raises.
                logger.error(f"资源监控异常: {e}")
            time.sleep(self.check_interval)


class CircuitBreaker:
    """Circuit breaker: fail fast after repeated errors, then probe recovery.

    States: CLOSED (normal), OPEN (rejecting calls), HALF_OPEN (one probe
    call allowed once ``recovery_timeout`` seconds have elapsed).

    Fix: the protected function is now invoked OUTSIDE the lock.  The
    previous version held ``self._lock`` across ``func(...)``, which
    serialized every concurrent caller and could deadlock if ``func``
    itself went through this breaker; the lock now guards only the
    state transitions.
    """

    def __init__(self, failure_threshold: int = 5, recovery_timeout: int = 60):
        self.failure_threshold = failure_threshold  # consecutive failures before opening
        self.recovery_timeout = recovery_timeout    # seconds OPEN before a half-open probe
        self.failure_count = 0
        self.last_failure_time = None
        self.state = 'CLOSED'  # CLOSED, OPEN, HALF_OPEN
        self._lock = threading.Lock()

    def call(self, func: Callable, *args, **kwargs):
        """Invoke *func* through the breaker and return its result.

        Raises:
            Exception: while the breaker is OPEN and the recovery timeout
                has not yet elapsed; otherwise re-raises whatever *func*
                raises (after recording the failure).
        """
        # Phase 1: admission check (locked, short).
        with self._lock:
            if self.state == 'OPEN':
                if time.time() - self.last_failure_time > self.recovery_timeout:
                    self.state = 'HALF_OPEN'
                    logger.info("熔断器进入半开状态")
                else:
                    # NOTE(review): a dedicated exception subclass would let
                    # callers distinguish rejection from genuine failures.
                    raise Exception("熔断器开启，拒绝请求")

        # Phase 2: run the protected call WITHOUT holding the lock.
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            # Phase 3a: record the failure (locked, short).
            with self._lock:
                self.failure_count += 1
                self.last_failure_time = time.time()
                if self.failure_count >= self.failure_threshold:
                    self.state = 'OPEN'
                    logger.error(f"熔断器开启，失败次数: {self.failure_count}")
            raise e

        # Phase 3b: a successful half-open probe closes the breaker.
        with self._lock:
            if self.state == 'HALF_OPEN':
                self.state = 'CLOSED'
                self.failure_count = 0
                logger.info("熔断器恢复正常")
        return result


class RetryHandler:
    """Retries a callable with exponential backoff between attempts."""

    def __init__(self, max_retries: int = 3, backoff_factor: float = 2.0):
        self.max_retries = max_retries        # extra attempts after the first
        self.backoff_factor = backoff_factor  # wait = backoff_factor ** attempt

    def retry(self, func: Callable, *args, **kwargs):
        """Call *func*, retrying up to ``max_retries`` times.

        Returns the first successful result; re-raises the final
        exception once every attempt has failed.
        """
        last_exception = None
        total_attempts = self.max_retries + 1

        for attempt in range(total_attempts):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                last_exception = e
                is_final = attempt == self.max_retries
                if is_final:
                    logger.error(f"重试{self.max_retries}次后仍然失败: {e}")
                else:
                    # Exponential backoff before the next attempt.
                    wait_time = self.backoff_factor ** attempt
                    logger.warning(f"第{attempt + 1}次尝试失败，{wait_time}秒后重试: {e}")
                    time.sleep(wait_time)

        raise last_exception


class ConcurrentProcessor:
    """Runs submitted callables on a pool of daemon worker threads.

    Tasks flow through a bounded task queue; every outcome is pushed onto
    the result queue as a ``('success', value)`` or ``('error', message)``
    tuple.  Each task is executed through the shared circuit breaker with
    retries on top.
    """

    def __init__(self, max_workers: int = 4, queue_size: int = 1000):
        self.max_workers = max_workers
        self.queue_size = queue_size
        self.task_queue = queue.Queue(maxsize=queue_size)
        self.result_queue = queue.Queue()
        self.workers = []
        self.running = False
        self.circuit_breaker = CircuitBreaker()
        self.retry_handler = RetryHandler()

    def start(self):
        """Spawn the worker threads."""
        self.running = True
        for worker_id in range(self.max_workers):
            thread = threading.Thread(target=self._worker_loop, args=(worker_id,), daemon=True)
            thread.start()
            self.workers.append(thread)
        logger.info(f"并发处理器启动，工作线程数: {self.max_workers}")

    def stop(self):
        """Signal the workers to exit and wait briefly for each of them."""
        self.running = False
        # One poison pill (None) per worker; a full queue is tolerated
        # because workers also observe the running flag.
        for _ in range(self.max_workers):
            try:
                self.task_queue.put(None, timeout=1.0)
            except queue.Full:
                pass

        for worker in self.workers:
            worker.join(timeout=2.0)
        logger.info("并发处理器已停止")

    def submit_task(self, func: Callable, *args, **kwargs) -> bool:
        """Queue a call; returns False (and logs) when the queue stays full."""
        try:
            self.task_queue.put((func, args, kwargs), timeout=1.0)
        except queue.Full:
            logger.warning("任务队列已满，无法提交任务")
            return False
        return True

    def get_result(self, timeout: float = 1.0) -> Optional[Any]:
        """Pop one (status, payload) tuple, or None if none arrives in time."""
        try:
            return self.result_queue.get(timeout=timeout)
        except queue.Empty:
            return None

    def _worker_loop(self, worker_id: int):
        """Worker thread body: consume tasks until stopped or poisoned."""
        logger.info(f"工作线程 {worker_id} 启动")

        while self.running:
            try:
                task = self.task_queue.get(timeout=1.0)
            except queue.Empty:
                continue  # periodically re-check the running flag

            if task is None:  # poison pill from stop()
                break

            try:
                func, args, kwargs = task
                # Every attempt is routed through the shared circuit breaker.
                outcome = self.retry_handler.retry(
                    lambda: self.circuit_breaker.call(func, *args, **kwargs)
                )
                self.result_queue.put(('success', outcome))
            except Exception as e:
                logger.error(f"工作线程 {worker_id} 处理任务失败: {e}")
                self.result_queue.put(('error', str(e)))

        logger.info(f"工作线程 {worker_id} 结束")


class StabilityManager:
    """Facade combining resource monitoring, a concurrent processor, a
    circuit breaker and a retry handler into one stability layer.

    Typical usage: ``start()`` once, then ``process_batch`` /
    ``execute_with_stability``, and finally ``stop()``.
    """

    def __init__(self):
        self.resource_monitor = ResourceMonitor()
        self.concurrent_processor = ConcurrentProcessor()
        self.circuit_breaker = CircuitBreaker()
        self.retry_handler = RetryHandler()
        self.active = False
        self._health_checks = {}                 # name -> check callable
        self._error_counts = defaultdict(int)    # name -> consecutive failures
        self._last_health_check = time.time()

    def start(self):
        """Start resource monitoring and the worker threads."""
        self.resource_monitor.start_monitoring()
        self.concurrent_processor.start()
        self.active = True
        logger.info("稳定性管理器已启动")

    def stop(self):
        """Stop workers and monitoring."""
        self.active = False
        self.resource_monitor.stop_monitoring()
        self.concurrent_processor.stop()
        logger.info("稳定性管理器已停止")

    def process_batch(self, items: List[Any], processor_func: Callable,
                      batch_size: int = 100) -> List[Any]:
        """Process *items* in batches of *batch_size* via the concurrent
        processor, throttling when memory approaches the configured limit.

        Returns the flattened list of successful batch results; failed or
        timed-out batches are logged and skipped.

        Raises:
            RuntimeError: if the manager has not been started.
        """
        if not self.active:
            raise RuntimeError("稳定性管理器未启动")

        results: List[Any] = []
        total_batches = (len(items) + batch_size - 1) // batch_size

        for i in range(0, len(items), batch_size):
            batch = items[i:i + batch_size]
            batch_num = i // batch_size + 1

            logger.info(f"处理批次 {batch_num}/{total_batches}, 大小: {len(batch)}")

            if not self.concurrent_processor.submit_task(processor_func, batch):
                logger.error(f"批次 {batch_num} 提交失败")
                continue

            # Wait synchronously for this batch's outcome (None on timeout).
            result = self.concurrent_processor.get_result(timeout=30.0)
            if result is not None:
                status, data = result
                if status == 'success':
                    results.extend(data if isinstance(data, list) else [data])
                else:
                    logger.error(f"批次 {batch_num} 处理失败: {data}")

            self._throttle_if_memory_high()

        return results

    def _throttle_if_memory_high(self):
        """Pause briefly and force gc when process RSS nears the limit."""
        if not hasattr(psutil, 'Process'):
            return
        try:
            process = psutil.Process()
            memory_mb = process.memory_info().rss / 1024 / 1024
            if memory_mb > self.resource_monitor.max_memory_mb * 0.9:
                logger.info(f"进程内存使用接近限制: {memory_mb:.1f}MB，暂停处理")
                time.sleep(1.0)
                gc.collect()
        except Exception:
            pass  # best-effort: monitoring must never break batch processing

    def register_health_check(self, name: str, check_func: Callable) -> None:
        """Register *check_func* under *name*; it should return a truthy
        value when the checked subsystem is healthy."""
        self._health_checks[name] = check_func
        logger.info(f"注册健康检查: {name}")

    def run_health_checks(self) -> Dict[str, bool]:
        """Run all registered health checks, rate-limited to once per 5s.

        Returns a name -> passed mapping; an empty dict when rate-limited.
        """
        results: Dict[str, bool] = {}
        current_time = time.time()

        # Throttle: skip entirely if the last run was under 5 seconds ago.
        if current_time - self._last_health_check < 5.0:
            return results

        self._last_health_check = current_time

        for name, check_func in self._health_checks.items():
            try:
                result = self.retry_handler.retry(check_func)
                results[name] = bool(result)
                if not result:
                    self._error_counts[name] += 1
                    logger.warning(f"健康检查失败: {name}")
                else:
                    self._error_counts[name] = 0  # reset streak on success
            except Exception as e:
                results[name] = False
                self._error_counts[name] += 1
                logger.error(f"健康检查异常 {name}: {e}")

        return results

    def execute_with_stability(self, func: Callable, *args, **kwargs):
        """Execute *func* behind the circuit breaker, with the retry
        handler applied inside a single breaker invocation."""
        def wrapped_func():
            return func(*args, **kwargs)

        return self.circuit_breaker.call(
            lambda: self.retry_handler.retry(wrapped_func)
        )

    def get_stability_metrics(self) -> Dict[str, Any]:
        """Snapshot of current stability state for dashboards/logging."""
        return {
            'active': self.active,
            'resource_status': self.resource_monitor.get_status() if hasattr(self.resource_monitor, 'get_status') else {},
            # Fix: `state` is a plain string ('CLOSED'/'OPEN'/'HALF_OPEN'),
            # not an enum -- `.state.name` raised AttributeError here.
            'circuit_breaker_state': self.circuit_breaker.state,
            'error_counts': dict(self._error_counts),
            'health_checks': list(self._health_checks.keys()),
            'last_health_check': self._last_health_check
        }


# Module-level singleton: importers share this one StabilityManager instance.
stability_manager = StabilityManager()
