"""
负载均衡和水平扩展服务
"""

import asyncio
import time
import json
import hashlib
from typing import Dict, List, Any, Optional, Callable, Tuple
from dataclasses import dataclass, field
from enum import Enum
from collections import defaultdict, deque
import statistics
import random
from datetime import datetime, timedelta

from ...core.exceptions import BusinessLogicError, ValidationError
from ...core.logging import get_logger
from ...core.config import settings

logger = get_logger(__name__)


class LoadBalancingStrategy(Enum):
    """Node-selection strategies supported by LoadBalancer.select_node."""
    ROUND_ROBIN = "round_robin"        # cycle through healthy nodes in order
    WEIGHTED_ROUND_ROBIN = "weighted_round_robin"  # round robin biased by node.weight
    LEAST_CONNECTIONS = "least_connections"  # node with fewest current_connections
    LEAST_RESPONSE_TIME = "least_response_time"  # node with lowest response_time_ms
    HASH_BASED = "hash_based"          # stable choice keyed on a request-key hash
    RANDOM = "random"                  # uniform random choice


class NodeStatus(Enum):
    """Lifecycle/health state of a service node."""
    HEALTHY = "healthy"          # passing health checks, eligible for traffic
    UNHEALTHY = "unhealthy"      # failing health checks
    DEGRADED = "degraded"        # set externally; not toggled by HealthChecker
    MAINTENANCE = "maintenance"  # set externally; not toggled by HealthChecker
    DRAINING = "draining"        # set externally; not toggled by HealthChecker


class ScalingDirection(Enum):
    """Outcome of a scaling evaluation."""
    UP = "up"           # add nodes
    DOWN = "down"       # remove nodes
    MAINTAIN = "maintain"  # keep current node count


@dataclass
class NodeMetrics:
    """Runtime metrics reported for a single service node."""
    node_id: str
    cpu_usage: float = 0.0
    memory_usage: float = 0.0
    active_connections: int = 0
    response_time_ms: float = 0.0
    request_rate: float = 0.0
    error_rate: float = 0.0
    last_heartbeat: float = field(default_factory=time.time)  # epoch seconds
    uptime_seconds: float = 0.0

    # Field names in serialization order for to_dict().
    _EXPORT_FIELDS = (
        "node_id", "cpu_usage", "memory_usage", "active_connections",
        "response_time_ms", "request_rate", "error_rate",
        "last_heartbeat", "uptime_seconds",
    )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize every metric field into a plain dictionary."""
        return {name: getattr(self, name) for name in self._EXPORT_FIELDS}


@dataclass
class ServiceNode:
    """A single backend node managed by the load balancer."""
    node_id: str
    host: str
    port: int
    weight: float = 1.0
    max_connections: int = 1000
    health_check_path: str = "/health"
    health_check_interval: float = 30.0
    health_check_timeout: float = 5.0

    # Runtime state
    status: NodeStatus = NodeStatus.HEALTHY
    current_connections: int = 0
    total_requests: int = 0
    failed_requests: int = 0

    # Latest reported metrics; node_id is bound in __post_init__.
    metrics: NodeMetrics = field(default_factory=lambda: NodeMetrics(node_id=""))

    # Timestamps (epoch seconds)
    created_at: float = field(default_factory=time.time)
    last_health_check: float = field(default_factory=time.time)

    def __post_init__(self):
        # The default metrics object was created with an empty id; fix it up.
        self.metrics.node_id = self.node_id

    def get_load_score(self) -> float:
        """Return a weighted load score; higher means more heavily loaded."""
        score = self.metrics.cpu_usage * 0.4
        score = score + self.metrics.memory_usage * 0.3
        score = score + (self.current_connections / self.max_connections) * 0.2
        score = score + min(1.0, self.metrics.response_time_ms / 1000) * 0.1
        return score

    def is_healthy(self) -> bool:
        """True when status is HEALTHY and resource usage is within limits."""
        if self.status != NodeStatus.HEALTHY:
            return False
        if self.metrics.cpu_usage >= 90:
            return False
        if self.metrics.memory_usage >= 90:
            return False
        return self.current_connections < self.max_connections

    def can_accept_connection(self) -> bool:
        """True when a new connection may be routed to this node."""
        return self.is_healthy() and self.current_connections < self.max_connections


@dataclass
class ScalingPolicy:
    """Configuration governing automatic scale-up/scale-down decisions."""
    name: str
    min_nodes: int = 1
    max_nodes: int = 10
    scale_up_threshold: float = 80.0    # CPU-usage threshold for scaling up
    scale_down_threshold: float = 20.0  # CPU-usage threshold for scaling down
    scale_up_cooldown: float = 300.0    # seconds between scale-up actions
    scale_down_cooldown: float = 600.0  # seconds between scale-down actions
    evaluation_interval: float = 60.0   # seconds between evaluations

    # Scale-up triggers (exceeding any one suffices; see AutoScaler.evaluate_scaling)
    scale_up_conditions: Dict[str, float] = field(default_factory=lambda: dict(
        cpu_usage=80.0,
        memory_usage=85.0,
        avg_connections=0.8,
    ))

    # Scale-down thresholds (all must be undershot; see AutoScaler.evaluate_scaling)
    scale_down_conditions: Dict[str, float] = field(default_factory=lambda: dict(
        cpu_usage=20.0,
        memory_usage=30.0,
        avg_connections=0.2,
    ))


class HealthChecker:
    """Periodically probes service nodes over HTTP and reports status changes."""

    def __init__(self):
        # One background task per node, keyed by node_id.
        self.check_tasks: Dict[str, asyncio.Task] = {}
        self.http_client = None  # reserved; sessions are created per-check below

        # Callbacks invoked as (node_id, is_healthy, message) on status change.
        self.health_callbacks: List[Callable[[str, bool, str], None]] = []

        logger.info("HealthChecker initialized")

    async def start_health_check(self, node: ServiceNode):
        """Start the background health-check loop for *node* (idempotent)."""
        if node.node_id in self.check_tasks:
            return

        task = asyncio.create_task(self._health_check_loop(node))
        self.check_tasks[node.node_id] = task

        logger.info(f"Started health check for node {node.node_id}")

    async def stop_health_check(self, node_id: str):
        """Stop the health-check loop for *node_id* and wait for it to exit.

        Fix: the task is now awaited after cancellation. Previously it was
        only cancelled, so a still-running iteration could race a subsequent
        start_health_check for the same node.
        """
        task = self.check_tasks.pop(node_id, None)
        if task is None:
            return

        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass

        logger.info(f"Stopped health check for node {node_id}")

    async def perform_health_check(self, node: ServiceNode) -> Tuple[bool, str]:
        """Probe the node's health endpoint once.

        Returns:
            (True, "OK") on HTTP 200, otherwise (False, reason).
        """
        try:
            # Build the health-check URL from the node's configuration.
            url = f"http://{node.host}:{node.port}{node.health_check_path}"

            # aiohttp is imported lazily so the module imports without it.
            import aiohttp
            timeout = aiohttp.ClientTimeout(total=node.health_check_timeout)

            async with aiohttp.ClientSession(timeout=timeout) as session:
                start_time = time.time()
                async with session.get(url) as response:
                    response_time = (time.time() - start_time) * 1000

                    if response.status == 200:
                        # Record the observed latency on success only.
                        node.metrics.response_time_ms = response_time
                        return True, "OK"
                    return False, f"HTTP {response.status}"

        except asyncio.TimeoutError:
            return False, "Timeout"
        except Exception as e:
            return False, str(e)

    async def _health_check_loop(self, node: ServiceNode):
        """Run perform_health_check forever at node.health_check_interval."""
        while True:
            try:
                is_healthy, message = await self.perform_health_check(node)

                # Flip only between HEALTHY and UNHEALTHY; other states
                # (MAINTENANCE, DRAINING, ...) are managed elsewhere.
                old_status = node.status
                if is_healthy:
                    if node.status == NodeStatus.UNHEALTHY:
                        node.status = NodeStatus.HEALTHY
                    # last_health_check records the last *successful* check.
                    node.last_health_check = time.time()
                else:
                    if node.status == NodeStatus.HEALTHY:
                        node.status = NodeStatus.UNHEALTHY

                # Heartbeat advances on every attempt, success or failure.
                node.metrics.last_heartbeat = time.time()

                # Notify listeners only on an actual state transition.
                if old_status != node.status:
                    await self._notify_health_change(node.node_id, is_healthy, message)

                await asyncio.sleep(node.health_check_interval)

            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Health check error for node {node.node_id}: {e}")
                await asyncio.sleep(10)

    async def _notify_health_change(self, node_id: str, is_healthy: bool, message: str):
        """Invoke every registered callback; callback errors are logged, not raised."""
        for callback in self.health_callbacks:
            try:
                if asyncio.iscoroutinefunction(callback):
                    await callback(node_id, is_healthy, message)
                else:
                    callback(node_id, is_healthy, message)
            except Exception as e:
                logger.error(f"Health callback error: {e}")

    def add_health_callback(self, callback: Callable[[str, bool, str], None]):
        """Register a (node_id, is_healthy, message) callback."""
        self.health_callbacks.append(callback)


class LoadBalancer:
    """Distributes requests across ServiceNodes using a pluggable strategy.

    Owns a HealthChecker and keeps aggregate request statistics.
    NOTE(review): node.current_connections is read by the least-connections
    strategy and by can_accept_connection but is never incremented or
    decremented anywhere in this class — presumably callers maintain it;
    confirm before relying on connection-based strategies.
    """
    
    def __init__(self, strategy: LoadBalancingStrategy = LoadBalancingStrategy.LEAST_CONNECTIONS):
        self.strategy = strategy
        self.nodes: Dict[str, ServiceNode] = {}
        self.health_checker = HealthChecker()
        
        # Round-robin cursor; grows without bound and is used modulo node count.
        self.round_robin_index = 0
        
        # Weighted round-robin state.
        # NOTE(review): weighted_round_robin_index is never read — dead state.
        self.weighted_round_robin_index = 0
        self.weighted_round_robin_weight = 0
        
        # Callbacks invoked as (node_id, action) on membership/health changes.
        self.node_callbacks: List[Callable[[str, str], None]] = []
        
        # Aggregate counters; "distribution" maps node_id -> requests routed.
        self.stats = {
            "total_requests": 0,
            "successful_requests": 0,
            "failed_requests": 0,
            "distribution": defaultdict(int)
        }
        
        # Route health transitions from the checker into _on_health_change.
        self.health_checker.add_health_callback(self._on_health_change)
        
        logger.info(f"LoadBalancer initialized with strategy {strategy.value}")
    
    async def add_node(self, node: ServiceNode) -> bool:
        """Register *node* and start its health check; False if already present."""
        if node.node_id in self.nodes:
            return False
        
        self.nodes[node.node_id] = node
        
        # Start the periodic health check for the new node.
        await self.health_checker.start_health_check(node)
        
        # Notify membership listeners.
        await self._notify_node_change(node.node_id, "added")
        
        logger.info(f"Added node {node.node_id} ({node.host}:{node.port})")
        return True
    
    async def remove_node(self, node_id: str) -> bool:
        """Unregister *node_id* and stop its health check; False if unknown."""
        if node_id not in self.nodes:
            return False
        
        node = self.nodes[node_id]
        
        # Stop the node's health check before dropping it.
        await self.health_checker.stop_health_check(node_id)
        
        # Remove from the registry.
        del self.nodes[node_id]
        
        # Notify membership listeners.
        await self._notify_node_change(node_id, "removed")
        
        logger.info(f"Removed node {node_id}")
        return True
    
    async def select_node(self, request_key: Optional[str] = None) -> Optional[ServiceNode]:
        """Pick a node per the configured strategy, or None if none can accept.

        Args:
            request_key: only used by the HASH_BASED strategy for affinity.
        """
        # Only consider nodes that can accept a new connection.
        healthy_nodes = [node for node in self.nodes.values() if node.can_accept_connection()]
        
        if not healthy_nodes:
            logger.warning("No healthy nodes available")
            return None
        
        # Dispatch on the configured strategy.
        if self.strategy == LoadBalancingStrategy.ROUND_ROBIN:
            node = self._select_round_robin(healthy_nodes)
        elif self.strategy == LoadBalancingStrategy.WEIGHTED_ROUND_ROBIN:
            node = self._select_weighted_round_robin(healthy_nodes)
        elif self.strategy == LoadBalancingStrategy.LEAST_CONNECTIONS:
            node = self._select_least_connections(healthy_nodes)
        elif self.strategy == LoadBalancingStrategy.LEAST_RESPONSE_TIME:
            node = self._select_least_response_time(healthy_nodes)
        elif self.strategy == LoadBalancingStrategy.HASH_BASED:
            node = self._select_hash_based(healthy_nodes, request_key)
        elif self.strategy == LoadBalancingStrategy.RANDOM:
            node = self._select_random(healthy_nodes)
        else:
            node = healthy_nodes[0]
        
        if node:
            # Count the routing decision (success/failure is reported later
            # via request_served).
            self.stats["total_requests"] += 1
            self.stats["distribution"][node.node_id] += 1
        
        return node
    
    def _select_round_robin(self, nodes: List[ServiceNode]) -> ServiceNode:
        """Cycle through *nodes* in order using a persistent cursor."""
        node = nodes[self.round_robin_index % len(nodes)]
        self.round_robin_index += 1
        return node
    
    def _select_weighted_round_robin(self, nodes: List[ServiceNode]) -> ServiceNode:
        """Weight-biased round robin.

        Maps a monotonically increasing counter into [0, total_weight) and
        picks the node whose cumulative-weight bucket contains it — a
        cumulative-threshold scheme, not classic smooth WRR.
        NOTE(review): raises ZeroDivisionError if every node weight is 0.
        """
        # Sum of all node weights defines the bucket range.
        total_weight = sum(node.weight for node in nodes)
        
        # Position of the counter within the current weight cycle.
        current_weight = self.weighted_round_robin_weight % total_weight
        cumulative_weight = 0
        
        for node in nodes:
            cumulative_weight += node.weight
            if current_weight < cumulative_weight:
                self.weighted_round_robin_weight += 1
                return node
        
        # Fallback for float edge cases: first node.
        self.weighted_round_robin_weight += 1
        return nodes[0]
    
    def _select_least_connections(self, nodes: List[ServiceNode]) -> ServiceNode:
        """Pick the node with the fewest current connections."""
        return min(nodes, key=lambda node: node.current_connections)
    
    def _select_least_response_time(self, nodes: List[ServiceNode]) -> ServiceNode:
        """Pick the node with the lowest recorded response time."""
        return min(nodes, key=lambda node: node.metrics.response_time_ms)
    
    def _select_hash_based(self, nodes: List[ServiceNode], key: Optional[str]) -> ServiceNode:
        """Pick a node by hashing *key* (affinity while membership is stable).

        MD5 is used only for bucketing, not for security. With no key a
        timestamp is hashed, which degenerates to pseudo-random choice.
        """
        if not key:
            key = str(time.time())
        
        hash_value = int(hashlib.md5(key.encode()).hexdigest(), 16)
        index = hash_value % len(nodes)
        return nodes[index]
    
    def _select_random(self, nodes: List[ServiceNode]) -> ServiceNode:
        """Pick a node uniformly at random."""
        return random.choice(nodes)
    
    async def request_served(self, node_id: str, success: bool, response_time_ms: float = 0.0):
        """Record the outcome of a request previously routed to *node_id*.

        NOTE(review): node.total_requests is incremented only on success while
        failures go to node.failed_requests — the two are disjoint counters,
        not total/subset; confirm that is intended.
        """
        if node_id not in self.nodes:
            return
        
        node = self.nodes[node_id]
        
        if success:
            self.stats["successful_requests"] += 1
            node.total_requests += 1
        else:
            self.stats["failed_requests"] += 1
            node.failed_requests += 1
        
        # Fold the observed latency into the node's response time.
        if response_time_ms > 0:
            # Exponential moving average with smoothing factor 0.1.
            alpha = 0.1
            if node.metrics.response_time_ms == 0:
                node.metrics.response_time_ms = response_time_ms
            else:
                node.metrics.response_time_ms = (
                    alpha * response_time_ms + 
                    (1 - alpha) * node.metrics.response_time_ms
                )
    
    async def update_node_metrics(self, node_id: str, metrics: NodeMetrics):
        """Replace the metrics object for *node_id* (no-op if unknown)."""
        if node_id not in self.nodes:
            return
        
        node = self.nodes[node_id]
        node.metrics = metrics
    
    def get_node(self, node_id: str) -> Optional[ServiceNode]:
        """Return the node for *node_id*, or None."""
        return self.nodes.get(node_id)
    
    def get_all_nodes(self) -> List[ServiceNode]:
        """Return all registered nodes."""
        return list(self.nodes.values())
    
    def get_healthy_nodes(self) -> List[ServiceNode]:
        """Return only the nodes currently reporting healthy."""
        return [node for node in self.nodes.values() if node.is_healthy()]
    
    def get_load_balancer_stats(self) -> Dict[str, Any]:
        """Return a snapshot of balancer-wide statistics."""
        healthy_count = len(self.get_healthy_nodes())
        total_count = len(self.nodes)
        
        return {
            "strategy": self.strategy.value,
            "total_nodes": total_count,
            "healthy_nodes": healthy_count,
            "unhealthy_nodes": total_count - healthy_count,
            "total_requests": self.stats["total_requests"],
            "successful_requests": self.stats["successful_requests"],
            "failed_requests": self.stats["failed_requests"],
            # max(1, ...) avoids division by zero before any traffic.
            "success_rate": (
                self.stats["successful_requests"] / max(1, self.stats["total_requests"])
            ) * 100,
            "request_distribution": dict(self.stats["distribution"])
        }
    
    def add_node_callback(self, callback: Callable[[str, str], None]):
        """Register a (node_id, action) membership-change callback."""
        self.node_callbacks.append(callback)
    
    async def _notify_node_change(self, node_id: str, action: str):
        """Invoke every node callback; callback errors are logged, not raised."""
        for callback in self.node_callbacks:
            try:
                if asyncio.iscoroutinefunction(callback):
                    await callback(node_id, action)
                else:
                    callback(node_id, action)
            except Exception as e:
                logger.error(f"Node callback error: {e}")
    
    async def _on_health_change(self, node_id: str, is_healthy: bool, message: str):
        """HealthChecker callback: mirror HEALTHY/UNHEALTHY transitions.

        NOTE(review): HealthChecker._health_check_loop already flips the
        node's status before invoking this callback, so the transition here
        is usually a no-op re-application; the change notification below
        still fires based on this method's own old/new comparison.
        """
        node = self.nodes.get(node_id)
        if node:
            old_status = node.status
            if is_healthy and node.status == NodeStatus.UNHEALTHY:
                node.status = NodeStatus.HEALTHY
            elif not is_healthy and node.status == NodeStatus.HEALTHY:
                node.status = NodeStatus.UNHEALTHY
            
            if old_status != node.status:
                await self._notify_node_change(node_id, f"health_changed_{node.status.value}")
                logger.info(f"Node {node_id} health changed to {node.status.value}: {message}")


class AutoScaler:
    """Evaluates ScalingPolicy rules and adds/removes LoadBalancer nodes."""
    
    def __init__(self, load_balancer: LoadBalancer):
        self.load_balancer = load_balancer
        self.scaling_policies: Dict[str, ScalingPolicy] = {}
        # One background evaluation task per started policy.
        self.scaling_tasks: Dict[str, asyncio.Task] = {}
        
        # Ring of recent scaling events (capped at 1000 entries).
        self.scaling_history: List[Dict[str, Any]] = []
        
        # Callbacks invoked as (policy_name, direction, node_count).
        self.scaling_callbacks: List[Callable[[str, ScalingDirection, int], None]] = []
        
        # Factory that builds a fresh ServiceNode for scale-up; must be set
        # via set_node_factory before scale_up is called.
        self.node_factory: Optional[Callable] = None
        
        logger.info("AutoScaler initialized")
    
    def add_scaling_policy(self, name: str, policy: ScalingPolicy):
        """Register (or replace) a scaling policy under *name*."""
        self.scaling_policies[name] = policy
        logger.info(f"Added scaling policy: {name}")
    
    def set_node_factory(self, factory: Callable[[], ServiceNode]):
        """Set the zero-argument factory used to create nodes on scale-up."""
        self.node_factory = factory
    
    async def start_scaling(self, policy_name: str):
        """Start the background evaluation loop for *policy_name* (idempotent).

        Raises:
            ValidationError: if the policy is not registered.
        """
        if policy_name not in self.scaling_policies:
            raise ValidationError(f"Scaling policy {policy_name} not found")
        
        if policy_name in self.scaling_tasks:
            return
        
        policy = self.scaling_policies[policy_name]
        task = asyncio.create_task(self._scaling_loop(policy_name, policy))
        self.scaling_tasks[policy_name] = task
        
        logger.info(f"Started auto scaling for policy {policy_name}")
    
    async def stop_scaling(self, policy_name: str):
        """Cancel the evaluation loop for *policy_name* (no-op if not running)."""
        if policy_name in self.scaling_tasks:
            task = self.scaling_tasks[policy_name]
            task.cancel()
            del self.scaling_tasks[policy_name]
            
            logger.info(f"Stopped auto scaling for policy {policy_name}")
    
    async def evaluate_scaling(self, policy_name: str) -> Optional[ScalingDirection]:
        """Decide whether to scale up, down, or hold for *policy_name*.

        NOTE(review): raises KeyError if *policy_name* is unknown — callers
        inside this class guarantee existence; external callers should check.
        """
        policy = self.scaling_policies[policy_name]
        nodes = self.load_balancer.get_healthy_nodes()
        current_node_count = len(nodes)
        
        # No healthy capacity at all: always try to scale up.
        if current_node_count == 0:
            return ScalingDirection.UP
        
        # Average the key metrics across healthy nodes.
        avg_cpu = statistics.mean([node.metrics.cpu_usage for node in nodes])
        avg_memory = statistics.mean([node.metrics.memory_usage for node in nodes])
        avg_connections = statistics.mean([node.current_connections / node.max_connections for node in nodes])
        
        # Scale up when ANY threshold is exceeded.
        scale_up_met = any([
            avg_cpu > policy.scale_up_conditions["cpu_usage"],
            avg_memory > policy.scale_up_conditions["memory_usage"],
            avg_connections > policy.scale_up_conditions["avg_connections"]
        ])
        
        # Scale down only when ALL metrics are low and we are above min_nodes.
        scale_down_met = all([
            avg_cpu < policy.scale_down_conditions["cpu_usage"],
            avg_memory < policy.scale_down_conditions["memory_usage"],
            avg_connections < policy.scale_down_conditions["avg_connections"]
        ]) and current_node_count > policy.min_nodes
        
        if scale_up_met and current_node_count < policy.max_nodes:
            return ScalingDirection.UP
        elif scale_down_met and current_node_count > policy.min_nodes:
            return ScalingDirection.DOWN
        else:
            return ScalingDirection.MAINTAIN
    
    async def scale_up(self, policy_name: str, count: int = 1) -> int:
        """Add up to *count* nodes (capped at policy.max_nodes).

        Returns:
            Number of nodes actually added.
        Raises:
            ValidationError: if no node factory is configured.
        """
        if not self.node_factory:
            raise ValidationError("Node factory not configured")
        
        added_count = 0
        policy = self.scaling_policies[policy_name]
        # Snapshot of healthy nodes; cap comparisons use this snapshot length.
        current_nodes = self.load_balancer.get_healthy_nodes()
        
        for _ in range(count):
            if len(current_nodes) + added_count >= policy.max_nodes:
                break
            
            try:
                # Build and register a new node.
                node = self.node_factory()
                success = await self.load_balancer.add_node(node)
                
                if success:
                    added_count += 1
                    await self._record_scaling_event(
                        policy_name, ScalingDirection.UP, node.node_id
                    )
                    logger.info(f"Scaled up: added node {node.node_id}")
                
            except Exception as e:
                logger.error(f"Failed to add node during scale up: {e}")
        
        if added_count > 0:
            await self._notify_scaling(policy_name, ScalingDirection.UP, added_count)
        
        return added_count
    
    async def scale_down(self, policy_name: str, count: int = 1) -> int:
        """Remove up to *count* nodes, least-connected first, honoring min_nodes.

        Returns:
            Number of nodes actually removed.
        """
        policy = self.scaling_policies[policy_name]
        # Drain the least-connected nodes first to minimize disruption.
        nodes = sorted(
            self.load_balancer.get_healthy_nodes(),
            key=lambda node: node.current_connections
        )
        
        removed_count = 0
        for node in nodes[:count]:
            if len(self.load_balancer.get_healthy_nodes()) - removed_count <= policy.min_nodes:
                break
            
            try:
                success = await self.load_balancer.remove_node(node.node_id)
                if success:
                    removed_count += 1
                    await self._record_scaling_event(
                        policy_name, ScalingDirection.DOWN, node.node_id
                    )
                    logger.info(f"Scaled down: removed node {node.node_id}")
                
            except Exception as e:
                logger.error(f"Failed to remove node during scale down: {e}")
        
        if removed_count > 0:
            await self._notify_scaling(policy_name, ScalingDirection.DOWN, removed_count)
        
        return removed_count
    
    async def _scaling_loop(self, policy_name: str, policy: ScalingPolicy):
        """Evaluate the policy forever, applying per-direction cooldowns."""
        last_scale_up_time = 0
        last_scale_down_time = 0
        
        while True:
            try:
                current_time = time.time()
                
                # Decide what (if anything) needs to change.
                direction = await self.evaluate_scaling(policy_name)
                
                if direction == ScalingDirection.UP:
                    # Honor the scale-up cooldown window.
                    if current_time - last_scale_up_time >= policy.scale_up_cooldown:
                        added = await self.scale_up(policy_name)
                        if added > 0:
                            last_scale_up_time = current_time
                
                elif direction == ScalingDirection.DOWN:
                    # Honor the scale-down cooldown window.
                    if current_time - last_scale_down_time >= policy.scale_down_cooldown:
                        removed = await self.scale_down(policy_name)
                        if removed > 0:
                            last_scale_down_time = current_time
                
                # Wait for the next evaluation.
                await asyncio.sleep(policy.evaluation_interval)
                
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Auto scaling error for policy {policy_name}: {e}")
                await asyncio.sleep(30)
    
    async def _record_scaling_event(self, policy_name: str, direction: ScalingDirection, node_id: str):
        """Append a scaling event to the bounded history list."""
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # the replacement datetime.now(timezone.utc) changes the isoformat
        # output (adds "+00:00"), so it is left as-is here — confirm
        # downstream consumers before migrating.
        event = {
            "timestamp": datetime.utcnow().isoformat(),
            "policy": policy_name,
            "direction": direction.value,
            "node_id": node_id,
            "total_nodes": len(self.load_balancer.get_all_nodes())
        }
        
        self.scaling_history.append(event)
        
        # Cap history at 1000 entries (drop oldest).
        if len(self.scaling_history) > 1000:
            self.scaling_history.pop(0)
    
    async def _notify_scaling(self, policy_name: str, direction: ScalingDirection, count: int):
        """Invoke every scaling callback; errors are logged, not raised."""
        for callback in self.scaling_callbacks:
            try:
                if asyncio.iscoroutinefunction(callback):
                    await callback(policy_name, direction, count)
                else:
                    callback(policy_name, direction, count)
            except Exception as e:
                logger.error(f"Scaling callback error: {e}")
    
    def add_scaling_callback(self, callback: Callable[[str, ScalingDirection, int], None]):
        """Register a (policy_name, direction, count) scaling callback."""
        self.scaling_callbacks.append(callback)
    
    def get_scaling_history(self, limit: int = 100) -> List[Dict[str, Any]]:
        """Return the most recent *limit* scaling events."""
        return self.scaling_history[-limit:]
    
    def get_scaling_stats(self) -> Dict[str, Any]:
        """Return a snapshot of scaler state and node counts."""
        return {
            "active_policies": len(self.scaling_tasks),
            "total_policies": len(self.scaling_policies),
            "recent_events": len(self.scaling_history),
            "current_nodes": len(self.load_balancer.get_all_nodes()),
            "healthy_nodes": len(self.load_balancer.get_healthy_nodes())
        }


# Load balancing manager
class LoadBalancingManager:
    """Registry that owns named LoadBalancer/AutoScaler pairs."""

    def __init__(self):
        self.load_balancers: Dict[str, LoadBalancer] = {}
        self.auto_scalers: Dict[str, AutoScaler] = {}

        logger.info("LoadBalancingManager initialized")

    async def create_load_balancer(
        self,
        name: str,
        strategy: LoadBalancingStrategy = LoadBalancingStrategy.LEAST_CONNECTIONS
    ) -> LoadBalancer:
        """Create and register a balancer (plus its auto scaler) under *name*.

        Raises:
            ValidationError: if *name* is already registered.
        """
        if name in self.load_balancers:
            raise ValidationError(f"Load balancer {name} already exists")

        balancer = LoadBalancer(strategy)
        self.load_balancers[name] = balancer
        # Every balancer gets a dedicated auto scaler.
        self.auto_scalers[name] = AutoScaler(balancer)

        logger.info(f"Created load balancer {name}")
        return balancer

    def get_load_balancer(self, name: str) -> Optional[LoadBalancer]:
        """Look up a balancer by name, or None if unknown."""
        return self.load_balancers.get(name)

    def get_auto_scaler(self, name: str) -> Optional[AutoScaler]:
        """Look up an auto scaler by name, or None if unknown."""
        return self.auto_scalers.get(name)

    async def remove_load_balancer(self, name: str) -> bool:
        """Tear down and unregister the named balancer; False if unknown."""
        if name not in self.load_balancers:
            return False

        balancer = self.load_balancers[name]
        scaler = self.auto_scalers[name]

        # Stop scaling loops first, then each node's health check.
        for policy_name in scaler.scaling_policies:
            await scaler.stop_scaling(policy_name)

        for node in balancer.get_all_nodes():
            await balancer.health_checker.stop_health_check(node.node_id)

        del self.load_balancers[name]
        del self.auto_scalers[name]

        logger.info(f"Removed load balancer {name}")
        return True

    async def get_global_stats(self) -> Dict[str, Any]:
        """Aggregate per-balancer statistics into global totals."""
        snapshots = [lb.get_load_balancer_stats() for lb in self.load_balancers.values()]

        def total(key: str) -> int:
            # Sum one stats field across every balancer snapshot.
            return sum(snap[key] for snap in snapshots)

        requests = total("total_requests")
        successful = total("successful_requests")

        return {
            "total_balancers": len(self.load_balancers),
            "total_nodes": total("total_nodes"),
            "total_healthy_nodes": total("healthy_nodes"),
            "total_requests": requests,
            "total_successful_requests": successful,
            "total_failed_requests": total("failed_requests"),
            # max(1, ...) avoids division by zero before any traffic.
            "global_success_rate": (successful / max(1, requests)) * 100
        }


# Global load-balancing manager singleton (instantiated at import time).
load_balancing_manager = LoadBalancingManager()
