#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
智能扩缩容逻辑
实现基于负载的自动扩缩容机制
"""

import time
import logging
import threading
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass
from enum import Enum
import statistics

logger = logging.getLogger(__name__)


class ScalingAction(Enum):
    """Actions the auto-scaler may take on the server fleet."""

    SCALE_OUT = "scale_out"  # add capacity
    SCALE_IN = "scale_in"    # remove capacity
    NO_ACTION = "no_action"  # leave the fleet unchanged


class ScalingTrigger(Enum):
    """Conditions that can fire a scaling decision.

    HIGH_* members motivate scale-out; LOW_* members motivate scale-in.
    """

    HIGH_RESPONSE_TIME = "high_response_time"
    HIGH_QUEUE_BACKLOG = "high_queue_backlog"
    HIGH_CPU_USAGE = "high_cpu_usage"
    HIGH_WAIT_TIME = "high_wait_time"
    LOW_RESPONSE_TIME = "low_response_time"
    LOW_QUEUE_BACKLOG = "low_queue_backlog"
    LOW_CPU_USAGE = "low_cpu_usage"
    LOW_LOAD_DURATION = "low_load_duration"


@dataclass
class ScalingMetrics:
    """Snapshot of system load at one sampling instant."""

    timestamp: float              # unix time the sample was taken
    average_response_time: float  # mean task response time, seconds
    queue_backlog: int            # tasks currently waiting in the queue
    cpu_utilization: float        # mean server CPU usage, percent
    max_wait_time: float          # estimated worst-case user wait, seconds
    active_servers: int           # servers currently available
    total_capacity: int           # aggregate task capacity of the fleet
    used_capacity: int            # capacity currently in use


@dataclass
class ScalingDecision:
    """Outcome of one scaling evaluation."""

    action: ScalingAction        # what to do (scale out / in / nothing)
    trigger: ScalingTrigger      # the first condition that fired
    reason: str                  # human-readable explanation
    target_servers: int          # desired fleet size after the action
    priority_regions: List[str]  # regions to prefer when creating servers
    preferred_gpu_type: str      # GPU model to request for new servers
    confidence: float            # decision confidence in [0, 1]


class AutoScaler:
    """Load-driven auto-scaler for the HAI server fleet.

    A daemon thread periodically samples queue/server metrics, evaluates
    scale-out / scale-in trigger thresholds, and creates or destroys HAI
    instances accordingly — subject to a cooldown period and a minimum
    decision confidence.
    """

    def __init__(self, hai_client, load_balancer, fair_queue, structured_logger):
        """
        Initialize the auto-scaler.

        Args:
            hai_client: HAI cloud client (create/destroy/list instances).
            load_balancer: Load balancer tracking the active server pool.
            fair_queue: Fair task queue providing backlog statistics.
            structured_logger: Structured logger used for scaling alerts.
        """
        self.hai_client = hai_client
        self.load_balancer = load_balancer
        self.fair_queue = fair_queue
        self.structured_logger = structured_logger

        # Scaling threshold configuration.
        self.scaling_config = {
            # Scale-out trigger thresholds.
            'scale_out': {
                'response_time_threshold': 3.0,      # avg response time > 3s
                'queue_backlog_threshold': 5,        # queued tasks > 5 (kept low to ease testing)
                'cpu_utilization_threshold': 85.0,   # server CPU utilization > 85%
                'max_wait_time_threshold': 10.0      # max user wait time > 10s
            },
            # Scale-in trigger thresholds.
            'scale_in': {
                'response_time_threshold': 1.0,      # avg response time < 1s
                'queue_backlog_threshold': 3,        # queued tasks < 3
                'cpu_utilization_threshold': 30.0,   # server CPU utilization < 30%
                'low_load_duration_threshold': 300   # low load sustained > 5 minutes
            },
            # Fleet limits and decision tuning.
            'min_servers': 1,                        # never scale below this
            'max_servers': 100,                      # hard upper bound on fleet size
            'cooldown_period': 60,                   # seconds between scaling actions
            'metrics_window_size': 10,               # sliding metrics-history length
            'scaling_confidence_threshold': 0.7      # minimum confidence to act
        }

        # Runtime state.
        self.metrics_history: List[ScalingMetrics] = []
        self.last_scaling_action: Optional[ScalingDecision] = None
        self.last_scaling_time: float = 0
        self.low_load_start_time: Optional[float] = None
        self.running = False
        self.monitor_thread: Optional[threading.Thread] = None

        # Guards metrics_history and scaling state across threads.
        self.lock = threading.Lock()

    def start(self) -> None:
        """Start the background monitoring thread (idempotent)."""
        if self.running:
            return

        self.running = True
        self.monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self.monitor_thread.start()
        logger.info("自动扩缩容已启动")

    def stop(self) -> None:
        """Stop monitoring and wait briefly for the thread to exit."""
        self.running = False
        if self.monitor_thread:
            self.monitor_thread.join(timeout=5)
        logger.info("自动扩缩容已停止")

    def _monitor_loop(self) -> None:
        """Monitoring loop: sample metrics and evaluate scaling every 10s."""
        while self.running:
            try:
                self._collect_metrics()
                self._make_scaling_decision()
                time.sleep(10)  # sample every 10 seconds
            except Exception as e:
                logger.error(f"自动扩缩容监控错误: {e}")
                time.sleep(30)  # back off after an unexpected failure

    def _collect_metrics(self) -> None:
        """Sample queue/server state and append one ScalingMetrics snapshot."""
        try:
            # Queue backlog.
            queue_status = self.fair_queue.get_queue_status()

            # Server pool metrics.
            system_metrics = self.load_balancer.get_system_metrics()

            # Average response time, derived from task-processing log files.
            average_response_time = self._calculate_average_response_time()

            # Worst-case user wait, estimated from the queue length.
            max_wait_time = self._calculate_max_wait_time()

            # Mean CPU utilization across running instances.
            cpu_utilization = self._calculate_average_cpu_utilization()

            metrics = ScalingMetrics(
                timestamp=time.time(),
                average_response_time=average_response_time,
                queue_backlog=queue_status['total_tasks_in_queue'],
                cpu_utilization=cpu_utilization,
                max_wait_time=max_wait_time,
                active_servers=system_metrics['servers']['available'],
                total_capacity=system_metrics['servers']['total_capacity'],
                used_capacity=system_metrics['servers']['used_capacity']
            )

            with self.lock:
                self.metrics_history.append(metrics)

                # Keep the history bounded to the configured window size.
                if len(self.metrics_history) > self.scaling_config['metrics_window_size']:
                    self.metrics_history.pop(0)

            logger.debug(f"收集指标: {metrics}")

        except Exception as e:
            logger.error(f"收集指标失败: {e}")

    def _make_scaling_decision(self) -> None:
        """Evaluate the latest metrics and execute at most one scaling action."""
        try:
            with self.lock:
                # Need a few samples before the data is meaningful.
                if len(self.metrics_history) < 3:
                    return

                # Respect the cooldown between consecutive scaling actions.
                if time.time() - self.last_scaling_time < self.scaling_config['cooldown_period']:
                    return

                latest_metrics = self.metrics_history[-1]

                # Scale-out takes precedence over scale-in.
                scale_out_decision = self._check_scale_out_conditions(latest_metrics)
                if scale_out_decision:
                    self._execute_scaling_action(scale_out_decision)
                    return

                scale_in_decision = self._check_scale_in_conditions(latest_metrics)
                if scale_in_decision:
                    self._execute_scaling_action(scale_in_decision)
                    return

        except Exception as e:
            logger.error(f"扩缩容决策错误: {e}")

    def _check_scale_out_conditions(self, metrics: ScalingMetrics) -> Optional[ScalingDecision]:
        """Return a SCALE_OUT decision if any scale-out threshold is exceeded."""
        scale_out_config = self.scaling_config['scale_out']
        triggers = []

        # Average response time too high?
        if metrics.average_response_time > scale_out_config['response_time_threshold']:
            triggers.append(ScalingTrigger.HIGH_RESPONSE_TIME)

        # Queue backlog too deep?
        if metrics.queue_backlog > scale_out_config['queue_backlog_threshold']:
            triggers.append(ScalingTrigger.HIGH_QUEUE_BACKLOG)

        # CPU utilization too high?
        if metrics.cpu_utilization > scale_out_config['cpu_utilization_threshold']:
            triggers.append(ScalingTrigger.HIGH_CPU_USAGE)

        # Worst-case user wait too long?
        if metrics.max_wait_time > scale_out_config['max_wait_time_threshold']:
            triggers.append(ScalingTrigger.HIGH_WAIT_TIME)

        if triggers:
            # Size the fleet for the current load.
            target_servers = self._calculate_scale_out_target(metrics)

            # Pick regions and the cheapest GPU type for the new servers.
            priority_regions = self._select_priority_regions()
            preferred_gpu_type = self.hai_client.get_cheapest_gpu_type()

            # How confident are we in this decision?
            confidence = self._calculate_scaling_confidence(triggers, metrics)

            return ScalingDecision(
                action=ScalingAction.SCALE_OUT,
                trigger=triggers[0],  # report the first condition that fired
                reason=f"扩容触发: {[t.value for t in triggers]}",
                target_servers=target_servers,
                priority_regions=priority_regions,
                preferred_gpu_type=preferred_gpu_type,
                confidence=confidence
            )

        return None

    def _check_scale_in_conditions(self, metrics: ScalingMetrics) -> Optional[ScalingDecision]:
        """Return a SCALE_IN decision if at least two scale-in conditions hold."""
        scale_in_config = self.scaling_config['scale_in']

        # Never shrink below the configured minimum fleet size.
        if metrics.active_servers <= self.scaling_config['min_servers']:
            return None

        # Don't shrink while new servers are still being deployed.
        if self._has_deploying_servers():
            return None

        triggers = []

        # Average response time comfortably low?
        if metrics.average_response_time < scale_in_config['response_time_threshold']:
            triggers.append(ScalingTrigger.LOW_RESPONSE_TIME)

        # Queue nearly empty?
        if metrics.queue_backlog < scale_in_config['queue_backlog_threshold']:
            triggers.append(ScalingTrigger.LOW_QUEUE_BACKLOG)

        # CPU utilization low?
        if metrics.cpu_utilization < scale_in_config['cpu_utilization_threshold']:
            triggers.append(ScalingTrigger.LOW_CPU_USAGE)

        # Has the low-load state persisted across recent samples?
        if self._check_low_load_duration():
            triggers.append(ScalingTrigger.LOW_LOAD_DURATION)

        # Scale-in is conservative: require at least two conditions.
        if len(triggers) >= 2:
            target_servers = self._calculate_scale_in_target(metrics)

            # Choose which servers to destroy (cheapest-to-lose first).
            servers_to_destroy = self._select_servers_to_destroy(target_servers)

            if servers_to_destroy:
                confidence = self._calculate_scaling_confidence(triggers, metrics)

                # NOTE: the server-id list is embedded in `reason` and later
                # recovered by _parse_servers_from_reason(); keep the
                # "销毁服务器:" marker format in sync with that parser.
                return ScalingDecision(
                    action=ScalingAction.SCALE_IN,
                    trigger=triggers[0],
                    reason=f"缩容触发: {[t.value for t in triggers]}, 销毁服务器: {servers_to_destroy}",
                    target_servers=target_servers,
                    priority_regions=[],  # regions irrelevant when shrinking
                    preferred_gpu_type="",  # GPU type irrelevant when shrinking
                    confidence=confidence
                )

        return None

    def _execute_scaling_action(self, decision: ScalingDecision) -> None:
        """Execute a scaling decision if it clears the confidence threshold."""
        try:
            if decision.confidence < self.scaling_config['scaling_confidence_threshold']:
                logger.warning(f"扩缩容决策置信度不足: {decision.confidence}")
                return

            logger.info(f"执行扩缩容动作: {decision.action.value} - {decision.reason}")

            if decision.action == ScalingAction.SCALE_OUT:
                self._execute_scale_out(decision)
            elif decision.action == ScalingAction.SCALE_IN:
                self._execute_scale_in(decision)

            # Start the cooldown window and remember the decision.
            self.last_scaling_time = time.time()
            self.last_scaling_action = decision

            # Emit a structured audit record.
            self.structured_logger.log_alert(
                alert_type="scaling_action",
                severity="info",
                message=f"执行{decision.action.value}",
                details={
                    "trigger": decision.trigger.value,
                    "reason": decision.reason,
                    "target_servers": decision.target_servers,
                    "confidence": decision.confidence
                }
            )

        except Exception as e:
            logger.error(f"执行扩缩容动作失败: {e}")

    def _execute_scale_out(self, decision: ScalingDecision) -> None:
        """Create instances until the fleet reaches decision.target_servers."""
        current_servers = len(self.hai_client.list_instances(status="RUNNING"))
        servers_to_create = decision.target_servers - current_servers

        if servers_to_create <= 0:
            return

        logger.info(f"开始扩容: 创建{servers_to_create}个服务器")

        created_count = 0
        for _ in range(servers_to_create):
            try:
                # Spread new servers across regions.
                region = self._select_region_for_scale_out(decision.priority_regions)

                instance_id = self.hai_client.create_instance(
                    region=region,
                    gpu_type=decision.preferred_gpu_type
                )

                if instance_id:
                    # Register the new instance with the load balancer.
                    from .task_distribution import Server
                    server = Server(
                        server_id=instance_id,
                        region=region,
                        gpu_type=decision.preferred_gpu_type,
                        status="HEALTHY",
                        created_time=time.time(),
                        endpoint=f"http://{instance_id}.hai.tencentcloud.com"
                    )
                    self.load_balancer.add_server(server)
                    created_count += 1

                    logger.info(f"扩容成功: {instance_id} ({region})")
                else:
                    logger.error(f"扩容失败: 无法创建实例")

            except Exception as e:
                logger.error(f"扩容创建实例失败: {e}")

        logger.info(f"扩容完成: 成功创建{created_count}个服务器")

    def _execute_scale_in(self, decision: ScalingDecision) -> None:
        """Destroy the instances named in the decision's reason string."""
        servers_to_destroy = self._parse_servers_from_reason(decision.reason)

        if not servers_to_destroy:
            return

        logger.info(f"开始缩容: 销毁{len(servers_to_destroy)}个服务器")

        destroyed_count = 0
        for server_id in servers_to_destroy:
            try:
                # Drain from the load balancer first so no new tasks land here.
                self.load_balancer.remove_server(server_id)

                if self.hai_client.destroy_instance(server_id):
                    destroyed_count += 1
                    logger.info(f"缩容成功: {server_id}")
                else:
                    logger.error(f"缩容失败: 无法销毁实例 {server_id}")

            except Exception as e:
                logger.error(f"缩容销毁实例失败: {server_id}, 错误: {e}")

        logger.info(f"缩容完成: 成功销毁{destroyed_count}个服务器")

    def _calculate_scale_out_target(self, metrics: ScalingMetrics) -> int:
        """Compute the desired fleet size when scaling out.

        Takes the largest of three heuristics (queue depth, response time,
        CPU load), adds it to the current fleet, and caps at max_servers.
        """
        current_servers = metrics.active_servers

        # Queue heuristic: assume one server clears ~10 queued tasks.
        queue_based = max(1, metrics.queue_backlog // 10)

        # Response-time heuristic: roughly one extra server per second of latency.
        if metrics.average_response_time > 0:
            response_time_based = max(1, int(metrics.average_response_time))
        else:
            response_time_based = 1

        # CPU heuristic: aim for ~50% utilization.
        if metrics.cpu_utilization > 0:
            cpu_based = max(1, int(metrics.cpu_utilization / 50))
        else:
            cpu_based = 1

        # Grow by the most demanding heuristic.
        target = current_servers + max(queue_based, response_time_based, cpu_based)

        # Never exceed the configured ceiling.
        return min(target, self.scaling_config['max_servers'])

    def _calculate_scale_in_target(self, metrics: ScalingMetrics) -> int:
        """Compute the desired fleet size when scaling in.

        Halves the fleet when the queue is nearly empty, otherwise removes
        one server; never goes below min_servers.
        """
        current_servers = metrics.active_servers

        if metrics.queue_backlog <= 3:
            target = max(self.scaling_config['min_servers'], current_servers // 2)
        else:
            target = max(self.scaling_config['min_servers'], current_servers - 1)

        return target

    def _select_servers_to_destroy(self, target_servers: int) -> List[str]:
        """Pick which running instances to destroy to reach target_servers.

        Preference order: expensive GPU types first, then the least-loaded
        servers, so shrinking costs the least and disrupts the fewest tasks.
        """
        current_servers = self.hai_client.list_instances(status="RUNNING")

        if len(current_servers) <= target_servers:
            return []

        servers_to_destroy = []

        # Assumes get_gpu_cost_order() lists GPU types from most to least
        # expensive, so rank 0 (most expensive) sorts first — TODO confirm
        # against the hai_client implementation.
        gpu_cost_order = self.hai_client.get_gpu_cost_order()
        gpu_cost_rank = {gpu: i for i, gpu in enumerate(gpu_cost_order)}

        # Sort candidates: expensive GPU first, then fewest current tasks.
        # (The original used -s.current_tasks, which put the *busiest*
        # servers first — the opposite of the stated intent.)
        sorted_servers = sorted(
            current_servers,
            key=lambda s: (
                gpu_cost_rank.get(s.gpu_type, 999),  # unknown GPU types last
                s.current_tasks                      # least-loaded first
            )
        )

        destroy_count = len(current_servers) - target_servers
        for i in range(min(destroy_count, len(sorted_servers))):
            servers_to_destroy.append(sorted_servers[i].instance_id)

        return servers_to_destroy

    def _select_priority_regions(self) -> List[str]:
        """Return the regions eligible for new servers."""
        return self.hai_client.get_available_regions()

    def _select_region_for_scale_out(self, priority_regions: List[str]) -> str:
        """Choose a region for one new server, balancing across regions.

        Prefers a priority region with no servers yet; otherwise the one
        with the fewest. Raises ValueError if priority_regions is empty.
        """
        # Count running servers per region.
        region_counts = {}
        for instance in self.hai_client.list_instances(status="RUNNING"):
            region = instance.region
            region_counts[region] = region_counts.get(region, 0) + 1

        # First priority region with no servers wins.
        for region in priority_regions:
            if region not in region_counts or region_counts[region] == 0:
                return region

        # All priority regions are populated: pick the emptiest.
        return min(priority_regions, key=lambda r: region_counts.get(r, 0))

    def _calculate_scaling_confidence(self, triggers: List[ScalingTrigger],
                                    metrics: ScalingMetrics) -> float:
        """Score a decision's confidence in [0, 1].

        Each trigger contributes a fixed weight; the recent metrics trend
        adds up to ±0.2. Called while self.lock is already held.
        """
        confidence = 0.0

        for trigger in triggers:
            if trigger in [ScalingTrigger.HIGH_RESPONSE_TIME, ScalingTrigger.LOW_RESPONSE_TIME]:
                confidence += 0.3
            elif trigger in [ScalingTrigger.HIGH_QUEUE_BACKLOG, ScalingTrigger.LOW_QUEUE_BACKLOG]:
                confidence += 0.25
            elif trigger in [ScalingTrigger.HIGH_CPU_USAGE, ScalingTrigger.LOW_CPU_USAGE]:
                confidence += 0.2
            elif trigger in [ScalingTrigger.HIGH_WAIT_TIME]:
                confidence += 0.15
            elif trigger in [ScalingTrigger.LOW_LOAD_DURATION]:
                confidence += 0.1

        # Fold in the recent metrics trend.
        if len(self.metrics_history) >= 3:
            recent_trend = self._calculate_metrics_trend()
            confidence += recent_trend * 0.2

        return min(confidence, 1.0)

    def _calculate_metrics_trend(self) -> float:
        """Summarize the direction of the last three samples in [-1, 1].

        Positive means load is rising. For three samples, [-2:] is the last
        two and [:-1] the first two, so each term compares the newer pair's
        mean against the older pair's mean.
        """
        if len(self.metrics_history) < 3:
            return 0.0

        recent_metrics = self.metrics_history[-3:]

        # Response-time movement (seconds).
        response_times = [m.average_response_time for m in recent_metrics]
        response_trend = statistics.mean(response_times[-2:]) - statistics.mean(response_times[:-1])

        # Queue-backlog movement (tasks).
        queue_backlogs = [m.queue_backlog for m in recent_metrics]
        queue_trend = statistics.mean(queue_backlogs[-2:]) - statistics.mean(queue_backlogs[:-1])

        # CPU-utilization movement (percent).
        cpu_utilizations = [m.cpu_utilization for m in recent_metrics]
        cpu_trend = statistics.mean(cpu_utilizations[-2:]) - statistics.mean(cpu_utilizations[:-1])

        # Blend the three signals, normalizing queue (/10) and CPU (/100)
        # onto roughly the response-time scale.
        overall_trend = (response_trend + queue_trend/10 + cpu_trend/100) / 3

        return max(-1.0, min(1.0, overall_trend))

    def _calculate_average_response_time(self) -> float:
        """Average task processing time from the last 24h of log files.

        Log files are one-per-day, so the 24 hourly steps map onto at most
        two distinct files; each file is read once. Returns 2.0 seconds
        when no samples are available.
        """
        try:
            import os
            import json
            from datetime import datetime, timedelta

            # NOTE(review): hard-coded deployment path — consider making
            # this configurable.
            log_dir = "/home/ubuntu/PhotoEnhanceAI-web/logs"
            response_times = []

            # Deduplicate file paths: the original re-read the same daily
            # file up to 24 times, weighting its samples multiple times.
            seen_files = set()
            for i in range(24):
                date = datetime.now() - timedelta(hours=i)
                log_file = os.path.join(log_dir, f"task_processing_{date.strftime('%Y-%m-%d')}.log")

                if log_file in seen_files:
                    continue
                seen_files.add(log_file)

                if os.path.exists(log_file):
                    with open(log_file, 'r') as f:
                        for line in f:
                            try:
                                data = json.loads(line.strip())
                                if data.get('type') == 'task_processing':
                                    duration = data.get('data', {}).get('processing_duration', 0)
                                    if duration > 0:
                                        response_times.append(duration)
                            except (ValueError, KeyError, AttributeError):
                                # Skip malformed / non-JSON / non-object lines.
                                continue

            if response_times:
                return sum(response_times) / len(response_times)
            return 2.0  # default when no samples exist
        except Exception as e:
            logger.error(f"计算平均响应时间失败: {e}")
            return 2.0

    def _calculate_max_wait_time(self) -> float:
        """Estimate the worst-case user wait time from the queue backlog."""
        try:
            # Use the injected queue — the same instance _collect_metrics
            # reads — instead of re-importing a module-level singleton.
            queue_status = self.fair_queue.get_queue_status()

            queue_length = queue_status.get('total_tasks_in_queue', 0)
            if queue_length > 0:
                # Assume ~30 seconds of processing per queued task.
                return queue_length * 30.0
            return 0.0
        except Exception as e:
            logger.error(f"计算最大等待时间失败: {e}")
            return 0.0

    def _calculate_average_cpu_utilization(self) -> float:
        """Mean CPU utilization across running instances (0.0 if none)."""
        instances = self.hai_client.list_instances(status="RUNNING")
        if not instances:
            return 0.0

        total_cpu = sum(instance.cpu_usage for instance in instances)
        return total_cpu / len(instances)

    def _check_low_load_duration(self) -> bool:
        """True when all of the last three samples show low load."""
        if len(self.metrics_history) < 3:
            return False

        recent_metrics = self.metrics_history[-3:]
        low_load_count = 0

        for metrics in recent_metrics:
            if (metrics.cpu_utilization < self.scaling_config['scale_in']['cpu_utilization_threshold'] and
                metrics.queue_backlog < self.scaling_config['scale_in']['queue_backlog_threshold']):
                low_load_count += 1

        return low_load_count >= 3

    def _has_deploying_servers(self) -> bool:
        """Whether any HAI instance is still deploying.

        TODO: query the HAI instance states; currently always False.
        """
        return False

    def _parse_servers_from_reason(self, reason: str) -> List[str]:
        """Extract the server-id list embedded in a scale-in reason string.

        Expected format: "缩容触发: [...], 销毁服务器: ['id1', 'id2']".
        The ids are embedded via the repr() of a Python list, so the quotes
        must be stripped along with the brackets and whitespace (the
        original parser left the quotes in, producing ids like "'srv-1'").
        """
        try:
            if "销毁服务器:" in reason:
                servers_str = reason.split("销毁服务器:")[-1].strip()
                servers_str = servers_str.strip("[]")
                return [
                    s.strip().strip("'\"")
                    for s in servers_str.split(",")
                    if s.strip().strip("'\"")
                ]
        except Exception as e:
            logger.error(f"解析服务器列表失败: {e}")

        return []

    def get_scaling_status(self) -> Dict[str, Any]:
        """Return a JSON-serializable snapshot of the scaler's state."""
        with self.lock:
            latest_metrics = self.metrics_history[-1] if self.metrics_history else None

            # Serialize the last decision. Fields are always present on the
            # ScalingDecision dataclass, so no hasattr guards are needed.
            last_action_data = None
            if self.last_scaling_action:
                action = self.last_scaling_action
                last_action_data = {
                    "action": action.action.value,
                    "trigger": action.trigger.value,
                    "reason": action.reason,
                    "target_servers": action.target_servers,
                    "priority_regions": action.priority_regions,
                    "preferred_gpu_type": action.preferred_gpu_type,
                    "confidence": action.confidence
                }

            # Serialize the latest metrics sample.
            latest_metrics_data = None
            if latest_metrics:
                latest_metrics_data = {
                    "timestamp": latest_metrics.timestamp,
                    "average_response_time": latest_metrics.average_response_time,
                    "queue_backlog": latest_metrics.queue_backlog,
                    "cpu_utilization": latest_metrics.cpu_utilization,
                    "max_wait_time": latest_metrics.max_wait_time,
                    "active_servers": latest_metrics.active_servers,  # was omitted
                    "total_capacity": latest_metrics.total_capacity,
                    "used_capacity": latest_metrics.used_capacity
                }

            return {
                "running": self.running,
                "last_scaling_action": last_action_data,
                "last_scaling_time": self.last_scaling_time,
                "metrics_history_size": len(self.metrics_history),
                "latest_metrics": latest_metrics_data,
                "scaling_config": self.scaling_config
            }


# Module-level singleton instance.
auto_scaler: Optional[AutoScaler] = None  # populated by initialize_auto_scaler()


def initialize_auto_scaler(hai_client, load_balancer, fair_queue, structured_logger):
    """Build the module-level AutoScaler singleton and return it.

    Args:
        hai_client: HAI cloud client.
        load_balancer: Load balancer instance.
        fair_queue: Fair task queue instance.
        structured_logger: Structured logger instance.

    Returns:
        The newly created AutoScaler, also stored in the module-level
        ``auto_scaler`` global.
    """
    global auto_scaler
    scaler = AutoScaler(hai_client, load_balancer, fair_queue, structured_logger)
    auto_scaler = scaler
    return scaler


if __name__ == "__main__":
    # Standalone smoke run; a real test needs live client/balancer objects.
    logging.basicConfig(level=logging.INFO)

    print("自动扩缩容模块测试完成")
