#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
智能扩缩容策略实现
基于用户等级、负载预测和成本控制的智能扩缩容系统
"""

import time
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass
from enum import Enum
import statistics

# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class UserTier(Enum):
    """User subscription tiers, ordered lowest to highest priority.

    The string values are used when serializing tier counts into the
    scaling-decision log (see log_scaling_decision).
    """
    FREE = "free"
    BASIC = "basic"
    PRO = "pro"
    ENTERPRISE = "enterprise"

@dataclass
class ScalingMetrics:
    """Point-in-time load snapshot consumed by the scaling strategy."""
    queue_length: int         # number of requests waiting in the queue
    average_wait_time: float  # average queue wait, in seconds
    cpu_utilization: float    # CPU utilization percentage (0-100)
    gpu_utilization: float    # GPU utilization percentage — not read by the current analysis
    active_users: int         # total currently-active users
    user_distribution: Dict[UserTier, int]  # active user count per tier
    current_instances: int    # instances currently running
    timestamp: datetime       # when this snapshot was taken

@dataclass
class UserBilling:
    """Per-user billing and quota state.

    NOTE(review): not instantiated anywhere in this file — presumably
    populated by an external billing component; confirm against callers.
    """
    user_id: str               # unique user identifier
    tier: UserTier             # subscription tier
    monthly_quota: int         # quota allotted per month
    used_quota: int            # quota consumed so far this month
    daily_cost_limit: float    # maximum allowed spend per day (CNY, per cost config)
    current_daily_cost: float  # spend accumulated today
    last_reset_date: datetime  # when the daily counters were last reset

class IntelligentScalingStrategy:
    """Intelligent auto-scaling strategy.

    Combines three signals to recommend scale-out / scale-in actions:

    * current load (queue length, average wait time, CPU utilization),
    * the priority mix of active users (FREE .. ENTERPRISE),
    * estimated running cost versus the users' combined daily budgets.
    """

    def __init__(self):
        self.scaling_config = self._load_scaling_config()
        self.cost_config = self._load_cost_config()
        # user_id -> UserBilling; populated by callers outside this class.
        self.user_billing = {}
        # Rolling decision log, trimmed in log_scaling_decision.
        self.scaling_history = []

    def _load_scaling_config(self) -> Dict:
        """Return per-tier scaling thresholds, cooldown periods and priorities.

        NOTE(review): the 'scale_out_threshold' / 'scale_in_threshold'
        sub-dicts are not yet read by the decision logic below — only
        'max_instances', 'cooldown_period' and 'priority' are used.
        """
        return {
            UserTier.FREE: {
                'max_instances': 1,
                'scale_out_threshold': {
                    'queue_length': 5,
                    'wait_time': 300,  # 5 minutes
                },
                'scale_in_threshold': {
                    'idle_time': 60,   # 1 minute
                    'queue_empty': True,
                },
                'cooldown_period': 30,  # 30 seconds
                'priority': 1,  # lowest priority
            },
            UserTier.BASIC: {
                'max_instances': 2,
                'scale_out_threshold': {
                    'queue_length': 3,
                    'wait_time': 120,  # 2 minutes
                },
                'scale_in_threshold': {
                    'idle_time': 180,  # 3 minutes
                    'queue_empty': False,
                },
                'cooldown_period': 60,  # 1 minute
                'priority': 2,
            },
            UserTier.PRO: {
                'max_instances': 5,
                'scale_out_threshold': {
                    'queue_length': 2,
                    'wait_time': 60,   # 1 minute
                },
                'scale_in_threshold': {
                    'idle_time': 300,  # 5 minutes
                    'queue_empty': False,
                },
                'cooldown_period': 30,  # 30 seconds
                'priority': 3,
            },
            UserTier.ENTERPRISE: {
                'max_instances': 20,
                'scale_out_threshold': {
                    'queue_length': 1,
                    'wait_time': 30,   # 30 seconds
                },
                'scale_in_threshold': {
                    'idle_time': 600,  # 10 minutes
                    'queue_empty': False,
                },
                'cooldown_period': 15,  # 15 seconds
                'priority': 4,  # highest priority
            }
        }

    def _load_cost_config(self) -> Dict:
        """Return cost rates, per-tier daily budgets and alert thresholds."""
        return {
            'hai_cost_per_second': 0.00033,  # 1.2 CNY/hour ~= 0.00033 CNY/second
            'daily_limits': {
                UserTier.FREE: 0.5,        # CNY per day
                UserTier.BASIC: 5.0,       # CNY per day
                UserTier.PRO: 15.0,        # CNY per day
                UserTier.ENTERPRISE: 50.0, # CNY per day
            },
            'cost_alerts': {
                'warning': 0.8,     # warn at 80% of budget
                'critical': 0.95,   # restrict at 95% of budget
                'emergency': 1.0,   # stop at 100% of budget
            }
        }

    def analyze_current_load(self, metrics: ScalingMetrics) -> Dict:
        """Analyze current load and attach a scaling recommendation.

        Returns a dict with keys: 'total_load' (0..1), 'user_priority_score'
        (1..4 weighted average), 'cost_risk', 'scaling_recommendation',
        'target_instances', 'reason' and 'confidence'.
        """
        analysis = {
            'total_load': 0,
            'user_priority_score': 0,
            'cost_risk': 'low',
            'scaling_recommendation': 'maintain',
            'target_instances': metrics.current_instances,
        }

        # 1. Aggregate load: queue and wait dominate (0.4 each), CPU adds 0.2.
        queue_weight = min(metrics.queue_length / 20, 1.0)       # saturates at 20 queued
        wait_weight = min(metrics.average_wait_time / 300, 1.0)  # saturates at 5 minutes
        cpu_weight = metrics.cpu_utilization / 100               # percentage -> fraction

        analysis['total_load'] = (queue_weight * 0.4 + wait_weight * 0.4 + cpu_weight * 0.2)

        # 2. User-weighted priority score (1 = all FREE, 4 = all ENTERPRISE).
        priority_weights = {UserTier.FREE: 1, UserTier.BASIC: 2, UserTier.PRO: 3, UserTier.ENTERPRISE: 4}
        total_users = sum(metrics.user_distribution.values())

        if total_users > 0:
            for tier, count in metrics.user_distribution.items():
                weight = priority_weights.get(tier, 1)
                analysis['user_priority_score'] += (count / total_users) * weight

        # 3. Cost risk level.
        analysis['cost_risk'] = self._assess_cost_risk(metrics)

        # 4. Scaling recommendation (adds 'reason' / 'confidence' keys).
        analysis.update(self._generate_scaling_recommendation(metrics, analysis))

        return analysis

    def _assess_cost_risk(self, metrics: ScalingMetrics) -> str:
        """Classify cost risk: 'low' / 'warning' / 'critical' / 'emergency'.

        Compares the estimated hourly fleet cost against the combined daily
        budget of all active users.

        Bug fix: the instance-cost estimate does not depend on user tier, so
        it is computed once.  The original re-added it inside the per-tier
        loop, inflating the estimate by up to 4x when several tiers were
        active and over-triggering cost restrictions.
        """
        # Combined daily budget across every active user.
        total_daily_limit = 0.0
        for tier, count in metrics.user_distribution.items():
            total_daily_limit += count * self.cost_config['daily_limits'].get(tier, 0)

        # Estimated cost of running the current fleet for one hour.
        total_daily_cost = (
            metrics.current_instances * self.cost_config['hai_cost_per_second'] * 3600
        )

        if total_daily_limit == 0:
            return 'low'

        cost_ratio = total_daily_cost / total_daily_limit

        if cost_ratio >= self.cost_config['cost_alerts']['emergency']:
            return 'emergency'
        elif cost_ratio >= self.cost_config['cost_alerts']['critical']:
            return 'critical'
        elif cost_ratio >= self.cost_config['cost_alerts']['warning']:
            return 'warning'
        else:
            return 'low'

    def _generate_scaling_recommendation(self, metrics: ScalingMetrics, analysis: Dict) -> Dict:
        """Derive a scale_out / scale_in / maintain recommendation.

        Target instances = base (queue_length / 5) scaled by priority and
        load multipliers, capped by the tier-derived limit and halved under
        cost pressure.
        """
        recommendation = {
            'scaling_recommendation': 'maintain',
            'target_instances': metrics.current_instances,
            'reason': 'No scaling needed',
            'confidence': 0.5,
        }

        # Base instance count plus priority / load multipliers.
        base_instances = max(1, int(metrics.queue_length / 5))
        priority_multiplier = 1 + (analysis['user_priority_score'] - 1) * 0.5
        load_multiplier = 1 + analysis['total_load'] * 2

        target_instances = int(base_instances * priority_multiplier * load_multiplier)

        # Cap by the tier-derived instance limit.
        max_instances = self._get_max_instances_for_users(metrics.user_distribution)
        target_instances = min(target_instances, max_instances)

        # Halve the target when the budget is under pressure.
        if analysis['cost_risk'] in ['critical', 'emergency']:
            target_instances = max(1, target_instances // 2)

        if target_instances > metrics.current_instances:
            recommendation.update({
                'scaling_recommendation': 'scale_out',
                'target_instances': target_instances,
                'reason': f'High load detected (load: {analysis["total_load"]:.2f}, priority: {analysis["user_priority_score"]:.2f})',
                'confidence': min(0.9, 0.5 + analysis['total_load'] * 0.4),
            })
        elif target_instances < metrics.current_instances and analysis['cost_risk'] != 'emergency':
            recommendation.update({
                'scaling_recommendation': 'scale_in',
                'target_instances': target_instances,
                'reason': f'Low load detected (load: {analysis["total_load"]:.2f})',
                'confidence': min(0.9, 0.5 + (1 - analysis['total_load']) * 0.4),
            })

        return recommendation

    def _get_max_instances_for_users(self, user_distribution: Dict[UserTier, int]) -> int:
        """Sum the per-tier 'max_instances' caps over tiers with active users."""
        max_instances = 0

        for tier, count in user_distribution.items():
            if count > 0:
                tier_config = self.scaling_config.get(tier, {})
                # Unknown tiers contribute a conservative cap of 1.
                max_instances += tier_config.get('max_instances', 1)

        return max_instances

    def predict_future_load(self, historical_metrics: List[ScalingMetrics]) -> Dict:
        """Predict near-future load with a simple linear trend.

        Uses the last 5 samples.  With fewer than 3 samples a neutral,
        low-confidence prediction is returned with the same key set, so
        callers can rely on a stable shape (the original short-history
        branch omitted 'predicted_wait_time' and 'trend').
        """
        if len(historical_metrics) < 3:
            return {
                'predicted_load': 0.5,
                'predicted_wait_time': 0.0,
                'confidence': 0.3,
                'trend': 'unknown',
            }

        # Simple time-series extrapolation over the most recent samples.
        recent_loads = [m.queue_length for m in historical_metrics[-5:]]
        recent_wait_times = [m.average_wait_time for m in historical_metrics[-5:]]

        # Trend = mean of the last two samples minus mean of the first two.
        load_trend = statistics.mean(recent_loads[-2:]) - statistics.mean(recent_loads[:2])
        wait_trend = statistics.mean(recent_wait_times[-2:]) - statistics.mean(recent_wait_times[:2])

        # Project half a trend step ahead (~5 minutes).
        predicted_load = statistics.mean(recent_loads) + load_trend * 0.5
        predicted_wait = statistics.mean(recent_wait_times) + wait_trend * 0.5

        # Confidence grows with the amount of history, capped at 0.9.
        confidence = min(0.9, 0.3 + len(historical_metrics) * 0.1)

        return {
            'predicted_load': max(0, predicted_load),
            'predicted_wait_time': max(0, predicted_wait),
            'confidence': confidence,
            'trend': 'increasing' if load_trend > 0 else 'decreasing',
        }

    def should_scale(self, metrics: ScalingMetrics, last_scaling_time: Optional[datetime] = None) -> Tuple[bool, str]:
        """Decide whether a scaling action should be taken right now.

        Returns (decision, human-readable reason).  Scaling is suppressed
        during the cooldown window, when no action is recommended, when a
        cost emergency blocks scale-out, or when confidence is below 0.6.
        """
        analysis = self.analyze_current_load(metrics)

        # Respect the tier-dependent cooldown window.
        if last_scaling_time:
            cooldown_period = self._get_cooldown_period(metrics.user_distribution)
            time_since_last_scaling = (datetime.now() - last_scaling_time).total_seconds()

            if time_since_last_scaling < cooldown_period:
                return False, f"In cooldown period ({time_since_last_scaling:.1f}s < {cooldown_period}s)"

        if analysis['scaling_recommendation'] == 'maintain':
            return False, "No scaling needed"

        # Never scale out while over budget.
        if analysis['cost_risk'] == 'emergency' and analysis['scaling_recommendation'] == 'scale_out':
            return False, "Cost emergency - scaling out blocked"

        if analysis['confidence'] < 0.6:
            return False, f"Low confidence ({analysis['confidence']:.2f})"

        return True, analysis['reason']

    def _get_cooldown_period(self, user_distribution: Dict[UserTier, int]) -> int:
        """Return the cooldown (seconds) of the highest-priority active tier."""
        max_priority = 0
        cooldown = 60  # default: 1 minute

        for tier, count in user_distribution.items():
            if count > 0:
                tier_config = self.scaling_config.get(tier, {})
                priority = tier_config.get('priority', 1)
                if priority > max_priority:
                    max_priority = priority
                    cooldown = tier_config.get('cooldown_period', 60)

        return cooldown

    def calculate_optimal_instances(self, metrics: ScalingMetrics, prediction: Dict) -> int:
        """Blend current and predicted load into an optimal instance count.

        Averages the queue-based and prediction-based instance estimates,
        applies a milder priority multiplier than the recommendation path,
        then the same tier cap and cost halving.  Always returns >= 1.
        """
        analysis = self.analyze_current_load(metrics)

        base_instances = max(1, int(metrics.queue_length / 5))
        predicted_instances = max(1, int(prediction['predicted_load'] / 5))

        # 0.3 here (vs 0.5 in _generate_scaling_recommendation): prediction
        # is less certain, so priority influences the target less.
        priority_multiplier = 1 + (analysis['user_priority_score'] - 1) * 0.3

        target_instances = int((base_instances + predicted_instances) / 2 * priority_multiplier)

        # Cap by the tier-derived instance limit.
        max_instances = self._get_max_instances_for_users(metrics.user_distribution)
        target_instances = min(target_instances, max_instances)

        # Halve the target when the budget is under pressure.
        if analysis['cost_risk'] in ['critical', 'emergency']:
            target_instances = max(1, target_instances // 2)

        return max(1, target_instances)

    def get_scaling_plan(self, metrics: ScalingMetrics, prediction: Dict) -> Dict:
        """Build a concrete scaling plan (action, amount, cost impact)."""
        analysis = self.analyze_current_load(metrics)
        optimal_instances = self.calculate_optimal_instances(metrics, prediction)

        plan = {
            'current_instances': metrics.current_instances,
            'target_instances': optimal_instances,
            'scaling_action': 'maintain',
            'scaling_amount': 0,
            'reason': analysis.get('reason', 'No scaling needed'),
            'confidence': analysis.get('confidence', 0.5),
            'cost_risk': analysis.get('cost_risk', 'low'),
            'estimated_cost_impact': 0,
            'execution_time': datetime.now(),
        }

        if optimal_instances > metrics.current_instances:
            plan.update({
                'scaling_action': 'scale_out',
                'scaling_amount': optimal_instances - metrics.current_instances,
            })
        elif optimal_instances < metrics.current_instances:
            plan.update({
                'scaling_action': 'scale_in',
                'scaling_amount': metrics.current_instances - optimal_instances,
            })

        # Hourly cost delta.  NOTE(review): 1.2 CNY/hour is the nominal rate;
        # it differs slightly from hai_cost_per_second * 3600 (= 1.188).
        cost_per_instance_per_hour = 1.2
        if plan['scaling_action'] == 'scale_out':
            plan['estimated_cost_impact'] = plan['scaling_amount'] * cost_per_instance_per_hour
        elif plan['scaling_action'] == 'scale_in':
            plan['estimated_cost_impact'] = -plan['scaling_amount'] * cost_per_instance_per_hour

        return plan

    def log_scaling_decision(self, metrics: ScalingMetrics, plan: Dict):
        """Append the decision to the in-memory history and log a summary."""
        log_entry = {
            'timestamp': datetime.now().isoformat(),
            'metrics': {
                'queue_length': metrics.queue_length,
                'average_wait_time': metrics.average_wait_time,
                'cpu_utilization': metrics.cpu_utilization,
                'user_distribution': {tier.value: count for tier, count in metrics.user_distribution.items()},
                'current_instances': metrics.current_instances,
            },
            'plan': plan,
        }

        self.scaling_history.append(log_entry)

        # Trim the history so it never grows without bound.
        if len(self.scaling_history) > 1000:
            self.scaling_history = self.scaling_history[-500:]

        logger.info(f"Scaling decision: {plan['scaling_action']} to {plan['target_instances']} instances. Reason: {plan['reason']}")

# Usage example
def main():
    """Demo: run a single scaling-decision cycle on simulated metrics."""
    scaler = IntelligentScalingStrategy()

    # Simulated snapshot: mostly free-tier users, a busy queue, 3 instances up.
    tier_counts = {
        UserTier.FREE: 30,
        UserTier.BASIC: 15,
        UserTier.PRO: 4,
        UserTier.ENTERPRISE: 1,
    }
    snapshot = ScalingMetrics(
        queue_length=15,
        average_wait_time=180,
        cpu_utilization=75,
        gpu_utilization=80,
        active_users=50,
        user_distribution=tier_counts,
        current_instances=3,
        timestamp=datetime.now(),
    )

    # Analyze the current load.
    load_report = scaler.analyze_current_load(snapshot)
    print(f"负载分析: {load_report}")

    # Forecast the near-future load from the (single-sample) history.
    forecast = scaler.predict_future_load([snapshot])
    print(f"负载预测: {forecast}")

    # Build a concrete scaling plan.
    scaling_plan = scaler.get_scaling_plan(snapshot, forecast)
    print(f"扩缩容计划: {scaling_plan}")

    # Final go / no-go decision.
    decision, why = scaler.should_scale(snapshot)
    print(f"是否扩缩容: {decision}, 原因: {why}")

if __name__ == '__main__':
    main()
