# uamcf/core/scheduler.py
import random
from typing import Dict, List, Any, Tuple, Optional, Union
from collections import defaultdict

from ..utils.logger import get_logger
from ..methods import get_method_registry


class ConstraintSolver:
    """Constraint solver for compression plans.

    Estimates the aggregate impact of a plan's steps and checks it against
    accuracy / size / memory / latency constraints; can also nudge a plan's
    per-step configs toward feasibility.
    """

    def __init__(self):
        self.logger = get_logger("ConstraintSolver")

    def is_feasible(self, plan: List[Dict], constraints: Dict) -> bool:
        """
        Check whether a compression plan satisfies the constraints.

        Args:
            plan: Compression plan; each step may carry an
                "expected_metrics" dict with "accuracy_ratio", "size_ratio",
                "memory" and "latency" entries.
            constraints: Constraint dict with optional keys
                "accuracy_threshold", "size_limit", "memory_limit",
                "latency_target".

        Returns:
            True if the estimated plan impact satisfies every constraint.
        """
        # Simplified check: assumes every step advertises its expected
        # compression ratio and accuracy impact; a real implementation
        # would be considerably more involved.
        accuracy_threshold = constraints.get("accuracy_threshold", 0)
        size_limit = constraints.get("size_limit")
        memory_limit = constraints.get("memory_limit")
        latency_target = constraints.get("latency_target")

        # Start from the uncompressed baseline (100% accuracy, full size).
        expected_accuracy = 1.0
        expected_size_ratio = 1.0
        expected_memory = None
        expected_latency = None

        for step in plan:
            metrics = step.get("expected_metrics", {})

            # Accuracy and size combine multiplicatively across steps.
            expected_accuracy *= metrics.get("accuracy_ratio", 1.0)
            expected_size_ratio *= metrics.get("size_ratio", 1.0)

            # Memory/latency aggregation (sum/max) is more complex; here we
            # simply keep the first reported value as the estimate.
            if expected_memory is None and "memory" in metrics:
                expected_memory = metrics["memory"]

            if expected_latency is None and "latency" in metrics:
                expected_latency = metrics["latency"]

        # Check each constraint in turn; log which one failed at debug level.
        if accuracy_threshold > 0 and expected_accuracy < accuracy_threshold:
            self.logger.debug(f"Plan failed accuracy constraint: {expected_accuracy} < {accuracy_threshold}")
            return False

        if size_limit and expected_size_ratio > size_limit:
            self.logger.debug(f"Plan failed size constraint: {expected_size_ratio} > {size_limit}")
            return False

        if memory_limit and expected_memory and expected_memory > memory_limit:
            self.logger.debug(f"Plan failed memory constraint")
            return False

        if latency_target and expected_latency and expected_latency > latency_target:
            self.logger.debug(f"Plan failed latency constraint")
            return False

        return True

    def adjust_plan(self, plan: List[Dict], constraints: Dict,
                    violation_metrics: List[str]) -> List[Dict]:
        """
        Adjust a compression plan toward satisfying violated constraints.

        Args:
            plan: Original compression plan; it is NOT modified.
            constraints: Constraint dict (adjustment direction is currently
                driven by violation_metrics alone).
            violation_metrics: Names of violated metrics, e.g. "accuracy",
                "size", "memory".

        Returns:
            A new plan whose step configs were nudged toward feasibility.
        """
        adjusted_plan = []

        for step in plan:
            adjusted_step = step.copy()

            # BUG FIX: the original mutated step["config"] in place through
            # the shallow step copy, corrupting the caller's plan. Copy the
            # config dict and rebind it before modifying (same pattern as
            # CompressionScheduler.update_plan).
            if "config" in adjusted_step:
                config = adjusted_step["config"].copy()
                adjusted_step["config"] = config

                if "accuracy" in violation_metrics:
                    # Accuracy violated: back off compression strength.
                    if adjusted_step["method"] == "quantization" and "bits" in config:
                        config["bits"] = min(config["bits"] + 2, 8)  # more bits
                    elif adjusted_step["method"] == "pruning" and "sparsity" in config:
                        config["sparsity"] = max(config["sparsity"] - 0.1, 0.3)  # less sparse

                elif "size" in violation_metrics or "memory" in violation_metrics:
                    # Size/memory violated: compress harder.
                    if adjusted_step["method"] == "quantization" and "bits" in config:
                        config["bits"] = max(config["bits"] - 2, 2)  # fewer bits
                    elif adjusted_step["method"] == "pruning" and "sparsity" in config:
                        config["sparsity"] = min(config["sparsity"] + 0.1, 0.9)  # more sparse

            adjusted_plan.append(adjusted_step)

        return adjusted_plan


class StrategyOptimizer:
    """Strategy optimizer: selects and tunes compression methods per layer."""

    # Layer types each compression method is applicable to (simplified;
    # a real implementation would consider more factors). Hoisted to a
    # class constant so the table is not rebuilt on every call.
    _METHOD_COMPATIBILITY = {
        "quantization": ["Conv2d", "Linear", "BatchNorm2d", "LayerNorm", "ReLU", "Embedding"],
        "pruning": ["Conv2d", "Linear"],
        "distillation": ["Conv2d", "Linear", "Embedding", "LSTM", "GRU"],
        "factorization": ["Conv2d", "Linear"]
    }

    def __init__(self):
        self.logger = get_logger("StrategyOptimizer")
        self.constraint_solver = ConstraintSolver()
        self.method_registry = get_method_registry()

    def select_methods_for_layer(self, layer_info: Dict, constraints: Dict) -> List[Dict]:
        """
        Select suitable compression methods for the given layer.

        Args:
            layer_info: Layer information (expects at least "type").
            constraints: Constraint dict.

        Returns:
            Candidate methods sorted by suitability score, best first.
            Each entry has keys "method", "config" and "score".
        """
        layer_type = layer_info.get("type", "Unknown")
        suitable_methods = []

        for method_name in self.method_registry.keys():
            # Skip methods that do not apply to this layer type.
            if not self._is_method_applicable(method_name, layer_type):
                continue

            suitable_methods.append({
                "method": method_name,
                "config": self._get_default_config(method_name, layer_info, constraints),
                "score": self._evaluate_method_suitability(method_name, layer_info, constraints),
            })

        # Best-scoring candidates first.
        suitable_methods.sort(key=lambda x: x["score"], reverse=True)
        return suitable_methods

    def _is_method_applicable(self, method_name: str, layer_type: str) -> bool:
        """Return True if the compression method supports this layer type."""
        return layer_type in self._METHOD_COMPATIBILITY.get(method_name, [])

    def _get_default_config(self, method_name: str, layer_info: Dict, constraints: Dict) -> Dict:
        """Build the default configuration for a compression method.

        Args:
            method_name: One of the registered method names.
            layer_info: Layer information (only "type" is consulted here).
            constraints: Constraint dict; "accuracy_threshold" steers the
                aggressiveness of the default config.

        Returns:
            Method-specific config dict; empty dict for unknown methods.
        """
        accuracy_threshold = constraints.get("accuracy_threshold", 0)

        if method_name == "quantization":
            # Tighter accuracy requirements get more bits.
            if accuracy_threshold > 0.95:
                return {"bits": 8, "scheme": "symmetric"}
            elif accuracy_threshold > 0.9:
                return {"bits": 6, "scheme": "asymmetric"}
            else:
                return {"bits": 4, "scheme": "asymmetric"}

        elif method_name == "pruning":
            # ROBUSTNESS FIX: use .get() — the original indexed
            # layer_info["type"] and raised KeyError on a missing key,
            # unlike every other method in this class.
            if layer_info.get("type") == "Conv2d":
                sparsity = 0.5 if accuracy_threshold > 0.9 else 0.7
                return {"sparsity": sparsity, "method": "structured" if sparsity > 0.6 else "magnitude"}
            else:
                sparsity = 0.6 if accuracy_threshold > 0.9 else 0.8
                return {"sparsity": sparsity, "method": "magnitude"}

        elif method_name == "distillation":
            return {"temperature": 2.0}

        elif method_name == "factorization":
            return {"rank_ratio": 0.5}

        return {}

    def _evaluate_method_suitability(self, method_name: str, layer_info: Dict, constraints: Dict) -> float:
        """Score how well a compression method fits a layer, in [0, 1]."""
        base_score = 0.5

        layer_type = layer_info.get("type", "Unknown")

        if method_name == "quantization":
            # Quantization applies to almost every layer type.
            base_score += 0.3

            # Especially effective on parameter-heavy layers.
            if layer_info.get("parameters", 0) > 1000000:
                base_score += 0.1

        elif method_name == "pruning":
            # Pruning works best on conv and fully-connected layers.
            if layer_type in ["Conv2d", "Linear"]:
                base_score += 0.2

                # Large, likely-sparse layers benefit the most.
                if layer_info.get("parameters", 0) > 1000000:
                    base_score += 0.2

        elif method_name == "distillation":
            # Distillation suits complex representation-learning layers.
            if layer_type in ["Conv2d", "Embedding", "LSTM", "GRU"]:
                base_score += 0.2

            # Less useful for shallow networks.
            if layer_info.get("depth", 0) < 3:
                base_score -= 0.1

        elif method_name == "factorization":
            # Factorization suits large matrix operations.
            if layer_type in ["Conv2d", "Linear"] and layer_info.get("parameters", 0) > 500000:
                base_score += 0.3
            else:
                base_score -= 0.1

        # Constraint-driven bonuses.
        if constraints.get("accuracy_critical", False) and method_name in ["quantization", "distillation"]:
            base_score += 0.1

        if constraints.get("memory_critical", False) and method_name in ["pruning", "factorization"]:
            base_score += 0.1

        return max(0.0, min(1.0, base_score))  # clamp to [0, 1]

    def optimize_layer_compression(self, layer_info: Dict, candidate_methods: List[Dict],
                                   constraints: Dict) -> Optional[Dict]:
        """
        Optimize the compression configuration for a specific layer.

        Args:
            layer_info: Layer information.
            candidate_methods: Candidate methods (best-scored first).
            constraints: Constraint dict; "accuracy_critical" /
                "memory_critical" / "size_critical" flags steer tuning.

        Returns:
            The best method dict with a tuned config, or None when
            candidate_methods is empty.  (FIX: annotation was `Dict`
            although None is a documented return value.)
        """
        if not candidate_methods:
            return None

        # Start from the highest-scoring candidate.
        best_method = candidate_methods[0]

        if "config" in best_method:
            config = best_method["config"].copy()
            method_name = best_method["method"]

            if constraints.get("accuracy_critical", False):
                # Favor accuracy: weaken compression.
                if method_name == "quantization" and "bits" in config:
                    config["bits"] = min(config["bits"] + 2, 8)

                elif method_name == "pruning" and "sparsity" in config:
                    config["sparsity"] = max(config["sparsity"] - 0.1, 0.3)

            elif constraints.get("memory_critical", False) or constraints.get("size_critical", False):
                # Favor footprint: strengthen compression.
                if method_name == "quantization" and "bits" in config:
                    config["bits"] = max(config["bits"] - 2, 2)

                elif method_name == "pruning" and "sparsity" in config:
                    config["sparsity"] = min(config["sparsity"] + 0.1, 0.9)

            best_method = {
                "method": method_name,
                "config": config,
                "score": best_method["score"]
            }

        return best_method


class CompressionScheduler:
    """Compression scheduler: builds and maintains model compression plans."""

    def __init__(self):
        self.logger = get_logger("CompressionScheduler")
        self.strategy_optimizer = StrategyOptimizer()
        self.constraint_solver = ConstraintSolver()

    def generate_plan(self, model_analysis: Dict, constraints: Dict) -> List[Dict]:
        """
        Build a compression plan from a model analysis and constraints.

        Args:
            model_analysis: Analysis result with "layers" and "bottlenecks".
            constraints: Compression constraints.

        Returns:
            List of compression steps (layer / method / config / priority).
        """
        self.logger.info("Generating compression plan based on model analysis")

        layer_records = model_analysis.get("layers", [])
        bottlenecks = model_analysis.get("bottlenecks", {})

        # Rank the layers worth compressing, driven by bottlenecks + constraints.
        ranked_targets = self._prioritize_layers(layer_records, bottlenecks, constraints)

        plan: List[Dict] = []
        for target in ranked_targets:
            record = next((entry for entry in layer_records if entry["name"] == target["name"]), None)
            if record is None:
                continue

            # Candidate compression methods for this layer, best first.
            candidates = self.strategy_optimizer.select_methods_for_layer(record, constraints)
            if not candidates:
                self.logger.warning(f"No suitable compression method found for layer {target['name']}")
                continue

            # Tune the winning candidate's configuration.
            chosen = self.strategy_optimizer.optimize_layer_compression(
                record, candidates, constraints
            )
            if not chosen:
                continue

            plan.append({
                "layer": target["name"],
                "method": chosen["method"],
                "config": chosen["config"],
                "priority": target["priority"],
            })

        # If the plan violates a constraint, let the solver rebalance it.
        if plan and not self.constraint_solver.is_feasible(plan, constraints):
            self.logger.warning("Generated plan does not meet constraints, adjusting...")
            plan = self.constraint_solver.adjust_plan(plan, constraints, ["accuracy", "size"])

        self.logger.info(f"Generated compression plan with {len(plan)} steps")
        return plan

    def _prioritize_layers(self, layers_info: List[Dict], bottlenecks: Dict,
                           constraints: Dict) -> List[Dict]:
        """
        Rank candidate layers for compression.

        Args:
            layers_info: Per-layer information records.
            bottlenecks: Bottleneck analysis ("combined"/"memory"/"compute").
            constraints: Constraint dict.

        Returns:
            Target records ordered by priority then score, descending.
        """
        targets: List[Dict] = []
        seen = set()

        def add(name: str, priority: str, reason: str, score: float) -> None:
            # Append one target record and remember its name for dedup.
            targets.append({
                "name": name,
                "priority": priority,
                "reason": reason,
                "score": score,
            })
            seen.add(name)

        # Combined bottlenecks (both compute AND memory) go in first, at high priority.
        for entry in bottlenecks.get("combined", []):
            add(entry["name"], "high", "Combined bottleneck", entry.get("combined_score", 1.0))

        # Memory-driven constraints pull in memory bottlenecks next.
        if constraints.get("memory_critical", False) or constraints.get("size_critical", False):
            for entry in bottlenecks.get("memory", []):
                if entry["name"] in seen:
                    continue
                add(entry["name"], "medium", "Memory bottleneck", entry.get("ratio", 0.5))

        # Latency-driven constraints pull in compute bottlenecks.
        if constraints.get("latency_critical", False):
            for entry in bottlenecks.get("compute", []):
                if entry["name"] in seen:
                    continue
                add(entry["name"], "medium", "Compute bottleneck", entry.get("ratio", 0.5))

        # Round out the list with the ten most parameter-heavy layers.
        biggest_first = sorted(layers_info, key=lambda rec: rec.get("parameters", 0), reverse=True)
        for layer in biggest_first[:10]:
            if layer["name"] in seen:
                continue
            add(layer["name"], "low", "High parameter count", 0.3)

        # Stable sort: priority rank first, score second, both descending.
        rank = {"high": 3, "medium": 2, "low": 1}
        targets.sort(key=lambda t: (rank[t["priority"]], t["score"]), reverse=True)

        return targets

    def update_plan(self, plan: List[Dict], eval_results: Dict) -> List[Dict]:
        """
        Revise a compression plan using measured evaluation results.

        Args:
            plan: Current compression plan (not modified).
            eval_results: Evaluation metrics ("accuracy", "latency", "memory").

        Returns:
            A new plan with per-step configs adjusted; the input plan object
            is returned unchanged when eval_results is empty.
        """
        if not eval_results:
            return plan

        accuracy = eval_results.get("accuracy", 1.0)
        latency = eval_results.get("latency", 0)
        memory = eval_results.get("memory", 0)

        revised: List[Dict] = []
        for step in plan:
            new_step = dict(step)

            if "config" in new_step:
                cfg = dict(new_step["config"])
                method = new_step["method"]

                # Accuracy dropped too far -> relax compression strength.
                if accuracy < 0.9:
                    if method == "quantization" and "bits" in cfg:
                        cfg["bits"] = min(cfg["bits"] + 2, 8)
                    elif method == "pruning" and "sparsity" in cfg:
                        cfg["sparsity"] = max(cfg["sparsity"] - 0.1, 0.3)

                # Latency/memory still reported -> squeeze high-priority steps harder.
                if (latency > 0 or memory > 0) and new_step["priority"] == "high":
                    if method == "quantization" and "bits" in cfg:
                        cfg["bits"] = max(cfg["bits"] - 1, 2)
                    elif method == "pruning" and "sparsity" in cfg:
                        cfg["sparsity"] = min(cfg["sparsity"] + 0.05, 0.9)

                new_step["config"] = cfg

            revised.append(new_step)

        return revised

    def get_supported_methods(self) -> Dict[str, Dict]:
        """
        Describe every registered compression method.

        Returns:
            Mapping of method name to {"description", "parameters"}.
        """
        registry = get_method_registry()

        info: Dict[str, Dict] = {}
        for name, cls in registry.items():
            defaults = cls.get_default_params() if hasattr(cls, "get_default_params") else {}
            info[name] = {
                "description": cls.__doc__ or f"{name} compression method",
                "parameters": defaults,
            }

        return info