# uamcf/core/analyzer.py
import time
import numpy as np
from typing import Dict, List, Any, Tuple, Optional, Union
from collections import defaultdict

from ..utils.logger import get_logger
from ..adapters import get_adapter
# def get_adapter(adapter_name):
#     """获取适配器实例"""
#     from ..adapters import PyTorchAdapter, TensorFlowAdapter, ONNXAdapter, TFLiteAdapter
#
#     ADAPTER_REGISTRY = {
#         "pytorch": PyTorchAdapter,
#         "tensorflow": TensorFlowAdapter,
#         "onnx": ONNXAdapter,
#         "tflite": TFLiteAdapter
#     }
#
#     adapter_cls = ADAPTER_REGISTRY.get(adapter_name)
#     if adapter_cls:
#         return adapter_cls()
#     return None


class LayerProfiler:
    """Per-layer profiler: collects structure, latency and memory statistics."""

    # Assumed storage size of one value in bytes (float32).
    _BYTES_PER_ELEMENT = 4

    def __init__(self):
        self.logger = get_logger("LayerProfiler")

    def profile_layers(self, model: Any, adapter_name: str = "pytorch") -> List[Dict]:
        """
        Collect structural information for every layer of a model.

        Args:
            model: The model to analyze.
            adapter_name: Name of the framework adapter to use.

        Returns:
            A list with one info dict per layer (name, type, parameters,
            input/output shape, memory size, trainability). Empty list when
            the adapter is unsupported.
        """
        adapter = get_adapter(adapter_name)
        if adapter is None:
            self.logger.error(f"Unsupported adapter: {adapter_name}")
            return []

        layers_info = []
        for name, info in adapter.get_layers_info(model).items():
            layers_info.append({
                "name": name,
                "type": info.get("type", "Unknown"),
                "parameters": info.get("parameters", 0),
                "input_shape": info.get("input_shape", None),
                "output_shape": info.get("output_shape", None),
                # Memory occupied by parameters + buffers, as reported by the adapter.
                "memory_size": info.get("memory_size", 0),
                "is_trainable": info.get("is_trainable", True)
            })

        return layers_info

    def measure_computation_time(self, model: Any, sample_input: Any,
                                 adapter_name: str = "pytorch") -> Dict[str, float]:
        """
        Measure per-layer computation latency.

        Args:
            model: The model to analyze.
            sample_input: Sample input fed through the model.
            adapter_name: Name of the framework adapter to use.

        Returns:
            Mapping of layer name to latency in milliseconds; empty dict when
            the adapter is unsupported.
        """
        adapter = get_adapter(adapter_name)
        if adapter is None:
            self.logger.error(f"Unsupported adapter: {adapter_name}")
            return {}

        return adapter.measure_layer_latency(model, sample_input)

    @staticmethod
    def _element_count(shape) -> int:
        """Product of a shape's dimensions, treating unknown (None) dims as 1.

        Adapters may report dynamic dimensions (e.g. the batch axis) as None;
        np.prod would raise on those, so multiply only the concrete integers
        and return a plain int.
        """
        count = 1
        for dim in shape:
            if isinstance(dim, (int, np.integer)):
                count *= int(dim)
        return count

    def estimate_memory_usage(self, layers_info: List[Dict]) -> Dict[str, Dict]:
        """
        Estimate per-layer memory usage in bytes, assuming float32 storage.

        Args:
            layers_info: Layer info list as produced by profile_layers.

        Returns:
            Mapping of layer name to a dict with parameters_size,
            activation_size and total_size (all in bytes).
        """
        memory_stats = {}

        for layer in layers_info:
            name = layer["name"]
            params_size = layer.get("parameters", 0) * self._BYTES_PER_ELEMENT

            # Rough size of intermediate activations: input + output tensors.
            activation_size = 0
            if layer.get("input_shape") and layer.get("output_shape"):
                input_size = self._element_count(layer["input_shape"]) * self._BYTES_PER_ELEMENT
                output_size = self._element_count(layer["output_shape"]) * self._BYTES_PER_ELEMENT
                activation_size = input_size + output_size

            memory_stats[name] = {
                "parameters_size": params_size,
                "activation_size": activation_size,
                "total_size": params_size + activation_size
            }

        return memory_stats


class BottleneckDetector:
    """Detects compute and memory performance bottlenecks from profiling data."""

    def __init__(self):
        self.logger = get_logger("BottleneckDetector")

    def identify_compute_bottlenecks(self, computation_times: Dict[str, float],
                                     threshold: float = 0.1) -> List[Dict]:
        """
        Identify compute bottlenecks.

        Args:
            computation_times: Per-layer latency statistics (milliseconds).
            threshold: Fraction of total time above which a layer counts as a
                bottleneck.

        Returns:
            Bottleneck entries (name, time, ratio), sorted by ratio descending.
        """
        if not computation_times:
            return []

        total_time = sum(computation_times.values())
        if total_time == 0:
            return []

        # NOTE: loop variable is `latency`, not `time`, to avoid shadowing the
        # module-level `import time`.
        bottlenecks = []
        for name, latency in computation_times.items():
            ratio = latency / total_time
            if ratio > threshold:
                bottlenecks.append({
                    "name": name,
                    "time": latency,
                    "ratio": ratio
                })

        bottlenecks.sort(key=lambda b: b["ratio"], reverse=True)
        return bottlenecks

    def identify_memory_bottlenecks(self, memory_stats: Dict[str, Dict],
                                    threshold: float = 0.1) -> List[Dict]:
        """
        Identify memory bottlenecks.

        Args:
            memory_stats: Per-layer memory usage statistics (bytes).
            threshold: Fraction of total memory above which a layer counts as
                a bottleneck.

        Returns:
            Bottleneck entries (name, memory, ratio, param_ratio), sorted by
            ratio descending.
        """
        if not memory_stats:
            return []

        total_memory = sum(stats["total_size"] for stats in memory_stats.values())
        if total_memory == 0:
            return []

        bottlenecks = []
        for name, stats in memory_stats.items():
            ratio = stats["total_size"] / total_memory
            if ratio > threshold:
                bottlenecks.append({
                    "name": name,
                    "memory": stats["total_size"],
                    "ratio": ratio,
                    # Share of this layer's footprint that is parameters
                    # (vs. activations); guards the zero-size edge case.
                    "param_ratio": stats["parameters_size"] / stats["total_size"] if stats["total_size"] > 0 else 0
                })

        bottlenecks.sort(key=lambda b: b["ratio"], reverse=True)
        return bottlenecks

    def identify_combined_bottlenecks(self, compute_bottlenecks: List[Dict],
                                      memory_bottlenecks: List[Dict]) -> List[Dict]:
        """
        Identify layers that are both compute and memory bottlenecks.

        Args:
            compute_bottlenecks: Output of identify_compute_bottlenecks.
            memory_bottlenecks: Output of identify_memory_bottlenecks.

        Returns:
            Combined entries (name, compute_ratio, memory_ratio,
            combined_score), sorted by combined_score descending.
        """
        # Index both lists by name for O(1) lookup instead of repeated
        # linear scans with next().
        compute_by_name = {b["name"]: b for b in compute_bottlenecks}
        memory_by_name = {b["name"]: b for b in memory_bottlenecks}

        combined_bottlenecks = []
        for name in compute_by_name.keys() & memory_by_name.keys():
            compute_info = compute_by_name[name]
            memory_info = memory_by_name[name]
            combined_bottlenecks.append({
                "name": name,
                "compute_ratio": compute_info["ratio"],
                "memory_ratio": memory_info["ratio"],
                # Product of the two ratios as a simple joint severity score.
                "combined_score": compute_info["ratio"] * memory_info["ratio"]
            })

        combined_bottlenecks.sort(key=lambda b: b["combined_score"], reverse=True)
        return combined_bottlenecks


class ModelAnalyzer:
    """Coordinates layer profiling and bottleneck detection for a model."""

    def __init__(self):
        self.logger = get_logger("ModelAnalyzer")
        self.layer_profiler = LayerProfiler()
        self.bottleneck_detector = BottleneckDetector()

    def analyze_model(self, model: Any, sample_input: Any = None,
                      adapter_name: str = "pytorch") -> Dict:
        """
        Run a comprehensive analysis of a model.

        Args:
            model: The model to analyze.
            sample_input: Optional sample input used for latency measurement;
                generated via the adapter when omitted.
            adapter_name: Name of the framework adapter to use.

        Returns:
            Analysis dict with model info, per-layer info, memory stats,
            computation times and bottlenecks; empty dict when the adapter is
            unsupported.
        """
        self.logger.info(f"Starting comprehensive model analysis using {adapter_name} adapter")

        adapter = get_adapter(adapter_name)
        if adapter is None:
            self.logger.error(f"Unsupported adapter: {adapter_name}")
            return {}

        # Try to synthesize an input when none is supplied; latency
        # measurement is simply skipped if generation fails.
        if sample_input is None:
            try:
                sample_input = adapter.generate_sample_input(model)
                self.logger.info("Generated sample input for analysis")
            except Exception as e:
                self.logger.warning(f"Failed to generate sample input: {str(e)}")

        # Layer structure.
        layers_info = self.layer_profiler.profile_layers(model, adapter_name)
        self.logger.info(f"Analyzed {len(layers_info)} layers in the model")

        # Memory usage estimates.
        memory_stats = self.layer_profiler.estimate_memory_usage(layers_info)

        # Latency measurement (only possible with a sample input).
        computation_times = {}
        if sample_input is not None:
            computation_times = self.layer_profiler.measure_computation_time(
                model, sample_input, adapter_name
            )
            self.logger.info("Measured computation time for model layers")

        # Bottleneck detection.
        compute_bottlenecks = self.bottleneck_detector.identify_compute_bottlenecks(computation_times)
        memory_bottlenecks = self.bottleneck_detector.identify_memory_bottlenecks(memory_stats)
        combined_bottlenecks = self.bottleneck_detector.identify_combined_bottlenecks(
            compute_bottlenecks, memory_bottlenecks
        )

        # Model-level summary from the adapter.
        model_info = adapter.get_model_info(model)

        analysis_result = {
            "model_info": model_info,
            "layers": layers_info,
            "memory_stats": memory_stats,
            "computation_times": computation_times,
            "bottlenecks": {
                "compute": compute_bottlenecks,
                "memory": memory_bottlenecks,
                "combined": combined_bottlenecks
            }
        }

        self.logger.info("Model analysis completed")
        return analysis_result

    def get_compression_candidates(self, analysis_result: Dict, top_k: int = 10) -> List[Dict]:
        """
        Recommend layers to compress based on an analysis result.

        Priority order: combined bottlenecks (high), memory bottlenecks
        (medium), compute bottlenecks (medium), then highest-parameter layers
        (low) to fill up to top_k.

        Args:
            analysis_result: Output of analyze_model.
            top_k: Maximum number of candidates to return.

        Returns:
            Candidate entries (name, type, priority, reason, score), sorted by
            score descending, at most top_k.
        """
        if not analysis_result:
            return []

        # Index layers once for O(1) lookup instead of scanning per bottleneck.
        layers = analysis_result["layers"]
        layers_by_name = {layer["name"]: layer for layer in layers}

        candidates: List[Dict] = []
        seen = set()

        def _add(name: str, priority: str, reason: str, score: float) -> None:
            # Skip unknown names and duplicates already claimed by a
            # higher-priority category.
            layer = layers_by_name.get(name)
            if layer is None or name in seen:
                return
            seen.add(name)
            candidates.append({
                "name": name,
                "type": layer["type"],
                "priority": priority,
                "reason": reason,
                "score": score
            })

        bottlenecks = analysis_result["bottlenecks"]
        for b in bottlenecks["combined"]:
            _add(b["name"], "high", "Combined compute and memory bottleneck", b["combined_score"])
        for b in bottlenecks["memory"]:
            _add(b["name"], "medium", "Memory bottleneck", b["ratio"])
        for b in bottlenecks["compute"]:
            _add(b["name"], "medium", "Compute bottleneck", b["ratio"])

        # Fill remaining slots with the highest-parameter layers.
        if len(candidates) < top_k:
            # Guard against an all-zero parameter count, which previously
            # raised ZeroDivisionError in the score normalization.
            max_params = max((layer.get("parameters", 1) for layer in layers), default=1) or 1
            for layer in sorted(layers, key=lambda l: l.get("parameters", 0), reverse=True):
                if len(candidates) >= top_k:
                    break
                _add(layer["name"], "low", "High parameter count",
                     layer.get("parameters", 0) / max_params)

        candidates.sort(key=lambda c: c["score"], reverse=True)
        return candidates[:top_k]