"""
Refactored model architecture analyzer with improved design and reduced redundancy.

This module provides comprehensive analysis of model structure with clean, maintainable code
following SOLID principles and DRY methodology.
"""

import logging
import csv
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple, Any, Optional, Union, Set
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
import torch
import torch.nn as nn
from tabulate import tabulate

from learners.interfaces.event import EventContext
from learners.events.handlers.base import BaseEventHandler
from learners.events.config import HandlerConfig


# Constants for magic numbers
class DisplayConstrains:
    """
    Constants for display configuration and threshold values.

    This class centralizes magic numbers used throughout the analysis
    to improve maintainability and consistency.

    NOTE(review): the name looks like a typo for ``DisplayConstraints``;
    it is kept as-is because other code in this module references it.
    """

    # Max individual parameters listed under one component in detailed views.
    MAX_PARAMS_PER_COMPONENT: int = 8
    # Number of components shown by the ultra-compact summary.
    TOP_COMPONENTS_LIMIT: int = 3
    # Default cap on component rows in the compact-detailed view.
    MAX_COMPONENTS_DEFAULT: int = 10
    # Components at or below this footprint (in MB) omit the MB detail column.
    MEMORY_THRESHOLD_MB: float = 0.1
    # Memory values below this (in MB) render as "<0.001 MB".
    PRECISION_THRESHOLD: float = 0.001


class AnalysisType(Enum):
    """
    Types of analysis that can be performed on model architecture.

    Each member names a display strategy; the value is the lowercase
    strategy identifier.
    """

    HIERARCHICAL = "hierarchical"  # tree view of total/trainable/frozen
    DETAILED = "detailed"  # filterable per-parameter listing
    ULTRA_COMPACT = "ultra_compact"  # one-table quick summary
    COMPONENT_GROUPED = "component_grouped"  # grouped view (strategy not defined in this chunk)
    COMPACT_DETAILED = "compact_detailed"  # compact tree with extra details


@dataclass
class ParameterInfo:
    """
    Comprehensive parameter information for a single model parameter.

    Contains all relevant details about a parameter including its shape,
    memory usage, component classification, and training status.
    """

    name: str  # fully-qualified, dot-separated parameter name
    shape: Tuple[int, ...]  # tensor shape
    dtype: str  # dtype rendered as a string
    device: str  # device rendered as a string
    total_params: int  # number of elements in the tensor
    trainable: bool  # whether this parameter receives gradient updates
    memory_mb: float  # memory footprint in megabytes
    component: str  # logical component label (see ParameterClassifier)
    layer_type: str  # inferred layer type, e.g. "Linear" (see ParameterClassifier)
    parameter_type: str  # "weight", "bias", "buffer" or "other"


@dataclass
class ComponentStats:
    """
    Aggregated statistics for one logical model component.

    Values are accumulated from the individual parameters assigned to the
    component (e.g. backbone, classifier, adapter), together with a count
    of the layers that belong to it.
    """

    total_parameters: int = 0
    trainable_parameters: int = 0
    layer_count: int = 0
    memory_mb: float = 0.0

    @property
    def frozen_parameters(self) -> int:
        """Number of parameters that do not receive gradients."""
        frozen = self.total_parameters - self.trainable_parameters
        return frozen

    @property
    def trainable_ratio(self) -> float:
        """Fraction of this component's parameters that are trainable."""
        if self.total_parameters <= 0:
            return 0.0
        return self.trainable_parameters / self.total_parameters


@dataclass
class ModelStatistics:
    """
    Basic model statistics providing high-level overview of model parameters.

    Contains fundamental metrics including total parameters, trainable/frozen counts,
    ratios, and memory usage for the entire model.
    """

    total_parameters: int  # total element count across all parameters
    trainable_parameters: int  # elements with requires_grad set
    frozen_parameters: int  # total_parameters - trainable_parameters
    trainable_ratio: float  # trainable / total (0.0 when the model is empty)
    model_size_mb: float  # parameters + buffers, in megabytes


class ParameterClassifier:
    """
    Classifies model parameters into components and types.

    Uses substring pattern matching on parameter names to categorize
    parameters into logical components (backbone, classifier, adapter, ...)
    and to infer their layer type and parameter type (weight/bias/buffer).
    Pattern order matters: the first matching group wins.
    """

    # Layer-type patterns, checked in order. Hoisted to class level so the
    # mapping is built once instead of on every classify_layer_type() call.
    _LAYER_TYPE_PATTERNS = (
        (("conv",), "Convolution"),
        (("linear", "fc", "classifier"), "Linear"),
        (("bn", "batch_norm", "layer_norm", "norm"), "Normalization"),
        (("embedding",), "Embedding"),
        (("attention", "attn"), "Attention"),
        (("lstm", "gru", "rnn"), "Recurrent"),
    )

    # Names that identify non-learnable buffers (e.g. BatchNorm statistics).
    _BUFFER_TERMS = ("running_mean", "running_var", "num_batches_tracked")

    def __init__(self):
        """
        Initialize the classifier with component classification patterns.

        ``component_patterns`` maps a component name to the substrings that
        identify it; insertion order defines match priority.
        """
        self.component_patterns = {
            "backbone": [
                "backbone",
                "encoder",
                "feature_extractor",
                "base_model",
                "feature",
            ],
            "classifier": [
                "classifier",
                "head",
                "fc",
                "linear_head",
                "classification_head",
            ],
            "adapter": ["adapter", "lora", "bias", "prompt", "prefix"],
            "attention": ["attention", "attn", "self_attn", "cross_attn"],
            "normalization": ["norm", "bn", "layer_norm", "batch_norm"],
        }

    def classify_component(self, param_name: str) -> str:
        """Classify a parameter name into a component category.

        Returns the first component whose patterns match, or ``"other"``.
        """
        param_name_lower = param_name.lower()

        for component, patterns in self.component_patterns.items():
            if any(pattern in param_name_lower for pattern in patterns):
                return component

        return "other"

    def classify_layer_type(self, param_name: str) -> str:
        """Infer the layer type (Convolution, Linear, ...) from a parameter name."""
        name_lower = param_name.lower()

        for terms, layer_type in self._LAYER_TYPE_PATTERNS:
            if any(term in name_lower for term in terms):
                return layer_type

        return "Other"

    def classify_parameter_type(self, param_name: str) -> str:
        """Classify a parameter as ``weight``, ``bias``, ``buffer`` or ``other``."""
        name_lower = param_name.lower()

        # Weight is checked before bias so a name containing both substrings
        # resolves to "weight", matching the original precedence.
        if name_lower.endswith(".weight") or "weight" in name_lower:
            return "weight"
        if name_lower.endswith(".bias") or "bias" in name_lower:
            return "bias"
        if any(term in name_lower for term in self._BUFFER_TERMS):
            return "buffer"
        return "other"


class StatisticsCalculator:
    """
    Computes model-level and component-level parameter statistics.

    Provides static methods to compute basic model statistics and
    per-component statistics from model parameters and structure.
    """

    @staticmethod
    def calculate_basic_stats(model: nn.Module) -> ModelStatistics:
        """Calculate basic model statistics (counts, trainable ratio, size)."""
        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        frozen_params = total_params - trainable_params

        # Model size includes both parameters and buffers (e.g. BN statistics).
        param_size = sum(p.numel() * p.element_size() for p in model.parameters())
        buffer_size = sum(b.numel() * b.element_size() for b in model.buffers())
        model_size_mb = (param_size + buffer_size) / (1024**2)

        return ModelStatistics(
            total_parameters=total_params,
            trainable_parameters=trainable_params,
            frozen_parameters=frozen_params,
            trainable_ratio=(
                trainable_params / total_params if total_params > 0 else 0.0
            ),
            model_size_mb=model_size_mb,
        )

    @staticmethod
    def calculate_component_stats(
        param_details: List[ParameterInfo], model: nn.Module
    ) -> Dict[str, ComponentStats]:
        """Calculate statistics grouped by component.

        Parameter counts and memory come from ``param_details``; layer counts
        are derived from the model's module tree.
        """
        component_stats = defaultdict(ComponentStats)

        # Aggregate parameter-level stats per component.
        for param in param_details:
            stats = component_stats[param.component]
            stats.total_parameters += param.total_params
            if param.trainable:
                stats.trainable_parameters += param.total_params
            stats.memory_mb += param.memory_mb

        # Count layers per component. Only modules that directly own
        # parameters (recurse=False) are counted, which already prevents a
        # parent container and its children from being counted twice.
        #
        # BUG FIX: the previous guard compared each module name against ALL
        # module names; since the root module's name is "" and every name
        # starts with "", it rejected every named module and layer counts
        # were effectively always zero. The classifier is also created once
        # instead of once per module.
        classifier = ParameterClassifier()
        for name, module in model.named_modules():
            if next(module.parameters(recurse=False), None) is not None:
                component = classifier.classify_component(name)
                component_stats[component].layer_count += 1

        return dict(component_stats)


class TableFormatter:
    """
    Shared formatting helpers for analysis tables.

    Offers static utilities for rendering parameter-type icons, percentages,
    memory figures, trainable status and tree-drawing prefixes consistently
    across all display strategies.
    """

    @staticmethod
    def get_parameter_type_icon(param_type: str) -> str:
        """Return the icon associated with a parameter type."""
        icons = {"weight": "⚖️", "bias": "📏", "buffer": "💾", "other": "❓"}
        return icons.get(param_type, "❓")

    @staticmethod
    def format_percentage(value: float, total: float) -> str:
        """Render ``value/total`` as a percentage string with one decimal."""
        if total == 0:
            return "0.0%"
        share = value / total * 100
        return f"{share:.1f}%"

    @staticmethod
    def format_memory(memory_mb: float) -> str:
        """Render a memory figure in MB, collapsing tiny values."""
        if memory_mb < DisplayConstrains.PRECISION_THRESHOLD:
            return "<0.001 MB"
        return f"{memory_mb:.3f} MB"

    @staticmethod
    def format_trainable_status(trainable: int, total: int) -> str:
        """Summarize how many of ``total`` parameters are trainable."""
        if trainable == total:
            return "✓ All"
        if trainable == 0:
            return "✗ None"
        frozen = total - trainable
        return f"✓{trainable:,}/✗{frozen:,}"

    @staticmethod
    def build_hierarchical_prefix(is_last: bool, depth: int = 0) -> str:
        """Build the tree-drawing prefix for an entry at ``depth``."""
        connector = "└──" if is_last else "├──"
        if depth == 0:
            return connector
        return "│   " * (depth - 1) + connector


class DisplayStrategy(ABC):
    """
    Common interface for model-analysis display strategies.

    Implements the Strategy pattern: each concrete subclass renders the
    results held by a ``ModelAnalyzer`` in a particular layout.
    """

    @abstractmethod
    def display(self, analyzer: "ModelAnalyzer") -> Optional[Dict[str, Any]]:
        """Render analysis results; may return a summary dict or None."""
        ...


class HierarchicalDisplayStrategy(DisplayStrategy):
    """
    Tree-style rendering of the model's parameter breakdown.

    Logs total/trainable/frozen parameters as a small hierarchy with a
    per-component breakdown of the trainable branch, followed by a one-line
    memory summary.
    """

    def display(self, analyzer: "ModelAnalyzer") -> Optional[Dict[str, Any]]:
        """Log the hierarchical analysis; always returns None."""
        logger = analyzer.logger
        banner = "=" * 80

        logger.info("\n" + banner)
        logger.info(f"ARCHITECTURE ANALYSIS: {analyzer.model_name}")
        logger.info(banner)

        self._print_parameter_hierarchy(
            logger, analyzer.basic_stats, analyzer.component_stats
        )
        self._print_memory_summary(logger, analyzer.memory_analysis)
        return None

    def _print_parameter_hierarchy(
        self,
        logger: logging.Logger,
        stats: ModelStatistics,
        components: Dict[str, ComponentStats],
    ) -> None:
        """Assemble and log the parameter hierarchy table."""
        formatter = TableFormatter()
        pct = formatter.format_percentage

        # Root row plus the trainable branch header.
        rows = [
            [
                "Total Parameters",
                f"{stats.total_parameters:,}",
                "100.0%",
                f"{stats.model_size_mb:.1f} MB",
            ],
            [
                "├── Trainable",
                f"{stats.trainable_parameters:,}",
                pct(stats.trainable_parameters, stats.total_parameters),
                "",
            ],
        ]

        # One sub-row per component with trainable parameters, largest first.
        if components:
            trainable = sorted(
                (
                    (name, comp)
                    for name, comp in components.items()
                    if comp.trainable_parameters > 0
                ),
                key=lambda item: item[1].trainable_parameters,
                reverse=True,
            )

            last_index = len(trainable) - 1
            for index, (comp_name, comp_stats) in enumerate(trainable):
                prefix = "│   └──" if index == last_index else "│   ├──"
                detail_parts = []
                if comp_stats.layer_count > 0:
                    detail_parts.append(f"({comp_stats.layer_count} layers)")

                rows.append(
                    [
                        f"{prefix} {comp_name.title()}",
                        f"{comp_stats.trainable_parameters:,}",
                        pct(comp_stats.trainable_parameters, stats.total_parameters),
                        " ".join(detail_parts),
                    ]
                )

        # Frozen branch closes the tree.
        rows.append(
            [
                "└── Frozen",
                f"{stats.frozen_parameters:,}",
                pct(stats.frozen_parameters, stats.total_parameters),
                "",
            ]
        )

        headers = ["Component", "Parameters", "Trainable %", "Details"]
        logger.info("\nPARAMETER HIERARCHY:")
        logger.info("\n" + tabulate(rows, headers=headers, tablefmt="pipe"))

    def _print_memory_summary(
        self, logger: logging.Logger, memory: Dict[str, float]
    ) -> None:
        """Log a single-line memory usage summary."""
        logger.info(
            f"\nMemory Usage: Parameters {memory['parameters_mb']:.1f} MB + "
            f"Buffers {memory['buffers_mb']:.1f} MB = "
            f"Total {memory['total_mb']:.1f} MB"
        )


class UltraCompactDisplayStrategy(DisplayStrategy):
    """
    Minimal one-table summary of the model.

    Logs only the headline parameter counts plus the top components ranked
    by trainable parameter count, and returns those figures as a dict.
    """

    def display(self, analyzer: "ModelAnalyzer") -> Dict[str, Any]:
        """Log the ultra-compact summary and return its key figures."""
        logger = analyzer.logger
        stats = analyzer.basic_stats

        logger.info(f"\nQUICK SUMMARY: {analyzer.model_name}")

        top_components = self._get_top_components(
            analyzer.component_stats, DisplayConstrains.TOP_COMPONENTS_LIMIT
        )

        rows = [
            [
                "Parameters",
                f"{stats.total_parameters:,}",
                f"Trainable: {stats.trainable_parameters:,} ({stats.trainable_ratio:.1%})",
                f"Size: {stats.model_size_mb:.1f} MB",
            ]
        ]

        for comp_name, comp_stats in top_components:
            share = comp_stats.trainable_parameters / stats.total_parameters * 100
            layer_note = (
                f"{comp_stats.layer_count} layers"
                if comp_stats.layer_count > 0
                else ""
            )
            rows.append(
                [
                    f"├── {comp_name.title()}",
                    f"{comp_stats.trainable_parameters:,}",
                    f"{share:.1f}%",
                    layer_note,
                ]
            )

        headers = ["Component", "Count", "Details", "Extra"]
        logger.info("\n" + tabulate(rows, headers=headers, tablefmt="pipe"))

        return {
            "total_parameters": stats.total_parameters,
            "trainable_parameters": stats.trainable_parameters,
            "top_components": [
                (name, comp.trainable_parameters) for name, comp in top_components
            ],
            "model_size_mb": stats.model_size_mb,
        }

    def _get_top_components(
        self, components: Dict[str, ComponentStats], limit: int
    ) -> List[Tuple[str, ComponentStats]]:
        """Return the top ``limit`` components by trainable parameter count."""
        if not components:
            return []

        ranked = sorted(
            (
                (name, stats)
                for name, stats in components.items()
                if stats.trainable_parameters > 0
            ),
            key=lambda item: item[1].trainable_parameters,
            reverse=True,
        )
        return ranked[:limit]


class CompactDetailedDisplayStrategy(DisplayStrategy):
    """
    Strategy for compact detailed display with component breakdown.

    Balances detail and brevity by showing hierarchical structure with
    additional component information while maintaining readability.
    """

    def display(self, analyzer: "ModelAnalyzer") -> Optional[Dict[str, Any]]:
        """Display compact detailed analysis with hierarchical structure.

        Logs a tree-style table (total -> trainable/components -> frozen)
        followed by a one-line memory summary. Always returns None.
        """
        logger = analyzer.logger
        stats = analyzer.basic_stats
        components = analyzer.component_stats

        # Cap the number of component rows to keep the table compact.
        max_components = DisplayConstrains.MAX_COMPONENTS_DEFAULT

        logger.info(f"{'='*60}")
        logger.info(f"COMPACT ANALYSIS: {analyzer.model_name}")
        logger.info(f"{'='*60}")

        # Hierarchical structure with additional details
        table_data = []
        formatter = TableFormatter()

        # Root level: whole-model totals.
        table_data.append(
            [
                "Total Parameters",
                f"{stats.total_parameters:,}",
                "100.0%",
                f"{stats.model_size_mb:.1f} MB",
            ]
        )

        # Trainable branch header.
        table_data.append(
            [
                "├── Trainable",
                f"{stats.trainable_parameters:,}",
                formatter.format_percentage(
                    stats.trainable_parameters, stats.total_parameters
                ),
                f"{stats.trainable_ratio:.1%} ratio",
            ]
        )

        # Component details (sorted by trainable parameters, largest first).
        if components:
            trainable_components = [
                (name, comp_stats)
                for name, comp_stats in components.items()
                if comp_stats.trainable_parameters > 0
            ]
            trainable_components.sort(
                key=lambda x: x[1].trainable_parameters, reverse=True
            )
            trainable_components = trainable_components[:max_components]

            for i, (comp_name, comp_stats) in enumerate(trainable_components):
                is_last_trainable = i == len(trainable_components) - 1
                # Keep the vertical bar: the "Frozen" branch still follows.
                prefix = "│   └──" if is_last_trainable else "│   ├──"

                # Layer count always shown; memory only above the threshold.
                details = []
                if comp_stats.layer_count > 0:
                    details.append(f"{comp_stats.layer_count} layers")
                if comp_stats.memory_mb > DisplayConstrains.MEMORY_THRESHOLD_MB:
                    details.append(f"{comp_stats.memory_mb:.1f} MB")
                detail_str = f"({', '.join(details)})" if details else ""

                table_data.append(
                    [
                        f"{prefix} {comp_name.title()}",
                        f"{comp_stats.trainable_parameters:,}",
                        formatter.format_percentage(
                            comp_stats.trainable_parameters, stats.total_parameters
                        ),
                        detail_str,
                    ]
                )

        # Frozen branch: note how many components contain frozen parameters.
        frozen_detail = ""
        if stats.frozen_parameters > 0:
            frozen_components = sum(
                1
                for comp_stats in components.values()
                if comp_stats.frozen_parameters > 0
            )
            if frozen_components > 0:
                frozen_detail = f"({frozen_components} components)"

        table_data.append(
            [
                "└── Frozen",
                f"{stats.frozen_parameters:,}",
                formatter.format_percentage(
                    stats.frozen_parameters, stats.total_parameters
                ),
                frozen_detail,
            ]
        )

        headers = ["Component", "Parameters", "Percentage", "Details"]
        logger.info("\n" + tabulate(table_data, headers=headers, tablefmt="pipe"))

        # Closing one-line memory summary.
        memory = analyzer.memory_analysis
        memory_line = (
            f"Memory: Parameters {memory['parameters_mb']:.1f} MB + "
            f"Buffers {memory['buffers_mb']:.1f} MB = "
            f"Total {memory['total_mb']:.1f} MB"
        )
        logger.info(memory_line)

        return None


class DetailedDisplayStrategy(DisplayStrategy):
    """
    Strategy for detailed parameter analysis with filtering and grouping options.

    Provides comprehensive analysis with customizable filtering, sorting,
    grouping, and hierarchical display options for in-depth parameter exploration.
    Follows single responsibility principle by delegating specific tasks to helper methods.
    """

    def __init__(
        self,
        sort_by: str = "name",
        group_by: Optional[str] = None,
        show_top_n: Optional[int] = None,
        trainable_only: bool = True,
        max_depth: Optional[int] = None,
        hierarchical: bool = True,
    ):
        """
        Configure the detailed display strategy.

        Args:
            sort_by: ParameterInfo attribute used for ordering ('name',
                'total_params', 'memory_mb', ...).
            group_by: Attribute to group rows by (e.g. 'component');
                None disables grouping.
            show_top_n: Cap on the number of parameters shown (None = all).
            trainable_only: When True, frozen parameters are filtered out.
            max_depth: Maximum dot-separated name depth to keep (None = all).
            hierarchical: When True (and not grouping), render a tree view.
        """
        self.sort_by = sort_by
        self.group_by = group_by
        self.show_top_n = show_top_n
        self.trainable_only = trainable_only
        self.max_depth = max_depth
        self.hierarchical = hierarchical
        # Shared formatter reused by every row-building helper.
        self._table_formatter = TableFormatter()

    def display(self, analyzer: "ModelAnalyzer") -> Optional[Dict[str, Any]]:
        """Run the detailed analysis pipeline and return a small summary dict."""
        logger = analyzer.logger

        if not self._validate_input(analyzer):
            return None

        params = self._prepare_parameters(analyzer.param_details)
        if not params:
            logger.warning("No parameters match the specified criteria")
            return None

        self._print_analysis_header(logger, analyzer.model_name)
        self._display_parameters(logger, params)
        return self._create_result_summary(params, analyzer.param_details)

    def _validate_input(self, analyzer: "ModelAnalyzer") -> bool:
        """Return True when the analyzer has parameters to display."""
        if analyzer.param_details:
            return True
        analyzer.logger.warning("No parameters found in the model")
        return False

    def _prepare_parameters(
        self, param_details: List[ParameterInfo]
    ) -> List[ParameterInfo]:
        """Filter, sort and optionally truncate the parameter list."""
        prepared = self._sort_parameters(self._filter_parameters(param_details))
        # A falsy show_top_n (None or 0) means "show everything".
        return prepared[: self.show_top_n] if self.show_top_n else prepared

    def _display_parameters(
        self, logger: logging.Logger, filtered_params: List[ParameterInfo]
    ) -> None:
        """Dispatch to the grouped, hierarchical or flat table printer.

        Grouping takes precedence; otherwise the hierarchical tree is used
        when enabled, and the flat listing is the fallback.
        """
        if self.group_by:
            self._print_grouped_parameter_table(logger, self.group_by, filtered_params)
        elif self.hierarchical:
            self._print_hierarchical_parameter_table(logger, filtered_params)
        else:
            self._print_flat_parameter_table(logger, filtered_params)

    def _create_result_summary(
        self, filtered_params: List[ParameterInfo], all_params: List[ParameterInfo]
    ) -> Dict[str, Any]:
        """Summarize how many parameters were shown out of the total."""
        summary = {
            "filtered_parameters": len(filtered_params),
            "total_parameters": len(all_params),
        }
        return summary

    def _filter_parameters(
        self, param_details: List[ParameterInfo]
    ) -> List[ParameterInfo]:
        """Drop parameters excluded by the trainable/depth criteria."""

        def _keep(param: ParameterInfo) -> bool:
            # All active criteria must hold; unset criteria always pass.
            if self.trainable_only and not param.trainable:
                return False
            if self.max_depth is not None and param.name.count(".") > self.max_depth:
                return False
            return True

        return [param for param in param_details if _keep(param)]

    def _sort_parameters(
        self, param_details: List[ParameterInfo]
    ) -> List[ParameterInfo]:
        """Sort by the configured attribute, falling back to name order."""
        # Size-like attributes are shown largest-first.
        descending = self.sort_by in ("total_params", "memory_mb")
        try:
            return sorted(
                param_details,
                key=lambda param: getattr(param, self.sort_by, 0),
                reverse=descending,
            )
        except (AttributeError, TypeError):
            # Unorderable/missing sort keys: fall back to a stable name sort.
            return sorted(param_details, key=lambda param: param.name)

    def _print_analysis_header(self, logger: logging.Logger, model_name: str) -> None:
        """Log the banner lines, annotated with any active filters."""
        active_filters = []
        if self.trainable_only:
            active_filters.append("TRAINABLE ONLY")
        if self.max_depth is not None:
            active_filters.append(f"MAX DEPTH {self.max_depth}")
        if self.show_top_n is not None:
            active_filters.append(f"TOP {self.show_top_n}")

        filter_text = f" ({', '.join(active_filters)})" if active_filters else ""
        separator = "=" * 100

        logger.info(separator)
        logger.info(f"DETAILED PARAMETER ANALYSIS: {model_name}{filter_text}")
        logger.info(separator)

    def _print_hierarchical_parameter_table(
        self, logger: logging.Logger, param_details: List[ParameterInfo]
    ) -> None:
        """Render parameters as a component-grouped tree table."""
        groups = self._group_parameters_by_component(param_details)
        ordered = self._sort_components_by_params(groups)
        overall = self._calculate_display_statistics(param_details)

        rows = self._build_hierarchical_table_data(ordered, overall)
        self._display_hierarchical_table(logger, rows)

    def _group_parameters_by_component(
        self, param_details: List[ParameterInfo]
    ) -> Dict[str, List[ParameterInfo]]:
        """Bucket parameters by their component label (insertion order kept)."""
        groups: Dict[str, List[ParameterInfo]] = {}
        for param in param_details:
            groups.setdefault(param.component, []).append(param)
        return groups

    def _sort_components_by_params(
        self, component_groups: Dict[str, List[ParameterInfo]]
    ) -> List[Tuple[str, List[ParameterInfo]]]:
        """Order components by descending total parameter count."""
        return sorted(
            component_groups.items(),
            key=lambda entry: sum(param.total_params for param in entry[1]),
            reverse=True,
        )

    def _calculate_display_statistics(
        self, param_details: List[ParameterInfo]
    ) -> Dict[str, Union[int, float]]:
        """Compute the aggregate figures shown in the table's summary row."""
        total = 0
        trainable = 0
        memory = 0.0
        for param in param_details:
            total += param.total_params
            if param.trainable:
                trainable += param.total_params
            memory += param.memory_mb

        return {
            "total_params": total,
            "trainable_params": trainable,
            "total_memory": memory,
            "param_count": len(param_details),
        }

    def _build_hierarchical_table_data(
        self,
        sorted_components: List[Tuple[str, List[ParameterInfo]]],
        statistics: Dict[str, Union[int, float]],
    ) -> List[List[str]]:
        """Assemble all rows: one summary row plus each component's rows."""
        # Summary row for the whole (filtered) parameter set.
        table_data = [
            [
                f"📊 Model Parameters ({statistics['param_count']} parameters)",
                f"{statistics['total_params']:,}",
                "100.0%",
                f"{statistics['total_memory']:.2f} MB",
            ]
        ]

        last_index = len(sorted_components) - 1
        for index, (component, params) in enumerate(sorted_components):
            table_data += self._build_component_rows(
                component,
                params,
                is_last_component=index == last_index,
                total_params=int(statistics["total_params"]),
            )

        return table_data

    def _build_component_rows(
        self,
        component: str,
        params: List[ParameterInfo],
        is_last_component: bool,
        total_params: int,
    ) -> List[List[str]]:
        """Build the header, parameter and overflow rows for one component."""
        totals = self._calculate_component_stats(params)
        branch = "└──" if is_last_component else "├──"

        rows = [
            [
                f"{branch} {component.title()} Component",
                f"{totals['total']:,}",
                self._table_formatter.format_percentage(
                    totals["total"], total_params
                ),
                f"{len(params)} params, {totals['memory']:.2f} MB",
            ]
        ]

        # Largest parameters first, capped per component to keep tables short.
        by_size = sorted(params, key=lambda param: param.total_params, reverse=True)
        limit = DisplayConstrains.MAX_PARAMS_PER_COMPONENT
        rows += self._build_parameter_rows(
            by_size[:limit], is_last_component, total_params
        )

        hidden = len(by_size) - limit
        if hidden > 0:
            overflow_prefix = "    └──" if is_last_component else "│   └──"
            rows.append([f"{overflow_prefix} ... and {hidden} more", "", "", ""])

        return rows

    def _calculate_component_stats(
        self, params: List[ParameterInfo]
    ) -> Dict[str, float]:
        """Sum total/trainable parameter counts and memory for one component."""
        stats = {"total": 0, "trainable": 0, "memory": 0.0}
        for param in params:
            stats["total"] += param.total_params
            if param.trainable:
                stats["trainable"] += param.total_params
            stats["memory"] += param.memory_mb
        return stats

    def _build_parameter_rows(
        self, params: List[ParameterInfo], is_last_component: bool, total_params: int
    ) -> List[List[str]]:
        """Render one table row per parameter with tree-style prefixes."""
        # The trunk depends on whether the enclosing component is the last one;
        # the branch depends on whether the parameter is the last in its group.
        trunk = "    " if is_last_component else "│   "
        last_idx = len(params) - 1

        rows: List[List[str]] = []
        for idx, info in enumerate(params):
            branch = "└──" if idx == last_idx else "├──"
            display_name = self._truncate_name(info.name, 50)
            status = "✓" if info.trainable else "✗"
            icon = self._table_formatter.get_parameter_type_icon(info.parameter_type)

            rows.append(
                [
                    f"{trunk}{branch} {display_name}",
                    f"{info.total_params:,}",
                    self._table_formatter.format_percentage(
                        info.total_params, total_params
                    ),
                    f"{status} {icon} {info.layer_type}",
                ]
            )

        return rows

    def _truncate_name(self, name: str, max_length: int) -> str:
        """Truncate parameter name if too long."""
        return name if len(name) <= max_length else name[: max_length - 3] + "..."

    def _display_hierarchical_table(
        self, logger: logging.Logger, table_data: List[List[str]]
    ) -> None:
        """Render the hierarchical rows as a pipe-format table via the logger.

        Args:
            logger: Destination logger for the rendered table.
            table_data: Pre-built rows (parameter, count, percentage, details).
        """
        headers = ["Parameter", "Count", "Percentage", "Details"]
        logger.info("\n" + tabulate(table_data, headers=headers, tablefmt="pipe"))

    def _print_grouped_parameter_table(
        self, logger: logging.Logger, group_by: str, param_details: List[ParameterInfo]
    ) -> None:
        """Log parameters bucketed by the attribute named in *group_by*."""
        grouped = self._create_parameter_groups(param_details, group_by)

        logger.info(f"\nPARAMETERS GROUPED BY {group_by.upper()}:")

        for name in sorted(grouped):
            self._display_parameter_group(logger, name, grouped[name])

    def _create_parameter_groups(
        self, param_details: List[ParameterInfo], group_by: str
    ) -> Dict[str, List[ParameterInfo]]:
        """Bucket parameters by the value of the *group_by* attribute."""
        groups: Dict[str, List[ParameterInfo]] = {}
        for info in param_details:
            # Parameters lacking the attribute fall into an "unknown" bucket.
            key = getattr(info, group_by, "unknown")
            groups.setdefault(key, []).append(info)
        return groups

    def _display_parameter_group(
        self, logger: logging.Logger, group_name: str, params: List[ParameterInfo]
    ) -> None:
        """Log the header and detail table for a single parameter group.

        Args:
            logger: Destination logger.
            group_name: Name of the group (shown upper-cased in the header).
            params: Parameters belonging to this group.
        """
        logger.info(f"\n{group_name.upper()} ({len(params)} parameters):")

        table_data = self._build_group_table_data(params)
        headers = ["Parameter", "Count", "Trainable", "Memory", "Shape"]
        logger.info("\n" + tabulate(table_data, headers=headers, tablefmt="pipe"))

    def _calculate_group_statistics(
        self, params: List[ParameterInfo]
    ) -> Dict[str, int]:
        """Return total and trainable parameter counts for a group."""
        total = 0
        trainable = 0
        for info in params:
            total += info.total_params
            if info.trainable:
                trainable += info.total_params
        return {"total": total, "trainable": trainable}

    def _build_group_table_data(self, params: List[ParameterInfo]) -> List[List[str]]:
        """Format one table row per parameter for the group display."""
        rows = []
        for info in params:
            rows.append(
                [
                    info.name,
                    f"{info.total_params:,}",
                    "✓" if info.trainable else "✗",
                    f"{info.memory_mb:.3f} MB",
                    str(info.shape),
                ]
            )
        return rows

    def _print_flat_parameter_table(
        self, logger: logging.Logger, param_details: List[ParameterInfo]
    ) -> None:
        """Log every parameter in a single flat table."""
        logger.info("\nPARAMETER TABLE:")

        headers = [
            "Parameter",
            "Count",
            "Trainable",
            "Component",
            "Layer Type",
            "Type",
            "Memory",
        ]
        rows = self._build_flat_table_data(param_details)
        logger.info("\n" + tabulate(rows, headers=headers, tablefmt="pipe"))

    def _build_flat_table_data(
        self, param_details: List[ParameterInfo]
    ) -> List[List[str]]:
        """Format one row per parameter for the flat table display."""
        icon_for = self._table_formatter.get_parameter_type_icon
        return [
            [
                info.name,
                f"{info.total_params:,}",
                "✓" if info.trainable else "✗",
                info.component,
                info.layer_type,
                icon_for(info.parameter_type),
                f"{info.memory_mb:.3f} MB",
            ]
            for info in param_details
        ]


class ComponentGroupedDisplayStrategy(DisplayStrategy):
    """
    Display strategy that organizes parameters by model component.

    Buckets every parameter under its component label (backbone, classifier,
    adapter, ...), logs per-component statistics and a detail table, and
    returns a machine-readable summary of the grouping.
    """

    # Default cap on parameter rows shown per component.
    MAX_PARAMS_PER_COMPONENT = 20

    def __init__(self, max_params_per_component: int | None = None):
        """
        Initialize component grouped display strategy.

        Args:
            max_params_per_component: Cap on parameter rows per component;
                falls back to the class default (20) when not given.
        """
        self.max_params_per_component = (
            max_params_per_component or self.MAX_PARAMS_PER_COMPONENT
        )
        self._table_formatter = TableFormatter()

    def display(self, analyzer: "ModelAnalyzer") -> Optional[Dict[str, Any]]:
        """Log the component-grouped analysis and return its summary dict."""
        if not self._validate_analyzer(analyzer):
            return None

        groups = self._group_parameters_by_component(analyzer.param_details)
        grand_total = sum(p.total_params for p in analyzer.param_details)

        self._print_analysis_header(analyzer.logger, analyzer.model_name)

        for name in sorted(groups):
            self._display_component_analysis(
                analyzer.logger, name, groups[name], grand_total
            )

        return self._create_analysis_summary(groups)

    def _validate_analyzer(self, analyzer: "ModelAnalyzer") -> bool:
        """Return True when the analyzer has parameters to display."""
        if analyzer.param_details:
            return True
        analyzer.logger.warning("No parameters found in the model")
        return False

    def _group_parameters_by_component(
        self, param_details: List[ParameterInfo]
    ) -> Dict[str, List[ParameterInfo]]:
        """Bucket parameters under their component label."""
        groups: Dict[str, List[ParameterInfo]] = {}
        for info in param_details:
            groups.setdefault(info.component, []).append(info)
        return groups

    def _print_analysis_header(self, logger: logging.Logger, model_name: str) -> None:
        """Log the banner that introduces the component analysis."""
        separator = "=" * 80
        logger.info(separator)
        logger.info(f"COMPONENT ANALYSIS: {model_name}")
        logger.info(separator)

    def _display_component_analysis(
        self,
        logger: logging.Logger,
        component: str,
        params: List[ParameterInfo],
        total_params: int,
    ) -> None:
        """Log the summary block and detail table for one component."""
        stats = self._calculate_component_statistics(params, total_params)
        self._print_component_summary(logger, component, stats)
        self._print_component_detail_table(logger, params)

    def _calculate_component_statistics(
        self, params: List[ParameterInfo], total_params: int
    ) -> Dict[str, Union[int, float]]:
        """Compute total/trainable counts and this component's share of the model."""
        total = sum(p.total_params for p in params)
        trainable = sum(p.total_params for p in params if p.trainable)
        share = total / total_params * 100 if total_params > 0 else 0.0
        return {"total": total, "trainable": trainable, "percentage": share}

    def _print_component_summary(
        self,
        logger: logging.Logger,
        component: str,
        stats: Dict[str, Union[int, float]],
    ) -> None:
        """Log the headline numbers for one component."""
        logger.info(f"\n{component.upper()} COMPONENT:")
        logger.info(
            f"Total: {stats['total']:,} parameters ({stats['percentage']:.1f}%)"
        )
        logger.info(f"Trainable: {stats['trainable']:,} parameters")

    def _print_component_detail_table(
        self, logger: logging.Logger, params: List[ParameterInfo]
    ) -> None:
        """Log a table of the component's largest parameters."""
        by_size = sorted(params, key=lambda p: p.total_params, reverse=True)
        shown = by_size[: self.max_params_per_component]

        headers = ["Parameter", "Count", "Trainable", "Layer Type", "Shape"]
        rows = self._build_component_table_data(shown)
        logger.info("\n" + tabulate(rows, headers=headers, tablefmt="pipe"))

        self._print_remaining_params_info(logger, by_size, shown)

    def _build_component_table_data(
        self, params: List[ParameterInfo]
    ) -> List[List[str]]:
        """Format one row per parameter for the component detail table."""
        rows = []
        for info in params:
            rows.append(
                [
                    self._extract_param_short_name(info.name),
                    f"{info.total_params:,}",
                    "✓" if info.trainable else "✗",
                    info.layer_type,
                    str(info.shape),
                ]
            )
        return rows

    def _extract_param_short_name(self, full_name: str) -> str:
        """Return the final dotted segment of a parameter name for readability."""
        return full_name.rsplit(".", 1)[-1]

    def _print_remaining_params_info(
        self,
        logger: logging.Logger,
        all_params: List[ParameterInfo],
        displayed_params: List[ParameterInfo],
    ) -> None:
        """Log how many parameters were omitted from the table, if any."""
        hidden = len(all_params) - len(displayed_params)
        if hidden > 0:
            logger.info(f"... and {hidden} more parameters")

    def _create_analysis_summary(
        self, component_groups: Dict[str, List[ParameterInfo]]
    ) -> Dict[str, Any]:
        """Build the summary dict returned to the caller."""
        components: Dict[str, Any] = {}
        for name, params in component_groups.items():
            components[name] = {
                "parameter_count": len(params),
                "total_params": sum(p.total_params for p in params),
                "trainable_params": sum(
                    p.total_params for p in params if p.trainable
                ),
            }
        return {"component_count": len(component_groups), "components": components}


class ModelAnalyzer:
    """
    Orchestrator for model-architecture analysis.

    Coordinates the analysis and delegates classification, statistics, and
    formatting to specialized collaborators. Results are computed lazily and
    cached so repeated queries against the same model are cheap.
    """

    def __init__(self, model: nn.Module, model_name: str = "Model"):
        """
        Initialize the analyzer with a PyTorch model.

        Args:
            model: PyTorch model to analyze
            model_name: Human-readable name for the model used in displays
        """
        self.model = model
        self.model_name = model_name
        self.logger = logging.getLogger("ArchParser")

        # Specialized collaborators (classification / statistics / formatting).
        self._classifier = ParameterClassifier()
        self._calculator = StatisticsCalculator()
        self._formatter = TableFormatter()

        # Lazily-populated caches backing the properties below.
        self._basic_stats: Optional[ModelStatistics] = None
        self._param_details: Optional[List[ParameterInfo]] = None
        self._component_stats: Optional[Dict[str, ComponentStats]] = None
        self._memory_analysis: Optional[Dict[str, float]] = None

    @property
    def basic_stats(self) -> ModelStatistics:
        """Get basic model statistics (computed once, then cached)."""
        if self._basic_stats is None:
            self._basic_stats = self._calculator.calculate_basic_stats(self.model)
        return self._basic_stats

    @property
    def param_details(self) -> List[ParameterInfo]:
        """Get detailed per-parameter information (computed once, then cached)."""
        if self._param_details is None:
            self._param_details = self._analyze_parameters()
        return self._param_details

    @property
    def component_stats(self) -> Dict[str, ComponentStats]:
        """Get per-component statistics (computed once, then cached)."""
        if self._component_stats is None:
            self._component_stats = self._calculator.calculate_component_stats(
                self.param_details, self.model
            )
        return self._component_stats

    @property
    def memory_analysis(self) -> Dict[str, float]:
        """Get the memory-usage breakdown in MB (computed once, then cached)."""
        if self._memory_analysis is None:
            self._memory_analysis = self._analyze_memory()
        return self._memory_analysis

    def _analyze_parameters(self) -> List[ParameterInfo]:
        """Build a ParameterInfo record for every named parameter of the model."""
        param_details = []

        for name, param in self.model.named_parameters():
            param_details.append(
                ParameterInfo(
                    name=name,
                    shape=tuple(param.shape),
                    dtype=str(param.dtype),
                    device=str(param.device),
                    total_params=param.numel(),
                    trainable=param.requires_grad,
                    # element_size() is bytes per element; convert to MB.
                    memory_mb=param.numel() * param.element_size() / (1024**2),
                    component=self._classifier.classify_component(name),
                    layer_type=self._classifier.classify_layer_type(name),
                    parameter_type=self._classifier.classify_parameter_type(name),
                )
            )

        return param_details

    def _analyze_memory(self) -> Dict[str, float]:
        """Compute parameter and buffer memory usage in megabytes."""
        param_memory = sum(
            p.numel() * p.element_size() for p in self.model.parameters()
        ) / (1024**2)
        buffer_memory = sum(
            b.numel() * b.element_size() for b in self.model.buffers()
        ) / (1024**2)

        return {
            "parameters_mb": param_memory,
            "buffers_mb": buffer_memory,
            "total_mb": param_memory + buffer_memory,
        }

    def display_analysis(
        self, analysis_type: AnalysisType, **kwargs
    ) -> Optional[Dict[str, Any]]:
        """
        Display analysis using the strategy registered for *analysis_type*.

        Args:
            analysis_type: Which display strategy to run.
            **kwargs: Extra options, forwarded only to the detailed strategy.

        Returns:
            Strategy-specific result dict, or None for display-only strategies.

        Raises:
            ValueError: If no strategy is registered for *analysis_type*.
        """
        # Factories keep construction lazy: only the requested strategy is
        # instantiated, so stray **kwargs meant for the detailed strategy
        # cannot break the construction of unrelated strategies.
        strategy_factories = {
            AnalysisType.HIERARCHICAL: HierarchicalDisplayStrategy,
            AnalysisType.ULTRA_COMPACT: UltraCompactDisplayStrategy,
            AnalysisType.COMPACT_DETAILED: CompactDetailedDisplayStrategy,
            AnalysisType.DETAILED: lambda: DetailedDisplayStrategy(**kwargs),
            AnalysisType.COMPONENT_GROUPED: ComponentGroupedDisplayStrategy,
        }

        factory = strategy_factories.get(analysis_type)
        if factory is None:
            raise ValueError(f"Unknown analysis type: {analysis_type}")

        return factory().display(self)

    def filter_parameters(
        self,
        trainable_only: bool = False,
        max_depth: Optional[int] = None,
        component_filter: Optional[str] = None,
    ) -> List[ParameterInfo]:
        """
        Filter parameters based on criteria.

        Args:
            trainable_only: Keep only parameters that require gradients.
            max_depth: Keep parameters whose dotted-name depth is <= this.
            component_filter: Keep only parameters of this component.

        Returns:
            The filtered parameter list (original order preserved).
        """
        filtered = self.param_details

        if trainable_only:
            filtered = [p for p in filtered if p.trainable]

        if max_depth is not None:
            filtered = [p for p in filtered if p.name.count(".") <= max_depth]

        if component_filter:
            filtered = [p for p in filtered if p.component == component_filter]

        return filtered

    def print_detailed_parameter_table(
        self,
        sort_by: str = "name",
        group_by: Optional[str] = None,
        show_top_n: Optional[int] = None,
        trainable_only: bool = False,
        max_depth: Optional[int] = None,
        hierarchical: bool = True,
    ) -> None:
        """
        Print detailed parameter table with hierarchical tree structure.

        Args:
            sort_by: Column to sort by ('total_params', 'name', 'component', etc.)
            group_by: Group parameters by ('component', 'layer_type', 'parameter_type')
            show_top_n: Show only top N parameters (None for all)
            trainable_only: If True, only show trainable parameters
            max_depth: Maximum depth level to show (count dots in parameter name)
            hierarchical: If True, use hierarchical tree structure
        """
        strategy = DetailedDisplayStrategy(
            sort_by=sort_by,
            group_by=group_by,
            show_top_n=show_top_n,
            trainable_only=trainable_only,
            max_depth=max_depth,
            hierarchical=hierarchical,
        )
        strategy.display(self)

    def print_compact_detailed_analysis(self, max_components: int = 10) -> None:
        """
        Print compact detailed analysis with hierarchical structure.

        Args:
            max_components: Maximum number of components to show in detail
        """
        # Temporarily override the shared display constant; the strategy
        # reads it globally, so restore it even if display() raises.
        original_max = DisplayConstrains.MAX_COMPONENTS_DEFAULT
        DisplayConstrains.MAX_COMPONENTS_DEFAULT = max_components

        try:
            CompactDetailedDisplayStrategy().display(self)
        finally:
            DisplayConstrains.MAX_COMPONENTS_DEFAULT = original_max

    def get_parameter_statistics_by_component(self) -> Dict[str, Dict[str, Any]]:
        """
        Get comprehensive parameter statistics grouped by component.

        Returns:
            Mapping of component name to its aggregate stats plus a per-
            parameter breakdown sorted by descending parameter count.
        """
        component_stats = {}

        for component, stats in self.component_stats.items():
            component_params = [
                p for p in self.param_details if p.component == component
            ]

            component_stats[component] = {
                "total_parameters": stats.total_parameters,
                "trainable_parameters": stats.trainable_parameters,
                "frozen_parameters": stats.frozen_parameters,
                "layer_count": stats.layer_count,
                "memory_mb": stats.memory_mb,
                "parameter_count": len(component_params),
                "trainable_ratio": stats.trainable_ratio,
                "parameters": [
                    {
                        "name": p.name,
                        "total_params": p.total_params,
                        "trainable": p.trainable,
                        "layer_type": p.layer_type,
                        "parameter_type": p.parameter_type,
                        "shape": p.shape,
                    }
                    for p in sorted(
                        component_params, key=lambda x: x.total_params, reverse=True
                    )
                ],
            }

        return component_stats

    def export_to_csv(self, filepath: str = "model_parameters.csv") -> None:
        """
        Export the per-parameter analysis to a CSV file.

        Args:
            filepath: Destination path for the CSV file.
        """
        fieldnames = [
            "name",
            "shape",
            "dtype",
            "device",
            "total_params",
            "trainable",
            "memory_mb",
            "component",
            "layer_type",
            "parameter_type",
        ]

        # `csv` is already imported at module level; no local import needed.
        with open(filepath, "w", newline="", encoding="utf-8") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for param in self.param_details:
                writer.writerow(
                    {
                        "name": param.name,
                        "shape": str(param.shape),
                        "dtype": param.dtype,
                        "device": param.device,
                        "total_params": param.total_params,
                        "trainable": param.trainable,
                        "memory_mb": param.memory_mb,
                        "component": param.component,
                        "layer_type": param.layer_type,
                        "parameter_type": param.parameter_type,
                    }
                )

        self.logger.info(f"Parameter analysis exported to: {filepath}")


class ArchitectureAnalysisEventHandler(BaseEventHandler):
    """
    Event handler for automatic model architecture analysis during training.

    Integrates with the event system to automatically analyze model
    architecture at specified training milestones, with configurable display
    options and optional comparison against the initial model state.
    """

    def __init__(self, config: HandlerConfig, *args, **kwargs):
        """
        Initialize the event handler with configuration.

        Args:
            config: Handler configuration containing analysis settings
            *args: Additional arguments passed to base handler
            **kwargs: Additional keyword arguments passed to base handler
        """
        super().__init__(config, *args, **kwargs)
        self.logger = logging.getLogger("EventSystem")
        log_level = self.config.get("log_level", "INFO")
        self.log_level = getattr(logging, log_level.upper(), logging.INFO)

        # Analysis configuration with defaults for missing keys.
        config_dict = config.config if hasattr(config, "config") else {}
        self.analyze_on_task_start = config_dict.get("analyze_on_task_start", True)
        self.analyze_on_epoch_start = config_dict.get("analyze_on_epoch_start", False)
        self.compare_with_initial = config_dict.get("compare_with_initial", False)
        self.analysis_type = AnalysisType(
            config_dict.get("analysis_type", "hierarchical")
        )

        # Mutable state carried across events.
        self.initial_stats: Optional[ModelStatistics] = None
        self.current_analyzer: Optional[ModelAnalyzer] = None

    def _handle_event(self, context: EventContext) -> None:
        """Run architecture analysis for events that are configured for it."""
        model = getattr(context.learner, "model", None)
        if model is None:
            self.logger.warning("No model found in learner context")
            return

        task_id = getattr(context, "task_id", 0)
        epoch = getattr(context, "epoch", 0)
        self.current_analyzer = ModelAnalyzer(
            model, f"CL Model (Task {task_id}, Epoch {epoch})"
        )

        # Capture the baseline once so later comparisons have a reference.
        if self.compare_with_initial and self.initial_stats is None:
            self.initial_stats = self.current_analyzer.basic_stats

        if not self._should_analyze(context.event_type):
            return

        self.logger.info(
            f"Performing architecture analysis for event: {context.event_type}"
        )
        self.current_analyzer.display_analysis(self.analysis_type)

        if self.compare_with_initial and self.initial_stats is not None:
            self._print_comparison()

    def _should_analyze(self, event_type: str) -> bool:
        """Determine if analysis should be performed for this event."""
        if event_type == "ON_TASK_START":
            return self.analyze_on_task_start
        if event_type == "ON_TRAIN_EPOCH_START":
            return self.analyze_on_epoch_start
        return event_type == "architecture_analysis"

    def _print_comparison(self) -> None:
        """Log a table comparing current statistics with the initial state."""
        if not self.initial_stats or not self.current_analyzer:
            return

        initial = self.initial_stats
        current_stats = self.current_analyzer.basic_stats

        self.logger.info("COMPARISON WITH INITIAL MODEL STATE:")

        comparison_data = [
            [
                "Total Parameters",
                f"{initial.total_parameters:,}",
                f"{current_stats.total_parameters:,}",
                self._calculate_change_percentage(
                    initial.total_parameters, current_stats.total_parameters
                ),
            ],
            [
                "Trainable Parameters",
                f"{initial.trainable_parameters:,}",
                f"{current_stats.trainable_parameters:,}",
                self._calculate_change_percentage(
                    initial.trainable_parameters,
                    current_stats.trainable_parameters,
                ),
            ],
            [
                "Model Size",
                f"{initial.model_size_mb:.2f} MB",
                f"{current_stats.model_size_mb:.2f} MB",
                self._calculate_change_percentage(
                    initial.model_size_mb, current_stats.model_size_mb
                ),
            ],
        ]

        headers = ["Metric", "Initial", "Current", "Change"]
        self.logger.info(
            "\n" + tabulate(comparison_data, headers=headers, tablefmt="pipe")
        )

    def _calculate_change_percentage(self, initial: float, current: float) -> str:
        """Format the relative change as a signed percentage, or "N/A" at zero."""
        if initial == 0:
            return "N/A"
        return f"{((current - initial) / initial) * 100:+.1f}%"


# Public API functions with clean interfaces


def analyze_model(
    model: nn.Module,
    model_name: str = "Model",
    analysis_type: str = "hierarchical",
    **kwargs,
) -> Optional[Dict[str, Any]]:
    """
    Analyze a model's architecture using the named display strategy.

    Args:
        model: PyTorch model to analyze
        model_name: Name for the model
        analysis_type: Type of analysis ('hierarchical', 'ultra_compact', 'detailed',
                      'component_grouped', 'compact_detailed')
        **kwargs: Additional arguments passed to the display strategy

    Returns:
        Analysis results dictionary (for some analysis types)
    """
    return ModelAnalyzer(model, model_name).display_analysis(
        AnalysisType(analysis_type), **kwargs
    )


def analyze_model_detailed(
    model: nn.Module,
    model_name: str = "Model",
    sort_by: str = "total_params",
    group_by: Optional[str] = None,
    show_top_n: Optional[int] = None,
    trainable_only: bool = False,
    max_depth: Optional[int] = None,
    hierarchical: bool = True,
    export_csv: bool = False,
    csv_path: str = "model_parameters.csv",
) -> Dict[str, Any]:
    """
    Run a detailed parameter analysis with a hierarchical tree display.

    Args:
        model: PyTorch model to analyze
        model_name: Name for the model
        sort_by: Sort parameters by this attribute
        group_by: Group parameters by this attribute
        show_top_n: Show only top N parameters
        trainable_only: If True, only show trainable parameters
        max_depth: Maximum depth level to show (count dots in parameter name)
        hierarchical: If True, use hierarchical tree structure display
        export_csv: Whether to export results to CSV
        csv_path: Path for CSV export

    Returns:
        Complete analysis results
    """
    model_analyzer = ModelAnalyzer(model, model_name)

    # Render the detailed parameter table first.
    model_analyzer.print_detailed_parameter_table(
        sort_by=sort_by,
        group_by=group_by,
        show_top_n=show_top_n,
        trainable_only=trainable_only,
        max_depth=max_depth,
        hierarchical=hierarchical,
    )

    # Optionally persist the raw analysis.
    if export_csv:
        model_analyzer.export_to_csv(csv_path)

    return {
        "detailed_params": model_analyzer.param_details,
        "component_stats": model_analyzer.get_parameter_statistics_by_component(),
        "basic_stats": model_analyzer.basic_stats,
        "memory_analysis": model_analyzer.memory_analysis,
    }


def quick_model_summary(
    model: nn.Module, task_id: Optional[int] = None
) -> Dict[str, Any]:
    """
    Generate a quick, ultra-compact model summary.

    Args:
        model: PyTorch model to analyze
        task_id: Optional task ID for context

    Returns:
        Summary dictionary (empty when the strategy yields nothing)
    """
    name = "Model" if task_id is None else f"Task {task_id}"
    summary = ModelAnalyzer(model, name).display_analysis(AnalysisType.ULTRA_COMPACT)
    return summary if summary is not None else {}


def export_model_analysis(
    model: nn.Module, filepath: str = "model_analysis.csv", model_name: str = "Model"
) -> None:
    """
    Write a detailed model analysis to a CSV file.

    Args:
        model: PyTorch model to analyze
        filepath: Output CSV file path
        model_name: Name for the model
    """
    ModelAnalyzer(model, model_name).export_to_csv(filepath)


# Legacy compatibility functions


def count_parameters(model: nn.Module, trainable: bool = False) -> int:
    """Count model parameters, optionally only those requiring gradients."""
    params = model.parameters()
    if trainable:
        params = (p for p in params if p.requires_grad)
    return sum(p.numel() for p in params)


def print_model_parameter_summary(
    model: nn.Module, task_id: Optional[int] = None
) -> Dict[str, int]:
    """
    Legacy function for parameter summary with hierarchical display.

    Args:
        model: PyTorch model to analyze
        task_id: Optional task ID used in the model's display name

    Returns:
        Dict with overall and per-component trainable parameter counts.
    """
    summary_result = quick_model_summary(model, task_id)

    # Match quick_model_summary's naming rule: check `is not None` so that
    # task_id == 0 is still labelled "Task 0" rather than "Model".
    model_name = f"Task {task_id}" if task_id is not None else "Model"
    analyzer = ModelAnalyzer(model, model_name)
    component_stats = analyzer.component_stats

    def _trainable(component: str) -> int:
        # A fresh ComponentStats yields zero counts for missing components.
        return component_stats.get(component, ComponentStats()).trainable_parameters

    return {
        "all_params": summary_result["total_parameters"],
        "trainable_params": summary_result["trainable_parameters"],
        "backbone_trainable_params": _trainable("backbone"),
        "fc_trainable_params": _trainable("classifier"),
        "adapter_trainable_params": _trainable("adapter"),
        "other_trainable_params": sum(
            stats.trainable_parameters
            for name, stats in component_stats.items()
            if name not in ("backbone", "classifier", "adapter")
        ),
    }


# Additional convenience functions for backward compatibility


def analyze_continual_learning_model(
    model: nn.Module, task_id: int = 0, include_component_analysis: bool = True
) -> Dict[str, Any]:
    """
    Specialized analysis for continual learning models.

    Args:
        model: Continual learning model to analyze
        task_id: Current task ID
        include_component_analysis: Whether to analyze components (backbone, classifier, etc.)

    Returns:
        Continual learning specific analysis
    """
    analyzer = ModelAnalyzer(model, f"CL Model (Task {task_id})")

    # Hierarchical view first; optionally add a component-grouped breakdown.
    analyzer.display_analysis(AnalysisType.HIERARCHICAL)
    if include_component_analysis:
        analyzer.display_analysis(AnalysisType.COMPONENT_GROUPED)

    return {
        "basic_stats": analyzer.basic_stats,
        "component_stats": analyzer.component_stats,
        "detailed_params": analyzer.param_details,
        "memory_analysis": analyzer.memory_analysis,
        "component_breakdown": analyzer.get_parameter_statistics_by_component(),
    }


def print_compact_detailed_analysis(
    model: nn.Module, model_name: str = "Model", max_components: int = 10
) -> None:
    """
    Print compact detailed analysis with hierarchical structure.

    Args:
        model: PyTorch model to analyze
        model_name: Name for the model
        max_components: Maximum number of components to show in detail
    """
    ModelAnalyzer(model, model_name).print_compact_detailed_analysis(
        max_components=max_components
    )
