"""
Network structure analysis for HFVisionTransformer.

This script demonstrates how to inspect and analyze the Vision Transformer network structure,
including layer-wise input/output dimensions, attention patterns, and feature extraction.
"""

import torch
import torch.nn as nn
from transformers import ViTModel, ViTConfig
from continuallearning.models.backbones.huggingface_models import HFVisionTransformer
import matplotlib.pyplot as plt
import numpy as np
from typing import Dict, List, Tuple
import logging

# Set up logging
# Module-wide logger at INFO level so analysis progress messages are visible
# when the script is run directly.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class VisionTransformerAnalyzer:
    """Analyzer for Vision Transformer network structure and data flow."""

    def __init__(self, model_name: str = "google/vit-base-patch16-224"):
        """
        Set up the analyzer around a pretrained ViT backbone.

        Hidden states and attentions are requested from the wrapper so the
        analysis methods can inspect intermediate activations.

        Args:
            model_name: HuggingFace identifier of the pretrained ViT model
        """
        self.model_name = model_name
        backbone = HFVisionTransformer(
            model_name=model_name,
            output_hidden_states=True,
            output_attentions=True,
            freeze_backbone=False,
        )
        self.vit = backbone
        # Underlying HF config drives all of the shape/complexity analysis.
        self.config = backbone.model.config
        logger.info(f"Loaded model: {model_name}")

    def analyze_model_architecture(self) -> Dict:
        """
        Analyze the overall model architecture.

        Returns:
            Dictionary containing architecture information
        """
        architecture_info = {
            "model_name": self.model_name,
            "hidden_size": self.config.hidden_size,
            "num_hidden_layers": self.config.num_hidden_layers,
            "num_attention_heads": self.config.num_attention_heads,
            "intermediate_size": self.config.intermediate_size,
            "patch_size": self.config.patch_size,
            "image_size": self.config.image_size,
            "num_channels": self.config.num_channels,
        }

        # Calculate derived parameters
        patch_size = self.config.patch_size
        image_size = self.config.image_size
        num_patches = (image_size // patch_size) ** 2
        sequence_length = num_patches + 1  # +1 for CLS token

        architecture_info.update(
            {
                "num_patches": num_patches,
                "sequence_length": sequence_length,
                "total_parameters": sum(p.numel() for p in self.vit.model.parameters()),
                "trainable_parameters": sum(
                    p.numel() for p in self.vit.model.parameters() if p.requires_grad
                ),
            }
        )

        return architecture_info

    def analyze_layer_structure(self) -> List[Dict]:
        """
        Analyze the structure of each transformer layer.

        Returns:
            List of dictionaries containing layer information
        """
        layer_info = []

        # Embeddings layer
        embeddings_info = {
            "layer_name": "embeddings",
            "layer_type": "ViTEmbeddings",
            "components": [
                "patch_embeddings",
                "position_embeddings",
                "cls_token",
                "dropout",
            ],
            "input_shape": f"(batch_size, {self.config.num_channels}, {self.config.image_size}, {self.config.image_size})",
            "output_shape": f"(batch_size, {(self.config.image_size // self.config.patch_size) ** 2 + 1}, {self.config.hidden_size})",
        }
        layer_info.append(embeddings_info)

        # Transformer layers
        for i in range(self.config.num_hidden_layers):
            layer = {
                "layer_name": f"encoder.layer.{i}",
                "layer_type": "ViTLayer",
                "components": [
                    "attention",
                    "intermediate",
                    "output",
                    "layernorm_before",
                    "layernorm_after",
                ],
                "input_shape": f"(batch_size, {(self.config.image_size // self.config.patch_size) ** 2 + 1}, {self.config.hidden_size})",
                "output_shape": f"(batch_size, {(self.config.image_size // self.config.patch_size) ** 2 + 1}, {self.config.hidden_size})",
                "attention_heads": self.config.num_attention_heads,
                "attention_head_size": self.config.hidden_size
                // self.config.num_attention_heads,
                "intermediate_size": self.config.intermediate_size,
            }
            layer_info.append(layer)

        return layer_info

    def trace_forward_pass(self, input_tensor: torch.Tensor) -> Dict:
        """
        Trace the forward pass through the network and capture intermediate outputs.

        Args:
            input_tensor: Input tensor of shape (batch_size, channels, height, width)

        Returns:
            Dictionary containing intermediate outputs and shapes
        """
        batch_size = input_tensor.shape[0]
        trace_info = {
            "input_shape": tuple(input_tensor.shape),
            "intermediate_shapes": {},
            "attention_shapes": {},
            "feature_extraction_points": {},
        }

        # Forward pass
        with torch.no_grad():
            outputs = self.vit.forward(input_tensor)

        # Record shapes
        trace_info["final_features_shape"] = tuple(outputs.features.shape)
        trace_info["last_hidden_state_shape"] = tuple(outputs.last_hidden_state.shape)

        if hasattr(outputs, "hidden_states") and outputs.hidden_states is not None:
            for i, hidden_state in enumerate(outputs.hidden_states):
                trace_info["intermediate_shapes"][f"layer_{i}"] = tuple(
                    hidden_state.shape
                )

        if hasattr(outputs, "attentions") and outputs.attentions is not None:
            for i, attention in enumerate(outputs.attentions):
                trace_info["attention_shapes"][f"layer_{i}"] = tuple(attention.shape)

        # Feature extraction at different layers
        for layer_idx in [-1, -2, -3, 0, 1, 2]:
            if abs(layer_idx) <= len(outputs.hidden_states):
                self.vit.feature_layer = layer_idx
                features = self.vit.extract_features(input_tensor)
                trace_info["feature_extraction_points"][f"layer_{layer_idx}"] = tuple(
                    features.shape
                )

        return trace_info

    def analyze_attention_patterns(
        self, input_tensor: torch.Tensor, layer_idx: int = -1
    ) -> Dict:
        """
        Analyze attention patterns for a specific layer.

        Args:
            input_tensor: Input tensor
            layer_idx: Layer index to analyze (default: last layer)

        Returns:
            Dictionary containing attention analysis
        """
        with torch.no_grad():
            outputs = self.vit.forward(input_tensor)

        if not hasattr(outputs, "attentions") or outputs.attentions is None:
            logger.warning(
                "Attention weights not available. Make sure output_attentions=True"
            )
            return {}

        attention = outputs.attentions[
            layer_idx
        ]  # Shape: (batch, num_heads, seq_len, seq_len)
        batch_size, num_heads, seq_len, _ = attention.shape

        # Analyze CLS token attention (first token)
        cls_attention = attention[:, :, 0, :]  # Shape: (batch, num_heads, seq_len)

        # Analyze patch-to-patch attention (excluding CLS token)
        patch_attention = attention[
            :, :, 1:, 1:
        ]  # Shape: (batch, num_heads, num_patches, num_patches)

        attention_analysis = {
            "layer_index": layer_idx,
            "attention_shape": tuple(attention.shape),
            "num_heads": num_heads,
            "sequence_length": seq_len,
            "cls_attention_shape": tuple(cls_attention.shape),
            "patch_attention_shape": tuple(patch_attention.shape),
            "cls_attention_stats": {
                "mean": cls_attention.mean().item(),
                "std": cls_attention.std().item(),
                "max": cls_attention.max().item(),
                "min": cls_attention.min().item(),
            },
            "patch_attention_stats": {
                "mean": patch_attention.mean().item(),
                "std": patch_attention.std().item(),
                "max": patch_attention.max().item(),
                "min": patch_attention.min().item(),
            },
        }

        return attention_analysis

    def benchmark_inference_time(
        self, input_shapes: List[Tuple[int, int, int, int]], num_runs: int = 10
    ) -> Dict:
        """
        Benchmark inference time for different input shapes.

        Args:
            input_shapes: List of input shapes to test
            num_runs: Number of runs for averaging

        Returns:
            Dictionary containing timing results
        """
        import time

        timing_results = {}

        for shape in input_shapes:
            batch_size, channels, height, width = shape
            input_tensor = torch.randn(shape)

            # Warmup
            with torch.no_grad():
                _ = self.vit.forward(input_tensor)

            # Timing
            times = []
            for _ in range(num_runs):
                start_time = time.time()
                with torch.no_grad():
                    _ = self.vit.forward(input_tensor)
                end_time = time.time()
                times.append(end_time - start_time)

            timing_results[f"{shape}"] = {
                "mean_time": np.mean(times),
                "std_time": np.std(times),
                "min_time": np.min(times),
                "max_time": np.max(times),
                "throughput_images_per_sec": batch_size / np.mean(times),
            }

        return timing_results

    def visualize_attention_heatmap(
        self, input_tensor: torch.Tensor, layer_idx: int = -1, head_idx: int = 0
    ):
        """
        Visualize attention patterns as heatmap.

        Args:
            input_tensor: Input tensor
            layer_idx: Layer index to visualize
            head_idx: Attention head index to visualize
        """
        with torch.no_grad():
            outputs = self.vit.forward(input_tensor)

        if getattr(outputs, "attentions", None) is None:
            logger.warning("Attention weights not available")
            return

        # First batch item at the chosen layer/head: a (seq_len, seq_len) matrix.
        attn_matrix = outputs.attentions[layer_idx][0, head_idx]

        plt.figure(figsize=(10, 8))
        plt.imshow(attn_matrix.cpu().numpy(), cmap="Blues", interpolation="nearest")
        plt.colorbar()
        plt.title(f"Attention Pattern - Layer {layer_idx}, Head {head_idx}")
        plt.xlabel("Key Position")
        plt.ylabel("Query Position")
        plt.show()

    def analyze_detailed_layer_parameters(self) -> Dict:
        """
        Analyze detailed parameter information for each layer.

        Returns:
            Dictionary containing detailed parameter analysis
        """
        param_analysis = {
            "total_parameters": 0,
            "trainable_parameters": 0,
            "layer_breakdown": {},
            "parameter_distribution": {},
        }

        for name, param in self.vit.model.named_parameters():
            param_count = param.numel()
            param_analysis["total_parameters"] += param_count

            if param.requires_grad:
                param_analysis["trainable_parameters"] += param_count

            # Extract layer information
            layer_parts = name.split(".")
            if len(layer_parts) >= 2:
                layer_category = layer_parts[0]
                if layer_category not in param_analysis["layer_breakdown"]:
                    param_analysis["layer_breakdown"][layer_category] = {
                        "parameters": 0,
                        "components": {},
                    }

                param_analysis["layer_breakdown"][layer_category]["parameters"] += (
                    param_count
                )

                # Detailed component breakdown
                component_name = ".".join(layer_parts[1:])
                param_analysis["layer_breakdown"][layer_category]["components"][
                    component_name
                ] = {
                    "shape": list(param.shape),
                    "parameters": param_count,
                    "dtype": str(param.dtype),
                    "requires_grad": param.requires_grad,
                }

            # Parameter type distribution
            param_type = "bias" if "bias" in name else "weight"
            if param_type not in param_analysis["parameter_distribution"]:
                param_analysis["parameter_distribution"][param_type] = 0
            param_analysis["parameter_distribution"][param_type] += param_count

        return param_analysis

    def analyze_computational_complexity(
        self, input_shape: Tuple[int, int, int, int]
    ) -> Dict:
        """
        Analyze computational complexity and memory usage.

        Args:
            input_shape: Input tensor shape (batch_size, channels, height, width)

        Returns:
            Dictionary containing complexity analysis
        """
        batch_size, channels, height, width = input_shape
        patch_size = self.config.patch_size
        hidden_size = self.config.hidden_size
        num_layers = self.config.num_hidden_layers
        num_heads = self.config.num_attention_heads
        intermediate_size = self.config.intermediate_size

        num_patches = (height // patch_size) * (width // patch_size)
        seq_length = num_patches + 1  # +1 for CLS token

        complexity_analysis = {
            "input_info": {
                "batch_size": batch_size,
                "input_resolution": f"{height}x{width}",
                "patch_size": f"{patch_size}x{patch_size}",
                "num_patches": num_patches,
                "sequence_length": seq_length,
            },
            "flops_breakdown": {},
            "memory_breakdown": {},
            "attention_complexity": {},
        }

        # Patch embedding FLOPs
        patch_embed_flops = (
            batch_size
            * num_patches
            * (patch_size * patch_size * channels)
            * hidden_size
        )
        complexity_analysis["flops_breakdown"]["patch_embedding"] = patch_embed_flops

        # Transformer layer FLOPs (per layer)
        attention_flops = (
            batch_size * seq_length * seq_length * hidden_size * 2
        )  # Q*K + Attn*V
        feedforward_flops = (
            batch_size * seq_length * hidden_size * intermediate_size * 2
        )  # Linear layers
        layer_flops = attention_flops + feedforward_flops

        complexity_analysis["flops_breakdown"]["attention_per_layer"] = attention_flops
        complexity_analysis["flops_breakdown"]["feedforward_per_layer"] = (
            feedforward_flops
        )
        complexity_analysis["flops_breakdown"]["total_transformer_layers"] = (
            layer_flops * num_layers
        )
        complexity_analysis["flops_breakdown"]["total_flops"] = patch_embed_flops + (
            layer_flops * num_layers
        )

        # Memory analysis
        element_size = 4  # Assuming float32
        input_memory = batch_size * channels * height * width * element_size
        hidden_state_memory = batch_size * seq_length * hidden_size * element_size
        attention_memory = (
            batch_size * num_heads * seq_length * seq_length * element_size
        )

        complexity_analysis["memory_breakdown"]["input_tensor"] = input_memory
        complexity_analysis["memory_breakdown"]["hidden_states_per_layer"] = (
            hidden_state_memory
        )
        complexity_analysis["memory_breakdown"]["attention_weights_per_layer"] = (
            attention_memory
        )
        complexity_analysis["memory_breakdown"]["total_hidden_states"] = (
            hidden_state_memory * (num_layers + 1)
        )
        complexity_analysis["memory_breakdown"]["total_attention_weights"] = (
            attention_memory * num_layers
        )

        # Attention complexity analysis
        complexity_analysis["attention_complexity"] = {
            "attention_heads": num_heads,
            "head_dimension": hidden_size // num_heads,
            "sequence_length": seq_length,
            "quadratic_complexity": f"O({seq_length}²)",
            "attention_matrix_size_per_head": f"{seq_length}x{seq_length}",
            "total_attention_matrices": num_heads * num_layers,
        }

        return complexity_analysis

    def analyze_feature_extraction_capabilities(
        self, input_tensor: torch.Tensor
    ) -> Dict:
        """
        Analyze feature extraction capabilities at different layers.

        Args:
            input_tensor: Input tensor for analysis

        Returns:
            Dictionary containing feature extraction analysis
        """
        feature_analysis = {
            "layer_features": {},
            "feature_statistics": {},
            "representational_capacity": {},
        }

        with torch.no_grad():
            outputs = self.vit.forward(input_tensor)

        # Analyze features from different layers
        if hasattr(outputs, "hidden_states") and outputs.hidden_states is not None:
            for i, hidden_state in enumerate(outputs.hidden_states):
                cls_features = hidden_state[:, 0, :]  # CLS token features
                patch_features = hidden_state[:, 1:, :]  # Patch features

                feature_analysis["layer_features"][f"layer_{i}"] = {
                    "shape": list(hidden_state.shape),
                    "cls_features_shape": list(cls_features.shape),
                    "patch_features_shape": list(patch_features.shape),
                    "feature_statistics": {
                        "mean": hidden_state.mean().item(),
                        "std": hidden_state.std().item(),
                        "min": hidden_state.min().item(),
                        "max": hidden_state.max().item(),
                        "norm_l2": torch.norm(hidden_state, p=2).item(),
                    },
                    "cls_token_statistics": {
                        "mean": cls_features.mean().item(),
                        "std": cls_features.std().item(),
                        "activation_sparsity": (cls_features == 0)
                        .float()
                        .mean()
                        .item(),
                    },
                }

        # Representational capacity analysis
        final_features = outputs.features
        feature_analysis["representational_capacity"] = {
            "feature_dimension": final_features.shape[-1],
            "effective_rank": torch.linalg.matrix_rank(final_features).item(),
            "feature_diversity": torch.std(final_features, dim=-1).mean().item(),
            "feature_magnitude": torch.norm(final_features, dim=-1).mean().item(),
        }

        return feature_analysis

    def generate_comprehensive_markdown_summary(
        self, filepath: str = "vit_analysis_summary.md"
    ):
        """
        Generate a comprehensive markdown summary with detailed analysis.

        Args:
            filepath: Path to save the markdown summary
        """
        # Single-image dummy batch at the model's native resolution.
        dummy_input = torch.randn(1, 3, 224, 224)

        # Run every analysis pass (in the same order as before) and render the
        # combined results into one markdown document.
        report = self._generate_markdown_content(
            self.analyze_model_architecture(),
            self.analyze_layer_structure(),
            self.trace_forward_pass(dummy_input),
            self.analyze_attention_patterns(dummy_input),
            self.analyze_detailed_layer_parameters(),
            self.analyze_computational_complexity(dummy_input.shape),
            self.analyze_feature_extraction_capabilities(dummy_input),
        )

        with open(filepath, "w", encoding="utf-8") as f:
            f.write(report)

        logger.info(f"Comprehensive model summary exported to {filepath}")

    def _generate_markdown_content(
        self,
        architecture: Dict,
        layer_structure: List[Dict],
        trace_info: Dict,
        attention_analysis: Dict,
        param_analysis: Dict,
        complexity_analysis: Dict,
        feature_analysis: Dict,
    ) -> str:
        """
        Render all analysis results into a single markdown report string.

        Args:
            architecture: Output of analyze_model_architecture().
            layer_structure: Output of analyze_layer_structure().
            trace_info: Output of trace_forward_pass().
            attention_analysis: Output of analyze_attention_patterns();
                may be an empty dict, in which case that section is skipped.
            param_analysis: Output of analyze_detailed_layer_parameters().
            complexity_analysis: Output of analyze_computational_complexity().
            feature_analysis: Output of analyze_feature_extraction_capabilities().

        Returns:
            The complete markdown document as one string.
        """

        # Local import: only needed here to timestamp the report header.
        from datetime import datetime

        md_content = f"""# 🔍 Vision Transformer Model Analysis Report

> **Model**: `{self.model_name}`
> **Analysis Date**: {datetime.now().strftime("%B %d, %Y")}

---

## 📊 Architecture Overview

### Model Configuration
| Parameter | Value |
|-----------|-------|
| **Model Name** | `{architecture["model_name"]}` |
| **Hidden Size** | {architecture["hidden_size"]:,} |
| **Number of Layers** | {architecture["num_hidden_layers"]} |
| **Attention Heads** | {architecture["num_attention_heads"]} |
| **Intermediate Size** | {architecture["intermediate_size"]:,} |
| **Patch Size** | {architecture["patch_size"]}×{architecture["patch_size"]} |
| **Image Size** | {architecture["image_size"]}×{architecture["image_size"]} |
| **Input Channels** | {architecture["num_channels"]} (RGB) |
| **Number of Patches** | {architecture["num_patches"]} |
| **Sequence Length** | {architecture["sequence_length"]} ({architecture["num_patches"]} patches + 1 CLS token) |

### 📈 Model Statistics
- **Total Parameters**: `{architecture["total_parameters"]:,}`
- **Trainable Parameters**: `{architecture["trainable_parameters"]:,}`
- **Parameter Efficiency**: {(architecture["trainable_parameters"] / architecture["total_parameters"] * 100):.1f}% trainable

---

## 🏗️ Layer Architecture

### Input Embeddings Layer
```
📍 Layer 0: embeddings (ViTEmbeddings)
├── Input Shape:  (batch_size, {architecture["num_channels"]}, {architecture["image_size"]}, {architecture["image_size"]})
├── Output Shape: (batch_size, {architecture["sequence_length"]}, {architecture["hidden_size"]})
└── Components:
    ├── patch_embeddings    # Convert image patches to tokens
    ├── position_embeddings # Add positional information
    ├── cls_token          # Classification token
    └── dropout            # Regularization
```

### Transformer Encoder Layers
```
🔄 Layers 1-{architecture["num_hidden_layers"]}: encoder.layer.{{0-{architecture["num_hidden_layers"] - 1}}} (ViTLayer)
├── Input Shape:  (batch_size, {architecture["sequence_length"]}, {architecture["hidden_size"]})
├── Output Shape: (batch_size, {architecture["sequence_length"]}, {architecture["hidden_size"]})
└── Components:
    ├── attention          # Multi-head self-attention
    ├── intermediate       # Feed-forward network
    ├── output            # Output projection
    ├── layernorm_before  # Pre-normalization
    └── layernorm_after   # Post-normalization
```

<details>
<summary>📋 Detailed Layer Breakdown</summary>

| Layer | Name | Type | Input Shape | Output Shape |
|-------|------|------|-------------|--------------|"""

        # Add layer breakdown (one markdown table row per analyzed layer)
        for i, layer in enumerate(layer_structure):
            input_shape = layer["input_shape"].replace("batch_size", "batch")
            output_shape = layer["output_shape"].replace("batch_size", "batch")
            md_content += f"\n| {i} | `{layer['layer_name']}` | {layer['layer_type']} | `{input_shape}` | `{output_shape}` |"

        md_content += """

</details>

---

## 🔄 Forward Pass Analysis

### Data Flow Summary
```mermaid
graph LR
    A[Input Image<br/>1×3×224×224] --> B[Patch Embeddings<br/>1×197×768]
    B --> C[Transformer Layers<br/>×12]
    C --> D[Final Features<br/>1×768]
    C --> E[Hidden States<br/>1×197×768]
```

### Shape Transformations
| Stage | Description | Shape |
|-------|-------------|-------|"""

        md_content += f"""
| **Input** | Raw image tensor | `{trace_info["input_shape"]}` |
| **Embeddings** | Tokenized patches + CLS | `{trace_info["last_hidden_state_shape"]}` |
| **Encoder Output** | Final hidden states | `{trace_info["last_hidden_state_shape"]}` |
| **CLS Features** | Classification features | `{trace_info["final_features_shape"]}` |"""

        # Add intermediate layer outputs
        md_content += """

### Layer-wise Output Shapes
<details>
<summary>🔍 Intermediate Layer Outputs</summary>

| Layer | Output Shape |
|-------|--------------|"""

        for layer, shape in trace_info["intermediate_shapes"].items():
            md_content += f"\n| `{layer}` | `{shape}` |"

        md_content += """

</details>

---

## 🎯 Attention Pattern Analysis

### Attention Configuration"""

        # Attention section is emitted only when attention weights were captured
        # (analyze_attention_patterns returns {} otherwise).
        if attention_analysis:
            md_content += f"""
- **Layer Analyzed**: Final layer (`layer_index: {attention_analysis["layer_index"]}`)
- **Attention Shape**: `{attention_analysis["attention_shape"]}`
- **Number of Heads**: {attention_analysis["num_heads"]}
- **Sequence Length**: {attention_analysis["sequence_length"]} tokens

### Attention Statistics

#### 🔸 CLS Token Attention
| Metric | Value |
|--------|-------|
| **Shape** | `{attention_analysis["cls_attention_shape"]}` |
| **Mean** | `{attention_analysis["cls_attention_stats"]["mean"]:.5f}` |
| **Std Dev** | `{attention_analysis["cls_attention_stats"]["std"]:.5f}` |
| **Maximum** | `{attention_analysis["cls_attention_stats"]["max"]:.5f}` |
| **Minimum** | `{attention_analysis["cls_attention_stats"]["min"]:.5f}` |

#### 🔸 Patch-to-Patch Attention
| Metric | Value |
|--------|-------|
| **Shape** | `{attention_analysis["patch_attention_shape"]}` |
| **Mean** | `{attention_analysis["patch_attention_stats"]["mean"]:.5f}` |
| **Std Dev** | `{attention_analysis["patch_attention_stats"]["std"]:.5f}` |
| **Maximum** | `{attention_analysis["patch_attention_stats"]["max"]:.5f}` |
| **Minimum** | `{attention_analysis["patch_attention_stats"]["min"]:.5f}` |

### 📝 Attention Insights
- **CLS Token Attention**: The classification token shows focused attention patterns with a maximum attention weight of ~{attention_analysis["cls_attention_stats"]["max"] * 100:.1f}%
- **Patch Attention**: Inter-patch attention exhibits slightly higher variance, indicating selective focus on relevant image regions
- **Attention Distribution**: Both CLS and patch attentions show similar mean values (~{attention_analysis["cls_attention_stats"]["mean"]:.3f}), suggesting balanced attention across the sequence"""

        # Add parameter analysis
        md_content += f"""

---

## ⚙️ Parameter Analysis

### Parameter Distribution
| Component | Parameters | Percentage |
|-----------|------------|------------|"""

        # One row per top-level module group from the parameter breakdown.
        total_params = param_analysis["total_parameters"]
        for layer_name, layer_info in param_analysis["layer_breakdown"].items():
            percentage = (layer_info["parameters"] / total_params) * 100
            md_content += f"\n| **{layer_name}** | {layer_info['parameters']:,} | {percentage:.1f}% |"

        # Add computational complexity
        md_content += f"""

---

## 🚀 Computational Complexity

### FLOPs Breakdown
| Operation | FLOPs |
|-----------|-------|"""

        # isinstance guard: skip any non-numeric entries in the breakdown.
        for operation, flops in complexity_analysis["flops_breakdown"].items():
            if isinstance(flops, (int, float)):
                md_content += (
                    f"\n| **{operation.replace('_', ' ').title()}** | {flops:,.0f} |"
                )

        md_content += f"""

### Memory Usage (Float32)
| Component | Memory (MB) |
|-----------|-------------|"""

        for component, memory_bytes in complexity_analysis["memory_breakdown"].items():
            if isinstance(memory_bytes, (int, float)):
                # Raw byte counts converted to MB for readability.
                memory_mb = memory_bytes / (1024 * 1024)
                md_content += f"\n| **{component.replace('_', ' ').title()}** | {memory_mb:.2f} MB |"

        # Add feature analysis (only when representational capacity was computed)
        if feature_analysis.get("representational_capacity"):
            rep_cap = feature_analysis["representational_capacity"]
            md_content += f"""

---

## 🎨 Feature Representation Analysis

### Representational Capacity
| Metric | Value |
|--------|-------|
| **Feature Dimension** | {rep_cap["feature_dimension"]} |
| **Effective Rank** | {rep_cap["effective_rank"]} |
| **Feature Diversity** | {rep_cap["feature_diversity"]:.4f} |
| **Feature Magnitude** | {rep_cap["feature_magnitude"]:.4f} |"""

        # Add summary
        md_content += f"""

---

## 🎓 Model Summary

This Vision Transformer (ViT-Base) demonstrates the standard architecture with:
- **Patch-based tokenization** converting {architecture["image_size"]}×{architecture["image_size"]} images into {architecture["num_patches"]} patches
- **{architecture["num_hidden_layers"]} transformer layers** with consistent {architecture["hidden_size"]}-dimensional representations
- **Multi-head attention** ({architecture["num_attention_heads"]} heads) enabling diverse attention patterns
- **Efficient parameter usage** with ~{architecture["total_parameters"] / 1000000:.0f}M parameters
- **Stable feature propagation** maintaining consistent shapes throughout the network

The attention analysis reveals effective information aggregation patterns, with the CLS token successfully attending to relevant image patches for classification tasks."""

        return md_content

    # Backwards-compatible alias kept for callers of the older API name.
    def export_model_summary(self, filepath: str = "vit_analysis_summary.md"):
        """
        Export a comprehensive model summary to a markdown file.

        Delegates to generate_comprehensive_markdown_summary().

        Args:
            filepath: Path to save the summary
        """
        self.generate_comprehensive_markdown_summary(filepath)


def main():
    """Demonstrate the analyzer end-to-end on a pretrained ViT-Base model."""
    analyzer = VisionTransformerAnalyzer("google/vit-base-patch16-224")

    # High-level architecture summary
    print("Analyzing model architecture...")
    arch = analyzer.analyze_model_architecture()
    print(f"Model has {arch['total_parameters']:,} total parameters")
    print(f"Hidden size: {arch['hidden_size']}")
    print(f"Number of layers: {arch['num_hidden_layers']}")
    print(f"Sequence length: {arch['sequence_length']}")
    print(analyzer.vit)

    # Per-layer structure (first three layers only)
    print("\nAnalyzing layer structure...")
    for layer in analyzer.analyze_layer_structure()[:3]:
        print(f"Layer: {layer['layer_name']}")
        print(f"  Input: {layer['input_shape']}")
        print(f"  Output: {layer['output_shape']}")

    # Shape tracing through a dummy batch of two images
    print("\nTracing forward pass...")
    dummy_input = torch.randn(2, 3, 224, 224)
    trace = analyzer.trace_forward_pass(dummy_input)
    print(f"Input shape: {trace['input_shape']}")
    print(f"Final features shape: {trace['final_features_shape']}")
    print(f"Number of intermediate layers: {len(trace['intermediate_shapes'])}")

    # Attention statistics for the final layer (empty dict if unavailable)
    print("\nAnalyzing attention patterns...")
    attention_analysis = analyzer.analyze_attention_patterns(dummy_input)
    if attention_analysis:
        print(f"Attention shape: {attention_analysis['attention_shape']}")
        print(f"CLS attention stats: {attention_analysis['cls_attention_stats']}")

    # Quick timing benchmark across a few batch sizes
    print("\nBenchmarking inference time...")
    shapes = [(1, 3, 224, 224), (4, 3, 224, 224), (8, 3, 224, 224)]
    timing_results = analyzer.benchmark_inference_time(shapes, num_runs=5)
    for shape, results in timing_results.items():
        print(
            f"Shape {shape}: {results['mean_time']:.4f}s, {results['throughput_images_per_sec']:.2f} imgs/sec"
        )

    # Write the full markdown report to disk
    analyzer.export_model_summary("vit_analysis_summary.md")
    print("\nAnalysis complete! Summary exported to vit_analysis_summary.md")


if __name__ == "__main__":
    main()
