#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CLIP Vision Transformer模型访存量计算器
计算CLIP ViT-B/32和ViT-L/14模型的总访存量（Memory Access Cost）
"""

import math

class ViTMemoryAccessCalculator:
    """Accumulate the estimated memory-access cost (in bytes) of ViT layers.

    Each ``*_mac`` method models one layer type: it counts the elements the
    layer reads (activations + weights) and writes (outputs), scales the
    count by the byte width of the configured element type, prints a
    per-layer report, adds the result to the running total
    ``self.total_mac``, and returns it.

    NOTE(review): this is a first-order traffic model — it assumes every
    tensor is read/written exactly once and ignores biases, softmax
    intermediates and cache effects.
    """

    def __init__(self, batch_size: int = 1, data_type: str = 'float32'):
        """
        Initialize the calculator.

        Args:
            batch_size (int): batch size, defaults to 1.
            data_type (str): element type; one of 'float32', 'float16',
                'int8', 'int32'.

        Raises:
            ValueError: if ``data_type`` is not supported.
        """
        self.batch_size = batch_size
        self.data_type = data_type

        # Bytes per element for each supported data type.
        self.sizeof_map = {
            'float32': 4,
            'float16': 2,
            'int8': 1,
            'int32': 4
        }

        if data_type not in self.sizeof_map:
            raise ValueError(f"不支持的数据类型: {data_type}")

        self.sizeof_data = self.sizeof_map[data_type]
        self.total_mac = 0  # running total of memory accesses, in bytes

    def patch_embedding_mac(self, input_shape: tuple, patch_size: int,
                            embed_dim: int, name: str = ""):
        """
        Memory-access cost of the patch-embedding layer.

        Equivalent to Conv2d(in_channels=C, out_channels=embed_dim,
        kernel_size=patch_size, stride=patch_size). When H or W is not a
        multiple of patch_size the remainder pixels are dropped, matching
        the strided convolution.

        Args:
            input_shape (tuple): input shape (N, C, H, W)
            patch_size (int): side length of each square patch
            embed_dim (int): embedding dimension
            name (str): layer name used in the printed report

        Returns:
            tuple[int, int]: (memory accesses in bytes, number of patches)
        """
        N, C, H, W = input_shape

        # Number of non-overlapping patches the image is cut into.
        num_patches = (H // patch_size) * (W // patch_size)

        # Reads of the input feature map and the conv weights, plus writes
        # of the embedded patch tokens.
        input_mac = N * C * H * W
        weight_mac = embed_dim * C * patch_size * patch_size
        output_mac = N * embed_dim * num_patches

        total_mac = (input_mac + weight_mac + output_mac) * self.sizeof_data

        print(f"{name}: 输入={input_mac:,}, 权重={weight_mac:,}, 输出={output_mac:,}, 总计={total_mac:,} 字节")
        print(f"  -> Patch数量: {num_patches}, 每个patch: {patch_size}x{patch_size}")

        self.total_mac += total_mac
        return total_mac, num_patches

    def position_embedding_mac(self, num_patches: int, embed_dim: int,
                               name: str = "") -> int:
        """
        Memory-access cost of adding the position embedding.

        Args:
            num_patches (int): number of image patches
            embed_dim (int): embedding dimension
            name (str): layer name used in the printed report

        Returns:
            int: memory accesses in bytes
        """
        N = self.batch_size

        # Sequence length is patches plus one class token.
        seq_len = 1 + num_patches

        # Read the position-embedding table, read the token activations,
        # write the summed result.
        pos_embed_mac = seq_len * embed_dim
        input_mac = N * seq_len * embed_dim
        output_mac = N * seq_len * embed_dim

        total_mac = (pos_embed_mac + input_mac + output_mac) * self.sizeof_data

        print(f"{name}: 位置嵌入={pos_embed_mac:,}, 输入={input_mac:,}, 输出={output_mac:,}, 总计={total_mac:,} 字节")
        print(f"  -> 序列长度: {seq_len} (包含class token)")

        self.total_mac += total_mac
        return total_mac

    def multi_head_attention_mac(self, seq_len: int, embed_dim: int,
                                 num_heads: int, name: str = "") -> int:
        """
        Memory-access cost of one multi-head self-attention block.

        Counts the QKV projection (input, weights, outputs), the writes of
        the attention-score and attention-output tensors, and the output
        projection. Softmax re-reads of the score matrix are not counted
        (modeling simplification).

        Args:
            seq_len (int): sequence length (patches + class token)
            embed_dim (int): embedding dimension
            num_heads (int): number of attention heads
            name (str): layer name used in the printed report

        Returns:
            int: memory accesses in bytes
        """
        N = self.batch_size
        head_dim = embed_dim // num_heads

        # QKV projection input: (N, seq_len, embed_dim).
        input_mac = N * seq_len * embed_dim

        # Combined Q, K, V weight matrices: 3 * embed_dim * embed_dim.
        qkv_weight_mac = 3 * embed_dim * embed_dim

        # Q, K, V activations written: 3 * N * seq_len * embed_dim.
        qkv_output_mac = 3 * N * seq_len * embed_dim

        # Q @ K^T score matrix written: (N, num_heads, seq_len, seq_len).
        attention_scores_mac = N * num_heads * seq_len * seq_len

        # Attention @ V output written: (N, num_heads, seq_len, head_dim).
        attention_output_mac = N * num_heads * seq_len * head_dim

        # Final output projection: weights plus written activations.
        output_linear_weight_mac = embed_dim * embed_dim
        output_linear_output_mac = N * seq_len * embed_dim

        total_mac = (input_mac + qkv_weight_mac + qkv_output_mac +
                    attention_scores_mac + attention_output_mac +
                    output_linear_weight_mac + output_linear_output_mac) * self.sizeof_data

        print(f"{name}: ")
        print(f"  QKV变换: 输入={input_mac:,}, 权重={qkv_weight_mac:,}, 输出={qkv_output_mac:,}")
        print(f"  注意力计算: scores={attention_scores_mac:,}, output={attention_output_mac:,}")
        print(f"  输出变换: 权重={output_linear_weight_mac:,}, 输出={output_linear_output_mac:,}")
        print(f"  总计: {total_mac:,} 字节")

        self.total_mac += total_mac
        return total_mac

    def mlp_mac(self, seq_len: int, embed_dim: int, mlp_ratio: float = 4.0,
                name: str = "") -> int:
        """
        Memory-access cost of the MLP (feed-forward) block.

        Args:
            seq_len (int): sequence length
            embed_dim (int): embedding dimension
            mlp_ratio (float): hidden width as a multiple of embed_dim
            name (str): layer name used in the printed report

        Returns:
            int: memory accesses in bytes
        """
        N = self.batch_size
        hidden_dim = int(embed_dim * mlp_ratio)

        # First linear layer: embed_dim -> hidden_dim.
        input_mac = N * seq_len * embed_dim
        fc1_weight_mac = embed_dim * hidden_dim
        fc1_output_mac = N * seq_len * hidden_dim

        # Second linear layer: hidden_dim -> embed_dim.
        fc2_input_mac = N * seq_len * hidden_dim
        fc2_weight_mac = hidden_dim * embed_dim
        fc2_output_mac = N * seq_len * embed_dim

        total_mac = (input_mac + fc1_weight_mac + fc1_output_mac +
                    fc2_input_mac + fc2_weight_mac + fc2_output_mac) * self.sizeof_data

        print(f"{name}: ")
        print(f"  FC1: 输入={input_mac:,}, 权重={fc1_weight_mac:,}, 输出={fc1_output_mac:,}")
        print(f"  FC2: 输入={fc2_input_mac:,}, 权重={fc2_weight_mac:,}, 输出={fc2_output_mac:,}")
        print(f"  总计: {total_mac:,} 字节 (隐藏维度: {hidden_dim})")

        self.total_mac += total_mac
        return total_mac

    def layer_norm_mac(self, seq_len: int, embed_dim: int,
                       name: str = "") -> int:
        """
        Memory-access cost of one layer normalization.

        Args:
            seq_len (int): sequence length
            embed_dim (int): embedding dimension
            name (str): layer name used in the printed report

        Returns:
            int: memory accesses in bytes
        """
        N = self.batch_size

        # LayerNorm parameters: scale (gamma) + bias (beta).
        params_mac = 2 * embed_dim

        # One read and one write of the full activation tensor.
        input_mac = N * seq_len * embed_dim
        output_mac = N * seq_len * embed_dim

        total_mac = (params_mac + input_mac + output_mac) * self.sizeof_data

        print(f"{name}: 参数={params_mac:,}, 输入={input_mac:,}, 输出={output_mac:,}, 总计={total_mac:,} 字节")

        self.total_mac += total_mac
        return total_mac

    def classification_head_mac(self, embed_dim: int, num_classes: int,
                                name: str = "") -> int:
        """
        Memory-access cost of the classification/projection head.

        Only the class token is projected, so the sequence dimension
        does not appear.

        Args:
            embed_dim (int): embedding dimension
            num_classes (int): output width (classes or projection dim)
            name (str): layer name used in the printed report

        Returns:
            int: memory accesses in bytes
        """
        N = self.batch_size

        input_mac = N * embed_dim
        weight_mac = embed_dim * num_classes
        output_mac = N * num_classes

        total_mac = (input_mac + weight_mac + output_mac) * self.sizeof_data

        print(f"{name}: 输入={input_mac:,}, 权重={weight_mac:,}, 输出={output_mac:,}, 总计={total_mac:,} 字节")

        self.total_mac += total_mac
        return total_mac

def calculate_clip_vit_mac(model_name, batch_size=1, data_type='float32', image_size=224):
    """
    Compute the total memory-access cost of a CLIP vision transformer.

    Walks the model end to end (patch embedding, position embedding,
    transformer encoder stack, final norm, image projection) and prints a
    per-layer breakdown via ViTMemoryAccessCalculator.

    Args:
        model_name (str): model name ('vit-b/32', 'vit-l/14')
        batch_size (int): batch size
        data_type (str): element type ('float32', 'float16', 'int8')
        image_size (int): input image side length

    Returns:
        int: total memory accesses in bytes

    Raises:
        ValueError: if model_name or data_type is not supported.
    """
    calc = ViTMemoryAccessCalculator(batch_size, data_type)
    N = batch_size

    print(f"计算CLIP {model_name.upper()}模型访存量")
    print(f"批处理大小: {batch_size}, 数据类型: {data_type}, 图像尺寸: {image_size}x{image_size}")
    print("=" * 80)

    # Per-model hyper-parameters. 'projection_dim' is the width of CLIP's
    # final image-embedding projection: 512 for ViT-B/32, 768 for ViT-L/14
    # (the previous hard-coded 512 was wrong for ViT-L/14).
    vit_configs = {
        'vit-b/32': {
            'patch_size': 32,
            'embed_dim': 768,
            'num_layers': 12,
            'num_heads': 12,
            'mlp_ratio': 4.0,
            'projection_dim': 512
        },
        'vit-l/14': {
            'patch_size': 14,
            'embed_dim': 1024,
            'num_layers': 24,
            'num_heads': 16,
            'mlp_ratio': 4.0,
            'projection_dim': 768
        }
    }

    if model_name not in vit_configs:
        raise ValueError(f"不支持的模型: {model_name}")

    config = vit_configs[model_name]
    patch_size = config['patch_size']
    embed_dim = config['embed_dim']
    num_layers = config['num_layers']
    num_heads = config['num_heads']
    mlp_ratio = config['mlp_ratio']
    projection_dim = config['projection_dim']

    print(f"模型配置: patch_size={patch_size}, embed_dim={embed_dim}, layers={num_layers}, heads={num_heads}")
    print("=" * 80)

    # Input image: (N, 3, image_size, image_size).
    input_shape = (N, 3, image_size, image_size)

    # 1. Patch embedding.
    print("\n1. Patch Embedding层:")
    patch_embed_mac, num_patches = calc.patch_embedding_mac(
        input_shape, patch_size, embed_dim, "PatchEmbedding"
    )

    # 2. Position embedding (includes the class token).
    print("\n2. Position Embedding:")
    pos_embed_mac = calc.position_embedding_mac(num_patches, embed_dim, "PositionEmbedding")

    seq_len = 1 + num_patches  # +1 for class token

    # 3. Transformer encoder layers (pre-norm blocks, as in CLIP).
    print(f"\n3. Transformer Encoder ({num_layers}层):")
    for layer_idx in range(num_layers):
        layer_name = f"Layer{layer_idx + 1}"
        print(f"\n{layer_name}:")

        # Pre-LayerNorm before attention.
        calc.layer_norm_mac(seq_len, embed_dim, f"{layer_name}.PreNorm1")

        # Multi-head self-attention.
        calc.multi_head_attention_mac(seq_len, embed_dim, num_heads, f"{layer_name}.Attention")

        # Pre-LayerNorm before the MLP.
        calc.layer_norm_mac(seq_len, embed_dim, f"{layer_name}.PreNorm2")

        # MLP (feed-forward block).
        calc.mlp_mac(seq_len, embed_dim, mlp_ratio, f"{layer_name}.MLP")

    # 4. Final layer norm.
    print("\n4. Final Layer Norm:")
    calc.layer_norm_mac(seq_len, embed_dim, "FinalNorm")

    # 5. Image projection head (produces the CLIP image embedding).
    print("\n5. Image Projection Head:")
    calc.classification_head_mac(embed_dim, projection_dim, "ImageProjection")

    print("\n" + "=" * 80)
    print(f"{model_name.upper()}总访存量: {calc.total_mac:,} 字节")
    print(f"{model_name.upper()}总访存量: {calc.total_mac / 1024 / 1024:.2f} MB")
    print(f"{model_name.upper()}总访存量: {calc.total_mac / 1024 / 1024 / 1024:.2f} GB")

    # Extra summary information.
    print(f"\n模型详细信息:")
    print(f"  Patch数量: {num_patches}")
    print(f"  序列长度: {seq_len} (包含class token)")
    print(f"  总参数估算: {estimate_parameters(config)} M")

    return calc.total_mac

def estimate_parameters(config, image_size=224):
    """
    Roughly estimate the model's parameter count.

    Counts only the major weight matrices; biases, the class token and the
    final layer norm are omitted, so this slightly undercounts.

    Args:
        config (dict): model configuration with keys 'patch_size',
            'embed_dim', 'num_layers', 'mlp_ratio' and optionally
            'projection_dim' (defaults to 512 when absent).
        image_size (int): input image side length used to size the
            position embedding (defaults to 224).

    Returns:
        float: estimated parameter count, in millions.
    """
    patch_size = config['patch_size']
    embed_dim = config['embed_dim']
    num_layers = config['num_layers']
    mlp_ratio = config['mlp_ratio']
    projection_dim = config.get('projection_dim', 512)

    # Patch embedding: Conv2d(3, embed_dim, patch_size, stride=patch_size).
    patch_embed_params = 3 * patch_size * patch_size * embed_dim

    # Position embedding covers every patch plus the class token.
    num_patches = (image_size // patch_size) ** 2
    pos_embed_params = (1 + num_patches) * embed_dim

    # Per encoder layer: Q/K/V/output projections, two MLP matrices, and
    # the scale + bias of two layer norms.
    attention_params = 4 * embed_dim * embed_dim  # Q, K, V, output projections
    mlp_params = 2 * embed_dim * int(embed_dim * mlp_ratio)  # FC1 + FC2
    layernorm_params = 4 * embed_dim  # 2 LayerNorms per layer

    per_layer_params = attention_params + mlp_params + layernorm_params
    total_transformer_params = num_layers * per_layer_params

    # Final image-embedding projection.
    classification_params = embed_dim * projection_dim

    total_params = (patch_embed_params + pos_embed_params +
                   total_transformer_params + classification_params)

    return total_params / 1e6

def main():
    """主函数"""
    print("CLIP Vision Transformer模型访存量分析工具")
    print("=" * 80)
    
    # 测试的模型列表
    models = ['vit-b/32', 'vit-l/14']
    
    # 测试配置
    test_configs = [
        (1, 'float32'),
        (1, 'float16'),
        (1, 'int8'),
        (8, 'float32'),
        (16, 'float32'),
    ]
    
    results = {}
    
    # 对每个模型和配置进行测试
    for model_name in models:
        print(f"\n{'='*25} {model_name.upper()} {'='*25}")
        model_results = {}
        
        for batch_size, data_type in test_configs:
            print(f"\n配置: batch_size={batch_size}, data_type={data_type}")
            mac = calculate_clip_vit_mac(model_name, batch_size, data_type)
            model_results[(batch_size, data_type)] = mac
            print("\n" + "="*80)
        
        results[model_name] = model_results
    
    # 汇总对比结果
    print(f"\n{'='*30} 最终汇总对比 {'='*30}")
    print("=" * 80)
    
    # 按配置分组显示
    for batch_size, data_type in test_configs:
        print(f"\n配置: Batch={batch_size}, Type={data_type}")
        print("-" * 60)
        for model_name in models:
            mac = results[model_name][(batch_size, data_type)]
            print(f"{model_name.upper():>12s}: {mac/1024/1024:>10.2f} MB ({mac:>15,} bytes)")
    
    # 显示模型复杂度对比
    print(f"\n{'='*25} 模型复杂度对比 (float32, batch=1) {'='*25}")
    base_config = (1, 'float32')
    vit_b_mac = results['vit-b/32'][base_config]
    vit_l_mac = results['vit-l/14'][base_config]
    
    print(f"{'模型':>15s} {'访存量(MB)':>15s} {'相对ViT-B/32':>18s} {'参数量':>12s}")
    print("-" * 70)
    print(f"{'ViT-B/32':>15s} {vit_b_mac/1024/1024:>15.2f} {1.0:>18.2f}x {'~86M':>12s}")
    print(f"{'ViT-L/14':>15s} {vit_l_mac/1024/1024:>15.2f} {vit_l_mac/vit_b_mac:>18.2f}x {'~304M':>12s}")
    
    print(f"\n说明:")
    print(f"  - ViT-B/32: patch_size=32, embed_dim=768, layers=12, heads=12")
    print(f"  - ViT-L/14: patch_size=14, embed_dim=1024, layers=24, heads=16")
    print(f"  - 更小的patch_size意味着更多的patch和更长的序列")
    print(f"  - ViT-L/14由于patch更小，序列更长，访存量显著增加")

if __name__ == "__main__":
    main() 