#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ResNet50模型访存量计算器
计算ResNet50模型的总访存量（Memory Access Cost）
"""

import math

class MemoryAccessCalculator:
    """Accumulates the estimated memory access cost (in bytes) of NN layers.

    Each ``*_mac`` method models one layer type as "bytes read + bytes
    written", prints the per-layer breakdown, adds the figure to the running
    total ``total_mac``, and returns it.
    """

    def __init__(self, batch_size=1, data_type='float32'):
        """Set up the accumulator.

        Args:
            batch_size (int): batch size N used by fc_mac, default 1.
            data_type (str): element type; one of 'float32', 'float16',
                'int8', 'int32'.

        Raises:
            ValueError: if data_type is not supported.
        """
        self.batch_size = batch_size
        self.data_type = data_type

        # Bytes per element for every supported data type.
        self.sizeof_map = dict(float32=4, float16=2, int8=1, int32=4)

        if data_type not in self.sizeof_map:
            raise ValueError(f"不支持的数据类型: {data_type}")

        self.sizeof_data = self.sizeof_map[data_type]
        self.total_mac = 0  # running total of memory traffic, in bytes

    def conv2d_mac(self, input_shape, output_shape, kernel_size, name=""):
        """Account for one convolution layer and return its traffic in bytes.

        Modeled as: read input + read weights + write output, i.e.
        (N*IC*IH*IW + OC*IC*KH*KW + N*OC*OH*OW) * sizeof(data_type).

        Args:
            input_shape (tuple): (N, IC, IH, IW).
            output_shape (tuple): (N, OC, OH, OW).
            kernel_size (tuple): (KH, KW).
            name (str): label used in the printed breakdown.

        Returns:
            int: memory traffic in bytes.
        """
        n, in_ch, in_h, in_w = input_shape
        _, out_ch, out_h, out_w = output_shape
        k_h, k_w = kernel_size

        reads_input = n * in_ch * in_h * in_w
        reads_weight = out_ch * in_ch * k_h * k_w
        writes_output = n * out_ch * out_h * out_w
        layer_bytes = (reads_input + reads_weight + writes_output) * self.sizeof_data

        print(f"{name}: 输入={reads_input:,}, 权重={reads_weight:,}, 输出={writes_output:,}, 总计={layer_bytes:,} 字节")

        self.total_mac += layer_bytes
        return layer_bytes

    def elementwise_add_mac(self, tensor_shape, name=""):
        """Account for an element-wise addition and return its traffic in bytes.

        Two tensors are read and one is written, hence the factor 3.

        Args:
            tensor_shape (tuple): (N, C, H, W).
            name (str): label used in the printed breakdown.

        Returns:
            int: memory traffic in bytes.
        """
        n, ch, height, width = tensor_shape

        layer_bytes = 3 * n * ch * height * width * self.sizeof_data

        print(f"{name}: Element-wise Add, 访存量={layer_bytes:,} 字节")

        self.total_mac += layer_bytes
        return layer_bytes

    def pooling_mac(self, input_shape, output_shape, name=""):
        """Account for a pooling layer and return its traffic in bytes.

        Pooling has no weights: traffic is read input + write output.

        Args:
            input_shape (tuple): (N, C, H, W) of the input.
            output_shape (tuple): (N, C, H, W) of the output.
            name (str): label used in the printed breakdown.

        Returns:
            int: memory traffic in bytes.
        """
        n_in, c_in, h_in, w_in = input_shape
        n_out, c_out, h_out, w_out = output_shape

        reads = n_in * c_in * h_in * w_in
        writes = n_out * c_out * h_out * w_out
        layer_bytes = (reads + writes) * self.sizeof_data

        print(f"{name}: 输入={reads:,}, 输出={writes:,}, 总计={layer_bytes:,} 字节")

        self.total_mac += layer_bytes
        return layer_bytes

    def fc_mac(self, input_size, output_size, name=""):
        """Account for a fully-connected layer and return its traffic in bytes.

        Modeled as read input + read weight matrix + write output, using the
        batch size given at construction time.

        Args:
            input_size (int): number of input features.
            output_size (int): number of output features.
            name (str): label used in the printed breakdown.

        Returns:
            int: memory traffic in bytes.
        """
        n = self.batch_size

        reads_input = n * input_size
        reads_weight = input_size * output_size
        writes_output = n * output_size
        layer_bytes = (reads_input + reads_weight + writes_output) * self.sizeof_data

        print(f"{name}: 输入={reads_input:,}, 权重={reads_weight:,}, 输出={writes_output:,}, 总计={layer_bytes:,} 字节")

        self.total_mac += layer_bytes
        return layer_bytes

def calculate_resnet_mac(model_name: str, batch_size: int = 1, data_type: str = 'float32') -> int:
    """
    Compute the total memory access cost (bytes) of a ResNet variant.

    Walks the network layer by layer — 7x7 stem conv, 3x3 max-pool, the four
    residual stages, global average pool, and the 1000-way FC head — and
    accumulates each layer's estimated memory traffic in a
    MemoryAccessCalculator.  Per-layer figures and the final total are
    printed as a side effect.

    Args:
        model_name (str): one of 'resnet18', 'resnet34', 'resnet50',
            'resnet101', 'resnet152'.
        batch_size (int): batch size N.
        data_type (str): element type accepted by MemoryAccessCalculator
            ('float32', 'float16', 'int8', 'int32').

    Returns:
        int: total memory access cost in bytes.

    Raises:
        ValueError: if model_name or data_type is unsupported.
    """
    calc = MemoryAccessCalculator(batch_size, data_type)
    N = batch_size
    
    print(f"计算{model_name.upper()}模型访存量")
    print(f"批处理大小: {batch_size}, 数据类型: {data_type}")
    print("=" * 60)
    
    # Network input: (N, 3, 224, 224) — the standard ImageNet resolution.
    input_shape = (N, 3, 224, 224)
    
    # Stem Conv1: 7x7, stride=2, padding=3 -> 112x112 feature map.
    conv1_output = (N, 64, 112, 112)
    calc.conv2d_mac(input_shape, conv1_output, (7, 7), "Conv1")
    
    # MaxPool: 3x3, stride=2, padding=1 -> 56x56.
    maxpool_output = (N, 64, 56, 56)
    calc.pooling_mac(conv1_output, maxpool_output, "MaxPool")
    
    # Architecture table: blocks per stage, residual block type, and each
    # stage's OUTPUT channel count (already including the 4x bottleneck
    # expansion for the 50/101/152 variants).
    resnet_configs = {
        'resnet18': {
            'layers': [2, 2, 2, 2],
            'block_type': 'basic',  # BasicBlock
            'channels': [64, 128, 256, 512]
        },
        'resnet34': {
            'layers': [3, 4, 6, 3],
            'block_type': 'basic',  # BasicBlock
            'channels': [64, 128, 256, 512]
        },
        'resnet50': {
            'layers': [3, 4, 6, 3],
            'block_type': 'bottleneck',  # Bottleneck
            'channels': [256, 512, 1024, 2048]
        },
        'resnet101': {
            'layers': [3, 4, 23, 3],
            'block_type': 'bottleneck',  # Bottleneck
            'channels': [256, 512, 1024, 2048]
        },
        'resnet152': {
            'layers': [3, 8, 36, 3],
            'block_type': 'bottleneck',  # Bottleneck
            'channels': [256, 512, 1024, 2048]
        }
    }
    
    if model_name not in resnet_configs:
        raise ValueError(f"不支持的模型: {model_name}")
    
    config = resnet_configs[model_name]
    layers = config['layers']
    block_type = config['block_type']
    channels = config['channels']
    
    current_shape = maxpool_output  # shape flowing into the next block
    input_channels = 64  # channel count entering Layer1
    
    # Walk the four residual stages (Layer1..Layer4).
    for stage_idx, (num_blocks, out_channels) in enumerate(zip(layers, channels)):
        stage_name = f"Layer{stage_idx + 1}"
        
        # Output feature-map size of this stage: 56, 28, 14, 7.
        # NOTE(review): the stage_idx == 0 special case is redundant —
        # 56 // (2 ** 0) is also 56.
        if stage_idx == 0:
            feature_size = 56
        else:
            feature_size = 56 // (2 ** stage_idx)
        
        print(f"\n{stage_name} - {num_blocks}个残差块:")
        
        for block_idx in range(num_blocks):
            block_name = f"{stage_name}.Block{block_idx + 1}"
            
            if block_type == 'basic':
                # BasicBlock: Conv3x3 -> Conv3x3, plus identity/projection shortcut.
                mid_channels = out_channels
                
                # The first block of a stage may downsample and/or widen channels.
                if block_idx == 0:
                    if stage_idx > 0:  # downsampling stage: spatial size halves here
                        # First conv carries the stride: it reads the previous
                        # stage's resolution (current_shape) and writes the new one.
                        calc.conv2d_mac(current_shape, (N, mid_channels, feature_size, feature_size), 
                                       (3, 3), f"{block_name}.Conv1")
                        # Second 3x3 conv, entirely at the new resolution.
                        calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                       (N, out_channels, feature_size, feature_size), 
                                       (3, 3), f"{block_name}.Conv2")
                        # Projection shortcut: strided 1x1 conv matching the new shape.
                        calc.conv2d_mac(current_shape, (N, out_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Shortcut")
                    else:  # first block of Layer1: no downsampling, channels may still change
                        calc.conv2d_mac(current_shape, (N, mid_channels, feature_size, feature_size), 
                                       (3, 3), f"{block_name}.Conv1")
                        calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                       (N, out_channels, feature_size, feature_size), 
                                       (3, 3), f"{block_name}.Conv2")
                        # Projection shortcut only when the channel count changes
                        # (never the case for basic-block Layer1: 64 -> 64).
                        if input_channels != out_channels:
                            calc.conv2d_mac(current_shape, (N, out_channels, feature_size, feature_size), 
                                           (1, 1), f"{block_name}.Shortcut")
                else:
                    # Remaining blocks of the stage: shapes unchanged end to end.
                    calc.conv2d_mac((N, out_channels, feature_size, feature_size), 
                                   (N, mid_channels, feature_size, feature_size), 
                                   (3, 3), f"{block_name}.Conv1")
                    calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                   (N, out_channels, feature_size, feature_size), 
                                   (3, 3), f"{block_name}.Conv2")
                
                # Element-wise add of the residual branch and the shortcut.
                calc.elementwise_add_mac((N, out_channels, feature_size, feature_size), f"{block_name}.Add")
                
            else:  # bottleneck
                # Bottleneck: Conv1x1 (reduce) -> Conv3x3 -> Conv1x1 (expand),
                # plus shortcut.  out_channels already includes the 4x expansion.
                mid_channels = out_channels // 4
                
                if block_idx == 0:
                    if stage_idx > 0:  # downsampling stage
                        # NOTE(review): the stride is modeled on the first 1x1
                        # conv (original ResNet); torchvision's ResNet v1.5 puts
                        # it on the 3x3 conv instead, which shifts some of the
                        # per-layer traffic — confirm which variant is intended.
                        # 1x1 reduction conv (strided in this model)
                        calc.conv2d_mac(current_shape, (N, mid_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Conv1")
                        # 3x3 conv at the new resolution
                        calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                       (N, mid_channels, feature_size, feature_size), 
                                       (3, 3), f"{block_name}.Conv2")
                        # 1x1 expansion conv
                        calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                       (N, out_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Conv3")
                        # Projection shortcut (strided 1x1 conv)
                        calc.conv2d_mac(current_shape, (N, out_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Shortcut")
                    else:  # first block of Layer1: same resolution, 64 -> 256 channels
                        calc.conv2d_mac(current_shape, (N, mid_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Conv1")
                        calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                       (N, mid_channels, feature_size, feature_size), 
                                       (3, 3), f"{block_name}.Conv2")
                        calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                       (N, out_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Conv3")
                        # Channel count changes (64 -> 256), so a projection
                        # shortcut is always present here.
                        calc.conv2d_mac(current_shape, (N, out_channels, feature_size, feature_size), 
                                       (1, 1), f"{block_name}.Shortcut")
                else:
                    # Remaining blocks of the stage: shapes unchanged end to end.
                    calc.conv2d_mac((N, out_channels, feature_size, feature_size), 
                                   (N, mid_channels, feature_size, feature_size), 
                                   (1, 1), f"{block_name}.Conv1")
                    calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                   (N, mid_channels, feature_size, feature_size), 
                                   (3, 3), f"{block_name}.Conv2")
                    calc.conv2d_mac((N, mid_channels, feature_size, feature_size), 
                                   (N, out_channels, feature_size, feature_size), 
                                   (1, 1), f"{block_name}.Conv3")
                
                # Element-wise add of the residual branch and the shortcut.
                calc.elementwise_add_mac((N, out_channels, feature_size, feature_size), f"{block_name}.Add")
        
        current_shape = (N, out_channels, feature_size, feature_size)
        input_channels = out_channels
    
    # Global average pool: feature_size x feature_size -> 1x1 per channel.
    final_channels = channels[-1]
    avgpool_output = (N, final_channels, 1, 1)
    calc.pooling_mac(current_shape, avgpool_output, "AdaptiveAvgPool")
    
    # Classifier head: final_channels -> 1000 ImageNet classes.
    calc.fc_mac(final_channels, 1000, "FC")
    
    print("=" * 60)
    print(f"{model_name.upper()}总访存量: {calc.total_mac:,} 字节")
    print(f"{model_name.upper()}总访存量: {calc.total_mac / 1024 / 1024:.2f} MB")
    print(f"{model_name.upper()}总访存量: {calc.total_mac / 1024 / 1024 / 1024:.2f} GB")
    
    return calc.total_mac

def calculate_resnet50_mac(batch_size=1, data_type='float32'):
    """Backward-compatible wrapper: total memory access cost of ResNet50.

    Delegates to calculate_resnet_mac with the model fixed to 'resnet50'.

    Args:
        batch_size (int): batch size N.
        data_type (str): element data type.

    Returns:
        int: total memory access cost in bytes.
    """
    model = 'resnet50'
    return calculate_resnet_mac(model, batch_size, data_type)

def main():
    """Entry point: compute and compare memory access costs across ResNet variants.

    Runs every model in ``models`` under every (batch_size, data_type)
    configuration, then prints per-configuration summaries and a complexity
    table normalized to ResNet18 (float32, batch=1).
    """
    print("ResNet模型访存量分析工具")
    print("=" * 80)
    
    # Models to evaluate.
    models = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
    
    # (batch_size, data_type) configurations to test.
    test_configs = [
        (1, 'float32'),
        (1, 'int8'),
        (8, 'float32'),
    ]
    
    # Rough parameter-count estimates, for display only.  A dict lookup with a
    # default replaces the old if/elif chain, which left `params` unbound (and
    # crashed with UnboundLocalError) for any model name missing from the chain.
    param_estimates = {
        'resnet18': "~11M",
        'resnet34': "~21M",
        'resnet50': "~25M",
        'resnet101': "~44M",
        'resnet152': "~60M",
    }
    
    results = {}
    
    # Evaluate every model under every configuration.
    for model_name in models:
        print(f"\n{'='*20} {model_name.upper()} {'='*20}")
        model_results = {}
        
        for batch_size, data_type in test_configs:
            print(f"\n配置: batch_size={batch_size}, data_type={data_type}")
            mac = calculate_resnet_mac(model_name, batch_size, data_type)
            model_results[(batch_size, data_type)] = mac
            print("\n" + "="*60)
        
        results[model_name] = model_results
    
    # Summary tables, grouped by configuration.
    print(f"\n{'='*25} 最终汇总对比 {'='*25}")
    print("=" * 80)
    
    for batch_size, data_type in test_configs:
        print(f"\n配置: Batch={batch_size}, Type={data_type}")
        print("-" * 60)
        for model_name in models:
            mac = results[model_name][(batch_size, data_type)]
            print(f"{model_name.upper():>10s}: {mac/1024/1024:>8.2f} MB ({mac:>12,} bytes)")
    
    # Relative complexity, normalized to ResNet18 at float32 / batch=1.
    print(f"\n{'='*20} 模型复杂度对比 (float32, batch=1) {'='*20}")
    base_config = (1, 'float32')
    resnet18_mac = results['resnet18'][base_config]
    
    print(f"{'模型':>12s} {'访存量(MB)':>12s} {'相对ResNet18':>15s} {'总参数估算':>15s}")
    print("-" * 60)
    for model_name in models:
        mac = results[model_name][base_config]
        ratio = mac / resnet18_mac
        params = param_estimates.get(model_name, "N/A")
        print(f"{model_name.upper():>12s} {mac/1024/1024:>12.2f} {ratio:>15.2f}x {params:>15s}")

if __name__ == "__main__":
    main() 