import torch
import torchvision.models as models
from torch.optim import Adam
from memory_profiler import FakeTensorMemoryProfilerMode, tensor_storage_id
from torch._subclasses import FakeTensorMode


if __name__ == "__main__":
    GB = 2 ** 30  # bytes per gigabyte, used to report memory in GB

    def func(batch_size):
        """Estimate peak training memory for Inception V3 at `batch_size`.

        Runs three fake optimizer steps under FakeTensorMode, so no real
        tensor storage (CPU or GPU) is allocated; the profiler tracks what
        the allocations *would* have been.

        Returns the profiler's peak memory estimate (bytes, per the
        `FakeTensorMemoryProfilerMode.max_memory` counter).
        """
        print(f"Running batch size {batch_size}")
        # allow_non_fake_inputs lets tensors created outside the mode
        # (if any) be consumed by ops running inside it.
        with FakeTensorMode(allow_non_fake_inputs=True):
            with FakeTensorMemoryProfilerMode() as ftmp:
                # weights=None replaces the deprecated pretrained=False
                # (torchvision >= 0.13). aux_logits=False makes the model
                # return a single tensor instead of an (out, aux_out) tuple,
                # so output.sum() below is valid.
                model = models.inception_v3(weights=None, aux_logits=False)
                optimizer = Adam(model.parameters(), lr=1e-5)
                ftmp.add_marker("model_init_boundary")

                # Random image batch (batch_size, 3, 299, 299) — Inception
                # V3's expected input resolution. requires_grad=True so the
                # input's gradient buffer is counted by the profiler too.
                input_tensor = torch.rand(batch_size, 3, 299, 299, requires_grad=True)

                # Three iterations so steady-state (post-first-step optimizer
                # state) memory is captured, not just the first pass.
                for i in range(3):
                    optimizer.zero_grad()
                    output = model(input_tensor)
                    print(f"GB after forward: {ftmp.max_memory / GB}")
                    ftmp.add_marker(f"fw_bw_boundary_{i}")

                    # Trivial scalar loss — enough to drive a full backward.
                    loss = output.sum()
                    loss.backward()
                    ftmp.add_marker(f"bw_step_boundary_{i}")
                    print(f"GB after backward: {ftmp.max_memory / GB}")

                    optimizer.step()
                    ftmp.add_marker(f"step_boundary_{i}")
                    print(f"GB after step: {ftmp.max_memory / GB}")

                # presumably renders the recorded memory timeline — project
                # profiler API, TODO confirm
                ftmp.draw_varies()
                return ftmp.max_memory

    # Fake tensors are "placed" on cuda:0 without touching a real GPU.
    with torch.device("cuda:0"):
        func(512)
