import torch
import torchvision.models as models
from torch.optim import Adam
from memory_profiler import FakeTensorMemoryProfilerMode, tensor_storage_id
from torch._subclasses import FakeTensorMode

import torch
from transformers import BertModel, BertConfig

if __name__ == "__main__":
    MB = 2 ** 20
    GB = 2 ** 30

    # NOTE(review): MB and MEMORY_LIMIT are currently unused — presumably
    # intended for a max-batch-size search against this budget; confirm
    # with the author before removing.
    MEMORY_LIMIT = 16 * GB

    def func(batch_size):
        """Profile peak fake-tensor memory of a short BERT training loop.

        Builds a BERT-base model under ``FakeTensorMode`` (so no real device
        memory is allocated), runs three forward/backward/optimizer-step
        iterations at the given ``batch_size`` with sequence length 512, and
        prints the profiler's running peak after each phase.

        Args:
            batch_size: number of sequences per iteration.

        Returns:
            Peak memory reported by the profiler (``ftmp.max_memory``),
            presumably in bytes — TODO confirm against memory_profiler.
        """
        print(f"Running batch size {batch_size}")
        with FakeTensorMode(allow_non_fake_inputs=True):
            with FakeTensorMemoryProfilerMode() as ftmp:
                # BERT-base configuration.
                config = BertConfig(
                    vocab_size=30522,  # standard BERT vocabulary size
                    hidden_size=768,
                    num_hidden_layers=12,
                    num_attention_heads=12,
                    intermediate_size=3072,
                    max_position_embeddings=512
                )

                # Build the model from the configuration above.
                model = BertModel(config)

                # Use the Adam name imported at module top (was
                # torch.optim.Adam — same class, now consistent with imports).
                optimizer = Adam(model.parameters(), lr=1e-5)
                ftmp.add_marker("model_init_boundary")
                for i in range(3):
                    # Random token ids; BERT requires int64 input ids.
                    input_ids = torch.randint(0, config.vocab_size, (batch_size, 512), dtype=torch.long)
                    # All-ones mask: attend to every position.
                    attention_mask = torch.ones_like(input_ids)

                    output = model(input_ids, attention_mask=attention_mask)
                    print(f"GB after forward: {ftmp.max_memory / GB}")
                    ftmp.add_marker(f"fw_bw_boundary_{i}")
                    # Reduce the last hidden state to a scalar loss.
                    logits = output.last_hidden_state
                    loss = logits.sum()
                    loss.backward()
                    ftmp.add_marker(f"bw_step_boundary_{i}")
                    print(f"GB after backward: {ftmp.max_memory / GB}")
                    optimizer.step()
                    # Fix: reset gradients each iteration so the loop models a
                    # real training step; without this, backward() accumulates
                    # into the existing .grad buffers from the prior iteration.
                    optimizer.zero_grad()
                    ftmp.add_marker(f"step_boundary_{i}")
                    print(f"GB after step: {ftmp.max_memory / GB}")

                ftmp.draw_varies()
                return ftmp.max_memory

    with torch.device("cuda:0"):
        func(512)




