import torch
import torchvision.models as models
from torch.optim import Adam
from memory_profiler import FakeTensorMemoryProfilerMode, tensor_storage_id
from torch._subclasses import FakeTensorMode


if __name__ == "__main__":
    # Size constants in bytes.
    MB = 2 ** 20
    GB = 2 ** 30

    MEMORY_LIMIT = 16 * GB

    def func(batch_size):
        """Profile fake-tensor memory for a TransformerEncoderLayer train loop.

        Runs three forward/backward/optimizer-step iterations entirely under
        FakeTensorMode — no real device memory is allocated — and reports the
        peak memory tracked by the profiler after each phase.
        Returns the profiler's peak memory in bytes.
        """
        print(f"Running batch size {batch_size}")
        # Fake tensors let us estimate memory use without real allocations;
        # allow_non_fake_inputs lets plain tensors flow into the faked ops.
        with FakeTensorMode(allow_non_fake_inputs=True), \
                FakeTensorMemoryProfilerMode() as profiler:
            model = torch.nn.TransformerEncoderLayer(1024, 16, 4096)
            optimizer = Adam(model.parameters(), lr=1e-5)
            profiler.add_marker("model_init_boundary")
            for step in range(3):
                inputs = torch.rand([batch_size, 512, 1024], requires_grad=True)
                output = model(inputs)
                print(f"GB after forward: {profiler.max_memory / GB}")
                profiler.add_marker(f"fw_bw_boundary_{step}")
                loss = output.sum()
                loss.backward()
                profiler.add_marker(f"bw_step_boundary_{step}")
                print(f"GB after backward: {profiler.max_memory / GB}")
                optimizer.step()
                profiler.add_marker(f"step_boundary_{step}")
                print(f"GB after step: {profiler.max_memory / GB}")

            profiler.draw_varies()
            return profiler.max_memory

    with torch.device("cuda:0"):
        func(2)

import torch
from transformers import GPT2Model, GPT2Config

# if __name__ == "__main__":
#     MB = 2 ** 20
#     GB = 2 ** 30

#     MEMORY_LIMIT = 16 * GB

#     def func(batch_size):
#         print(f"Running batch size {batch_size}")
#         with FakeTensorMode(allow_non_fake_inputs=True):
#             with FakeTensorMemoryProfilerMode() as ftmp:
#                 # Define the configuration parameters
#                 config = GPT2Config(
#                     vocab_size=50257,  # set to match the pretrained model's vocabulary size
#                     n_positions=1024,
#                     n_ctx=1024,
#                     n_embd=768,
#                     n_layer=12,
#                     n_head=12,
#                     max_length=1024
#                 )

#                 # Create the model with the defined configuration
#                 model = GPT2Model(config)

#                 optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
#                 ftmp.add_marker("model_init_boundary")
#                 for i in range(3):
#                     output = model(torch.randint(1, 10000, [batch_size, 512], dtype=torch.long))
#                     print(f"GB after forward: {ftmp.max_memory / GB}")
#                     ftmp.add_marker(f"fw_bw_boundary_{i}")
#                     # Extract the logits from the output object
#                     logits = output.last_hidden_state
#                     # Calculate the sum
#                     loss = logits.sum()
#                     loss.backward()
#                     ftmp.add_marker(f"bw_step_boundary_{i}")
#                     print(f"GB after backward: {ftmp.max_memory / GB}")
#                     optimizer.step()
#                     ftmp.add_marker(f"step_boundary_{i}")
#                     print(f"GB after step: {ftmp.max_memory / GB}")

#                 ftmp.draw_varies()
#                 return ftmp.max_memory


#     with torch.device("cuda:0"):
#         func(512)





