




import os

import torch
import torchvision.models as models
from torch.profiler import profile, record_function, ProfilerActivity

# Baseline model/input pair reused by the profiling demos below.
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)

# Prefer the Apple-silicon MPS backend, but fall back to CPU when it is
# unavailable so the script still runs elsewhere (the original hard-coded
# "mps" and crashed on machines without it).
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")




print('++++++++++++++++++++++++++++++++ 【analyze execution time】 +++++++++++++++++++++++++++++++++++++++++')

# Profile a single forward pass on the CPU; record_shapes=True keeps the
# operator input shapes so results can later be grouped by shape.
with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof, \
        record_function("model_inference"):
    model(inputs)

print('+++++++++++++++++ 【CPU:stats for the execution】 +++++++++++++++++++++')
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))

print('+++++++++++++++++ 【CPU:finer granularity of results】 +++++++++++++++++++++')
# Same events, but aggregated per (operator, input shape) pair.
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10))


print('+++++++++++++++++ 【GPU】 +++++++++++++++++++++')

# Move the model and a fresh input batch to the accelerator chosen earlier.
model = models.resnet18().to(device)
inputs = torch.randn(5, 3, 224, 224).to(device)

# Pick the profiler activity and table sort key that actually match the
# device. The original mixed an "mps" device with ProfilerActivity.XPU
# (Intel GPU) and sorted by "cuda_time_total" (NVIDIA), so the GPU columns
# could never be populated.
activities = [ProfilerActivity.CPU]
if device.type == "cuda":
    activities.append(ProfilerActivity.CUDA)
    sort_key = "cuda_time_total"
elif device.type == "xpu":
    activities.append(ProfilerActivity.XPU)
    sort_key = "xpu_time_total"
else:
    # MPS (and plain CPU) work is recorded under the CPU activity.
    sort_key = "cpu_time_total"

with profile(activities=activities, record_shapes=True) as prof:
    with record_function("model_inference"):
        model(inputs)

print(prof.key_averages().table(sort_by=sort_key, row_limit=10))



print('++++++++++++++++++++++++++++++++ 【analyze memory consumption】 +++++++++++++++++++++++++++++++++++++++++')

# Run this demo on the CPU regardless of what the GPU section used.
cpu_device = torch.device('cpu')
model = models.resnet18().to(cpu_device)
inputs = torch.randn(5, 3, 224, 224).to(cpu_device)

# profile_memory=True makes the profiler track tensor allocations and
# frees attributed to each operator.
with profile(
        activities=[ProfilerActivity.CPU],
        profile_memory=True,
        record_shapes=True) as prof:
    model(inputs)

# Operators ranked by the memory they allocated themselves.
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))

# Sample output (omitting some columns):
# ---------------------------------  ------------  ------------  ------------
#                              Name       CPU Mem  Self CPU Mem    # of Calls
# ---------------------------------  ------------  ------------  ------------
#                       aten::empty      94.79 Mb      94.79 Mb           121
#     aten::max_pool2d_with_indices      11.48 Mb      11.48 Mb             1
#                       aten::addmm      19.53 Kb      19.53 Kb             1
#               aten::empty_strided         572 b         572 b            25
#                     aten::resize_         240 b         240 b             6
#                         aten::abs         480 b         240 b             4
#                         aten::add         160 b         160 b            20
#               aten::masked_select         120 b         112 b             1
#                          aten::ne         122 b          53 b             6
#                          aten::eq          60 b          30 b             2
# ---------------------------------  ------------  ------------  ------------
# Self CPU time total: 53.064ms

# Operators ranked by total memory usage, including child operators.
print(prof.key_averages().table(sort_by="cpu_memory_usage", row_limit=10))




print('++++++++++++++++++++++++++++++++ 【analyze memory consumption】 +++++++++++++++++++++++++++++++++++++++++')
model = models.resnet18().to(device)
inputs = torch.randn(5, 3, 224, 224).to(device)

# Use the GPU activity that matches the chosen device (the original paired
# an "mps" device with ProfilerActivity.XPU, which never applies here).
trace_activities = [ProfilerActivity.CPU]
if device.type == "cuda":
    trace_activities.append(ProfilerActivity.CUDA)
elif device.type == "xpu":
    trace_activities.append(ProfilerActivity.XPU)

with profile(activities=trace_activities) as prof:
    model(inputs)

# export_chrome_trace raises if the target directory does not exist, so
# create it first. Open the result at chrome://tracing or ui.perfetto.dev.
os.makedirs("output", exist_ok=True)
prof.export_chrome_trace("output/trace.json")




print('++++++++++++++++++++++++++++++++ 【Examining stack traces】 +++++++++++++++++++++++++++++++++++++++++')

# Choose activities and a sort key consistent with the device (the
# original profiled XPU activity on an "mps" device and sorted by
# "self_cuda_time_total", which is never populated there).
stack_activities = [ProfilerActivity.CPU]
if device.type == "cuda":
    stack_activities.append(ProfilerActivity.CUDA)
    stack_sort_key = "self_cuda_time_total"
elif device.type == "xpu":
    stack_activities.append(ProfilerActivity.XPU)
    stack_sort_key = "self_xpu_time_total"
else:
    stack_sort_key = "self_cpu_time_total"

# with_stack=True records Python call stacks for each profiled operator.
with profile(
    activities=stack_activities,
    with_stack=True,
) as prof:
    model(inputs)

# Print aggregated stats, keeping up to 5 stack frames per event group.
print(prof.key_averages(group_by_stack_n=5).table(sort_by=stack_sort_key, row_limit=2))


