import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data

import torchvision.datasets
import torchvision.models
import torchvision.transforms as T

# NOTE(review): torch._C._profiler is a private module; the public path is
# torch.profiler.ProfilerActivity. Kept for compatibility.
from torch._C._profiler import ProfilerActivity

'''
    Usage:
    1. cd into the directory containing this script.
    2. Run the script:
       mprof run --multiprocess train.py -d iptvsmall -db singled82 -me 2 -lp simp -um async -gpu -np 10
    3. mprof plot
    4. tensorboard --logdir=./log
    5. Open a browser at: http://localhost:6006/#pytorch_profiler
'''


# --- Data pipeline -----------------------------------------------------------
# Resize CIFAR10's 32x32 images to 224x224 (ResNet18's expected input size),
# then normalize each channel to roughly [-1, 1].
transform = T.Compose(
    [T.Resize(224),
     T.ToTensor(),
     T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)


# --- Model / optimizer setup -------------------------------------------------
# Prefer Apple-silicon MPS when available; fall back to CPU so the script
# still runs on machines without an MPS backend (the original hard-coded
# "mps" and crashed elsewhere).
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
model = torchvision.models.resnet18(weights='IMAGENET1K_V1').to(device)
# NOTE(review): this dummy batch is never used — train() shadows the name with
# real loader batches. Kept only in case external code imports it; consider
# removing.
inputs = torch.randn(5, 3, 224, 224).to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model.train()  # enable training mode (batch-norm statistics, dropout)

def train(data):
    """Perform one SGD step on a single mini-batch.

    ``data`` is a ``(images, labels)`` pair as yielded by the DataLoader.
    Uses the module-level ``model``, ``criterion``, ``optimizer`` and
    ``device``; returns nothing.
    """
    batch_images = data[0].to(device=device)
    batch_labels = data[1].to(device=device)
    optimizer.zero_grad()  # clear gradients left over from the previous step
    predictions = model(batch_images)
    batch_loss = criterion(predictions, batch_labels)
    batch_loss.backward()
    optimizer.step()


# --- Profiled training loop --------------------------------------------------
# Schedule: skip WAIT steps, warm up for WARMUP steps, record ACTIVE steps,
# then stop (repeat=1).  Naming the counts keeps the loop's break condition
# in sync with the schedule (the original duplicated them as `1 + 1 + 3`).
WAIT_STEPS, WARMUP_STEPS, ACTIVE_STEPS = 1, 1, 3

with torch.profiler.profile(
        # NOTE(review): the model runs on MPS, whose ops are captured under the
        # CPU activity; CUDA/MTIA/XPU are kept from the original but will record
        # nothing on this device — confirm they are intentional.
        activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA, ProfilerActivity.MTIA, ProfilerActivity.XPU],
        schedule=torch.profiler.schedule(wait=WAIT_STEPS, warmup=WARMUP_STEPS, active=ACTIVE_STEPS, repeat=1),
        # Fixed path typo: 'profiler_outout' -> 'profiler_output'.
        on_trace_ready=torch.profiler.tensorboard_trace_handler('./profiler_output/resnet18'),
        record_shapes=True,
        profile_memory=True,
        with_stack=True
) as prof:
    for step, batch_data in enumerate(train_loader):
        prof.step()  # Need to call this at each step to notify profiler of steps' boundary.
        if step >= WAIT_STEPS + WARMUP_STEPS + ACTIVE_STEPS:
            break  # all scheduled steps recorded; stop training
        train(batch_data)

# with torch.mps.profiler.profile(mode="interval,event", wait_until_completed=True) as prof:
#     for step, batch_data in enumerate(train_loader):
#         # prof.step()  # Need to call this at each step to notify profiler of steps' boundary.
#         if step >= 1 + 1 + 3:
#             break
#         train(batch_data)
