import torch
import numpy as np
from torch import nn
import torch.profiler as profiler
import torchvision.models as models
# from torch.autograd.profiler import ac
from torch import profiler

class MyModule(nn.Module):
    """A linear layer followed by a thresholded mask lookup.

    ``forward`` returns the linear output together with the indices of every
    ``mask`` entry that exceeds the mean (over the batch) of the per-sample
    output sums.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool = True):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features, bias)

    def forward(self, input, mask):
        # Uncomment to label this region in profiler traces:
        # with profiler.record_function("LINEAR PASS"):
        out = self.linear(input)

        # with profiler.record_function("MASK INDICES"):
        # Scalar threshold: mean over the batch of each sample's output sum.
        threshold = out.sum(dim=1).mean().item()
        # np.argwhere must run on CPU; move the indices back to wherever the
        # module's output lives instead of the original unconditional
        # ``.cuda()``, which crashed on CPU-only machines.
        hi_idx = np.argwhere(mask.cpu().numpy() > threshold)
        hi_idx = torch.from_numpy(hi_idx).to(out.device)
        return out, hi_idx

# --- Demo inputs for the custom module (consumed at the bottom of the file) ---
model = MyModule(500, 10)
input = torch.rand(128, 500)  # NOTE: shadows the `input` builtin; kept for compatibility
mask = torch.rand((500, 500, 500), dtype=torch.double)

# --- ResNet demo: choose the device once, then build the model/inputs on it ---
# NOTE: this rebinds `model`, replacing the MyModule instance created above.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = models.resnet18().to(device)
inputs = torch.randn(5, 3, 224, 224).to(device)

# Profile a single forward pass on CPU, recording operator input shapes.
with profiler.profile(activities=[profiler.ProfilerActivity.CPU], record_shapes=True) as prof:
    model(inputs)
# The original passed group_by_stack_n=True, but that parameter expects an int
# (number of stack frames) and is a no-op without with_stack=True, so it is
# omitted here.
print(prof.key_averages().table(sort_by='cpu_time_total', row_limit=10))

# Scheduled profiling with memory tracking: skip 1 step, warm up for 1, record 2.
# Only request the CUDA activity when a GPU is actually present — the original
# asked for it unconditionally, which fails on CPU-only machines.
activities = [profiler.ProfilerActivity.CPU]
if torch.cuda.is_available():
    activities.append(profiler.ProfilerActivity.CUDA)
with profiler.profile(
    activities=activities,
    schedule=torch.profiler.schedule(wait=1, warmup=1, active=2),
    record_shapes=True,
    profile_memory=True,
) as prof:
    for epoch in range(4):
        model(inputs)
        prof.step()  # advance the profiler schedule once per iteration
print(prof.key_averages().table(sort_by='cpu_memory_usage', row_limit=10))
prof.export_chrome_trace("trace.json")  # open in chrome://tracing or Perfetto

# Rebuild the model and inputs for the stack-trace demo. Use the `device`
# computed earlier instead of the original hard-coded "cuda", so the script
# also runs on CPU-only machines.
model = models.resnet18().to(device)
inputs = torch.randn(5, 3, 224, 224).to(device)
with torch.jit.optimized_execution(False):
    with profiler.profile(
        activities=[profiler.ProfilerActivity.CPU],
        with_stack=True,  # capture Python stacks so events carry source info
        record_shapes=True,
        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3),
    ) as prof:
        for _ in range(5):
            model(inputs)
            prof.step()
print(prof.with_stack)  # should print True: stack recording was enabled above
# Dump the captured Python stack for every event that recorded one.
for evt in prof.events():
    if not evt.stack:
        continue
    print(f"\nOperator: {evt.name}")
    print("Stack:")
    for frame in evt.stack:
        print(f"  {frame}")
print(prof.events().table(sort_by='cpu_time_total', row_limit=10, header="Stack Traces"))

print(torch.__version__)

# Profile the custom MyModule. A fresh instance is built here because the
# global `model` was rebound to resnet18 above (calling resnet18 with two
# arguments would fail). The original also passed with_rank=True, which is
# not a torch.profiler.profile parameter and raised a TypeError — removed.
my_module = MyModule(500, 10)
with profiler.profile(with_stack=True, profile_memory=True) as prof:
    out, idx = my_module(input, mask)
# Results are only finalized once the context exits, so print afterwards.
print(prof.key_averages(group_by_stack_n=5).table(sort_by='self_cpu_time_total', row_limit=5))

import os
os.makedirs("tutorial/class_4/trace", exist_ok=True)  # export fails if the directory is missing
prof.export_chrome_trace("tutorial/class_4/trace/trace.json")
