import torch
import torchvision.models as models

# Demo: track peak/current CUDA memory across model init, forward, and backward.
# Requires a CUDA-capable device; every torch.cuda.* call below raises on CPU-only builds.

# Reset the peak-memory counters and release cached blocks back to the driver.
# reset_peak_memory_stats() supersedes the deprecated reset_max_memory_allocated().
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()

# Initialize the model and move its parameters to the GPU.
model = models.resnet50().cuda()

# Peak allocated memory so far (reflects the model's parameters/buffers).
print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / (1024 ** 2)} MB")

# Random input batch (N=1, C=3, H=224, W=224) on the GPU.
input_tensor = torch.rand(1, 3, 224, 224).cuda()

# Forward pass (builds the autograd graph, so activations are kept alive).
output = model(input_tensor)

# Peak allocated memory after the forward pass (params + activations).
print(f"Max memory allocated after forward: {torch.cuda.max_memory_allocated() / (1024 ** 2)} MB")

# Backward pass; .sum() reduces the output to a scalar so backward() needs no grad argument.
output.sum().backward()

# Peak allocated memory after the backward pass (adds gradient storage).
print(f"Max memory allocated after backward: {torch.cuda.max_memory_allocated() / (1024 ** 2)} MB")

# memory_reserved() supersedes the deprecated memory_cached(): bytes held by
# the caching allocator, including blocks not currently allocated to tensors.
print(f"memory cached after backward: {torch.cuda.memory_reserved()  / (1024 ** 2)} MB")

# max_memory_reserved() supersedes the deprecated max_memory_cached().
print(f"Max memory cached after backward: {torch.cuda.max_memory_reserved()  / (1024 ** 2)} MB")

# Release unused cached blocks back to the driver; live tensors stay allocated.
torch.cuda.empty_cache()

print(f"memory cached after empty_cache: {torch.cuda.memory_reserved()  / (1024 ** 2)} MB")
# Peak allocated memory is a high-water mark, so empty_cache() does not lower it.
print(f"Max memory allocated after empty cache: {torch.cuda.max_memory_allocated() / (1024 ** 2)} MB")