import torch
import torchvision.models as models
import torch.optim as optim
import time

def run_benchmark(num_steps=3, batch_size=1, device=None, model_fn=None):
    """Run a short synthetic training benchmark and report time and peak GPU memory.

    Args:
        num_steps: Number of optimizer steps to run (default 3, as in the
            original script).
        batch_size: Batch size of the random input images; adjust to fit
            your GPU's capacity.
        device: Target ``torch.device``. Defaults to CUDA when available,
            otherwise CPU.
        model_fn: Zero-argument callable returning the model to train.
            Defaults to ``torchvision.models.resnet50``.

    Returns:
        A ``(elapsed_seconds, max_memory_bytes)`` tuple. ``max_memory_bytes``
        is 0 when running on CPU, since ``torch.cuda`` memory stats only
        exist for CUDA devices.
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Guard all torch.cuda.* memory calls: they raise on CPU-only machines,
    # which would defeat the CPU fallback above.
    use_cuda = device.type == "cuda"

    model = (model_fn or models.resnet50)().to(device)
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=1e-5)

    max_memory_used = 0
    if use_cuda:
        # Reset stats BEFORE the first step so the first reading does not
        # include setup allocations; synchronize so timing starts cleanly.
        torch.cuda.reset_peak_memory_stats(device)
        torch.cuda.synchronize(device)

    time_start = time.time()
    for _ in range(num_steps):
        # Random image batch (batch_size, 3, 224, 224). The input does NOT
        # need requires_grad=True for training — only parameters do; setting
        # it would waste memory on an input gradient.
        input_tensor = torch.rand(batch_size, 3, 224, 224, device=device)

        # Forward pass with a trivial sum() loss — a real application would
        # use a proper loss function.
        output = model(input_tensor)
        loss = output.sum()

        # Backward pass, parameter update, then clear gradients for the
        # next iteration.
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if use_cuda:
            # Track the per-step peak, then reset so each step is measured
            # independently.
            current_memory = torch.cuda.max_memory_allocated(device)
            max_memory_used = max(max_memory_used, current_memory)
            torch.cuda.reset_peak_memory_stats(device)

    if use_cuda:
        # CUDA kernels are asynchronous; wait for them so the elapsed time
        # reflects the actual GPU work.
        torch.cuda.synchronize(device)
    time_end = time.time()

    return time_end - time_start, max_memory_used


def main():
    """Run the default ResNet50 benchmark and print the results."""
    elapsed, max_memory_used = run_benchmark()
    print(f"Training time: {elapsed:.2f} seconds")
    print(f"Maximum GPU memory used: {max_memory_used / (1024**3):.15f} GB")


if __name__ == "__main__":
    main()