import os
import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

print("开始导入所需库")

# 定义一个简单的神经网络
print("开始定义神经网络模型")
class SimpleNet(nn.Module):
    """Tiny three-layer MLP: a 10-dim input mapped through 64 and 32 hidden
    units (ReLU activations) to a single scalar output."""

    def __init__(self):
        super().__init__()
        # Layer widths: 10 -> 64 -> 32 -> 1.
        self.fc1 = nn.Linear(10, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 1)

    def forward(self, x):
        """Forward pass; returns a (batch, 1) tensor for a (batch, 10) input."""
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)

# Check whether a (virtual) GPU is usable from this process.
print("开始检查vGPU是否可用")
def check_vgpu():
    """Print CUDA availability and per-device properties.

    Returns:
        bool: True if at least one CUDA device is visible, else False.
    """
    started = time.time()
    available = torch.cuda.is_available()

    if not available:
        print("未检测到可用的GPU设备")
    else:
        gpu_count = torch.cuda.device_count()
        print(f"检测到 {gpu_count} 个GPU设备")
        # Dump name, memory, SM count and compute capability for each device.
        for idx in range(gpu_count):
            props = torch.cuda.get_device_properties(idx)
            print(f"GPU {idx}: {props.name}")
            print(f"  总内存: {props.total_memory / 1024**3:.2f} GB")
            print(f"  多处理器数量: {props.multi_processor_count}")
            print(f"  计算能力: {props.major}.{props.minor}")

    print(f"检查vGPU耗时: {(time.time() - started) * 1000:.2f} ms")
    return available

# Load a large dataset via memory mapping so it is not read fully into RAM.
def load_large_dataset(file_path, shape, dtype=np.float32, mode='r'):
    """Return a memory-mapped NumPy array backed by a raw binary file.

    Args:
        file_path: Path to the binary file holding the array data.
        shape: Tuple describing the array shape the file is viewed as.
        dtype: Element type stored in the file (default: float32).
        mode: ``numpy.memmap`` mode (default ``'r'``, read-only). With the
            default mode the file must already exist; pass ``'w+'`` to create it.

    Returns:
        numpy.memmap: A lazily-paged view of the file's contents.

    Raises:
        FileNotFoundError: If *file_path* does not exist and mode is ``'r'``.
    """
    return np.memmap(file_path, dtype=dtype, mode=mode, shape=shape)

# Main benchmark routine.
print("开始执行主函数")
def main():
    """Run the full benchmark: device check, model creation, data loading, training.

    Prints wall-clock timings for each phase. Trains SimpleNet on memory-mapped
    data against random targets — this measures throughput, not model quality.
    """
    vgpu_available = check_vgpu()

    print("开始创建模型")
    model_start_time = time.time()
    model = SimpleNet()
    model_end_time = time.time()
    print(f"创建模型耗时: {(model_end_time - model_start_time) * 1000:.2f} ms")

    if vgpu_available:
        print("开始将模型移动到GPU")
        start_time = time.time()
        device = torch.device("cuda:0")
        torch.cuda.empty_cache()  # release cached allocations before moving the model
        model = model.to(device)
        end_time = time.time()
        print(f"模型移动到GPU耗时: {(end_time - start_time) * 1000:.2f} ms")
        print(f"当前使用的GPU: {torch.cuda.get_device_name(0)}")
    else:
        device = torch.device("cpu")
        print("模型将在CPU上运行")

    print("开始创建随机输入数据")
    data_start_time = time.time()
    # Memory-map the dataset so it is never loaded into RAM all at once.
    # Bug fix: a mode='r' memmap raises FileNotFoundError when the backing
    # file is missing, so generate it with random float32 data on first run.
    dataset_path = 'large_dataset.dat'
    dataset_shape = (1000, 10)
    if not os.path.exists(dataset_path):
        np.random.rand(*dataset_shape).astype(np.float32).tofile(dataset_path)
    input_data = load_large_dataset(dataset_path, dataset_shape)
    data_end_time = time.time()
    print(f"创建随机输入数据耗时: {(data_end_time - data_start_time) * 1000:.2f} ms")

    print("开始定义损失函数和优化器")
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    print("开始训练模型")
    num_epochs = 200
    batch_size = 32
    # Any trailing partial batch is intentionally dropped.
    num_batches = len(input_data) // batch_size

    print(f"开始训练，总共 {num_epochs} 个epoch，每个epoch包含 {num_batches} 个batch")

    training_start_time = time.time()
    for epoch in range(num_epochs):
        epoch_start_time = time.time()
        total_loss = 0
        for i in range(num_batches):
            # Slice the memmap so only this batch is materialized in memory.
            batch_input = torch.from_numpy(input_data[i*batch_size:(i+1)*batch_size]).float().to(device)

            optimizer.zero_grad()
            output = model(batch_input)
            # Random targets: this is a throughput benchmark, not a learning task.
            loss = criterion(output, torch.randn(batch_size, 1, device=device))
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        avg_loss = total_loss / num_batches
        epoch_end_time = time.time()
        # Report progress every 10 epochs to keep output readable.
        if (epoch + 1) % 10 == 0:
            print(f"Epoch [{epoch+1}/{num_epochs}], 平均损失: {avg_loss:.4f}, 耗时: {(epoch_end_time - epoch_start_time):.2f} 秒")

    training_end_time = time.time()
    training_time = training_end_time - training_start_time
    print(f"训练完成，总耗时: {training_time:.2f} 秒")
    print(f"平均每个epoch耗时: {training_time / num_epochs:.2f} 秒")

    if vgpu_available:
        print("开始获取GPU内存使用情况")
        memory_start_time = time.time()
        print("\nGPU内存使用情况:")
        print(f"已分配内存: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")
        print(f"缓存内存: {torch.cuda.memory_reserved() / 1024**2:.2f} MB")
        memory_end_time = time.time()
        print(f"获取GPU内存信息耗时: {(memory_end_time - memory_start_time) * 1000:.2f} ms")

if __name__ == "__main__":
    # Script entry point: run the benchmark and report total wall-clock time.
    print("开始执行程序")
    t_begin = time.time()
    main()
    print(f"整个程序运行耗时: {(time.time() - t_begin):.2f} 秒")