# 导入所需的库
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset # 导入 Dataset 以备后用
from torchvision import datasets, transforms   # 用于加载 MNIST 数据集
from lightning.fabric import Fabric, seed_everything # 从 lightning.fabric 导入 Fabric 和 seed_everything
from torchmetrics.classification import Accuracy # 用于计算准确率
import time # 用于计时

# A small CNN for 28x28 grayscale digit classification (MNIST).
class SimpleConvNet(nn.Module):
    """Two conv layers + two fully-connected layers with log-softmax output."""

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 -> 32 -> 64 channels, 3x3 kernels, stride 1.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # Regularization after pooling and after the first FC layer.
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # 64 channels * 12 * 12 spatial positions = 9216 flattened features.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = self.dropout1(F.max_pool2d(h, 2))
        h = torch.flatten(h, 1)
        h = self.dropout2(F.relu(self.fc1(h)))
        return F.log_softmax(self.fc2(h), dim=1)

# Run one training epoch over a loader already prepared by fabric.setup_dataloaders.
def train_epoch(fabric: Fabric, model: nn.Module, train_loader: DataLoader, optimizer: optim.Optimizer, epoch: int):
    """Train *model* for a single epoch, logging progress every 100 batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        # fabric.backward handles precision scaling / gradient sync for DDP.
        fabric.backward(loss)
        optimizer.step()
        if batch_idx % 100 == 0:
            # Approximate global sample count: local samples times world size.
            seen = batch_idx * len(data) * fabric.world_size
            pct = 100. * batch_idx / len(train_loader)
            fabric.print(f'训练 Epoch: {epoch} [{seen}/{len(train_loader.dataset)} '
                         f'({pct:.0f}%)]\t损失: {loss.item():.6f}')

# Evaluate on the test split; per-rank loss sums are all-gathered so the
# reported average covers the full dataset across all processes.
def test_epoch(fabric: Fabric, model: nn.Module, test_loader: DataLoader):
    """Compute distributed test loss/accuracy and print them (rank 0 only)."""
    model.eval()
    accuracy = Accuracy(task="multiclass", num_classes=10).to(fabric.device)
    loss_sum = torch.tensor(0.0, device=fabric.device)
    with torch.no_grad():
        for data, target in test_loader:
            logits = model(data)
            # Sum (not mean) so totals can be combined across ranks exactly.
            loss_sum += F.nll_loss(logits, target, reduction='sum')
            accuracy.update(logits, target)

    n_total = len(test_loader.dataset)
    avg_loss = fabric.all_gather(loss_sum).sum() / n_total
    final_acc = accuracy.compute()  # torchmetrics syncs across processes here
    n_correct = final_acc * n_total
    fabric.print(f'\n测试集: 平均损失: {avg_loss:.4f}, '
                 f'准确率: {final_acc*100:.0f}% ({int(n_correct)}/{n_total})\n')
    accuracy.reset()

# Main entry: set up Fabric (DDP + mixed precision), data, model, then train/evaluate.
def main(num_devices_to_use: int):
    """Train SimpleConvNet on MNIST across *num_devices_to_use* CUDA devices.

    Args:
        num_devices_to_use: number of GPUs to spread DDP training over.
    """
    # --- Hyperparameters ---
    batch_size_per_device = 64
    epochs = 5
    lr = 1.0  # Adadelta's conventional default learning rate
    seed = 42
    num_workers_per_loader = 2  # keep > 0 to exercise DataLoader worker cleanup

    # --- Fabric initialization: DDP strategy with 16-bit mixed precision ---
    fabric = Fabric(accelerator="cuda",
                    devices=num_devices_to_use,
                    strategy="ddp",
                    precision='16-mixed')
    fabric.launch()

    # --- Reproducibility: offset the seed per rank so shuffling differs across
    # processes while each run stays deterministic ---
    seed_everything(seed + fabric.global_rank)

    # --- Data preparation ---
    data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "mnist_data")
    fabric.print(f"数据路径: {data_path}")

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))  # standard MNIST mean/std
    ])

    # Only rank 0 downloads; the other ranks wait at the barrier below so they
    # never race on the on-disk dataset files.
    if fabric.is_global_zero:
        fabric.print(f"Rank {fabric.global_rank}: 检查并下载 MNIST 数据集...")
        datasets.MNIST(data_path, train=True, download=True)
        datasets.MNIST(data_path, train=False, download=True)
        fabric.print(f"Rank {fabric.global_rank}: 数据集下载完成或已存在。")

    fabric.barrier()
    fabric.print(f"Rank {fabric.global_rank}: 继续加载数据集...")

    train_dataset = datasets.MNIST(
        data_path, train=True, download=False, transform=transform
    )
    test_dataset = datasets.MNIST(
        data_path, train=False, download=False, transform=transform
    )

    # --- DataLoaders ---
    # persistent_workers keeps worker processes alive between epochs, avoiding
    # re-fork cost; it is only legal when num_workers > 0.
    # NOTE: the original comments here claimed persistent workers were being
    # forcibly DISABLED while the code enabled them — the code's behavior
    # (enabled when workers > 0) is kept, and the comments now match it.
    use_persistent_workers = num_workers_per_loader > 0
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size_per_device,
        num_workers=num_workers_per_loader,
        shuffle=True,
        persistent_workers=use_persistent_workers,
        pin_memory=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=batch_size_per_device * 2,  # no backward pass, so larger batches fit
        num_workers=num_workers_per_loader,
        shuffle=False,
        persistent_workers=use_persistent_workers,
        pin_memory=True
    )

    fabric.print(f"进程 {fabric.global_rank}/{fabric.world_size} 使用设备: {fabric.device}")
    fabric.print(f"训练数据集大小: {len(train_dataset)}")
    fabric.print(f"测试数据集大小: {len(test_dataset)}")
    fabric.print(f"每个设备的 Batch Size: {batch_size_per_device}")
    fabric.print(f"有效 Batch Size: {batch_size_per_device * fabric.world_size}")

    # --- Let Fabric wrap the loaders (adds DistributedSampler, device moves) ---
    train_loader, test_loader = fabric.setup_dataloaders(train_loader, test_loader)

    # Instantiate model and optimizer.
    # NOTE: if the model had BatchNorm and world_size > 1, convert before setup:
    #     model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = SimpleConvNet()
    optimizer = optim.Adadelta(model.parameters(), lr=lr)
    # setup moves the model to the device and applies the DDP wrapper.
    model, optimizer = fabric.setup(model, optimizer)

    # --- Training loop ---
    fabric.print(f"\n在 {fabric.world_size} 个 GPU 上开始训练...")
    start_time = time.time()

    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()
        train_epoch(fabric, model, train_loader, optimizer, epoch)
        # Optional barrier: ensures all ranks finished training before testing.
        fabric.barrier()
        test_epoch(fabric, model, test_loader)
        # Optional post-epoch barrier keeps the per-epoch timing honest.
        fabric.barrier()
        epoch_time = time.time() - epoch_start_time
        fabric.print(f"Epoch {epoch} 在 {epoch_time:.2f} 秒内完成。")

    total_time = time.time() - start_time
    fabric.print(f"\n训练在 {total_time:.2f} 秒内完成。")

    # --- Save the model via fabric.save (handles rank-0-only writing and
    # unwrapping the DDP module) ---
    save_path = "mnist_fabric_model_1.pt"
    state = {"model": model}
    fabric.save(save_path, state)
    fabric.print(f"\n模型状态已保存到 {save_path}")

    # No trailing barrier: fabric.launch() handles synchronization/cleanup on exit.
    fabric.print("all train finished.")

# Script entry point: validate GPU availability before launching training.
if __name__ == "__main__":
    num_devices = 2
    gpu_count = torch.cuda.device_count()
    if not torch.cuda.is_available():
        print("错误：未检测到 CUDA。此脚本需要 GPU 运行。")
    elif gpu_count < num_devices:
        print(f"错误：需要 {num_devices} 个 GPU，但只找到 {gpu_count} 个。")
    else:
        print(f"检测到 {gpu_count} 个 GPU。将使用指定的 {num_devices} 个 GPU。")
        main(num_devices_to_use=num_devices)