import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.multiprocessing as mp
import os
import time
import math

# Fix random seeds (CPU and all CUDA devices) for reproducibility
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)

def setup(rank, world_size):
    """Join the NCCL process group as `rank` and pin this process to GPU `rank`.

    Uses a hard-coded localhost rendezvous, i.e. a single-node setup.
    """
    os.environ.update({'MASTER_ADDR': 'localhost', 'MASTER_PORT': '12355'})
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

def cleanup():
    """Tear down the process group created by setup()."""
    dist.destroy_process_group()

def count_model_flops(model, input_shape):
    """Roughly estimate training FLOPs per iteration, counting Linear layers only.

    Assumes a 2-D input of shape [B, D]. Each Linear contributes
    2 * B * in_features * out_features forward FLOPs (multiply-add counted as
    2 ops; the bias term is negligible and ignored). The backward pass is
    approximated as 2x the forward cost, so the returned total is 3x forward.
    """
    batch = input_shape[0]
    forward_flops = sum(
        2 * batch * layer.in_features * layer.out_features
        for layer in model.modules()
        if isinstance(layer, nn.Linear)
    )
    # forward + backward, with backward ~= 2 * forward
    return forward_flops * 3

def train_step(model, data, target, optimizer, scaler):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        output = model(data)
        loss = nn.CrossEntropyLoss()(output, target)
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
    return loss.item()

class DeepMLP(nn.Module):
    """A plain fully-connected stack: (Linear -> ReLU) blocks followed by a
    final Linear projection to `output_dim` (no activation on the output)."""

    def __init__(self, input_dim=4096, hidden_dim=4096, output_dim=1000, num_layers=10):
        super().__init__()
        blocks = [nn.Linear(input_dim, hidden_dim), nn.ReLU()]
        for _block_idx in range(num_layers - 2):
            blocks += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU()]
            # Dropout could be inserted here, at a slight cost to run-to-run
            # performance consistency.
        blocks.append(nn.Linear(hidden_dim, output_dim))
        self.net = nn.Sequential(*blocks)

    def forward(self, x):
        """Apply the MLP to a [B, input_dim] batch."""
        return self.net(x)

def demo_basic(rank, world_size):
    """Per-process DDP benchmark worker.

    Builds a deep MLP on this rank's GPU, runs a warmed-up, timed AMP
    training loop on synthetic data, and (on rank 0 only) reports
    throughput and achieved TFLOPS versus an assumed hardware peak.
    """
    print(f"Running DDP basic test on rank {rank}.")
    setup(rank, world_size)

    device = torch.device(f'cuda:{rank}')

    # ================================
    # Hyperparameter configuration
    # ================================
    input_dim = 4096
    hidden_dim = 4096
    output_dim = 1000
    num_layers = 12  # deeper model to raise arithmetic intensity
    batch_size = 64
    iterations = 100
    warmup_iters = 10

    # ================================
    # Build the deep MLP model
    # ================================
    model = DeepMLP(input_dim=input_dim, hidden_dim=hidden_dim,
                    output_dim=output_dim, num_layers=num_layers).to(device)

    # Wrap with DDP for gradient all-reduce across ranks
    ddp_model = DDP(model, device_ids=[rank], output_device=rank)

    # Optimizer and mixed-precision loss scaler
    optimizer = torch.optim.Adam(ddp_model.parameters(), lr=1e-3)
    scaler = torch.cuda.amp.GradScaler()

    # ================================
    # Estimate FLOPs
    # ================================
    input_shape = (batch_size, input_dim)
    flops_per_iter = count_model_flops(ddp_model, input_shape)  # total FLOPs (forward + backward)
    flops_per_iter_tflops = flops_per_iter / 1e12  # convert to TFLOPs per iteration

    # ================================
    # Training loop
    # ================================
    # Warmup iterations (untimed) let CUDA kernels compile and caches settle
    for _ in range(warmup_iters):
        data = torch.randn(batch_size, input_dim, device=device)
        target = torch.randint(0, output_dim, (batch_size,), device=device)
        train_step(ddp_model, data, target, optimizer, scaler)

    # Timed section; synchronize so the timer brackets only GPU work
    torch.cuda.synchronize()
    start_time = time.time()

    for _ in range(iterations):
        data = torch.randn(batch_size, input_dim, device=device)
        target = torch.randint(0, output_dim, (batch_size,), device=device)
        train_step(ddp_model, data, target, optimizer, scaler)

    torch.cuda.synchronize()
    end_time = time.time()

    total_time = end_time - start_time
    avg_time_per_iter = total_time / iterations
    throughput_samples = batch_size / avg_time_per_iter  # samples/sec
    achieved_tflops = flops_per_iter_tflops / avg_time_per_iter  # TFLOPS

    # ================================
    # Collect and print results (main process only)
    # ================================
    if rank == 0:
        print(f"\n🎯 Multi-GPU FP16 Performance Test Results:")
        print(f"  GPUs: {world_size} x NVIDIA RTX 4090 (假设)")
        print(f"  Model: {num_layers}L-MLP, Input: {input_dim}, Hidden: {hidden_dim}")
        print(f"  Batch Size: {batch_size} per GPU")
        print(f"  Total Iterations: {iterations}")
        print(f"  Total Time: {total_time:.4f}s")
        print(f"  Average Time per Iteration: {avg_time_per_iter:.4f}s")
        print(f"  Throughput: {throughput_samples:.2f} samples/sec/GPU")
        print(f"  Computation per Iteration: {flops_per_iter_tflops:.2f} TFLOPs")
        print(f"  Achieved Performance: {achieved_tflops:.2f} TFLOPS/GPU")
        print(f"  Aggregate Performance: {achieved_tflops * world_size:.2f} TFLOPS (total)")

        # Reference: a single RTX 4090's FP16 Tensor Core peak is ~330 TFLOPS
        peak_tflops_per_gpu = 330.0
        efficiency = (achieved_tflops / peak_tflops_per_gpu) * 100
        print(f"  Hardware Utilization: {efficiency:.1f}% of peak (est.)")

    cleanup()

def run_demo():
    """Entry point: spawn one `demo_basic` worker per visible CUDA device.

    Raises:
        RuntimeError: if no CUDA device is available.
    """
    world_size = torch.cuda.device_count()
    print(f"Using {world_size} GPU(s).")
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would let the script fail obscurely inside spawn.
    if world_size < 1:
        raise RuntimeError("至少需要1个GPU")

    # To pin the run to e.g. 4 cards, set world_size = 4 here.
    mp.spawn(demo_basic, args=(world_size,), nprocs=world_size, join=True)

# Script entry point: spawn workers only when run directly, not on import.
if __name__ == "__main__":
    run_demo()