import os
import time
import torch
import torch.distributed as dist
import torch.optim as optim

try:
    import torch_npu
except ImportError:
    torch_npu = None

from torch.nn.parallel import DistributedDataParallel as DDP

from diffusers import StableDiffusionPipeline

from monitor import ColossalAIMonitor


def main():
    """Measure ColossalAIMonitor overhead while fine-tuning a Stable Diffusion
    UNet under DDP on Ascend NPUs (HCCL backend), falling back to CPU.

    Runs a handful of synthetic training steps and, on rank 0, reports the
    average step time, the monitor's per-step overhead, and the NPU memory
    growth relative to a post-model-load baseline.
    """
    # 1) Distributed initialization (HCCL backend for Ascend NPUs).
    if not dist.is_initialized():
        dist.init_process_group(backend="hccl")
    rank = dist.get_rank()
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    world_size = dist.get_world_size()

    # 2) Bind this process to its Ascend NPU, or fall back to CPU when
    #    torch_npu is unavailable / no NPU is visible.
    if torch_npu and torch_npu.npu.is_available():
        device = torch.device(f"npu:{local_rank}")
        torch_npu.npu.set_device(device)
    else:
        device = torch.device("cpu")

    print(f"[INFO] rank={rank}/{world_size}, local_rank={local_rank}, device={device}")

    # 3) Load Stable Diffusion; only the UNet is trained in this benchmark.
    model_id = "Jiali/stable-diffusion-1.5"
    if rank == 0:
        print(f"Loading stable diffusion from {model_id}")

    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
    pipe.to(device)

    unet = pipe.unet
    unet.train()

    # device_ids is only meaningful for accelerator devices; None keeps DDP on CPU.
    ddp_unet = DDP(unet, device_ids=[local_rank] if device.type == "npu" else None)

    # 4) Optimizer (plain SGD — we only need gradient traffic, not convergence).
    optimizer = optim.SGD(ddp_unet.parameters(), lr=1e-4)

    # 5) Attach the monitor to the DDP-wrapped model.
    monitor = ColossalAIMonitor(config_path="monitor_config.json")
    monitor.monitor(ddp_unet)

    # Memory baseline after model load, so the summary reflects training growth.
    # Use the guarded torch_npu.npu namespace consistently — torch.npu only
    # exists because torch_npu monkey-patches torch at import time.
    if device.type == "npu":
        baseline_mem = torch_npu.npu.max_memory_allocated(device)
    else:
        baseline_mem = 0

    # 6) Training loop on synthetic data + per-step timing.
    steps = 5
    total_step_time = 0.0
    total_monitor_time = 0.0

    for step in range(1, steps + 1):
        step_start = time.time()
        optimizer.zero_grad()

        # Synthetic SD v1.5-shaped inputs: 64x64 latents, 77-token CLIP embeds.
        latents = torch.randn(2, 4, 64, 64, dtype=torch.float16, device=device)
        timesteps = torch.randint(50, 100, (2,), device=device)
        text_embeds = torch.randn(2, 77, 768, dtype=torch.float16, device=device)

        out = ddp_unet(latents, timesteps, encoder_hidden_states=text_embeds)
        # The UNet may return a dict-like output, a tuple, or a bare tensor.
        pred = out["sample"] if isinstance(out, dict) and "sample" in out else out[0] if isinstance(out, (tuple, list)) else out
        # Dummy scalar loss — enough to drive backward + DDP gradient sync.
        loss = pred.mean()

        loss.backward()
        optimizer.step()

        # Time only the monitor hook so its overhead can be isolated.
        mon_start = time.time()
        monitor.step_end(ddp_unet)
        mon_time = time.time() - mon_start
        total_monitor_time += mon_time

        step_dur = time.time() - step_start
        total_step_time += step_dur

        if rank == 0:
            print(f"Step {step}, loss={loss.item():.4f} | step_time={step_dur:.4f}s (monitor overhead={mon_time:.4f}s)")

    monitor.stop()
    dist.barrier()
    dist.destroy_process_group()

    # 7) Print measured values (rank 0 only).
    if rank == 0:
        if device.type == "npu":
            # Same namespace fix as the baseline measurement above.
            final_mem = torch_npu.npu.max_memory_allocated(device)
            used_mem_mb = (final_mem - baseline_mem) / (1024 ** 2)
        else:
            used_mem_mb = 0

        avg_step_time = total_step_time / steps
        avg_monitor_time = total_monitor_time / steps
        overhead_ratio = (avg_monitor_time / avg_step_time) * 100 if avg_step_time > 0 else 0

        print("\n=== Monitoring Summary ===")
        print(f"Total steps: {steps}")
        print(f"Avg step time: {avg_step_time:.6f} s")
        print(f"Avg monitor overhead: {avg_monitor_time:.6f} s ({overhead_ratio:.2f}%)")
        print(f"Memory usage: {used_mem_mb:.2f} MB")


if __name__ == "__main__":
    # Script entry point: launch with a distributed runner (e.g. torchrun)
    # so that LOCAL_RANK / process-group environment variables are set.
    main()
