import os
import time
import torch
import torch.optim as optim
import torch.distributed as dist

try:
    import torch_npu
except ImportError:
    torch_npu = None

# ColossalAI 0.4.9
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin

from diffusers import StableDiffusionPipeline

from monitor import ColossalAIMonitor


def _setup_distributed():
    """Initialize torch.distributed with the 'hccl' backend and pick a device.

    Returns:
        tuple: ``(rank, local_rank, world_size, device)`` where ``device`` is
        ``npu:<local_rank>`` when torch_npu reports an available NPU, else CPU.
    """
    if not dist.is_initialized():
        dist.init_process_group(backend="hccl")
    rank = dist.get_rank()
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    world_size = dist.get_world_size()

    if torch_npu and torch_npu.npu.is_available():
        device = torch.device(f"npu:{local_rank}")
        torch_npu.npu.set_device(device)
    else:
        device = torch.device("cpu")
    return rank, local_rank, world_size, device


def _peak_memory(device):
    """Return peak allocated bytes on *device*; 0 for non-NPU devices.

    Consistently uses ``torch_npu.npu`` (the original mixed ``torch.npu`` and
    ``torch_npu.npu``; ``torch.npu`` only exists as a side effect of importing
    torch_npu, which is optional here). ``device.type == "npu"`` implies the
    torch_npu import succeeded, so the call is safe.
    """
    if device.type == "npu":
        return torch_npu.npu.max_memory_allocated(device)
    return 0


def _train_steps(unet, optimizer, booster, monitor, device, rank, steps):
    """Run *steps* synthetic training iterations on the boosted U-Net.

    Inputs are random latents / timesteps / text embeddings; the mean of the
    predicted sample serves as a throwaway loss. Monitoring overhead is timed
    separately from the full step.

    Returns:
        tuple: ``(total_step_time, total_monitor_time)`` in seconds.
    """
    total_step_time = 0.0
    total_monitor_time = 0.0

    for step in range(1, steps + 1):
        step_start = time.time()

        optimizer.zero_grad()

        # Synthetic batch: SD 1.5 latent shape (B, 4, 64, 64) and CLIP text
        # embeddings (B, 77, 768) — TODO confirm against the checkpoint used.
        latents = torch.randn(2, 4, 64, 64, dtype=torch.float16, device=device)
        timesteps = torch.randint(50, 100, (2,), device=device)
        text_embeds = torch.randn(2, 77, 768, dtype=torch.float16, device=device)

        # Forward pass; the output container differs across diffusers
        # versions, so handle dict / tuple / tensor returns.
        out = unet(latents, timesteps, encoder_hidden_states=text_embeds)
        if isinstance(out, dict) and "sample" in out:
            loss = out["sample"].mean()
        else:
            loss = out[0].mean() if isinstance(out, (tuple, list)) else out.mean()

        # Backward must go through the booster so Gemini can manage gradients.
        booster.backward(loss, optimizer)

        optimizer.step()

        # Time only the monitoring hook so its overhead can be reported.
        mon_start = time.time()
        monitor.step_end(unet)
        mon_time = time.time() - mon_start
        total_monitor_time += mon_time

        step_dur = time.time() - step_start
        total_step_time += step_dur

        if rank == 0:
            print(f"Step {step}, loss={loss.item():.4f} | step_time={step_dur:.4f}s "
                  f"(monitor overhead={mon_time:.4f}s)")

    return total_step_time, total_monitor_time


def _report(device, baseline_mem, steps, total_step_time, total_monitor_time):
    """Print the final timing / memory summary (intended for rank 0 only)."""
    used_mem_mb_real = (_peak_memory(device) - baseline_mem) / (1024 ** 2)

    avg_step_time_real = total_step_time / steps if steps > 0 else 0
    avg_mon_time_real = total_monitor_time / steps if steps > 0 else 0
    overhead_ratio_real = (avg_mon_time_real / avg_step_time_real * 100) if avg_step_time_real > 0 else 0

    print("\n=== Monitoring Summary ===")
    print(f"Total steps: {steps}")
    print(f"Avg step time (real) = {avg_step_time_real:.4f}s")
    print(f"Avg monitor overhead (real) = {avg_mon_time_real:.4f}s => ratio={overhead_ratio_real:.2f}%")
    print(f"Memory usage (real) = {used_mem_mb_real:.2f} MB")


def main():
    """Minimal training example: ColossalAI Booster + GeminiPlugin (ZeRO-3)
    fine-tuning the Stable Diffusion U-Net on Ascend NPU (CPU fallback).

    NOTE(review): this sets up torch.distributed directly instead of calling
    ``colossalai.launch_from_torch`` — verify that is sufficient for the
    Booster on ColossalAI 0.4.9.
    """
    rank, local_rank, world_size, device = _setup_distributed()
    print(f"[INFO] rank={rank}/{world_size}, local_rank={local_rank}, device={device}")

    # Peak-memory baseline before model loading, so the summary reflects only
    # allocations made by this script.
    baseline_mem = _peak_memory(device)

    # Load the StableDiffusionPipeline in FP16; only the U-Net is trained.
    model_id = "Jiali/stable-diffusion-1.5"
    if rank == 0:
        print(f"Loading Stable Diffusion from {model_id}")
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)

    unet = pipe.unet
    unet.train()

    base_optimizer = optim.SGD(unet.parameters(), lr=1e-4)

    # GeminiPlugin provides ZeRO-3 style parameter/optimizer sharding;
    # 'static' placement is the recommended policy on Ascend.
    plugin = GeminiPlugin(
        placement_policy='static',
        precision='fp16'
    )

    booster = Booster(plugin=plugin)

    unet, optimizer, _, _, _ = booster.boost(unet, base_optimizer)

    # Attach the external monitoring tool to the boosted model.
    monitor = ColossalAIMonitor(config_path="monitor_config.json")
    monitor.monitor(unet)

    steps = 5
    total_step_time, total_monitor_time = _train_steps(
        unet, optimizer, booster, monitor, device, rank, steps
    )

    monitor.stop()

    dist.barrier()
    dist.destroy_process_group()

    # Final statistics, printed on rank 0 only.
    if rank == 0:
        _report(device, baseline_mem, steps, total_step_time, total_monitor_time)


if __name__ == "__main__":
    # Example launch:
    #     torchrun --nproc_per_node=2 train_stablediff_npu_dp_local.py
    # Requirements:
    #     1) run inside the docker image hpcaitech/pytorch-npu:2.4.0
    #     2) ColossalAI==0.4.9 installed
    #     3) backend='hccl', device='npu'
    main()
