import os
import time
import torch
import torch.distributed as dist
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP

try:
    import torch_npu
except ImportError:
    torch_npu = None

from transformers import LlamaForCausalLM, LlamaTokenizer

from monitor import ColossalAIMonitor


def main():
    """Run a minimal DDP training demo on Ascend NPU and measure monitor overhead.

    Initializes the distributed process group (HCCL backend), loads a LLaMA
    model, wraps it in PyTorch DDP, runs a few training steps on random
    token ids, and reports the wall-time and device-memory overhead
    attributable to ``ColossalAIMonitor``.

    Side effects: initializes/destroys the default process group, prints
    per-step and summary statistics on rank 0.
    """
    if not dist.is_initialized():
        # HCCL is the collective-communication backend for Ascend NPUs.
        dist.init_process_group(backend="hccl")
    rank = dist.get_rank()
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    world_size = dist.get_world_size()

    # ---- Select Ascend device; fall back to CPU when torch_npu is absent ----
    if torch_npu is not None and torch_npu.npu.is_available():
        device = torch.device(f"npu:{local_rank}")
        torch_npu.npu.set_device(device)
    else:
        device = torch.device("cpu")

    print(f"[INFO] rank={rank}/{world_size}, local_rank={local_rank}, device={device}")

    # ---- Load HF model (demo) ----
    model_name_or_path = "openlm-research/open_llama_7b"
    if rank == 0:
        print(f"Loading {model_name_or_path}")
    base_model = LlamaForCausalLM.from_pretrained(model_name_or_path).to(device).half()

    # ---- Wrap with PyTorch DDP ----
    ddp_model = DDP(
        base_model,
        device_ids=[local_rank] if device.type == "npu" else None,
    )

    # ---- Optimizer ----
    optimizer = optim.SGD(ddp_model.parameters(), lr=1e-3)

    # ---- Initialize monitoring tool ----
    monitor = ColossalAIMonitor(config_path="monitor_config.json")
    monitor.monitor(ddp_model)

    # Capture the memory baseline AFTER model/DDP/optimizer/monitor setup so the
    # reported delta excludes the model weights. (Previously the baseline was
    # taken before the model was loaded, so the "monitor" memory figure included
    # the entire 7B model footprint.)
    # NOTE(review): the delta still includes activations/gradients from the
    # training steps; isolating monitor-only memory would need a control run
    # without the monitor. Use torch_npu.npu explicitly rather than torch.npu,
    # which only exists after torch_npu's patching of the torch namespace.
    if device.type == "npu":
        baseline_mem = torch_npu.npu.max_memory_allocated(device)
    else:
        baseline_mem = 0

    # ---- Training loop with timing ----
    steps = 5
    ddp_model.train()

    total_monitor_time = 0.0  # cumulative time spent inside monitor.step_end()
    total_step_time = 0.0     # cumulative wall time across all training steps

    for step in range(1, steps + 1):
        step_start = time.time()

        optimizer.zero_grad()
        # Random token ids as dummy input; labels=input_ids yields an LM loss.
        input_ids = torch.randint(0, 32000, (2, 16), dtype=torch.long, device=device)
        out = ddp_model(input_ids, labels=input_ids)
        loss = out.loss if hasattr(out, "loss") else out[0].mean()

        loss.backward()
        optimizer.step()

        # ---- Per-step monitor overhead ----
        mon_start = time.time()
        monitor.step_end(ddp_model)
        mon_time = time.time() - mon_start
        total_monitor_time += mon_time

        # Total step duration (compute + monitor)
        step_dur = time.time() - step_start
        total_step_time += step_dur

        if rank == 0:
            print(f"Step {step}, loss={loss.item():.4f} | step_time={step_dur:.4f}s (monitor overhead={mon_time:.4f}s)")

    monitor.stop()
    dist.barrier()
    dist.destroy_process_group()

    # ---- Summary printed on rank 0 ----
    if rank == 0:
        # Peak-memory delta relative to the post-setup baseline.
        if device.type == "npu":
            final_mem = torch_npu.npu.max_memory_allocated(device)
            used_mem_mb = (final_mem - baseline_mem) / (1024 ** 2)
        else:
            used_mem_mb = 0

        avg_step_time = total_step_time / steps
        avg_mon_time = total_monitor_time / steps
        overhead_ratio = (avg_mon_time / avg_step_time) * 100 if avg_step_time > 0 else 0

        print("\n=== Monitoring Summary ===")
        print(f"Total steps: {steps}")
        print(f"Average step time: {avg_step_time:.6f} s")
        print(f"Average monitor overhead per step: {avg_mon_time:.6f} s ({overhead_ratio:.2f}%)")
        print(f"Extra memory usage due to monitor: {used_mem_mb:.2f} MB\n")


# Script entry point: only run the demo when executed directly (e.g. via torchrun),
# not when imported as a module.
if __name__ == "__main__":
    main()
