import os
import time
import torch
import torch.optim as optim
import torch.distributed as dist

try:
    import torch_npu
except ImportError:
    torch_npu = None

# ===== New ColossalAI 0.4.9 API (Booster + plugins) =====
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin

# transformers
from transformers import LlamaForCausalLM, LlamaTokenizer

from monitor import ColossalAIMonitor


def _init_distributed():
    """Initialise torch.distributed with the HCCL backend (idempotent).

    Returns:
        tuple[int, int, int]: (global rank, local rank, world size).
    """
    if not dist.is_initialized():
        dist.init_process_group(backend="hccl")
    rank = dist.get_rank()
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    world_size = dist.get_world_size()
    return rank, local_rank, world_size


def _select_device(local_rank):
    """Pick the compute device: the local Ascend NPU when available, else CPU.

    Also binds the current process to its NPU so subsequent allocations land
    on the right card.
    """
    if torch_npu is not None and torch_npu.npu.is_available():
        device = torch.device(f"npu:{local_rank}")
        torch_npu.npu.set_device(device)
        return device
    return torch.device("cpu")


def _max_allocated_bytes(device):
    """Return peak allocated memory on *device* in bytes (0 on CPU).

    Uses ``torch_npu.npu`` directly instead of the ``torch.npu`` alias so the
    call works even if the monkey-patch onto ``torch`` is absent — the
    original code mixed both spellings.
    """
    if device.type == "npu":
        return torch_npu.npu.max_memory_allocated(device)
    return 0


def _build_boosted_model(rank, model_name_or_path="openlm-research/open_llama_7b"):
    """Load the HF LLaMA model in fp16 and wrap it with Booster + GeminiPlugin.

    Returns:
        tuple: (boosted model, boosted optimizer, booster).
    """
    if rank == 0:
        print(f"Loading model from: {model_name_or_path}")

    base_model = LlamaForCausalLM.from_pretrained(model_name_or_path)
    base_model = base_model.half()  # mixed precision (fp16 weights)

    base_optimizer = optim.SGD(base_model.parameters(), lr=1e-3)

    # NOTE(review): `colossalai` is imported at file level but never launched;
    # the documented entry point is `colossalai.launch_from_torch()`. Manual
    # `dist.init_process_group` appears to work for this demo — confirm
    # against the ColossalAI 0.4.9 docs before relying on it.
    plugin = GeminiPlugin(
        placement_policy='static',
        precision='fp16'
        # other optional parameters: pin_memory, initial_scale
    )
    booster = Booster(plugin=plugin)

    model, optimizer, _, _, _ = booster.boost(
        model=base_model,
        optimizer=base_optimizer
    )
    return model, optimizer, booster


def _run_training(model, optimizer, booster, monitor, device, rank, steps):
    """Run *steps* training iterations on random tokens; return timing totals.

    Returns:
        tuple[float, float]: (total step time, total monitor overhead), seconds.
    """
    total_monitor_time = 0.0
    total_step_time = 0.0

    # Minimal random batch: token ids in [0, 32000) — presumably the LLaMA
    # vocab size; labels == inputs gives a standard causal-LM loss.
    input_ids = torch.randint(0, 32000, (2, 16), dtype=torch.long, device=device)

    model.train()

    for step in range(1, steps + 1):
        step_start = time.time()

        optimizer.zero_grad()

        # Forward pass; HF models return the loss when `labels` is given.
        outputs = model(input_ids, labels=input_ids)
        loss = outputs.loss if hasattr(outputs, "loss") else outputs[0].mean()

        # Backward must go through the booster so Gemini/ZeRO hooks fire.
        booster.backward(loss, optimizer)

        optimizer.step()

        # Measure the monitoring overhead separately from the step itself.
        mon_start = time.time()
        monitor.step_end(model)
        mon_time = time.time() - mon_start
        total_monitor_time += mon_time

        step_duration = time.time() - step_start
        total_step_time += step_duration

        if rank == 0:
            print(f"Step {step}, loss={loss.item():.4f}, step_time={step_duration:.4f}s "
                  f"(monitor overhead={mon_time:.4f}s)")

    return total_step_time, total_monitor_time


def _print_summary(steps, total_step_time, total_monitor_time, used_mem_mb):
    """Print the rank-0 end-of-run monitoring summary."""
    avg_step_time = total_step_time / steps if steps > 0 else 0
    avg_mon_time = total_monitor_time / steps if steps > 0 else 0
    overhead_ratio = (avg_mon_time / avg_step_time * 100) if avg_step_time > 0 else 0

    print("\n=== Monitoring Summary ===")
    print(f"Total steps: {steps}")
    print(f"Avg step time = {avg_step_time:.4f} s")
    print(f"Avg monitor overhead = {avg_mon_time:.4f} s => ratio = {overhead_ratio:.2f}%")
    print(f"Used mem (real) = {used_mem_mb:.2f} MB")


def main():
    """
    ZeRO-3-style sharded training via Booster + GeminiPlugin (ColossalAI 0.4.9).

    Minimal demo of training and monitoring a LLaMA model on Huawei Ascend
    NPUs (torch_npu + HCCL), with a CPU fallback when no NPU is present.
    """
    rank, local_rank, world_size = _init_distributed()
    device = _select_device(local_rank)

    print(f"[INFO] rank={rank}/{world_size}, local_rank={local_rank}, device={device}")

    # Baseline peak-memory reading, taken before the model is materialised.
    baseline_mem = _max_allocated_bytes(device)

    model, optimizer, booster = _build_boosted_model(rank)

    monitor = ColossalAIMonitor(config_path="monitor_config.json")
    # Register hooks on the *boosted* (wrapped) model.
    monitor.monitor(model)

    steps = 5
    total_step_time, total_monitor_time = _run_training(
        model, optimizer, booster, monitor, device, rank, steps
    )

    monitor.stop()

    dist.barrier()
    dist.destroy_process_group()

    if rank == 0:
        used_mem_mb = (_max_allocated_bytes(device) - baseline_mem) / (1024 ** 2)
        _print_summary(steps, total_step_time, total_monitor_time, used_mem_mb)


if __name__ == "__main__":
    """
    运行方式示例:
        torchrun --nproc_per_node=2 train_llama2_npu_dp_local.py
    要求:
        1) 在docker镜像 hpcaitech/pytorch-npu:2.4.0 内
        2) 安装 ColossalAI==0.4.9+ (pip install colossalai==0.4.9)
        3) backend='hccl', device='npu'
    """
    main()
