"""
Example of using ColoMonitor with Colossal AI's distributed training features.
This example demonstrates monitoring a model trained with ZeRO-3 and tensor parallelism.
"""

import os
import torch
import torch.nn as nn
from colossalai.booster import Booster
from colossalai.booster.plugin import ZeROPlugin
from colossalai.cluster import DistCoordinator
from colossalai.context import ParallelContext
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device

from colo_monitor import MonitorConfig, TrainerMon
from colo_monitor.hooks import GradientHook, ActivationHook, OptimizerHook

# Model definition: a small three-layer MLP used as the monitoring target.
class SimpleModel(nn.Module):
    """Three stacked linear layers of equal width with ReLU in between.

    Input and output both have ``hidden_size`` features; the output of the
    final layer is returned without an activation.
    """

    def __init__(self, hidden_size=1024):
        super().__init__()
        self.linear1 = nn.Linear(hidden_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply linear -> ReLU twice, then the final linear projection."""
        for hidden_layer in (self.linear1, self.linear2):
            x = self.relu(hidden_layer(x))
        return self.linear3(x)

def main():
    """Run a short demo training loop with ColoMonitor attached.

    Launches a single-process distributed environment, wraps a small MLP
    with ZeRO-3 (fp16) via Colossal AI's Booster, registers gradient,
    activation and optimizer hooks on a ``TrainerMon``, and trains on
    random data for 100 steps while logging the loss.
    """
    # Initialise the (single-process) distributed environment.
    launch(
        config={},
        rank=0,
        world_size=1,
        host='localhost',
        port=29500,
        backend='nccl'
    )

    # Distributed helpers: coordinator for rank bookkeeping, rank-aware logger.
    coordinator = DistCoordinator()
    logger = get_dist_logger()

    # The model to be monitored.
    model = SimpleModel()

    # ZeRO stage-3 plugin: shards parameters/gradients/optimizer states,
    # offloads them to CPU, and overlaps communication with computation.
    zero_plugin = ZeROPlugin(
        stage=3,
        max_norm=1.0,
        initial_scale=2**16,
        min_num_params=1e8,
        max_num_params=1e9,
        max_num_grads=1e9,
        max_num_optim_states=1e9,
        offload_optimizer_device="cpu",
        offload_param_device="cpu",
        overlap_comm=True
    )

    # Booster applies the plugin together with fp16 mixed precision.
    booster = Booster(
        plugin=zero_plugin,
        mixed_precision="fp16"
    )

    # Optimizer (CPU/GPU hybrid Adam, suited for offloaded ZeRO states).
    optimizer = HybridAdam(model.parameters(), lr=1e-4)

    # Wrap model and optimizer; the boosted criterion and dataloaders
    # returned by boost() are not used in this synthetic example.
    model, optimizer, _, _, _ = booster.boost(
        model=model,
        optimizer=optimizer,
        criterion=nn.MSELoss(),
        train_dataloader=None,
        test_dataloader=None
    )

    # Monitoring configuration: write TensorBoard/CSV/API output, detect
    # anomalies, and account for 4-step gradient accumulation.
    config = MonitorConfig(
        output_dir="./monitor_output",
        log_interval=1,
        anomaly_detection=True,
        tensorboard=True,
        csv=True,
        api=True,
        gradient_norm_threshold=1.0,
        outlier_threshold=3.0,
        gradient_accumulation_steps=4
    )

    # The monitor is a context manager so its resources are released on exit.
    with TrainerMon(config) as monitor:
        # Attach the (boosted) model and optimizer to the monitor.
        monitor.set_model(model)
        monitor.set_optimizer(optimizer)

        # Register statistics hooks for gradients, activations and optimizer state.
        monitor.register_hook(GradientHook(
            pre_allreduce=True,
            post_allreduce=True,
            norm=True,
            max=True,
            min=True,
            mean=True
        ))
        monitor.register_hook(ActivationHook(
            norm=True,
            max=True,
            min=True,
            mean=True
        ))
        monitor.register_hook(OptimizerHook(
            state=True,
            step=True
        ))

        # Build the loss module once instead of re-instantiating nn.MSELoss()
        # on every step of the loop below.
        criterion = nn.MSELoss()

        # Simulated training loop on random data.
        device = get_current_device()
        for step in range(100):
            # Generate a random input batch and random regression targets.
            batch_size = 32
            hidden_size = 1024
            x = torch.randn(batch_size, hidden_size, device=device)
            y = torch.randn(batch_size, hidden_size, device=device)

            # Forward pass (timed/observed by the monitor).
            with monitor.forward_start():
                output = model(x)

            # Compute the loss.
            loss = criterion(output, y)

            # Backward pass.
            # NOTE(review): with fp16 mixed precision, Colossal AI examples
            # typically call booster.backward(loss, optimizer) so loss scaling
            # is applied — confirm plain loss.backward() is intended here.
            with monitor.backward_start():
                loss.backward()

            # Optimizer step and gradient reset.
            with monitor.optimizer_start():
                optimizer.step()
                optimizer.zero_grad()

            # Record the scalar loss with the monitor.
            monitor.log_metric("loss", loss.item(), step)

            if step % 10 == 0:
                logger.info(f"Step {step}, Loss: {loss.item():.4f}")

    logger.info("Training completed!")

# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()