"""
Benchmark script to measure the performance impact of ColoMonitor.
This script tests different model sizes and parallel strategies.
"""

import os
import time
import torch
import torch.nn as nn
from colossalai.booster import Booster
from colossalai.booster.plugin import ZeROPlugin
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger

from colo_monitor import MonitorConfig, TrainerMon
from colo_monitor.hooks import GradientHook, ActivationHook, OptimizerHook

class BenchmarkModel(nn.Module):
    """Synthetic constant-width MLP used as the benchmark workload.

    The network is a stack of ``num_layers`` identical blocks, each of the
    form Linear -> ReLU -> Linear -> ReLU, so both input and output have
    shape ``(batch, hidden_size)``.
    """

    def __init__(self, hidden_size=1024, num_layers=12):
        super().__init__()
        blocks = []
        for _ in range(num_layers):
            block = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.ReLU(),
                nn.Linear(hidden_size, hidden_size),
                nn.ReLU(),
            )
            blocks.append(block)
        self.layers = nn.ModuleList(blocks)

    def forward(self, x):
        """Apply every block in order; the tensor shape is preserved."""
        out = x
        for block in self.layers:
            out = block(out)
        return out

def benchmark_with_monitor(model_size, num_steps=100, batch_size=32):
    """Run the synthetic training loop with ColoMonitor enabled and time it.

    Args:
        model_size: Target model scale; the per-layer width is derived as
            ``model_size // 12``.  NOTE(review): this maps a *parameter count*
            to a layer *width*, which does not actually yield ``model_size``
            parameters -- confirm the intended sizing rule.
        num_steps: Number of optimizer steps to run and time.
        batch_size: Samples per step.

    Returns:
        dict with wall-clock ``'time'`` in seconds, CUDA ``'memory'`` delta in
        bytes (0 on CPU-only machines), and ``'throughput'`` in steps/second.
    """
    hidden_size = model_size // 12  # assume 12 equally sized layers
    model = BenchmarkModel(hidden_size=hidden_size)

    config = MonitorConfig(
        output_dir=f"./benchmark_output/{model_size}",
        log_interval=1,
        anomaly_detection=True,
        tensorboard=True
    )

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    model = model.to(device)

    if use_cuda:
        # CUDA ops are asynchronous: flush pending work (e.g. the .to(device)
        # copy) before starting the clock, or the timing is skewed.
        torch.cuda.synchronize()
    start_time = time.time()
    memory_before = torch.cuda.memory_allocated() if use_cuda else 0

    with TrainerMon(config) as monitor:
        monitor.set_model(model)
        monitor.set_optimizer(optimizer)

        monitor.register_hook(GradientHook(
            pre_allreduce=True,
            post_allreduce=True,
            norm=True,
            max=True,
            min=True,
            mean=True
        ))

        for step in range(num_steps):
            x = torch.randn(batch_size, hidden_size, device=device)
            y = torch.randn(batch_size, hidden_size, device=device)

            with monitor.forward_start():
                output = model(x)

            loss = nn.MSELoss()(output, y)

            with monitor.backward_start():
                loss.backward()

            with monitor.optimizer_start():
                optimizer.step()
                optimizer.zero_grad()

    if use_cuda:
        # Wait for all queued kernels before stopping the clock.
        torch.cuda.synchronize()
    end_time = time.time()
    memory_after = torch.cuda.memory_allocated() if use_cuda else 0

    elapsed = end_time - start_time
    return {
        'time': elapsed,
        'memory': memory_after - memory_before,
        'throughput': num_steps / elapsed
    }

def benchmark_without_monitor(model_size, num_steps=100, batch_size=32):
    """Run the same synthetic training loop with no monitoring (baseline).

    Args:
        model_size: Target model scale; per-layer width is ``model_size // 12``
            (same sizing rule as :func:`benchmark_with_monitor` so the two
            runs are comparable).
        num_steps: Number of optimizer steps to run and time.
        batch_size: Samples per step.

    Returns:
        dict with wall-clock ``'time'`` in seconds, CUDA ``'memory'`` delta in
        bytes (0 on CPU-only machines), and ``'throughput'`` in steps/second.
    """
    hidden_size = model_size // 12
    model = BenchmarkModel(hidden_size=hidden_size)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    model = model.to(device)

    if use_cuda:
        # CUDA ops are asynchronous: flush pending work before starting the
        # clock so the measured interval covers only the training loop.
        torch.cuda.synchronize()
    start_time = time.time()
    memory_before = torch.cuda.memory_allocated() if use_cuda else 0

    for step in range(num_steps):
        x = torch.randn(batch_size, hidden_size, device=device)
        y = torch.randn(batch_size, hidden_size, device=device)

        output = model(x)
        loss = nn.MSELoss()(output, y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    if use_cuda:
        # Wait for all queued kernels before stopping the clock.
        torch.cuda.synchronize()
    end_time = time.time()
    memory_after = torch.cuda.memory_allocated() if use_cuda else 0

    elapsed = end_time - start_time
    return {
        'time': elapsed,
        'memory': memory_after - memory_before,
        'throughput': num_steps / elapsed
    }

def main():
    """Benchmark monitored vs. unmonitored training and report the overhead."""
    # Target model sizes in parameters: 100M, 1B, 10B.
    # (Bug fix: this was written as `[100M, 1B, 10B]`, which is not valid
    # Python syntax and made the whole module unimportable.)
    model_sizes = [100_000_000, 1_000_000_000, 10_000_000_000]
    results = {}

    def _pct_overhead(new, base):
        # Percentage overhead of `new` relative to `base`.  Guard against
        # base == 0: on CPU-only machines torch.cuda.memory_allocated() is
        # always 0, so the memory delta baseline can legitimately be zero.
        return (new - base) / base * 100 if base else float('inf')

    for size in model_sizes:
        print(f"\nTesting model size: {size}")

        # Benchmark with monitoring enabled.
        print("Running benchmark with monitoring...")
        with_monitor = benchmark_with_monitor(size)

        # Baseline benchmark without monitoring.
        print("Running benchmark without monitoring...")
        without_monitor = benchmark_without_monitor(size)

        # Compute relative overheads of monitoring.
        time_overhead = _pct_overhead(with_monitor['time'], without_monitor['time'])
        memory_overhead = _pct_overhead(with_monitor['memory'], without_monitor['memory'])
        throughput_degradation = _pct_overhead(without_monitor['throughput'], with_monitor['throughput'])

        results[size] = {
            'time_overhead': time_overhead,
            'memory_overhead': memory_overhead,
            'throughput_degradation': throughput_degradation
        }

        print(f"Results for {size} parameter model:")
        print(f"Time overhead: {time_overhead:.2f}%")
        print(f"Memory overhead: {memory_overhead:.2f}%")
        print(f"Throughput degradation: {throughput_degradation:.2f}%")

    # Persist results for later comparison.
    import json
    os.makedirs("./benchmark_output", exist_ok=True)
    with open("./benchmark_output/results.json", "w") as f:
        json.dump(results, f, indent=4)

if __name__ == "__main__":
    main()