"""
LLaMA模型训练监控示例，展示如何使用监控系统监控LLaMA模型训练过程。
"""
import os
import torch
import argparse
from torch.utils.data import DataLoader
from transformers import LlamaForCausalLM, LlamaConfig, LlamaTokenizer

# Import the monitoring system (package lives one directory above this script)
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from colo_monitor import TrainerMon, MonitorConfig


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpus", type=int, default=1, help="number of gpus")
    parser.add_argument("--batch_size", type=int, default=2, help="batch size")
    parser.add_argument("--seq_len", type=int, default=512, help="sequence length")
    parser.add_argument("--output_dir", type=str, default="./monitor_output", help="monitor output directory")
    parser.add_argument("--num_batches", type=int, default=20, help="number of batches to run")
    args = parser.parse_args()
    
    # 确保输出目录存在
    os.makedirs(args.output_dir, exist_ok=True)
    
    # 创建LLaMA配置和模型
    print("Creating model")
    config = LlamaConfig(
        hidden_size=1024,
        intermediate_size=2048,
        num_attention_heads=8,
        num_hidden_layers=6,
        vocab_size=32000,
        max_position_embeddings=args.seq_len
    )
    model = LlamaForCausalLM(config)
    
    # 创建样例数据
    print("Creating data")
    data = torch.randint(0, config.vocab_size, (args.batch_size * 10, args.seq_len))
    labels = torch.randint(0, config.vocab_size, (args.batch_size * 10, args.seq_len))
    dataset = torch.utils.data.TensorDataset(data, labels)
    dataloader = DataLoader(dataset, batch_size=args.batch_size)
    
    # 定义优化器
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    
    # 创建监控配置
    monitor_config = MonitorConfig(
        output_dir=args.output_dir,
        log_interval=1,
        anomaly_detection=True,
        tensorboard=True,
        csv=True,
        monitor_memory=True,
        monitor_performance=True
    )
    
    # 创建监控器
    print("Creating monitor")
    with TrainerMon(config=monitor_config) as monitor:
        # 设置模型和优化器
        monitor.set_model(model)
        monitor.set_optimizer(optimizer)
        
        # 注册钩子
        monitor.register_hooks()
        
        # 开始训练
        print("Starting training")
        model.train()
        
        # 训练循环
        for epoch in range(1):
            for batch_idx, (inputs, targets) in enumerate(dataloader):
                # 记录步骤开始
                timer_id = monitor.step_start()
                
                # 记录前向传播开始
                forward_id = monitor.forward_start()
                
                # 前向传播
                outputs = model(inputs)
                loss = torch.nn.functional.cross_entropy(
                    outputs.logits.view(-1, outputs.logits.size(-1)),
                    targets.view(-1)
                )
                
                # 记录前向传播结束
                monitor.forward_end(forward_id)
                
                # 记录反向传播开始
                backward_id = monitor.backward_start()
                
                # 反向传播
                optimizer.zero_grad()
                loss.backward()
                
                # 记录反向传播结束
                monitor.backward_end(backward_id)
                
                # 记录优化器步骤开始
                optimizer_id = monitor.optimizer_step_start()
                
                # 优化器步骤
                optimizer.step()
                
                # 记录优化器步骤结束
                monitor.optimizer_step_end(optimizer_id)
                
                # 记录步骤结束，并更新样本数
                monitor.step_end(timer_id, samples=len(inputs))
                
                # 更新步骤
                monitor.update_step()
                
                # 记录指标
                monitor.log_step_stats()
                
                # 打印进度
                if batch_idx % 5 == 0:
                    print(f"Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}")
                
                # 仅运行指定数量的批次作为示例
                if batch_idx >= args.num_batches - 1:
                    break
        
        # 获取性能统计
        perf_stats = monitor.get_performance_stats()
        print("\nPerformance statistics:")
        for key, val in perf_stats.items():
            print(f"  {key}: {val}")
        
        # 获取内存统计
        mem_stats = monitor.get_peak_memory()
        print("\nPeak memory usage:")
        for key, val in mem_stats.items():
            print(f"  {key}: {val}")

if __name__ == "__main__":
    main() 