"""
简单监控示例，展示如何使用监控系统监控基本的PyTorch模型。
"""
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from typing import Dict, List, Optional, Any

# Import the monitoring system (repo root is added to sys.path below).
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from colo_monitor import TrainerMon, MonitorConfig

class SimpleModel(nn.Module):
    """A small CNN for MNIST-style 1x28x28 inputs that outputs log-probabilities over 10 classes."""

    def __init__(self):
        super(SimpleModel, self).__init__()
        # Two conv layers feeding two fully connected layers; dropout is
        # applied after pooling and again before the final classifier.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)  # 64 channels * 12 * 12 spatial = 9216 after pooling
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Convolutional feature extraction.
        h = nn.functional.relu(self.conv1(x))
        h = nn.functional.relu(self.conv2(h))
        h = self.dropout1(nn.functional.max_pool2d(h, 2))
        # Classifier head over the flattened feature map.
        h = torch.flatten(h, 1)
        h = self.dropout2(nn.functional.relu(self.fc1(h)))
        logits = self.fc2(h)
        return nn.functional.log_softmax(logits, dim=1)

def train(monitor: TrainerMon, model: nn.Module, device: torch.device, 
          train_loader: torch.utils.data.DataLoader, optimizer: optim.Optimizer, 
          epoch: int, log_interval: int = 10):
    """Run one training epoch, bracketing each phase with monitor timing events.

    Args:
        monitor: Monitor that receives the start/end timing events and stats.
        model: Model to train.
        device: Device each batch is moved onto.
        train_loader: Training data loader.
        optimizer: Optimizer driving the parameter updates.
        epoch: Current epoch number (used only for progress printing).
        log_interval: Print progress (and log step stats) every this many batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # Whole-step timer; the forward/backward/optimizer phases below are
        # each timed individually inside it.
        timer_id = monitor.step_start()

        # Forward pass.
        forward_id = monitor.forward_start()
        output = model(data)
        loss = nn.functional.nll_loss(output, target)
        monitor.forward_end(forward_id)

        # Backward pass.
        backward_id = monitor.backward_start()
        optimizer.zero_grad()
        loss.backward()
        monitor.backward_end(backward_id)

        # Parameter update.
        optimizer_id = monitor.optimizer_step_start()
        optimizer.step()
        monitor.optimizer_step_end(optimizer_id)

        # Close the step timer (reporting the batch size) and advance the
        # monitor's step counter.
        monitor.step_end(timer_id, samples=len(data))
        monitor.update_step()

        if batch_idx % log_interval == 0:
            print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)}'
                  f' ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}')
            monitor.log_step_stats()

def test(monitor: TrainerMon, model: nn.Module, device: torch.device, 
         test_loader: torch.utils.data.DataLoader):
    """Evaluate the model on the test set, print results, and log metrics to the monitor.

    Args:
        monitor: Monitor that receives the evaluation metrics.
        model: Model to evaluate.
        device: Device each batch is moved onto.
        test_loader: Test data loader.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # Accumulate summed NLL loss and correct-prediction counts without gradients.
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            test_loss += nn.functional.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    # Normalize to a per-sample average loss and a percentage accuracy.
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)

    print(f'\nTest set: Average loss: {test_loss:.4f}, '
          f'Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.0f}%)\n')

    monitor.log_metric("test_loss", test_loss)
    monitor.log_metric("test_accuracy", accuracy)

def main():
    """主函数"""
    # 参数
    batch_size = 64
    test_batch_size = 1000
    epochs = 5
    lr = 0.01
    gamma = 0.7
    no_cuda = False
    seed = 1
    log_interval = 10
    
    # CUDA设置
    use_cuda = not no_cuda and torch.cuda.is_available()
    torch.manual_seed(seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    
    # 数据加载
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    
    train_dataset = torchvision.datasets.MNIST('./data', train=True, download=True, transform=transform)
    test_dataset = torchvision.datasets.MNIST('./data', train=False, transform=transform)
    
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=test_batch_size)
    
    # 创建模型
    model = SimpleModel().to(device)
    optimizer = optim.SGD(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
    
    # 创建监控配置
    config = MonitorConfig(
        output_dir="./monitor_output",
        log_interval=1,
        anomaly_detection=True,
        tensorboard=True,
        csv=True,
        monitor_memory=True,
        monitor_performance=True
    )
    
    # 创建监控器
    with TrainerMon(config=config) as monitor:
        # 设置模型和优化器
        monitor.set_model(model)
        monitor.set_optimizer(optimizer)
        
        # 注册钩子
        monitor.register_hooks()
        
        # 训练循环
        for epoch in range(1, epochs + 1):
            train(monitor, model, device, train_loader, optimizer, epoch, log_interval)
            test(monitor, model, device, test_loader)
            scheduler.step()
        
        # 获取性能统计
        perf_stats = monitor.get_performance_stats()
        print("Performance statistics:")
        for key, val in perf_stats.items():
            print(f"  {key}: {val}")
        
        # 获取内存统计
        mem_stats = monitor.get_peak_memory()
        print("Peak memory usage:")
        for key, val in mem_stats.items():
            print(f"  {key}: {val}")

if __name__ == "__main__":
    main() 