"""
Example of using ColoMonitor with multimodal model training.
This example demonstrates monitoring a model that processes both images and text.
"""

import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image
import torchvision.models as models
from transformers import BertTokenizer, BertModel

from colo_monitor import MonitorConfig, TrainerMon
from colo_monitor.hooks import GradientHook, ActivationHook, OptimizerHook

class MultimodalModel(nn.Module):
    """Multimodal classifier fusing ResNet-50 image features with BERT text features.

    Both modalities are projected to a shared 512-dim space, concatenated,
    and classified by a small MLP head.
    """

    def __init__(self, num_classes=1000):
        super().__init__()
        # Image encoder (pretrained ResNet-50); replace its classification
        # head with a projection into the shared 512-dim embedding space.
        # NOTE: `pretrained=True` is the legacy torchvision API; newer
        # versions prefer `weights=ResNet50_Weights.DEFAULT`.
        self.image_encoder = models.resnet50(pretrained=True)
        self.image_encoder.fc = nn.Linear(2048, 512)

        # Text encoder (pretrained BERT) + projection from BERT's 768-dim
        # [CLS] embedding to the shared 512-dim space.
        self.text_encoder = BertModel.from_pretrained('bert-base-uncased')
        self.text_proj = nn.Linear(768, 512)

        # Fusion head over the concatenated image + text features.
        self.fusion = nn.Sequential(
            nn.Linear(1024, 512),  # 512 (image) + 512 (text) = 1024
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(512, num_classes)
        )

        # Standard ImageNet preprocessing for the image branch.
        self.image_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
        ])

        # Tokenizer for the text branch.
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    def process_image(self, image):
        """Load (if given a file path) and preprocess a single image to a tensor."""
        if isinstance(image, str):
            image = Image.open(image).convert('RGB')
        return self.image_transform(image)

    def process_text(self, text):
        """Tokenize a string or list of strings into BERT input tensors (CPU)."""
        return self.tokenizer(text, padding=True, truncation=True,
                            max_length=128, return_tensors='pt')

    def forward(self, image, text):
        """Classify an image batch paired with a list of raw text strings.

        Args:
            image: float tensor of shape (batch, 3, 224, 224).
            text: string or list of strings, tokenized internally.

        Returns:
            Logits tensor of shape (batch, num_classes).
        """
        # Image branch.
        image_features = self.image_encoder(image)

        # Text branch. BUG FIX: the tokenizer always returns CPU tensors, so
        # move them to the same device as the image batch before encoding;
        # otherwise forward() crashes with a device mismatch on GPU.
        text_inputs = self.process_text(text)
        text_inputs = {k: v.to(image.device) for k, v in text_inputs.items()}
        text_outputs = self.text_encoder(**text_inputs)
        # Use the [CLS] token embedding as the sentence representation.
        text_features = self.text_proj(text_outputs.last_hidden_state[:, 0, :])

        # Fuse the two modalities and classify.
        combined_features = torch.cat([image_features, text_features], dim=1)
        output = self.fusion(combined_features)

        return output

def main():
    """Run a monitored mock training loop over random multimodal batches."""
    # Create the model.
    model = MultimodalModel()

    # FIX: move the model to the target device BEFORE registering it with
    # the monitor and constructing the optimizer, so both see the
    # device-resident parameters from the start (PyTorch recommends
    # constructing optimizers after the model is on its final device).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Configure the monitoring system.
    config = MonitorConfig(
        output_dir="./monitor_output",
        log_interval=1,
        anomaly_detection=True,
        tensorboard=True,
        csv=True,
        api=True,
        gradient_norm_threshold=1.0,
        outlier_threshold=3.0
    )

    # Create the monitor as a context manager so resources are released.
    with TrainerMon(config) as monitor:
        # Attach the model and optimizer to the monitor.
        monitor.set_model(model)
        monitor.set_optimizer(torch.optim.Adam(model.parameters(), lr=1e-4))

        # Register gradient / activation / optimizer hooks.
        monitor.register_hook(GradientHook(
            pre_allreduce=True,
            post_allreduce=True,
            norm=True,
            max=True,
            min=True,
            mean=True
        ))
        monitor.register_hook(ActivationHook(
            norm=True,
            max=True,
            min=True,
            mean=True
        ))
        monitor.register_hook(OptimizerHook(
            state=True,
            step=True
        ))

        # FIX: construct the loss module once, not once per step.
        criterion = nn.CrossEntropyLoss()

        # Mock data configuration.
        batch_size = 32
        num_steps = 100

        # Simulated training loop.
        for step in range(num_steps):
            # Generate random image and text batches.
            images = torch.randn(batch_size, 3, 224, 224, device=device)
            texts = ["Sample text " + str(i) for i in range(batch_size)]

            # Forward pass.
            with monitor.forward_start():
                output = model(images, texts)

            # Compute the loss against random targets.
            target = torch.randint(0, 1000, (batch_size,), device=device)
            loss = criterion(output, target)

            # Backward pass.
            with monitor.backward_start():
                loss.backward()

            # Optimizer step + gradient reset.
            with monitor.optimizer_start():
                monitor.optimizer.step()
                monitor.optimizer.zero_grad()

            # Record the loss.
            monitor.log_metric("loss", loss.item(), step)

            if step % 10 == 0:
                print(f"Step {step}, Loss: {loss.item():.4f}")

    print("Training completed!")

if __name__ == "__main__":
    main() 