"""
Example of using ColoMonitor with Stable Diffusion training.
This example demonstrates monitoring the training process of a Stable Diffusion model,
including both the diffusion model and the text encoder.
"""

import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, DDPMScheduler
from diffusers.optimization import get_scheduler

from colo_monitor import MonitorConfig, TrainerMon
from colo_monitor.hooks import GradientHook, ActivationHook, OptimizerHook

class StableDiffusionTrainer:
    """Bundle the Stable Diffusion components (tokenizer, text encoder, VAE,
    UNet, noise scheduler) and expose helpers for training and sampling.

    The UNet is the only trainable component; the VAE and text encoder are
    frozen and used for encoding inputs only.
    """

    # Scaling factor between the VAE latent space and the diffusion model's
    # working space (constant published with the SD v1 VAE config).
    VAE_SCALING_FACTOR = 0.18215

    def __init__(self, model_id="runwayml/stable-diffusion-v1-5"):
        """Load all pipeline components from *model_id* and set up training.

        Args:
            model_id: Hugging Face repo id (or local path) of a Stable
                Diffusion v1-style pipeline.
        """
        # Load model components. SD pipeline repos store each component in
        # its own subfolder, so `subfolder=` is required -- loading from the
        # repo root would not find the per-component configs/weights.
        self.tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
        self.text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")
        self.vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae")
        self.unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")

        # Noise scheduler used both for adding noise at train time and for
        # the denoising loop at sample time.
        self.noise_scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")

        # Move everything to the training device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.text_encoder.to(self.device)
        self.vae.to(self.device)
        self.unet.to(self.device)

        # Only the UNet is trained; freeze the encoders so no gradients are
        # computed (or accidentally stepped) for them.
        self.vae.requires_grad_(False)
        self.text_encoder.requires_grad_(False)

        # Optimizer over the UNet parameters only.
        self.optimizer = torch.optim.AdamW(
            self.unet.parameters(),
            lr=1e-5,
            betas=(0.9, 0.999),
            weight_decay=1e-2,
            eps=1e-8
        )

        # Constant learning-rate schedule (warmup disabled).
        self.lr_scheduler = get_scheduler(
            "constant",
            optimizer=self.optimizer,
            num_warmup_steps=0,
            num_training_steps=1000
        )

        # Image preprocessing: 512x512 center crop, scaled to [-1, 1] as the
        # VAE expects.
        self.image_transform = transforms.Compose([
            transforms.Resize(512),
            transforms.CenterCrop(512),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])

    def prepare_inputs(self, image_path, prompt):
        """Load one image and tokenize one prompt.

        Returns:
            (image, text_inputs): a (1, 3, 512, 512) tensor on the training
            device and the tokenized prompt (BatchEncoding) on the device.
        """
        # Preprocess the image into a single-item batch.
        image = Image.open(image_path).convert("RGB")
        image = self.image_transform(image).unsqueeze(0).to(self.device)

        # Tokenize the prompt, padded/truncated to the model's max length.
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt"
        ).to(self.device)

        return image, text_inputs

    def train_step(self, image, text_inputs, noise_scheduler, monitor):
        """Run one optimization step on the UNet.

        Args:
            image: (B, 3, H, W) pixel tensor in [-1, 1].
            text_inputs: tokenized prompt batch (must expose `.input_ids`).
            noise_scheduler: scheduler providing `add_noise` and
                `config.num_train_timesteps`.
            monitor: TrainerMon-like object supplying the phase context
                managers used for instrumentation.

        Returns:
            The scalar loss value as a Python float.
        """
        with torch.no_grad():
            # Encode the prompt; [0] is the last hidden state used as
            # cross-attention conditioning.
            text_embeddings = self.text_encoder(text_inputs.input_ids)[0]
            # Encode pixels into the VAE latent space: the SD UNet operates
            # on 4-channel latents, not on the 3-channel RGB image (see
            # `sample`, which generates/decodes latents for the same reason).
            latents = self.vae.encode(image).latent_dist.sample()
            latents = latents * self.VAE_SCALING_FACTOR

        # Sample noise and a random timestep per batch element, then produce
        # the noisy latents the UNet must denoise.
        noise = torch.randn_like(latents)
        timesteps = torch.randint(
            0, noise_scheduler.config.num_train_timesteps,
            (latents.shape[0],), device=self.device
        )
        noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

        # Forward pass (instrumented).
        with monitor.forward_start():
            noise_pred = self.unet(
                noisy_latents, timesteps, encoder_hidden_states=text_embeddings
            ).sample

        # Standard epsilon-prediction objective: MSE against the true noise.
        loss = nn.MSELoss()(noise_pred, noise)

        # Backward pass (instrumented).
        with monitor.backward_start():
            loss.backward()

        # Optimizer step (instrumented).
        with monitor.optimizer_start():
            self.optimizer.step()
            self.lr_scheduler.step()
            self.optimizer.zero_grad()

        return loss.item()

    def sample(self, prompt, num_inference_steps=50):
        """Generate one image from *prompt* via the full denoising loop.

        Returns:
            A PIL.Image of the decoded sample.
        """
        # Tokenize and encode the prompt.
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt"
        ).to(self.device)

        with torch.no_grad():
            text_embeddings = self.text_encoder(text_inputs.input_ids)[0]

        # Start from pure Gaussian noise in latent space
        # (64x64 latents decode to 512x512 pixels).
        latents = torch.randn((1, 4, 64, 64), device=self.device)

        # Iterative denoising over the scheduler's timestep sequence.
        self.noise_scheduler.set_timesteps(num_inference_steps)
        for t in self.noise_scheduler.timesteps:
            with torch.no_grad():
                noise_pred = self.unet(
                    latents, t, encoder_hidden_states=text_embeddings
                ).sample
                latents = self.noise_scheduler.step(noise_pred, t, latents).prev_sample

        # Decode latents back to pixels; invert the VAE scaling first.
        with torch.no_grad():
            latents = latents / self.VAE_SCALING_FACTOR
            image = self.vae.decode(latents).sample
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.detach().cpu().permute(0, 2, 3, 1).numpy()[0]
            image = Image.fromarray((image * 255).round().astype("uint8"))

        return image

def main():
    """Entry point: wire up the monitoring system and run a demo loop."""
    trainer = StableDiffusionTrainer()

    # Monitoring configuration: all sinks enabled, per-step logging.
    monitor_config = MonitorConfig(
        output_dir="./monitor_output",
        log_interval=1,
        anomaly_detection=True,
        tensorboard=True,
        csv=True,
        api=True,
        gradient_norm_threshold=1.0,
        outlier_threshold=3.0,
    )

    with TrainerMon(monitor_config) as monitor:
        # Attach the trainable model and its optimizer to the monitor.
        monitor.set_model(trainer.unet)
        monitor.set_optimizer(trainer.optimizer)

        # Register all instrumentation hooks up front.
        hooks = (
            GradientHook(
                pre_allreduce=True,
                post_allreduce=True,
                norm=True,
                max=True,
                min=True,
                mean=True,
            ),
            ActivationHook(norm=True, max=True, min=True, mean=True),
            OptimizerHook(state=True, step=True),
        )
        for hook in hooks:
            monitor.register_hook(hook)

        # Simulated training loop over a single (image, prompt) pair.
        num_steps = 100
        image_path = "sample_image.jpg"  # replace with a real image path
        prompt = "a beautiful sunset over mountains"

        for step in range(num_steps):
            image, text_inputs = trainer.prepare_inputs(image_path, prompt)
            loss = trainer.train_step(
                image, text_inputs, trainer.noise_scheduler, monitor
            )
            monitor.log_metric("loss", loss, step)

            # Every 10 steps: report progress and save a generated sample.
            if step % 10 == 0:
                print(f"Step {step}, Loss: {loss:.4f}")
                trainer.sample(prompt).save(f"sample_{step}.png")

    print("Training completed!")

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main() 