import torch
import logging
from tqdm import tqdm
from trainers.base_trainer import BaseTrainer
from utils.visualization import visualize_results
import torch.nn.functional as F

logger = logging.getLogger(__name__)

class DDPMTrainer(BaseTrainer):
    """Trainer for a DDPM-based image-inpainting model.

    Relies on attributes initialized by ``BaseTrainer`` (``model``,
    ``optimizer``, ``scheduler``, ``device``, ``args``, ``eval_loader``,
    ``global_step``).  The wrapped model is expected to expose
    ``forward(gt_image, mask) -> loss``, ``p_losses(...)``, ``timesteps``,
    and a RePaint-style ``inpaint(...)`` method — TODO confirm against the
    model implementation, which is not visible from this file.
    """

    @staticmethod
    def _ensure_mask_channel(mask):
        """Return ``mask`` shaped [B, 1, H, W], adding a channel dim if it is [B, H, W]."""
        return mask.unsqueeze(1) if mask.dim() == 3 else mask

    def _batch_to_device(self, batch):
        """Move one dataloader batch to the training device.

        Args:
            batch: dict with keys ``'masked_image'``, ``'mask'``, ``'gt_image'``.

        Returns:
            tuple: ``(masked_image, mask, gt_image)`` on ``self.device``,
            with ``mask`` normalized to shape [B, 1, H, W].
        """
        masked_image = batch['masked_image'].to(self.device)
        mask = self._ensure_mask_channel(batch['mask'].to(self.device))
        gt_image = batch['gt_image'].to(self.device)
        return masked_image, mask, gt_image

    def _inpaint_first_sample(self, masked_image, mask):
        """Run RePaint-style inpainting on the first sample of the batch.

        Kept as a single helper so the visualization paths in ``visualize``
        and ``evaluate`` cannot drift apart in their sampler settings.
        """
        return self.model.inpaint(
            masked_image[0:1],
            mask[0:1],
            num_resample_U=self.args.num_resample_U,
            jump_length=self.args.repaint_jump_length,
            jump_n_sample=self.args.repaint_jump_n_sample,
            jump_start_resampling_step_T_fraction=self.args.repaint_jump_start_fraction,
            use_repaint_schedule_fig9_approx=self.args.use_repaint_schedule_approx,
        )

    def train_step(self, batch):
        """Run one optimization step.

        Args:
            batch: dict with keys ``'mask'`` and ``'gt_image'`` (the
                ``'masked_image'`` entry is not needed for the training loss,
                so its device transfer is skipped).

        Returns:
            float: the scalar training loss for this step.
        """
        mask = self._ensure_mask_channel(batch['mask'].to(self.device))
        gt_image = batch['gt_image'].to(self.device)

        # Forward pass: the model computes its own diffusion loss.
        loss = self.model(gt_image, mask)

        self.optimizer.zero_grad()
        loss.backward()

        # Optional gradient clipping for training stability.
        if self.args.max_grad_norm > 0:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)

        self.optimizer.step()
        # NOTE(review): the LR scheduler is stepped per batch, not per epoch —
        # assumed intentional for a warmup/decay schedule; confirm in BaseTrainer.
        self.scheduler.step()

        return loss.item()

    def visualize(self, batch, step):
        """Inpaint the first sample of ``batch`` and save a comparison figure.

        Args:
            batch: dict with ``'masked_image'``, ``'mask'``, ``'gt_image'``.
            step: training step used to name the saved visualization.
        """
        self.model.eval()  # sampling must run in eval mode
        try:
            with torch.no_grad():
                masked_image, mask, gt_image = self._batch_to_device(batch)
                output = self._inpaint_first_sample(masked_image, mask)
                visualize_results(
                    masked_image[0],
                    mask[0],
                    gt_image[0],
                    output[0],
                    step,
                    self.args.output_dir,
                    prefix="train_vis"  # distinguishes training-time visualizations
                )
        finally:
            # Restore training mode even if sampling/plotting raises.
            self.model.train()

    def evaluate(self):
        """Evaluate on ``self.eval_loader``.

        Computes the plain DDPM noise-prediction loss as a monitoring metric
        (restoration-quality metrics such as LPIPS/PSNR/SSIM are not
        implemented here) and visualizes the first batch via inpainting.

        Returns:
            float: average noise-prediction loss over the eval set.  This is
            what early stopping currently keys on; a true inpainting-quality
            metric (RePaint used LPIPS + user studies) would be preferable.
        """
        self.model.eval()
        total_loss_on_noise_pred = 0.0
        num_batches = 0

        with torch.no_grad():
            with tqdm(self.eval_loader, desc="Evaluating") as pbar:
                for i, batch in enumerate(pbar):
                    masked_image, mask, gt_image = self._batch_to_device(batch)

                    # 1. Plain DDPM loss on random timesteps/noise — monitors
                    # the model's base denoising ability, not inpainting quality.
                    t_eval = torch.randint(0, self.model.timesteps, (gt_image.shape[0],), device=self.device).long()
                    noise_eval = torch.randn_like(gt_image)
                    loss_noise_pred = self.model.p_losses(gt_image, t_eval, mask=None, noise=noise_eval)
                    total_loss_on_noise_pred += loss_noise_pred.item()
                    num_batches += 1

                    pbar.set_postfix(noise_loss=loss_noise_pred.item(), avg_noise_loss=total_loss_on_noise_pred/num_batches)

                    # 2. Visualize only the first batch to keep evaluation fast.
                    if i == 0:
                        output = self._inpaint_first_sample(masked_image, mask)
                        visualize_results(
                            masked_image[0],
                            mask[0],
                            gt_image[0],
                            output[0],
                            self.global_step,
                            self.args.output_dir,
                            prefix="eval_vis"
                        )

        # Guard against an empty eval loader (would otherwise divide by zero).
        avg_noise_loss = total_loss_on_noise_pred / max(num_batches, 1)
        logger.info(f"Validation DDPM Noise Prediction Loss (for monitoring): {avg_noise_loss:.4f}")

        self.model.train()
        return avg_noise_loss