import torch
from collections import OrderedDict
from os import path as osp
from tqdm import tqdm

from basicsr.archs import build_network
from basicsr.losses import build_loss
from basicsr.metrics import calculate_metric
from basicsr.utils import get_root_logger, imwrite, tensor2img
from basicsr.utils.registry import MODEL_REGISTRY
from .sr_model import SRModel


@MODEL_REGISTRY.register()
class SRDiffModel(SRModel):
    """SRDiff model for super-resolution based on diffusion models.

    Extends the basic SRModel with the standard DDPM-style objective: during
    training the network predicts the Gaussian noise mixed into the
    ground-truth image at a randomly sampled timestep, and a pixel loss is
    computed between predicted and actual noise. At test time the output is
    drawn by running the diffusion sampler conditioned on the low-quality
    input.
    """

    def __init__(self, opt):
        """Initialize the SRDiff model.

        Args:
            opt (dict): Configuration options, same structure as for SRModel.
        """
        super(SRDiffModel, self).__init__(opt)

    def init_training_settings(self):
        """Initialize training-related settings.

        Raises:
            ValueError: If no pixel loss is configured — it is the sole
                noise-prediction objective, so training cannot proceed
                without it.
        """
        super(SRDiffModel, self).init_training_settings()
        if self.cri_pix is None:
            raise ValueError('Pixel loss is required for SRDiff model.')

    def optimize_parameters(self, current_iter):
        """Run one training step: noise prediction, loss, optimizer update.

        Args:
            current_iter (int): Current iteration number (kept for interface
                compatibility with other BasicSR models; not used here).
        """
        self.optimizer_g.zero_grad()
        # Ground-truth Gaussian noise that the network must reconstruct.
        noise = torch.randn_like(self.gt)
        # Unwrap (Distributed)DataParallel so custom attributes such as
        # `num_timesteps` are reachable; a bare module passes through as-is.
        bare_net_g = getattr(self.net_g, 'module', self.net_g)
        # Uniformly sample one diffusion timestep per image in the batch.
        t = torch.randint(0, bare_net_g.num_timesteps, (self.lq.shape[0],), device=self.lq.device)
        # The network takes the (LQ, GT) pair plus timestep and noise, and
        # returns its estimate of the injected noise.
        predicted_noise = self.net_g((self.lq, self.gt), t, noise)

        l_total = 0
        loss_dict = OrderedDict()

        # Pixel-wise loss between predicted and actual noise (DDPM objective).
        if self.cri_pix:
            l_pix = self.cri_pix(predicted_noise, noise)
            l_total += l_pix
            loss_dict['l_pix'] = l_pix

        # Backward pass and optimizer step.
        l_total.backward()
        self.optimizer_g.step()

        # Reduce loss values across processes for logging.
        self.log_dict = self.reduce_loss_dict(loss_dict)

        # Keep the exponential moving average of the weights in sync.
        if self.ema_decay > 0:
            self.model_ema(decay=self.ema_decay)

    def test(self):
        """Generate the super-resolution output for the current LQ input.

        Prefers the EMA network when available. The sampler runs a reduced
        number of steps to speed up validation; this defaults to 25 (the
        original hard-coded value) and can be overridden via the
        ``val.num_sample_timesteps`` option.
        """
        # Few-step sampling accelerates validation; 25 preserves the
        # previous behavior when the option is absent.
        num_timesteps = self.opt.get('val', {}).get('num_sample_timesteps', 25)
        use_ema = hasattr(self, 'net_g_ema')
        net = self.net_g_ema if use_ema else self.net_g
        net.eval()
        with torch.no_grad():
            # Unwrap (Distributed)DataParallel to reach the custom `sample`
            # method on the underlying network.
            bare_net = getattr(net, 'module', net)
            self.output = bare_net.sample(self.lq, num_timesteps=num_timesteps)
        if not use_ema:
            # Restore training mode for the regular generator only; the EMA
            # copy is inference-only and stays in eval mode.
            self.net_g.train()

    def test_selfensemble(self):
        """Self-ensemble test with TTA (Test-Time Augmentation).

        NOTE(review): geometric self-ensemble is not implemented for the
        diffusion sampler; this currently falls back to a plain ``test()``
        call with no augmentation.
        """
        self.test()
