import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from basicsr.utils.registry import ARCH_REGISTRY
from basicsr.archs.arch_util import default_init_weights


class PositionalEncoding(nn.Module):
    """Sinusoidal encoding of diffusion time steps.

    Maps a 1-D tensor of time steps with shape (batch,) to an embedding of
    shape (batch, dim): the first ``dim // 2`` channels are sines and the
    last ``dim // 2`` are cosines at geometrically spaced frequencies.
    """

    def __init__(self, dim):
        super(PositionalEncoding, self).__init__()
        # Total embedding width; split evenly between sin and cos halves.
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        # Frequencies decay geometrically from 1 down to 1/10000.
        scale = math.log(10000) / (half - 1)
        freqs = torch.exp(-scale * torch.arange(half, device=x.device))
        args = x[:, None] * freqs[None, :]
        return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)


class ResidualBlock(nn.Module):
    """Residual block conditioned on a diffusion time embedding.

    Optionally injects a spatial conditioning feature map (bilinearly resized
    to this block's resolution) between the two 3x3 convolutions. A 1x1 skip
    convolution is inserted whenever the channel count or stride changes.
    """

    def __init__(self, in_channels, out_channels, time_channels, cond_channels=None, stride=1):
        super(ResidualBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        # Projects the time embedding onto the block's feature channels.
        self.time_emb_proj = nn.Linear(time_channels, out_channels)

        # Optional 1x1 projection for the conditioning feature map.
        if cond_channels is not None:
            self.cond_conv = nn.Conv2d(cond_channels, out_channels, kernel_size=1)
        else:
            self.cond_conv = None

        # Non-inplace ReLU: keeps autograd safe when activations are reused.
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)

        needs_projection = in_channels != out_channels or stride != 1
        if needs_projection:
            self.skip_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        else:
            self.skip_conv = None

    def forward(self, x, t_emb, cond=None):
        h = self.relu(self.conv1(x))

        # Broadcast the (activated) time embedding over the spatial grid.
        h = h + self.time_emb_proj(self.relu(t_emb))[:, :, None, None]

        if self.cond_conv is not None and cond is not None:
            # Project the condition, then resize it to this block's spatial size.
            cond_emb = self.cond_conv(cond)
            cond_emb = F.interpolate(cond_emb, size=h.shape[2:], mode='bilinear', align_corners=False)
            h = h + cond_emb

        h = self.conv2(self.relu(h))

        # Residual path: identity, or a 1x1 projection when shapes differ.
        identity = self.skip_conv(x) if self.skip_conv is not None else x
        return h + identity


class UNet(nn.Module):
    """UNet architecture for noise prediction in diffusion models.

    Encoder halves spatial resolution ``num_levels`` times (doubling channels
    each level), a bottleneck block runs at the coarsest scale, and the
    decoder upsamples back while concatenating the matching encoder features.

    Args:
        in_channels (int): Channels of the noisy input image. Default: 3.
        out_channels (int): Channels of the predicted noise. Default: 3.
        base_channels (int): Channels after the initial convolution. Default: 64.
        num_levels (int): Number of down/up-sampling levels. Default: 4.
        time_channels (int): Width of the time-step embedding. Default: 256.
        cond_channels (int): Channels of the conditioning feature map fed to
            every residual block. Default: 3.
    """

    def __init__(self, in_channels=3, out_channels=3, base_channels=64, num_levels=4,
                 time_channels=256, cond_channels=3):
        super(UNet, self).__init__()

        # MLP on top of the sinusoidal time-step encoding.
        self.time_encoder = nn.Sequential(
            PositionalEncoding(time_channels),
            nn.Linear(time_channels, time_channels),
            nn.ReLU(),  # non-inplace ReLU to avoid in-place autograd errors
            nn.Linear(time_channels, time_channels)
        )

        # Initial convolution
        self.init_conv = nn.Conv2d(in_channels, base_channels, kernel_size=3, padding=1)

        # Encoder
        self.down_blocks = nn.ModuleList()
        self.skip_channels = []  # output channel count of each encoder stage, used for skip connections
        channels = base_channels

        # Record the channel count after the initial convolution (this is the
        # first skip connection pushed in forward()).
        self.skip_channels.append(channels)

        for i in range(num_levels):
            self.down_blocks.append(
                ResidualBlock(channels, channels * 2, time_channels, cond_channels)
            )
            channels *= 2
            # Record the channel count after each down block.
            self.skip_channels.append(channels)

        # Bottleneck
        self.bottleneck = ResidualBlock(channels, channels, time_channels, cond_channels)

        # Decoder
        self.up_blocks = nn.ModuleList()
        for i in range(num_levels):
            # Compute the concatenated input width dynamically: current
            # channels of x plus the matching skip connection's channels.
            # IMPORTANT: this index must mirror skip_connections[-(i + 2)]
            # in forward() — both count (i + 2) from the end of the list.
            skip_idx_in_list = len(self.skip_channels) - (i + 2)
            skip_channels_value = self.skip_channels[skip_idx_in_list]
            concat_channels = channels + skip_channels_value  # upsampled channels + skip channels

            self.up_blocks.append(
                nn.Sequential(
                    nn.Conv2d(concat_channels, channels, kernel_size=1),  # fuse the concatenation back to `channels`
                    ResidualBlock(channels, channels // 2, time_channels, cond_channels)
                )
            )
            channels //= 2

        # Final convolution
        self.final_conv = nn.Conv2d(base_channels, out_channels, kernel_size=3, padding=1)

        # Initialize weights - pass the module itself, not the modules() generator
        default_init_weights(self)

    def forward(self, x, t, cond=None):
        """Predict noise for input ``x`` at time steps ``t``.

        Args:
            x (Tensor): Noisy input, shape (N, in_channels, H, W).
            t (Tensor): Integer time steps, shape (N,).
            cond (Tensor | None): Conditioning feature map; each residual
                block resizes it internally to its own resolution.

        Returns:
            Tensor: Predicted noise, shape (N, out_channels, H, W).
        """
        # Encode time step
        t_emb = self.time_encoder(t)

        # Initial convolution
        x = self.init_conv(x)

        # Encoder path with skip connections
        skip_connections = [x]
        for block in self.down_blocks:
            x = block(x, t_emb, cond)
            skip_connections.append(x)
            x = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False)

        # Bottleneck
        x = self.bottleneck(x, t_emb, cond)

        # Decoder path with skip connections
        for i, block_seq in enumerate(self.up_blocks):
            # Fetch the matching skip connection (mirrors the indexing used
            # to size the 1x1 fuse convolution in __init__).
            skip = skip_connections[-(i + 2)]
            # Upsample exactly to the skip connection's size instead of using
            # a fixed scale_factor (robust to odd input dimensions).
            x = F.interpolate(x, size=skip.shape[2:], mode='bilinear', align_corners=False)
            # Concatenate only after the spatial sizes are guaranteed to match.
            x = torch.cat([x, skip], dim=1)
            # The Sequential holds a 1x1 conv and a ResidualBlock; the block
            # is invoked manually so t_emb and cond can be passed through.
            x = block_seq[0](x)  # 1x1 conv: reduce concatenated channels
            x = block_seq[1](x, t_emb, cond)  # ResidualBlock with time/cond inputs

        # Final convolution
        x = self.final_conv(x)

        return x


class ConditionalUNet(nn.Module):
    """UNet wrapper that conditions noise prediction on an LR image.

    The LR image is passed through a shallow feature extractor, upsampled to
    the HR resolution, and fed to the inner UNet as its conditioning input.
    """

    def __init__(self, in_channels=3, out_channels=3, base_channels=64, num_levels=4,
                 time_channels=256, cond_channels=3, upscale_factor=4):
        super(ConditionalUNet, self).__init__()

        # Shallow feature extractor applied to the LR conditioning image.
        self.cond_encoder = nn.Sequential(
            nn.Conv2d(cond_channels, base_channels, kernel_size=3, padding=1),
            nn.ReLU(),  # non-inplace to keep autograd safe
            nn.Conv2d(base_channels, base_channels, kernel_size=3, padding=1)
        )

        # Bring the LR features up to the HR spatial resolution.
        self.upsample_cond = nn.Sequential(
            nn.Upsample(scale_factor=upscale_factor, mode='bilinear', align_corners=False),
            nn.Conv2d(base_channels, base_channels, kernel_size=3, padding=1)
        )

        # Noise-prediction backbone. It consumes the encoded LR features,
        # hence cond_channels=base_channels rather than the raw image depth.
        self.unet = UNet(
            in_channels=in_channels,
            out_channels=out_channels,
            base_channels=base_channels,
            num_levels=num_levels,
            time_channels=time_channels,
            cond_channels=base_channels
        )

    def forward(self, x, t, cond):
        """Predict noise for x at time steps t, conditioned on LR image cond."""
        features = self.cond_encoder(cond)
        features = self.upsample_cond(features)
        return self.unet(x, t, features)


@ARCH_REGISTRY.register()
class SRDiff(nn.Module):
    """SRDiff model for single image super-resolution with diffusion probabilistic models.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        base_channels (int): Channel number of the base layer. Default: 64.
        num_levels (int): Number of levels in UNet. Default: 4.
        time_channels (int): Channel number for time embedding. Default: 256.
        upscale_factor (int): Upsampling factor. Default: 4.
        img_range (float): Image range. Default: 1.0.
        rgb_mean (tuple[float]): Image mean in RGB orders.
            Default: (0.5, 0.5, 0.5).
    """

    def __init__(self,
                 num_in_ch=3,
                 num_out_ch=3,
                 base_channels=64,
                 num_levels=4,
                 time_channels=256,
                 upscale_factor=4,
                 img_range=1.0,
                 rgb_mean=(0.5, 0.5, 0.5)):
        super(SRDiff, self).__init__()

        self.img_range = img_range
        # Register mean as buffer so it moves to the same device as the model.
        # NOTE(review): the mean is hard-coded to 3 channels; assumes RGB
        # input regardless of num_in_ch — confirm for non-RGB use.
        self.register_buffer('mean', torch.Tensor(rgb_mean).view(1, 3, 1, 1))
        self.upscale_factor = upscale_factor

        # Noise predictor (Conditional UNet)
        self.noise_predictor = ConditionalUNet(
            in_channels=num_in_ch,
            out_channels=num_out_ch,
            base_channels=base_channels,
            num_levels=num_levels,
            time_channels=time_channels,
            cond_channels=num_in_ch,
            upscale_factor=upscale_factor
        )

        # Diffusion parameters (linear beta schedule).
        self.num_timesteps = 1000
        self.beta_start = 0.0001
        self.beta_end = 0.02

        # Precompute diffusion schedule
        self.register_buffer('betas', torch.linspace(self.beta_start, self.beta_end, self.num_timesteps))
        alphas = 1. - self.betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        self.register_buffer('alphas_cumprod', alphas_cumprod)

        # Buffers used by the reverse (sampling) process.
        alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])
        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
        self.register_buffer('sqrt_recip_alphas', torch.sqrt(1. / alphas))
        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
        self.register_buffer('posterior_variance', self.betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod))

    def apply_img_norm(self, x):
        """Normalize an image into the model's working range."""
        return (x - self.mean) * self.img_range

    def apply_img_denorm(self, x):
        """Invert apply_img_norm, mapping back to the original image range."""
        return x / self.img_range + self.mean

    def forward(self, x, t=None, noise=None):
        """Forward pass for training.

        Args:
            x: Tuple of (lr_image, gt_image).
            t: Optional time steps, if None, random timesteps are sampled.
            noise: Optional noise tensor, if None, random noise is generated.

        Returns:
            Tensor: Predicted noise for the noised ground-truth image.
        """
        lr, gt = x

        # Normalize images
        gt = self.apply_img_norm(gt)
        lr = self.apply_img_norm(lr)

        # Sample random time steps if not provided
        if t is None:
            t = torch.randint(0, self.num_timesteps, (gt.shape[0],), device=gt.device)

        # Generate random noise if not provided
        if noise is None:
            noise = torch.randn_like(gt)

        # Forward diffusion: q(x_t | x_0) coefficients, broadcast over CHW.
        sqrt_alphas_cumprod_t = self.sqrt_alphas_cumprod[t][:, None, None, None]
        sqrt_one_minus_alphas_cumprod_t = self.sqrt_one_minus_alphas_cumprod[t][:, None, None, None]

        # Add noise to ground truth image
        noisy_gt = sqrt_alphas_cumprod_t * gt + sqrt_one_minus_alphas_cumprod_t * noise

        # Predict noise
        predicted_noise = self.noise_predictor(noisy_gt, t, lr)

        return predicted_noise

    @torch.no_grad()
    def sample(self, lr, num_timesteps=None):
        """Sample high-resolution image from low-resolution input.

        Args:
            lr: Low-resolution input image, shape (N, 3, h, w).
            num_timesteps: Number of sampling steps, if None, use full schedule.
                NOTE: when fewer steps are used, the per-step DDPM update below
                is only an approximation (a DDIM-style update would be exact).

        Returns:
            Tensor: Super-resolved image, shape (N, 3, h * s, w * s).
        """
        device = lr.device
        batch_size = lr.shape[0]

        # Normalize LR image
        lr = self.apply_img_norm(lr)

        # Determine sampling steps
        if num_timesteps is None:
            num_timesteps = self.num_timesteps
            timesteps = list(range(num_timesteps))[::-1]
        else:
            # Use fewer steps for faster sampling
            skip = self.num_timesteps // num_timesteps
            timesteps = list(range(0, self.num_timesteps, skip))[::-1]

        # Start with random noise at HR resolution
        x = torch.randn(batch_size, 3, lr.shape[2] * self.upscale_factor, lr.shape[3] * self.upscale_factor, device=device)

        # Iterative denoising
        for t in timesteps:
            # dtype must be long so t can index the schedule buffers downstream.
            t_tensor = torch.full((batch_size,), t, device=device, dtype=torch.long)

            # Predict noise
            noise_pred = self.noise_predictor(x, t_tensor, lr)

            # No noise is injected at the final step (t == 0).
            if t > 0:
                noise = torch.randn_like(x)
            else:
                noise = torch.zeros_like(x)

            # DDPM reverse step (Ho et al. 2020, Algorithm 2):
            #   mu_t = 1/sqrt(alpha_t) * (x_t - beta_t / sqrt(1 - alphabar_t) * eps_theta)
            #   x_{t-1} = mu_t + sqrt(posterior_variance_t) * z
            # Bug fix: the previous code used alphas_cumprod[t] in place of the
            # per-step alpha_t and sqrt(1 - alphabar_{t-1}) in place of
            # sqrt(1 - alphabar_t); it also ignored the precomputed
            # sqrt_recip_alphas and posterior_variance buffers.
            mean = self.sqrt_recip_alphas[t] * (
                x - self.betas[t] / self.sqrt_one_minus_alphas_cumprod[t] * noise_pred)
            x = mean + torch.sqrt(self.posterior_variance[t]) * noise

        # Denormalize output
        x = self.apply_img_denorm(x)

        return x