from . import utils, layers, layerspp, dense_layer
import torch.nn as nn
import functools
import torch
import numpy as np

from .nn import normalization, zero_module, conv_nd, SiLU, conv_transpose_nd

# Short aliases for the building blocks defined in sibling modules,
# so the model code below stays compact.
ResnetBlockDDPM = layerspp.ResnetBlockDDPMpp_Adagn          # DDPM-style resblock with adaptive group norm
ResnetBlockBigGAN = layerspp.ResnetBlockBigGANpp_Adagn      # BigGAN-style resblock with adaptive group norm
ResnetBlockBigGAN_one = layerspp.ResnetBlockBigGANpp_Adagn_one  # variant with a single adaptive norm
Combine = layerspp.Combine          # combines pyramid features ('cat' or 'sum')
conv3x3 = layerspp.conv3x3
conv1x1 = layerspp.conv1x1
get_act = layers.get_act
default_initializer = layers.default_init
dense = dense_layer.dense


class PixelNorm(nn.Module):
    """Normalize each spatial position to unit RMS across the channel axis.

    StyleGAN-style pixel normalization, used here to stabilize the latent
    mapping network. The 1e-8 term guards against division by zero.
    """

    def forward(self, input):
        rms = torch.sqrt(torch.mean(input * input, dim=1, keepdim=True) + 1e-8)
        return input / rms


class ModuleSequential(nn.Sequential):
    """Sequential container that threads one tensor through every child.

    Functionally the same as ``nn.Sequential`` for single-argument modules;
    the explicit loop keeps the call chain easy to step through in a debugger.
    """

    def forward(self, x):
        out = x
        for module in self.children():
            out = module(out)
        return out


class ResBlockEncoder(nn.Module):
    """Residual block for the condition encoder.

    Mirrors the UNet encoder's residual structure (norm -> SiLU -> conv,
    twice, with dropout before the second conv) but takes no timestep
    embedding, so it can encode the conditioning image on its own. It can
    optionally change the channel count via the skip connection.

    :param channels: number of input channels.
    :param dropout: dropout rate applied before the second convolution.
    :param out_channels: if specified, the number of output channels.
    :param use_conv: if True and the channel count changes, use a 3x3
        convolution instead of a 1x1 convolution on the skip connection.
    :param use_scale_shift_norm: stored for interface parity; not used here.
    :param dims: dimensionality of the signal (1, 2, or 3).
    :param use_checkpoint: stored for interface parity; not used here.
    """

    def __init__(
            self,
            channels,
            dropout,
            out_channels=None,
            use_conv=False,
            use_scale_shift_norm=False,
            dims=2,
            use_checkpoint=False,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels if out_channels else channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        # First half: norm -> activation -> 3x3 conv (may change channels).
        self.in_layers = nn.Sequential(
            normalization(channels),
            SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        # Second half: norm -> activation -> dropout -> zero-initialized conv,
        # so the block starts as (approximately) the identity.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            SiLU(),
            nn.Dropout(p=dropout),
            zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)),
        )

        # Skip connection: identity when channels match, otherwise a conv
        # (3x3 or 1x1 depending on use_conv) to align the channel count.
        if channels == self.out_channels:
            self.skip_connection = nn.Identity()
        else:
            if use_conv:
                self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1)
            else:
                self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x):
        """Apply the block to a feature tensor.

        :param x: an [N x C x ...] tensor of features.
        :return: an [N x C' x ...] tensor of outputs.
        """
        residual = self.skip_connection(x)
        h = self.out_layers(self.in_layers(x))
        return residual + h


@utils.register_model(name='ncsnpp')
class NCSNpp(nn.Module):
    """NCSN++ model adapted for conditional T1->T2 medical image translation.

    A UNet-style score network with:
      * a timestep embedding (Fourier or positional),
      * a StyleGAN-like latent mapping network (``z_transform``) whose output
        modulates the adaptive group norms inside every residual block,
      * a lightweight condition encoder that extracts T1 structural features,
        which multiplicatively gate the first encoder stage of the UNet.

    NOTE: ``__init__`` appends every layer to one flat module list that
    ``forward`` consumes strictly in order via a running index (``m_idx``),
    so the construction order and the forward pass must stay in sync.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.not_use_tanh = config.not_use_tanh  # keep tanh normalization so outputs match the T1 range
        self.act = act = nn.SiLU()
        self.z_emb_dim = z_emb_dim = config.z_emb_dim

        # Base channel count is kept as configured to preserve medical-image detail.
        self.nf = nf = config.num_channels_dae
        ch_mult = config.ch_mult
        self.num_res_blocks = num_res_blocks = config.num_res_blocks
        self.attn_resolutions = attn_resolutions = config.attn_resolutions
        dropout = config.dropout
        resamp_with_conv = config.resamp_with_conv
        self.num_resolutions = num_resolutions = len(ch_mult)
        self.all_resolutions = all_resolutions = [config.image_size // (2 ** i) for i in range(num_resolutions)]

        self.conditional = conditional = config.conditional  # keep the noise-conditioning mechanism
        fir = config.fir
        fir_kernel = config.fir_kernel
        self.skip_rescale = skip_rescale = config.skip_rescale
        self.resblock_type = resblock_type = config.resblock_type.lower()
        self.progressive = progressive = config.progressive.lower()
        self.progressive_input = progressive_input = config.progressive_input.lower()
        self.embedding_type = embedding_type = config.embedding_type.lower()
        init_scale = 0.
        assert progressive in ['none', 'output_skip', 'residual']
        assert progressive_input in ['none', 'input_skip', 'residual']
        assert embedding_type in ['fourier', 'positional']
        combine_method = config.progressive_combine.lower()
        combiner = functools.partial(Combine, method=combine_method)

        modules = []
        # Timestep embedding, unchanged, to condition on the diffusion step.
        if embedding_type == 'fourier':
            modules.append(layerspp.GaussianFourierProjection(
                embedding_size=nf, scale=config.fourier_scale
            ))
            embed_dim = 2 * nf
        elif embedding_type == 'positional':
            embed_dim = nf
        else:
            raise ValueError(f'embedding type {embedding_type} unknown.')

        if conditional:
            # Two linear layers that expand the time embedding to nf * 4.
            modules.append(nn.Linear(embed_dim, nf * 4))
            modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
            nn.init.zeros_(modules[-1].bias)
            modules.append(nn.Linear(nf * 4, nf * 4))
            modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
            nn.init.zeros_(modules[-1].bias)

        # Attention block factory, unchanged, to strengthen feature correlations.
        AttnBlock = functools.partial(layerspp.AttnBlockpp,
                                      init_scale=init_scale,
                                      skip_rescale=skip_rescale)

        Upsample = functools.partial(layerspp.Upsample,
                                     with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)

        if progressive == 'output_skip':
            self.pyramid_upsample = layerspp.Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
        elif progressive == 'residual':
            pyramid_upsample = functools.partial(layerspp.Upsample,
                                                 fir=fir, fir_kernel=fir_kernel, with_conv=True)

        Downsample = functools.partial(layerspp.Downsample,
                                       with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)

        if progressive_input == 'input_skip':
            self.pyramid_downsample = layerspp.Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
        elif progressive_input == 'residual':
            pyramid_downsample = functools.partial(layerspp.Downsample,
                                                   fir=fir, fir_kernel=fir_kernel, with_conv=True)

        # Residual block type suited to style transfer (keeps adaptive group norm).
        if resblock_type == 'ddpm':
            ResnetBlock = functools.partial(ResnetBlockDDPM,
                                            act=act,
                                            dropout=dropout,
                                            init_scale=init_scale,
                                            skip_rescale=skip_rescale,
                                            temb_dim=nf * 4,
                                            zemb_dim=z_emb_dim)
        elif resblock_type == 'biggan':
            ResnetBlock = functools.partial(ResnetBlockBigGAN,
                                            act=act,
                                            dropout=dropout,
                                            fir=fir,
                                            fir_kernel=fir_kernel,
                                            init_scale=init_scale,
                                            skip_rescale=skip_rescale,
                                            temb_dim=nf * 4,
                                            zemb_dim=z_emb_dim)
        elif resblock_type == 'biggan_oneadagn':
            ResnetBlock = functools.partial(ResnetBlockBigGAN_one,
                                            act=act,
                                            dropout=dropout,
                                            fir=fir,
                                            fir_kernel=fir_kernel,
                                            init_scale=init_scale,
                                            skip_rescale=skip_rescale,
                                            temb_dim=nf * 4,
                                            zemb_dim=z_emb_dim)
        else:
            raise ValueError(f'resblock type {resblock_type} unrecognized.')

        # --------------------------
        # Core change 1: condition encoder fixed to the single-channel T1 input
        # --------------------------
        dims = 2
        in_ch = 1  # fixed single channel (T1 image); dataset-specific branching removed
        modules.append(conv_nd(dims, in_ch, nf, 3, padding=1))  # initial T1 feature-extraction layer
        # Shallow condition encoder: one layer with a single residual block is
        # enough to retain T1 structure (the original code used
        # config.cond_enc_layers and cond_enc_num_res_blocks for depth).
        modules.append(ModuleSequential(
            ResBlockEncoder(nf, dropout, dims=dims, use_checkpoint=False,
                            out_channels=nf, use_scale_shift_norm=False)))
        self.cond_enc_layer_sizes = 1

        # --------------------------
        # Core change 2: output fixed to 1 channel (single-channel T2)
        # --------------------------
        channels = 1  # replaces the original config.num_channels (segmentation class count)
        if progressive_input != 'none':
            input_pyramid_ch = channels  # keep the input pyramid channel count in sync

        # Downsampling path, same structure, adapted to single-channel input.
        modules.append(conv3x3(channels, nf))
        hs_c = [nf]

        in_ch = nf
        for i_level in range(num_resolutions):
            # Residual blocks unchanged to keep feature-extraction capacity.
            for i_block in range(num_res_blocks):
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
                in_ch = out_ch

                if all_resolutions[i_level] in attn_resolutions:
                    modules.append(AttnBlock(channels=in_ch))
                hs_c.append(in_ch)

            if i_level != num_resolutions - 1:
                if resblock_type == 'ddpm':
                    modules.append(Downsample(in_ch=in_ch))
                else:
                    modules.append(ResnetBlock(down=True, in_ch=in_ch))

                if progressive_input == 'input_skip':
                    modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch))
                    if combine_method == 'cat':
                        in_ch *= 2
                elif progressive_input == 'residual':
                    modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch))
                    input_pyramid_ch = in_ch
                hs_c.append(in_ch)

        # Bottleneck, unchanged, to strengthen feature fusion.
        in_ch = hs_c[-1]
        modules.append(ResnetBlock(in_ch=in_ch))
        modules.append(AttnBlock(channels=in_ch))
        modules.append(ResnetBlock(in_ch=in_ch))

        pyramid_ch = 0
        # Upsampling path, same structure, so the output size matches the input.
        for i_level in reversed(range(num_resolutions)):
            for i_block in range(num_res_blocks + 1):
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(),
                                           out_ch=out_ch))
                in_ch = out_ch

            if all_resolutions[i_level] in attn_resolutions:
                modules.append(AttnBlock(channels=in_ch))

            if progressive != 'none':
                if i_level == num_resolutions - 1:
                    if progressive == 'output_skip':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
                        pyramid_ch = channels
                    elif progressive == 'residual':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, in_ch, bias=True))
                        pyramid_ch = in_ch
                    else:
                        raise ValueError(f'{progressive} is not a valid name.')
                else:
                    if progressive == 'output_skip':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale))
                        pyramid_ch = channels
                    elif progressive == 'residual':
                        modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch))
                        pyramid_ch = in_ch
                    else:
                        raise ValueError(f'{progressive} is not a valid name')

            if i_level != 0:
                if resblock_type == 'ddpm':
                    modules.append(Upsample(in_ch=in_ch))
                else:
                    modules.append(ResnetBlock(in_ch=in_ch, up=True))

        assert not hs_c

        # Final output layer, guaranteed single channel.
        if progressive != 'output_skip':
            modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                        num_channels=in_ch, eps=1e-6))
            modules.append(conv3x3(in_ch, channels, init_scale=init_scale))  # single-channel T2 output

        self.all_modules = nn.ModuleList(modules)

        # Latent-vector mapping network, unchanged, to enhance output diversity.
        mapping_layers = [PixelNorm(),
                          dense(config.nz, z_emb_dim),
                          self.act, ]
        for _ in range(config.n_mlp):
            mapping_layers.append(dense(z_emb_dim, z_emb_dim))
            mapping_layers.append(self.act)
        self.z_transform = nn.Sequential(*mapping_layers)

    def forward(self, x, c, **kwargs):
        """Forward pass for T1->T2 style transfer.

        :param x: noisy image, (batch, 1, H, W) or (batch, 1, D, H, W).
        :param c: conditioning input (T1 image), same shape as ``x``.
        :param kwargs: must contain ``time_cond`` (diffusion timestep) and
            ``z`` (latent vector controlling style diversity).
        :return: generated T2 image, same spatial shape as ``x`` with one
            channel; mapped through tanh to [-1, 1] unless
            ``not_use_tanh`` is set.
        :raises ValueError: if ``time_cond`` or ``z`` is missing.
        """
        # 1. Extract the required conditioning inputs (timestep and latent vector).
        time_cond = kwargs.get("time_cond")
        z = kwargs.get("z")
        if time_cond is None or z is None:
            raise ValueError("风格迁移需传入time_cond（时间步）和z（潜在向量）")

        # 2. Map the latent vector to the style embedding.
        zemb = self.z_transform(z)

        # 3. Timestep embedding (the diffusion-time condition).
        modules = self.all_modules
        m_idx = 0  # running index into the flat module list — must mirror __init__ order

        if self.embedding_type == 'fourier':
            # Gaussian Fourier embedding (continuous timesteps).
            used_sigmas = time_cond
            temb = modules[m_idx](torch.log(used_sigmas))
            m_idx += 1
        elif self.embedding_type == 'positional':
            # Sinusoidal positional embedding (discrete timesteps).
            timesteps = time_cond
            temb = layers.get_timestep_embedding(timesteps, self.nf)
        else:
            raise ValueError(f'不支持的嵌入类型 {self.embedding_type}')

        # Pass the time embedding through two linear layers for extra capacity.
        if self.conditional:
            temb = modules[m_idx](temb)  # linear layer 1
            m_idx += 1
            temb = modules[m_idx](self.act(temb))  # linear layer 2 + activation
            m_idx += 1
        else:
            temb = None

        # 4. Input preprocessing: map T1/noise from [0, 1] to [-1, 1] so the
        #    range matches the model's tanh output.
        if not self.config.centered:
            x = 2 * x - 1.0  # normalize the noisy image
            c = 2 * c - 1.0  # normalize the T1 image to the same range

        # 5. Encode the condition c (T1 image) with the shallow condition encoder.
        h = c
        for i in range(self.cond_enc_layer_sizes + 1):  # initial conv + residual block(s)
            h = modules[m_idx](h)
            m_idx += 1

        # 6. Downsampling path: fuse T1 features with noise features
        #    (core change: strengthen structural guidance).
        input_pyramid = None
        if self.progressive_input != 'none':
            input_pyramid = x

        # Initial convolution: noisy image -> features.
        hs = [modules[m_idx](x)]
        m_idx += 1

        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                # Core change: at the very first block, fuse by element-wise
                # multiplication instead of addition so the T1 features gate
                # the noise features and the anatomy is preserved.
                if i_level == i_block == 0:
                    hs[-1] = torch.mul(h, hs[-1])

                # Residual block: current features + time embedding + latent embedding.
                h_res = modules[m_idx](hs[-1], temb, zemb)
                m_idx += 1

                # Attention at this resolution, if configured.
                if h_res.shape[-1] in self.attn_resolutions:
                    h_res = modules[m_idx](h_res)
                    m_idx += 1

                hs.append(h_res)

            # Downsample (except at the last resolution).
            if i_level != self.num_resolutions - 1:
                if self.resblock_type == 'ddpm':
                    h_down = modules[m_idx](hs[-1])  # DDPM downsampling
                    m_idx += 1
                else:
                    h_down = modules[m_idx](hs[-1], temb, zemb)  # BigGAN downsampling
                    m_idx += 1

                # Progressive input pyramid, if enabled.
                if self.progressive_input == 'input_skip':
                    input_pyramid = self.pyramid_downsample(input_pyramid)
                    h_down = modules[m_idx](input_pyramid, h_down)  # combine pyramid features
                    m_idx += 1
                elif self.progressive_input == 'residual':
                    input_pyramid = modules[m_idx](input_pyramid)
                    m_idx += 1
                    input_pyramid = (input_pyramid + h_down) / np.sqrt(2.) if self.skip_rescale else input_pyramid + h_down
                    h_down = input_pyramid

                hs.append(h_down)

        # 7. Bottleneck: resblock -> attention -> resblock.
        h_bottleneck = hs[-1]
        h_bottleneck = modules[m_idx](h_bottleneck, temb, zemb)
        m_idx += 1
        h_bottleneck = modules[m_idx](h_bottleneck)
        m_idx += 1
        h_bottleneck = modules[m_idx](h_bottleneck, temb, zemb)
        m_idx += 1

        # 8. Upsampling path: progressively restore resolution.
        pyramid = None
        for i_level in reversed(range(self.num_resolutions)):
            # Residual blocks, fusing the skip features saved while downsampling.
            for i_block in range(self.num_res_blocks + 1):
                h_up = modules[m_idx](torch.cat([h_bottleneck, hs.pop()], dim=1), temb, zemb)
                m_idx += 1
                h_bottleneck = h_up

            # Attention at this resolution, if configured.
            if h_bottleneck.shape[-1] in self.attn_resolutions:
                h_bottleneck = modules[m_idx](h_bottleneck)
                m_idx += 1

            # Progressive output pyramid, if enabled.
            if self.progressive != 'none':
                if i_level == self.num_resolutions - 1:
                    if self.progressive == 'output_skip':
                        pyramid = self.act(modules[m_idx](h_bottleneck))
                        m_idx += 1
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                    elif self.progressive == 'residual':
                        pyramid = self.act(modules[m_idx](h_bottleneck))
                        m_idx += 1
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                else:
                    if self.progressive == 'output_skip':
                        pyramid = self.pyramid_upsample(pyramid)
                        pyramid_h = self.act(modules[m_idx](h_bottleneck))
                        m_idx += 1
                        pyramid_h = modules[m_idx](pyramid_h)
                        m_idx += 1
                        pyramid = pyramid + pyramid_h
                    elif self.progressive == 'residual':
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                        pyramid = (pyramid + h_bottleneck) / np.sqrt(2.) if self.skip_rescale else pyramid + h_bottleneck
                        h_bottleneck = pyramid

            # Upsample (except at the first resolution).
            if i_level != 0:
                if self.resblock_type == 'ddpm':
                    h_bottleneck = modules[m_idx](h_bottleneck)  # DDPM upsampling
                    m_idx += 1
                else:
                    h_bottleneck = modules[m_idx](h_bottleneck, temb, zemb)  # BigGAN upsampling
                    m_idx += 1

        # 9. Output layer: produce the T2 image.
        if self.progressive == 'output_skip':
            output = pyramid
        else:
            output = self.act(modules[m_idx](h_bottleneck))
            m_idx += 1
            output = modules[m_idx](output)
            m_idx += 1

        # 10. Output normalization so the range matches the T2 intensities.
        if not self.not_use_tanh:
            output = torch.tanh(output)  # map to [-1, 1], consistent with the T1 input range

        # If needed, map the output from [-1, 1] back to [0, 1]
        # (adjust according to the training-data range):
        # output = (output + 1.0) / 2.0

        return output
