{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b21c15ae",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ModuleList(\n",
      "  (0): GaussianFourierProjection()\n",
      "  (1): Linear(in_features=128, out_features=256, bias=True)\n",
      "  (2): Linear(in_features=256, out_features=256, bias=True)\n",
      "  (3): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "  (4): ModuleSequential(\n",
      "    (0): ResBlockEncoder(\n",
      "      (in_layers): Sequential(\n",
      "        (0): GroupNorm32(32, 64, eps=1e-05, affine=True)\n",
      "        (1): SiLU()\n",
      "        (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "      )\n",
      "      (out_layers): Sequential(\n",
      "        (0): GroupNorm32(32, 64, eps=1e-05, affine=True)\n",
      "        (1): SiLU()\n",
      "        (2): Dropout(p=0.1, inplace=False)\n",
      "        (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "      )\n",
      "      (skip_connection): Identity()\n",
      "    )\n",
      "  )\n",
      "  (5): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "  (6-7): 2 x ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(16, 64, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=128, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=64, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(16, 64, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=128, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (8): Downsample(\n",
      "    (Conv2d_0): Conv2d()\n",
      "  )\n",
      "  (9): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(16, 64, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=128, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (NIN_0): NIN()\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (10): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (11): Downsample(\n",
      "    (Conv2d_0): Conv2d()\n",
      "  )\n",
      "  (12): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (13): AttnBlockpp(\n",
      "    (GroupNorm_0): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
      "    (NIN_0): NIN()\n",
      "    (NIN_1): NIN()\n",
      "    (NIN_2): NIN()\n",
      "    (NIN_3): NIN()\n",
      "  )\n",
      "  (14): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (15): AttnBlockpp(\n",
      "    (GroupNorm_0): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
      "    (NIN_0): NIN()\n",
      "    (NIN_1): NIN()\n",
      "    (NIN_2): NIN()\n",
      "    (NIN_3): NIN()\n",
      "  )\n",
      "  (16): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (17): AttnBlockpp(\n",
      "    (GroupNorm_0): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
      "    (NIN_0): NIN()\n",
      "    (NIN_1): NIN()\n",
      "    (NIN_2): NIN()\n",
      "    (NIN_3): NIN()\n",
      "  )\n",
      "  (18): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (19-21): 3 x ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 256, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=512, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (NIN_0): NIN()\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (22): AttnBlockpp(\n",
      "    (GroupNorm_0): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
      "    (NIN_0): NIN()\n",
      "    (NIN_1): NIN()\n",
      "    (NIN_2): NIN()\n",
      "    (NIN_3): NIN()\n",
      "  )\n",
      "  (23): Upsample(\n",
      "    (Conv2d_0): Conv2d()\n",
      "  )\n",
      "  (24-25): 2 x ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 256, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=512, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (NIN_0): NIN()\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (26): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 192, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=384, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=128, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (NIN_0): NIN()\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (27): Upsample(\n",
      "    (Conv2d_0): Conv2d()\n",
      "  )\n",
      "  (28): ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 192, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=384, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(192, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=64, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(16, 64, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=128, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (NIN_0): NIN()\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (29-30): 2 x ResnetBlockDDPMpp_Adagn(\n",
      "    (GroupNorm_0): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(32, 128, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=256, bias=True)\n",
      "    )\n",
      "    (Conv_0): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (Dense_0): Linear(in_features=256, out_features=64, bias=True)\n",
      "    (GroupNorm_1): AdaptiveGroupNorm(\n",
      "      (norm): GroupNorm(16, 64, eps=1e-06, affine=False)\n",
      "      (style): Linear(in_features=128, out_features=128, bias=True)\n",
      "    )\n",
      "    (Dropout_0): Dropout(p=0.1, inplace=False)\n",
      "    (Conv_1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (NIN_0): NIN()\n",
      "    (act): SiLU()\n",
      "  )\n",
      "  (31): GroupNorm(16, 64, eps=1e-06, affine=True)\n",
      "  (32): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      ")\n",
      "Sequential(\n",
      "  (0): PixelNorm()\n",
      "  (1): Linear(in_features=100, out_features=128, bias=True)\n",
      "  (2): SiLU()\n",
      "  (3): Linear(in_features=128, out_features=128, bias=True)\n",
      "  (4): SiLU()\n",
      "  (5): Linear(in_features=128, out_features=128, bias=True)\n",
      "  (6): SiLU()\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "from models.generator import NCSNpp  # generator.py is assumed to be importable from the working directory\n",
    "\n",
    "\n",
    "class Config:\n",
    "    \"\"\"Minimal configuration object for instantiating NCSNpp in 3D mode.\"\"\"\n",
    "\n",
    "    num_channels_dae = 64      # base channel count\n",
    "    ch_mult = [1, 2, 2]        # per-resolution channel multipliers\n",
    "    num_res_blocks = 2         # residual blocks per resolution\n",
    "    attn_resolutions = [16]    # resolutions at which attention is applied\n",
    "    dropout = 0.1\n",
    "    resamp_with_conv = True\n",
    "    image_size = 64            # 3D volume size (D = H = W = 64)\n",
    "    conditional = True         # enable conditional generation\n",
    "    fir = True\n",
    "    fir_kernel = [1, 3, 3, 1]\n",
    "    skip_rescale = True\n",
    "    resblock_type = 'ddpm'\n",
    "    progressive = 'none'\n",
    "    progressive_input = 'none'\n",
    "    embedding_type = 'fourier'\n",
    "    fourier_scale = 16.0\n",
    "    not_use_tanh = False\n",
    "    z_emb_dim = 128            # latent-vector embedding dimension\n",
    "    nz = 100                   # dimensionality of the latent vector z\n",
    "    n_mlp = 2                  # number of MLP layers mapping the latent vector\n",
    "    dims = 3                   # 3D mode\n",
    "    progressive_combine = 'sum'\n",
    "    centered = True\n",
    "\n",
    "\n",
    "config = Config()\n",
    "model = NCSNpp(config)\n",
    "\n",
    "# Inspect the model structure (optional sanity check that the key layers exist).\n",
    "print(model.all_modules)   # core module list\n",
    "print(model.z_transform)   # latent-vector mapping network"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "eff512aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input shape (x): torch.Size([2, 1, 64, 64])\n",
      "Output shape: torch.Size([2, 1, 64, 64])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "\n",
    "class Config2D:\n",
    "    \"\"\"Configuration for a 2D NCSNpp smoke test.\n",
    "\n",
    "    Named Config2D (rather than Config) so it does not silently shadow the\n",
    "    3D Config class defined in the previous cell.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.nz = 64                          # latent-vector dimensionality\n",
    "        self.num_channels_dae = 64            # base channel count\n",
    "        self.ch_mult = [1, 1, 1, 2, 2, 4, 4]  # channel multipliers (controls depth)\n",
    "        self.num_res_blocks = 2               # residual blocks per resolution\n",
    "        self.attn_resolutions = [16, 8]       # resolutions at which attention is applied\n",
    "        self.dropout = 0.1\n",
    "        self.resamp_with_conv = True\n",
    "        self.progressive = 'none'\n",
    "        self.progressive_input = 'none'\n",
    "        self.embedding_type = 'fourier'\n",
    "        self.fourier_scale = 16.0\n",
    "        self.conditional = True\n",
    "        self.not_use_tanh = False\n",
    "        self.z_emb_dim = 64                   # dimension of the mapped latent vector\n",
    "        self.n_mlp = 4                        # number of MLP layers\n",
    "        self.centered = False                 # whether inputs are already scaled to [-1, 1]\n",
    "        self.image_size = 64\n",
    "        self.fir = True\n",
    "        self.fir_kernel = [1, 3, 3, 1]\n",
    "        self.skip_rescale = True\n",
    "        self.resblock_type = 'biggan'\n",
    "        self.progressive_combine = 'sum'\n",
    "\n",
    "\n",
    "config = Config2D()\n",
    "model = NCSNpp(config)  # NCSNpp is imported in the first cell\n",
    "\n",
    "# 2D inputs: spatial size must be compatible with the model's down/upsampling levels.\n",
    "x = torch.randn(2, 1, 64, 64)  # noisy image (2D, single channel)\n",
    "c = torch.randn(2, 1, 64, 64)  # conditioning input (T1 image, same shape as x)\n",
    "time_cond = torch.rand(2)      # time step (noise level)\n",
    "z = torch.randn(2, config.nz)  # latent vector\n",
    "\n",
    "# Forward pass.\n",
    "output = model(x, c, time_cond=time_cond, z=z)\n",
    "\n",
    "# The generator should preserve the spatial shape of its input.\n",
    "assert output.shape == x.shape, (output.shape, x.shape)\n",
    "print('Input shape (x):', x.shape)\n",
    "print('Output shape:', output.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b74e5666",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ResBlock input shape: torch.Size([2, 64, 32, 32, 32])\n",
      "ResBlock output shape: torch.Size([2, 64, 32, 32, 32])\n"
     ]
    }
   ],
   "source": [
    "import torch  # explicit import so this cell does not rely on a previous cell's namespace\n",
    "\n",
    "from models.generator import ResBlockEncoder  # ResBlockEncoder is assumed to live in generator.py\n",
    "\n",
    "# Instantiate the 3D residual encoder block.\n",
    "res_block = ResBlockEncoder(channels=64, dropout=0.1, dims=3)\n",
    "\n",
    "# A 3D feature map: (batch, channels, D, H, W).\n",
    "input_feat = torch.randn(2, 64, 32, 32, 32)\n",
    "output_feat = res_block(input_feat)\n",
    "\n",
    "# The residual block should preserve the spatial dimensions and, by default,\n",
    "# the channel count (out_channels defaults to channels) -- check, don't just print.\n",
    "assert output_feat.shape == input_feat.shape, (output_feat.shape, input_feat.shape)\n",
    "print('ResBlock input shape:', input_feat.shape)\n",
    "print('ResBlock output shape:', output_feat.shape)  # expected: (2, 64, 32, 32, 32)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cdal_cpu",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.23"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
