{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "initial_id",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-25T12:57:53.819689Z",
     "start_time": "2025-05-25T12:57:52.056091Z"
    },
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "图像生成完成，输出张量 shape: torch.Size([1, 3, 64, 64])\n"
     ]
    }
   ],
   "source": [
     "# Stable Diffusion example code\n",
     "# This example simplifies the overall Stable Diffusion pipeline: a text encoder, a U-Net generator, and a decoder (VAE Decoder)\n",
     "# Note: real deployment needs significant GPU resources; this notebook is for teaching purposes only\n",
     "\n",
     "import torch  # PyTorch core library\n",
     "import torch.nn as nn  # neural-network building blocks\n",
     "import torch.nn.functional as F  # functional API (softmax, etc.)\n",
     "from transformers import CLIPTextModel, CLIPTokenizer  # CLIP text model and tokenizer\n",
    "\n",
    "# 1. 文本编码器（CLIP Text Encoder）\n",
    "class TextEncoder:  # 定义文本编码器类\n",
    "    def __init__(self, device=\"cuda\"):  # 初始化方法，默认使用CUDA设备\n",
    "        self.tokenizer = CLIPTokenizer.from_pretrained(\"openai/clip-vit-base-patch32\")  # 加载CLIP分词器\n",
    "        self.text_encoder = CLIPTextModel.from_pretrained(\"openai/clip-vit-base-patch32\").to(device)  # 加载CLIP文本编码器并移至指定设备\n",
    "        self.device = device  # 保存设备信息\n",
    "\n",
    "    def encode(self, prompt):  # 编码方法，将文本提示转换为嵌入向量\n",
    "        tokens = self.tokenizer(prompt, return_tensors=\"pt\", padding=True).to(self.device)  # 将提示文本转换为token并移至设备\n",
    "        with torch.no_grad():  # 不计算梯度\n",
    "            text_embeddings = self.text_encoder(**tokens).last_hidden_state  # [B, 77, 768] 获取文本嵌入\n",
    "        return text_embeddings  # 返回文本嵌入\n",
    "\n",
    "# 简单的交叉注意力模块\n",
    "class CrossAttention(nn.Module):  # 定义交叉注意力模块\n",
    "    def __init__(self, latent_dim, context_dim):  # 初始化方法，接收潜在维度和上下文维度\n",
    "        super().__init__()  # 调用父类初始化\n",
    "        self.query = nn.Linear(latent_dim, latent_dim)  # 查询线性层\n",
    "        self.key = nn.Linear(context_dim, latent_dim)  # 键线性层\n",
    "        self.value = nn.Linear(context_dim, latent_dim)  # 值线性层\n",
    "        self.out = nn.Linear(latent_dim, latent_dim)  # 输出线性层\n",
    "\n",
    "    def forward(self, x, context):  # 前向传播方法\n",
    "        # x: [B, C, H, W] -> [B, HW, C]\n",
    "        B, C, H, W = x.shape  # 获取输入张量的形状\n",
    "        x_flat = x.view(B, C, H * W).transpose(1, 2)  # [B, HW, C] 将特征图展平并转置\n",
    "        q = self.query(x_flat)          # [B, HW, C] 计算查询向量\n",
    "        k = self.key(context)           # [B, T, C] 计算键向量\n",
    "        v = self.value(context)         # [B, T, C] 计算值向量\n",
    "\n",
    "        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / (C ** 0.5)  # [B, HW, T] 计算注意力分数\n",
    "        attn_weights = F.softmax(attn_scores, dim=-1)                   # [B, HW, T] 计算注意力权重\n",
    "        attended = torch.matmul(attn_weights, v)                        # [B, HW, C] 应用注意力\n",
    "\n",
    "        out = self.out(attended).transpose(1, 2).view(B, C, H, W)       # [B, C, H, W] 重塑回原始形状\n",
    "        return out  # 返回注意力输出\n",
    "\n",
     "# 2. U-Net generator (with cross-attention added)\n",
     "class UNet(nn.Module):  # toy U-Net-style denoiser: down path, text-conditioned middle, up path\n",
     "    def __init__(self):  # initializer\n",
     "        super().__init__()  # initialize nn.Module\n",
     "        self.down = nn.Sequential(  # \"down\" path (stride 1, so spatial size is actually preserved)\n",
     "            nn.Conv2d(4, 64, 3, padding=1),  # first conv: 4 latent channels -> 64\n",
     "            nn.ReLU(),  # ReLU activation\n",
     "            nn.Conv2d(64, 128, 3, padding=1),  # second conv: 64 -> 128\n",
     "            nn.ReLU()  # ReLU activation\n",
     "        )\n",
     "        # Fix: make sure latent_dim and context_dim match their inputs\n",
     "        self.cross_attn = CrossAttention(latent_dim=128, context_dim=768)  # cross-attention layer; context_dim must equal the width of text_emb passed to forward()\n",
     "        self.mid = nn.Sequential(  # middle block\n",
     "            nn.Conv2d(128, 128, 3, padding=1),  # conv layer\n",
     "            nn.ReLU()  # ReLU activation\n",
     "        )\n",
     "        self.up = nn.Sequential(  # \"up\" path (stride-1 transposed convs -- size preserved)\n",
     "            nn.ConvTranspose2d(128, 64, 3, padding=1),  # first transposed conv: 128 -> 64\n",
     "            nn.ReLU(),  # ReLU activation\n",
     "            nn.ConvTranspose2d(64, 4, 3, padding=1)  # second transposed conv: back to 4 latent channels\n",
     "        )\n",
     "\n",
     "    def forward(self, x, text_emb):  # x: [B, 4, H, W] latent; text_emb: [B, T, 768] text condition\n",
     "        x = self.down(x)                      # [B, 128, H, W] feature extraction\n",
     "        x = self.cross_attn(x, text_emb)      # inject the text condition\n",
     "        x = self.mid(x)  # middle processing (see course slides for details)\n",
     "        x = self.up(x)  # project back to latent channels\n",
     "        return x  # predicted noise in latent space\n",
    "\n",
     "# 3. VAE decoder (simplified)\n",
     "class Decoder(nn.Module):  # maps 4-channel latents to 3-channel RGB images\n",
     "    def __init__(self):  # initializer\n",
     "        super().__init__()  # initialize nn.Module\n",
     "        self.net = nn.Sequential(  # decoding network\n",
     "            nn.Conv2d(4, 64, 3, padding=1),  # first conv: 4 latent channels -> 64\n",
     "            nn.ReLU(),  # ReLU activation\n",
     "            nn.Conv2d(64, 3, 3, padding=1),  # second conv: 64 -> 3 (RGB)\n",
     "            nn.Tanh()  # Tanh squashes the output into [-1, 1]\n",
     "        )\n",
     "\n",
     "    def forward(self, x):  # x: [B, 4, H, W] latent\n",
     "        return self.net(x)  # [B, 3, H, W] image tensor in [-1, 1]\n",
    "\n",
    "# 4. 整体流程封装\n",
    "class StableDiffusionDemo:  # 定义Stable Diffusion演示类\n",
    "    def __init__(self, device=\"cuda\"):  # 初始化方法\n",
    "        self.device = device  # 保存设备信息\n",
    "        self.text_encoder = TextEncoder(device)  # 初始化文本编码器\n",
    "        self.unet = UNet().to(device)  # 初始化U-Net并移至设备\n",
    "        self.decoder = Decoder().to(device)  # 初始化解码器并移至设备\n",
    "\n",
    "    def generate(self, prompt):  # 图像生成方法\n",
    "        # 获取文本嵌入\n",
    "        text_embeddings = self.text_encoder.encode(prompt)  # 编码提示文本\n",
    "        \n",
    "        # 确保文本嵌入的维度正确\n",
    "        batch_size = text_embeddings.shape[0]  # 获取批次大小\n",
    "        seq_len = text_embeddings.shape[1]  # 获取序列长度\n",
    "        hidden_dim = text_embeddings.shape[2]  # 获取隐藏维度\n",
    "        \n",
    "        # 如果需要，可以调整文本嵌入的维度以匹配交叉注意力的需求\n",
    "        # 确保文本嵌入的维度是 [B, seq_len, 768]\n",
    "        if hidden_dim != 768:  # 如果隐藏维度不是768\n",
    "            # 创建一个投影层来调整维度\n",
    "            projection = nn.Linear(hidden_dim, 768).to(self.device)  # 创建投影层\n",
    "            text_embeddings = projection(text_embeddings)  # 应用投影\n",
    "        \n",
    "        latent = torch.randn(batch_size, 4, 64, 64).to(self.device)  # 初始 latent，随机噪声\n",
    "        for _ in range(10):  # 简化版的 DDIM 迭代过程，进行10次迭代\n",
    "            noise_pred = self.unet(latent, text_embeddings)  # 预测噪声\n",
    "            latent = latent - noise_pred * 0.1  # 去噪步骤\n",
    "        image = self.decoder(latent)  # 解码生成图像\n",
    "        return image  # 返回生成的图像\n",
    "\n",
     "# Example run\n",
     "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"  # pick the GPU when available, otherwise fall back to CPU\n",
     "generator = StableDiffusionDemo(device)  # build the pipeline\n",
     "output = generator.generate(\"a cat riding a bike\")  # generate an image for the prompt\n",
     "print(\"图像生成完成，输出张量 shape:\", output.shape)  # expected shape: [1, 3, 64, 64]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0f75285b",
   "metadata": {},
   "source": [
    "1. VAE (Variational Autoencoder) 结构:变分自编码器​\n",
    "AutoencoderKL(\n",
    "  (encoder): Encoder(\n",
    "    (conv_in): Conv2d(3, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "    (down_blocks): ModuleList(\n",
    "      (0): DownEncoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0-1): 2 x ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "        (downsamplers): ModuleList(\n",
    "          (0): Downsample2D(\n",
    "            (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2))\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (1): DownEncoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0): ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "            (conv_shortcut): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))\n",
    "          )\n",
    "          (1): ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "        (downsamplers): ModuleList(\n",
    "          (0): Downsample2D(\n",
    "            (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2))\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (2): DownEncoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0): ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "            (conv_shortcut): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1))\n",
    "          )\n",
    "          (1): ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "        (downsamplers): ModuleList(\n",
    "          (0): Downsample2D(\n",
    "            (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2))\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (3): DownEncoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0-1): 2 x ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (mid_block): UNetMidBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0): Attention(\n",
    "          (group_norm): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "          (to_q): Linear(in_features=512, out_features=512, bias=True)\n",
    "          (to_k): Linear(in_features=512, out_features=512, bias=True)\n",
    "          (to_v): Linear(in_features=512, out_features=512, bias=True)\n",
    "          (to_out): ModuleList(\n",
    "            (0): Linear(in_features=512, out_features=512, bias=True)\n",
    "            (1): Dropout(p=0.0, inplace=False)\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0-1): 2 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "          (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (conv_norm_out): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "    (conv_act): SiLU()\n",
    "    (conv_out): Conv2d(512, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "  )\n",
    "  (decoder): Decoder(\n",
    "    (conv_in): Conv2d(4, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "    (up_blocks): ModuleList(\n",
    "      (0-1): 2 x UpDecoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0-2): 3 x ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "        (upsamplers): ModuleList(\n",
    "          (0): Upsample2D(\n",
    "            (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (2): UpDecoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0): ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "            (conv_shortcut): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n",
    "          )\n",
    "          (1-2): 2 x ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "        (upsamplers): ModuleList(\n",
    "          (0): Upsample2D(\n",
    "            (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (3): UpDecoderBlock2D(\n",
    "        (resnets): ModuleList(\n",
    "          (0): ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 256, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "            (conv_shortcut): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))\n",
    "          )\n",
    "          (1-2): 2 x ResnetBlock2D(\n",
    "            (norm1): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "            (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (norm2): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "            (nonlinearity): SiLU()\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (mid_block): UNetMidBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0): Attention(\n",
    "          (group_norm): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "          (to_q): Linear(in_features=512, out_features=512, bias=True)\n",
    "          (to_k): Linear(in_features=512, out_features=512, bias=True)\n",
    "          (to_v): Linear(in_features=512, out_features=512, bias=True)\n",
    "          (to_out): ModuleList(\n",
    "            (0): Linear(in_features=512, out_features=512, bias=True)\n",
    "            (1): Dropout(p=0.0, inplace=False)\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0-1): 2 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "          (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (norm2): GroupNorm(32, 512, eps=1e-06, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (conv_norm_out): GroupNorm(32, 128, eps=1e-06, affine=True)\n",
    "    (conv_act): SiLU()\n",
    "    (conv_out): Conv2d(128, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "  )\n",
    "  (quant_conv): Conv2d(8, 8, kernel_size=(1, 1), stride=(1, 1))\n",
    "  (post_quant_conv): Conv2d(4, 4, kernel_size=(1, 1), stride=(1, 1))\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d317e0d4",
   "metadata": {},
   "source": [
    "2. Text Encoder 结构:\n",
    "CLIPTextModel(\n",
    "  (text_model): CLIPTextTransformer(\n",
    "    (embeddings): CLIPTextEmbeddings(\n",
    "      (token_embedding): Embedding(49408, 1024)\n",
    "      (position_embedding): Embedding(77, 1024)\n",
    "    )\n",
    "    (encoder): CLIPEncoder(\n",
    "      (layers): ModuleList(\n",
    "        (0-22): 23 x CLIPEncoderLayer(\n",
    "          (self_attn): CLIPAttention(\n",
    "            (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
    "            (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
    "            (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
    "            (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
    "          )\n",
    "          (layer_norm1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
    "          (mlp): CLIPMLP(\n",
    "            (activation_fn): GELUActivation()\n",
    "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
    "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
    "          )\n",
    "          (layer_norm2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
    "  )\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e298c93b",
   "metadata": {},
   "source": [
    "\n",
    "3. Tokenizer 信息:\n",
    "CLIPTokenizer(name_or_path='/root/.cache/huggingface/hub/models--stabilityai--stable-diffusion-2-1-base/snapshots/5ede9e4bf3e3fd1cb0ef2f7a3fff13ee514fdf06/tokenizer', vocab_size=49408, model_max_length=77, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<|startoftext|>', 'eos_token': '<|endoftext|>', 'unk_token': '<|endoftext|>', 'pad_token': '!'}, clean_up_tokenization_spaces=False, added_tokens_decoder={\n",
    "\t0: AddedToken(\"!\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
    "\t49406: AddedToken(\"<|startoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
    "\t49407: AddedToken(\"<|endoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
    "}\n",
    ")\n",
    "\n",
    "4. U-Net 结构:\n",
    "UNet2DConditionModel(\n",
    "  (conv_in): Conv2d(4, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "  (time_proj): Timesteps()\n",
    "  (time_embedding): TimestepEmbedding(\n",
    "    (linear_1): Linear(in_features=320, out_features=1280, bias=True)\n",
    "    (act): SiLU()\n",
    "    (linear_2): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "  )\n",
    "  (down_blocks): ModuleList(\n",
    "    (0): CrossAttnDownBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0-1): 2 x Transformer2DModel(\n",
    "          (norm): GroupNorm(32, 320, eps=1e-06, affine=True)\n",
    "          (proj_in): Linear(in_features=320, out_features=320, bias=True)\n",
    "          (transformer_blocks): ModuleList(\n",
    "            (0): BasicTransformerBlock(\n",
    "              (norm1): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn1): Attention(\n",
    "                (to_q): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_k): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_v): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=320, out_features=320, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm2): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn2): Attention(\n",
    "                (to_q): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_k): Linear(in_features=1024, out_features=320, bias=False)\n",
    "                (to_v): Linear(in_features=1024, out_features=320, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=320, out_features=320, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm3): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
    "              (ff): FeedForward(\n",
    "                (net): ModuleList(\n",
    "                  (0): GEGLU(\n",
    "                    (proj): Linear(in_features=320, out_features=2560, bias=True)\n",
    "                  )\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                  (2): Linear(in_features=1280, out_features=320, bias=True)\n",
    "                )\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "          (proj_out): Linear(in_features=320, out_features=320, bias=True)\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0-1): 2 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 320, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(320, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=320, bias=True)\n",
    "          (norm2): GroupNorm(32, 320, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(320, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "        )\n",
    "      )\n",
    "      (downsamplers): ModuleList(\n",
    "        (0): Downsample2D(\n",
    "          (conv): Conv2d(320, 320, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (1): CrossAttnDownBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0-1): 2 x Transformer2DModel(\n",
    "          (norm): GroupNorm(32, 640, eps=1e-06, affine=True)\n",
    "          (proj_in): Linear(in_features=640, out_features=640, bias=True)\n",
    "          (transformer_blocks): ModuleList(\n",
    "            (0): BasicTransformerBlock(\n",
    "              (norm1): LayerNorm((640,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn1): Attention(\n",
    "                (to_q): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_k): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_v): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=640, out_features=640, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm2): LayerNorm((640,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn2): Attention(\n",
    "                (to_q): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_k): Linear(in_features=1024, out_features=640, bias=False)\n",
    "                (to_v): Linear(in_features=1024, out_features=640, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=640, out_features=640, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm3): LayerNorm((640,), eps=1e-05, elementwise_affine=True)\n",
    "              (ff): FeedForward(\n",
    "                (net): ModuleList(\n",
    "                  (0): GEGLU(\n",
    "                    (proj): Linear(in_features=640, out_features=5120, bias=True)\n",
    "                  )\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                  (2): Linear(in_features=2560, out_features=640, bias=True)\n",
    "                )\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "          (proj_out): Linear(in_features=640, out_features=640, bias=True)\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 320, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(320, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=640, bias=True)\n",
    "          (norm2): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(320, 640, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "        (1): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=640, bias=True)\n",
    "          (norm2): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "        )\n",
    "      )\n",
    "      (downsamplers): ModuleList(\n",
    "        (0): Downsample2D(\n",
    "          (conv): Conv2d(640, 640, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (2): CrossAttnDownBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0-1): 2 x Transformer2DModel(\n",
    "          (norm): GroupNorm(32, 1280, eps=1e-06, affine=True)\n",
    "          (proj_in): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (transformer_blocks): ModuleList(\n",
    "            (0): BasicTransformerBlock(\n",
    "              (norm1): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn1): Attention(\n",
    "                (to_q): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_k): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_v): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm2): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn2): Attention(\n",
    "                (to_q): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_k): Linear(in_features=1024, out_features=1280, bias=False)\n",
    "                (to_v): Linear(in_features=1024, out_features=1280, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm3): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "              (ff): FeedForward(\n",
    "                (net): ModuleList(\n",
    "                  (0): GEGLU(\n",
    "                    (proj): Linear(in_features=1280, out_features=10240, bias=True)\n",
    "                  )\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                  (2): Linear(in_features=5120, out_features=1280, bias=True)\n",
    "                )\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "          (proj_out): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(640, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(640, 1280, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "        (1): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "        )\n",
    "      )\n",
    "      (downsamplers): ModuleList(\n",
    "        (0): Downsample2D(\n",
    "          (conv): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (3): DownBlock2D(\n",
    "      (resnets): ModuleList(\n",
    "        (0-1): 2 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "  )\n",
    "  (up_blocks): ModuleList(\n",
    "    (0): UpBlock2D(\n",
    "      (resnets): ModuleList(\n",
    "        (0-2): 3 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 2560, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(2560, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(2560, 1280, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "      )\n",
    "      (upsamplers): ModuleList(\n",
    "        (0): Upsample2D(\n",
    "          (conv): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (1): CrossAttnUpBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0-2): 3 x Transformer2DModel(\n",
    "          (norm): GroupNorm(32, 1280, eps=1e-06, affine=True)\n",
    "          (proj_in): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (transformer_blocks): ModuleList(\n",
    "            (0): BasicTransformerBlock(\n",
    "              (norm1): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn1): Attention(\n",
    "                (to_q): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_k): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_v): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm2): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn2): Attention(\n",
    "                (to_q): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "                (to_k): Linear(in_features=1024, out_features=1280, bias=False)\n",
    "                (to_v): Linear(in_features=1024, out_features=1280, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm3): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "              (ff): FeedForward(\n",
    "                (net): ModuleList(\n",
    "                  (0): GEGLU(\n",
    "                    (proj): Linear(in_features=1280, out_features=10240, bias=True)\n",
    "                  )\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                  (2): Linear(in_features=5120, out_features=1280, bias=True)\n",
    "                )\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "          (proj_out): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0-1): 2 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 2560, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(2560, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(2560, 1280, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "        (2): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 1920, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(1920, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "          (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(1920, 1280, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "      )\n",
    "      (upsamplers): ModuleList(\n",
    "        (0): Upsample2D(\n",
    "          (conv): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (2): CrossAttnUpBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0-2): 3 x Transformer2DModel(\n",
    "          (norm): GroupNorm(32, 640, eps=1e-06, affine=True)\n",
    "          (proj_in): Linear(in_features=640, out_features=640, bias=True)\n",
    "          (transformer_blocks): ModuleList(\n",
    "            (0): BasicTransformerBlock(\n",
    "              (norm1): LayerNorm((640,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn1): Attention(\n",
    "                (to_q): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_k): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_v): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=640, out_features=640, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm2): LayerNorm((640,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn2): Attention(\n",
    "                (to_q): Linear(in_features=640, out_features=640, bias=False)\n",
    "                (to_k): Linear(in_features=1024, out_features=640, bias=False)\n",
    "                (to_v): Linear(in_features=1024, out_features=640, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=640, out_features=640, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm3): LayerNorm((640,), eps=1e-05, elementwise_affine=True)\n",
    "              (ff): FeedForward(\n",
    "                (net): ModuleList(\n",
    "                  (0): GEGLU(\n",
    "                    (proj): Linear(in_features=640, out_features=5120, bias=True)\n",
    "                  )\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                  (2): Linear(in_features=2560, out_features=640, bias=True)\n",
    "                )\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "          (proj_out): Linear(in_features=640, out_features=640, bias=True)\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 1920, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(1920, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=640, bias=True)\n",
    "          (norm2): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(1920, 640, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "        (1): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(1280, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=640, bias=True)\n",
    "          (norm2): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(1280, 640, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "        (2): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 960, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(960, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=640, bias=True)\n",
    "          (norm2): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(960, 640, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "      )\n",
    "      (upsamplers): ModuleList(\n",
    "        (0): Upsample2D(\n",
    "          (conv): Conv2d(640, 640, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (3): CrossAttnUpBlock2D(\n",
    "      (attentions): ModuleList(\n",
    "        (0-2): 3 x Transformer2DModel(\n",
    "          (norm): GroupNorm(32, 320, eps=1e-06, affine=True)\n",
    "          (proj_in): Linear(in_features=320, out_features=320, bias=True)\n",
    "          (transformer_blocks): ModuleList(\n",
    "            (0): BasicTransformerBlock(\n",
    "              (norm1): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn1): Attention(\n",
    "                (to_q): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_k): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_v): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=320, out_features=320, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm2): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
    "              (attn2): Attention(\n",
    "                (to_q): Linear(in_features=320, out_features=320, bias=False)\n",
    "                (to_k): Linear(in_features=1024, out_features=320, bias=False)\n",
    "                (to_v): Linear(in_features=1024, out_features=320, bias=False)\n",
    "                (to_out): ModuleList(\n",
    "                  (0): Linear(in_features=320, out_features=320, bias=True)\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                )\n",
    "              )\n",
    "              (norm3): LayerNorm((320,), eps=1e-05, elementwise_affine=True)\n",
    "              (ff): FeedForward(\n",
    "                (net): ModuleList(\n",
    "                  (0): GEGLU(\n",
    "                    (proj): Linear(in_features=320, out_features=2560, bias=True)\n",
    "                  )\n",
    "                  (1): Dropout(p=0.0, inplace=False)\n",
    "                  (2): Linear(in_features=1280, out_features=320, bias=True)\n",
    "                )\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "          (proj_out): Linear(in_features=320, out_features=320, bias=True)\n",
    "        )\n",
    "      )\n",
    "      (resnets): ModuleList(\n",
    "        (0): ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 960, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(960, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=320, bias=True)\n",
    "          (norm2): GroupNorm(32, 320, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(320, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(960, 320, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "        (1-2): 2 x ResnetBlock2D(\n",
    "          (norm1): GroupNorm(32, 640, eps=1e-05, affine=True)\n",
    "          (conv1): Conv2d(640, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (time_emb_proj): Linear(in_features=1280, out_features=320, bias=True)\n",
    "          (norm2): GroupNorm(32, 320, eps=1e-05, affine=True)\n",
    "          (dropout): Dropout(p=0.0, inplace=False)\n",
    "          (conv2): Conv2d(320, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "          (nonlinearity): SiLU()\n",
    "          (conv_shortcut): Conv2d(640, 320, kernel_size=(1, 1), stride=(1, 1))\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "  )\n",
    "  NOTE: the role of the mid block is explained in the course slides\n",
    "  (mid_block): UNetMidBlock2DCrossAttn(\n",
    "    (attentions): ModuleList(\n",
    "      (0): Transformer2DModel(\n",
    "        (norm): GroupNorm(32, 1280, eps=1e-06, affine=True)\n",
    "        (proj_in): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "        (transformer_blocks): ModuleList(\n",
    "          (0): BasicTransformerBlock(\n",
    "            (norm1): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "            (attn1): Attention(\n",
    "              (to_q): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "              (to_k): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "              (to_v): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "              (to_out): ModuleList(\n",
    "                (0): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "                (1): Dropout(p=0.0, inplace=False)\n",
    "              )\n",
    "            )\n",
    "            (norm2): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "            (attn2): Attention(\n",
    "              (to_q): Linear(in_features=1280, out_features=1280, bias=False)\n",
    "              (to_k): Linear(in_features=1024, out_features=1280, bias=False)\n",
    "              (to_v): Linear(in_features=1024, out_features=1280, bias=False)\n",
    "              (to_out): ModuleList(\n",
    "                (0): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "                (1): Dropout(p=0.0, inplace=False)\n",
    "              )\n",
    "            )\n",
    "            (norm3): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)\n",
    "            (ff): FeedForward(\n",
    "              (net): ModuleList(\n",
    "                (0): GEGLU(\n",
    "                  (proj): Linear(in_features=1280, out_features=10240, bias=True)\n",
    "                )\n",
    "                (1): Dropout(p=0.0, inplace=False)\n",
    "                (2): Linear(in_features=5120, out_features=1280, bias=True)\n",
    "              )\n",
    "            )\n",
    "          )\n",
    "        )\n",
    "        (proj_out): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "      )\n",
    "    )\n",
    "    (resnets): ModuleList(\n",
    "      (0-1): 2 x ResnetBlock2D(\n",
    "        (norm1): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "        (conv1): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "        (time_emb_proj): Linear(in_features=1280, out_features=1280, bias=True)\n",
    "        (norm2): GroupNorm(32, 1280, eps=1e-05, affine=True)\n",
    "        (dropout): Dropout(p=0.0, inplace=False)\n",
    "        (conv2): Conv2d(1280, 1280, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    "        (nonlinearity): SiLU()\n",
    "      )\n",
    "    )\n",
    "  )\n",
    "  (conv_norm_out): GroupNorm(32, 320, eps=1e-05, affine=True)\n",
    "  (conv_act): SiLU()\n",
    "  (conv_out): Conv2d(320, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
    ")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
