{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "initial_id",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-28T13:31:45.991629Z",
     "start_time": "2025-05-28T13:31:44.660158Z"
    },
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "图像生成完成，输出张量 shape: torch.Size([1, 3, 64, 64])\n"
     ]
    }
   ],
   "source": [
    "# Stable Diffusion 示例代码（增强版）\n",
    "# 加入了：时间步嵌入、位置编码、调度器（beta 线性）、DDIM 采样器\n",
    "\n",
    "import torch  # 导入PyTorch库\n",
    "import torch.nn as nn  # 导入神经网络模块\n",
    "import torch.nn.functional as F  # 导入函数式API\n",
    "from transformers import CLIPTextModel, CLIPTokenizer  # 导入CLIP模型和分词器\n",
    "import math  # 导入数学函数库\n",
    "\n",
    "# 文本编码器（CLIP）\n",
    "class TextEncoder:\n",
    "    def __init__(self, device=\"cuda\"):  # 初始化函数，默认使用CUDA设备\n",
    "        self.tokenizer = CLIPTokenizer.from_pretrained(\"openai/clip-vit-base-patch32\")  # 加载CLIP分词器\n",
    "        self.text_encoder = CLIPTextModel.from_pretrained(\"openai/clip-vit-base-patch32\").to(device)  # 加载CLIP文本编码器并移至指定设备\n",
    "        self.device = device  # 保存设备信息\n",
    "\n",
    "    def encode(self, prompt):  # 编码文本提示的方法\n",
    "        tokens = self.tokenizer(prompt, return_tensors=\"pt\", padding=True).to(self.device)  # 将提示文本转换为token并移至设备\n",
    "        with torch.no_grad():  # 不计算梯度\n",
    "            text_embeddings = self.text_encoder(**tokens).last_hidden_state  # [B, 77, 768]  # 获取文本嵌入\n",
    "        return text_embeddings  # 返回文本嵌入\n",
    "\n",
    "# 时间步嵌入（Sinusoidal）\n",
    "def get_timestep_embedding(timesteps, embedding_dim):  # 获取时间步嵌入的函数\n",
    "    half_dim = embedding_dim // 2  # 计算一半的维度\n",
    "    emb = math.log(10000) / (half_dim - 1)  # 计算嵌入因子\n",
    "    emb = torch.exp(torch.arange(half_dim, device=timesteps.device) * -emb)  # 计算指数项\n",
    "    emb = timesteps[:, None] * emb[None, :]  # 计算时间步与嵌入的乘积\n",
    "    return torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)  # 返回正弦和余弦的拼接结果\n",
    "\n",
    "# Cross Attention\n",
    "class CrossAttention(nn.Module):  # 交叉注意力模块\n",
    "    def __init__(self, latent_dim, context_dim):  # 初始化函数\n",
    "        super().__init__()  # 调用父类初始化\n",
    "        self.query = nn.Linear(latent_dim, latent_dim)  # 查询线性层\n",
    "        self.key = nn.Linear(context_dim, latent_dim)  # 键线性层\n",
    "        self.value = nn.Linear(context_dim, latent_dim)  # 值线性层\n",
    "        self.out = nn.Linear(latent_dim, latent_dim)  # 输出线性层\n",
    "\n",
    "    def forward(self, x, context):  # 前向传播函数\n",
    "        B, C, H, W = x.shape  # 获取输入形状\n",
    "        x_flat = x.view(B, C, H * W).transpose(1, 2)  # [B, HW, C]  # 将特征图展平并转置\n",
    "        q = self.query(x_flat)  # 计算查询向量\n",
    "        k = self.key(context)  # 计算键向量\n",
    "        v = self.value(context)  # 计算值向量\n",
    "        \n",
    "        # 修复维度不匹配问题\n",
    "        # 确保q和k的最后一个维度匹配以便进行矩阵乘法\n",
    "        attn = torch.matmul(q, k.transpose(-2, -1)) / (C ** 0.5)  # [B, HW, T] 计算注意力分数 # 计算注意力权重\n",
    "        \n",
    "        attended = torch.matmul(attn, v)  # 应用注意力权重\n",
    "        out = self.out(attended).transpose(1, 2).view(B, C, H, W)  # 转换回原始形状\n",
    "        return out  # 返回结果\n",
    "\n",
    "# U-Net（包含时间步嵌入、交叉注意力）\n",
    "class UNet(nn.Module):  # U-Net模型\n",
    "    def __init__(self):  # 初始化函数\n",
    "        super().__init__()  # 调用父类初始化\n",
    "        self.down = nn.Sequential(  # 下采样路径\n",
    "            nn.Conv2d(4, 64, 3, padding=1),  # 第一个卷积层\n",
    "            nn.ReLU(),  # ReLU激活函数\n",
    "            nn.Conv2d(64, 128, 3, padding=1),  # 第二个卷积层\n",
    "            nn.ReLU()  # ReLU激活函数\n",
    "        )\n",
    "        self.time_mlp = nn.Sequential(  # 时间步MLP\n",
    "            nn.Linear(128, 128),  # 线性层\n",
    "            nn.ReLU(),  # ReLU激活函数\n",
    "        )\n",
    "        self.cross_attn = CrossAttention(latent_dim=128, context_dim=768)  # 交叉注意力层\n",
    "        self.mid = nn.Sequential(  # 中间层\n",
    "            nn.Conv2d(128, 128, 3, padding=1),  # 卷积层\n",
    "            nn.ReLU()  # ReLU激活函数\n",
    "        )\n",
    "        self.up = nn.Sequential(  # 上采样路径\n",
    "            nn.ConvTranspose2d(128, 64, 3, padding=1),  # 第一个转置卷积层\n",
    "            nn.ReLU(),  # ReLU激活函数\n",
    "            nn.ConvTranspose2d(64, 4, 3, padding=1)  # 第二个转置卷积层\n",
    "        )\n",
    "\n",
    "    def forward(self, x, text_emb, timestep):  # 前向传播函数\n",
    "        x = self.down(x)  # 下采样\n",
    "        B, C, H, W = x.shape  # 获取形状\n",
    "        t_emb = get_timestep_embedding(timestep, C).view(B, C, 1, 1)  # 获取时间步嵌入并调整形状\n",
    "        x = x + self.time_mlp(t_emb.squeeze(-1).squeeze(-1)).view(B, C, 1, 1)  # 添加时间信息\n",
    "        x = self.cross_attn(x, text_emb)  # 应用交叉注意力\n",
    "        x = self.mid(x)  # 中间处理\n",
    "        x = self.up(x)  # 上采样\n",
    "        return x  # 返回结果\n",
    "\n",
    "# 解码器\n",
    "class Decoder(nn.Module):  # 解码器模型\n",
    "    def __init__(self):  # 初始化函数\n",
    "        super().__init__()  # 调用父类初始化\n",
    "        self.net = nn.Sequential(  # 网络序列\n",
    "            nn.Conv2d(4, 64, 3, padding=1),  # 第一个卷积层\n",
    "            nn.ReLU(),  # ReLU激活函数\n",
    "            nn.Conv2d(64, 3, 3, padding=1),  # 第二个卷积层\n",
    "            nn.Tanh()  # Tanh激活函数，输出范围[-1,1]\n",
    "        )\n",
    "\n",
    "    def forward(self, x):  # 前向传播函数\n",
    "        return self.net(x)  # 返回网络输出\n",
    "\n",
    "# DDIM调度器（简化）\n",
    "class DDIMScheduler:  # DDIM调度器类\n",
    "    def __init__(self, steps=10, beta_start=0.0001, beta_end=0.02):  # 初始化函数\n",
    "        self.steps = steps  # 步数\n",
    "        # 生成beta序列：从beta_start到beta_end的线性插值，表示每一步的噪声方差,shape是一维tensor\n",
    "        self.betas = torch.linspace(beta_start, beta_end, steps)  \n",
    "        # 计算alpha序列：α_t = 1-β_t，表示保留原始信号的比例\n",
    "        self.alphas = 1.0 - self.betas  \n",
    "        # 计算累积乘积：α̅_t = ∏_{i=1}^t α_i，表示从0到t时刻信号保留的总比例，下面有单独cumprod接口示例\n",
    "        self.alpha_bars = torch.cumprod(self.alphas, dim=0)  \n",
    "\n",
    "    def step(self, x, noise_pred, t):  # 单步去噪函数\n",
    "        # 获取当前时间步t的αt值\n",
    "        alpha_bar = self.alpha_bars[t]  \n",
    "        # 获取前一时间步t-1的α{t-1}值，防止t=0时越界\n",
    "        alpha_bar_prev = self.alpha_bars[max(t - 1, 0)]  \n",
    "        \n",
    "        # 预测原始图像x_0：这部分公式详见课件\n",
    "        # 其中noise_pred是模型预测的噪声ε\n",
    "        pred_x0 = (x - (1 - alpha_bar).sqrt() * noise_pred) / alpha_bar.sqrt()  \n",
    "        \n",
    "        # 计算方向项：\n",
    "        # DDIM采样公式中的噪声方向项\n",
    "        # 这部分决定了从t-1到t的噪声变化方向\n",
    "        dir_xt = (1 - alpha_bar_prev).sqrt() * noise_pred  \n",
    "        \n",
    "        # 计算前一时间步t-1的图像：\n",
    "        # 这是DDIM的核心采样公式，与DDPM不同，它是确定性的（没有添加随机噪声）\n",
    "        x_prev = alpha_bar_prev.sqrt() * pred_x0 + dir_xt  \n",
    "        \n",
    "        # 返回去噪后的图像x_{t-1}\n",
    "        return x_prev  \n",
    "\n",
    "# 整体流程\n",
    "class StableDiffusionDemo:  # Stable Diffusion演示类\n",
    "    def __init__(self, device=\"cuda\"):  # 初始化函数\n",
    "        self.device = device  # 设备\n",
    "        self.text_encoder = TextEncoder(device)  # 文本编码器\n",
    "        self.unet = UNet().to(device)  # U-Net模型\n",
    "        self.decoder = Decoder().to(device)  # 解码器\n",
    "        self.scheduler = DDIMScheduler()  # DDIM调度器\n",
    "\n",
    "    def generate(self, prompt):  # 图像生成函数\n",
    "        text_embeddings = self.text_encoder.encode(prompt)  # 编码提示文本\n",
    "        # 确保文本嵌入的维度正确\n",
    "        batch_size = text_embeddings.shape[0]  # 获取批次大小\n",
    "        seq_len = text_embeddings.shape[1]  # 获取序列长度\n",
    "        hidden_dim = text_embeddings.shape[2]  # 获取隐藏维度\n",
    "        \n",
    "        # 如果需要，可以调整文本嵌入的维度以匹配交叉注意力的需求\n",
    "        # 确保文本嵌入的维度是 [B, seq_len, 768]\n",
    "        if hidden_dim != 768:  # 如果隐藏维度不是768\n",
    "            # 创建一个投影层来调整维度\n",
    "            projection = nn.Linear(hidden_dim, 768).to(self.device)  # 创建投影层\n",
    "            text_embeddings = projection(text_embeddings)  # 应用投影\n",
    "\n",
    "        latent = torch.randn(1, 4, 64, 64).to(self.device)  # 生成随机潜变量\n",
    "        for i in reversed(range(self.scheduler.steps)):  # 逆向扩散过程\n",
    "            t = torch.full((1,), i, device=self.device, dtype=torch.long)  # 创建时间步张量\n",
    "            noise_pred = self.unet(latent, text_embeddings, t)  # 预测噪声\n",
    "            latent = self.scheduler.step(latent, noise_pred, i)  # 执行去噪步骤,不断进行\n",
    "        image = self.decoder(latent)  # 解码潜变量为图像\n",
    "        return image  # 返回生成的图像\n",
    "\n",
     "# Example run\n",
     "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"  # pick GPU when available\n",
     "generator = StableDiffusionDemo(device)  # build the full pipeline\n",
     "output = generator.generate(\"a futuristic city with flying cars\")  # run reverse diffusion\n",
     "print(\"图像生成完成，输出张量 shape:\", output.shape)  # print the output tensor shape\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8249f374c78d8501",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-28T13:32:54.675248Z",
     "start_time": "2025-05-28T13:32:54.671337Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "原始张量: tensor([0.9000, 0.8000, 0.7000, 0.6000])\n",
      "累积乘积结果: tensor([0.9000, 0.7200, 0.5040, 0.3024])\n"
     ]
    }
   ],
   "source": [
    "#举例展示torch.cumprod功能\n",
    "import torch\n",
    "# 创建一个一维张量\n",
    "tensor = torch.tensor([0.9, 0.8, 0.7, 0.6])\n",
    "# 计算累积乘积\n",
    "cumprod_result = torch.cumprod(tensor, dim=0)\n",
    "print(\"原始张量:\", tensor)  # 打印原始张量\n",
    "print(\"累积乘积结果:\", cumprod_result)  # 打印累积乘积结果"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
