{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 手写 CLIP\n",
   "id": "9a98d5f456bb0f8c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:13:40.643047Z",
     "start_time": "2025-08-22T05:13:40.612988Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from PIL import Image\n",
    "\n",
    "# Toy training set: (caption, grayscale PIL image) pairs.\n",
    "# NOTE(review): assumes 数字5.png / 数字0.png exist in the working directory.\n",
    "data = [\n",
    "    (\"数字5\", Image.open(\"数字5.png\").convert(\"L\")),\n",
    "    (\"数字0\", Image.open(\"数字0.png\").convert(\"L\")),\n",
    "]"
   ],
   "id": "7a71256164b8860b",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:13:40.694488Z",
     "start_time": "2025-08-22T05:13:40.692375Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build the character vocabulary and both lookup tables.\n",
    "vocab = [\"数\", \"字\", \"5\", \"0\"]\n",
    "idx2word = dict(enumerate(vocab))\n",
    "word2idx = {word: idx for idx, word in idx2word.items()}"
   ],
   "id": "87bd726b7299389c",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:24:28.594009Z",
     "start_time": "2025-08-22T05:24:28.584743Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "from torchvision.transforms import transforms\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# Resize every image to 28x28 and convert it to a [0, 1] float tensor.\n",
    "transform = transforms.Compose([\n",
    "    transforms.Resize((28, 28)),\n",
    "    transforms.ToTensor(),\n",
    "])\n",
    "\n",
    "class ZhouyuClipDataset(Dataset):\n",
    "    \"\"\"Yields (caption token ids, image tensor) pairs for contrastive training.\"\"\"\n",
    "\n",
    "    def __init__(self, data, word2idx, max_len=5, transform=transform):\n",
    "        self.data = data\n",
    "        self.word2idx = word2idx\n",
    "        self.max_len = max_len  # NOTE(review): currently unused; captions are neither padded nor truncated\n",
    "        # Stored on the instance so __getitem__ no longer depends on the\n",
    "        # module-level global (hidden notebook state); default keeps behavior.\n",
    "        self.transform = transform\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        text, image = self.data[idx]\n",
    "\n",
    "        # Character-level tokenization: every caption char must be in the vocab,\n",
    "        # otherwise this raises KeyError.\n",
    "        text_tokens = [self.word2idx[word] for word in text]\n",
    "        text_tensor = torch.tensor(text_tokens)\n",
    "\n",
    "        image_tensor = self.transform(image)\n",
    "\n",
    "        return text_tensor, image_tensor\n",
    "\n",
    "dataset = ZhouyuClipDataset(data, word2idx)\n",
    "# batch_size=2 works only because both captions have equal length; variable-\n",
    "# length captions would need a collate_fn that pads.\n",
    "dataloader = DataLoader(dataset, batch_size=2, shuffle=True)\n",
    "\n",
    "for text_tensor, image_tensor in dataloader:\n",
    "    print(text_tensor.shape)\n",
    "    print(image_tensor.shape)\n",
    "    break"
   ],
   "id": "c67d18f80b665f0",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 3])\n",
      "torch.Size([2, 1, 28, 28])\n"
     ]
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:28:44.677073Z",
     "start_time": "2025-08-22T05:28:44.670691Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch import nn\n",
    "\n",
    "class TextEncoder(nn.Module):\n",
    "    \"\"\"Encode a batch of token-id sequences into one vector per sequence.\n",
    "\n",
    "    Token + learned position embeddings -> TransformerEncoder -> mean pool.\n",
    "    Input:  LongTensor [batch_size, seq_len] (seq_len <= max_len).\n",
    "    Output: FloatTensor [batch_size, embed_dim].\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, embed_dim=64, max_len=5, nhead=2, num_layers=1):\n",
    "        super().__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.token_embedding = nn.Embedding(vocab_size, embed_dim)\n",
    "        self.position_embedding = nn.Embedding(max_len, embed_dim)\n",
    "\n",
    "        encoder_layer = nn.TransformerEncoderLayer(\n",
    "            d_model=embed_dim,\n",
    "            nhead=nhead,\n",
    "            dim_feedforward=2048,\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        batch_size, seq_len = x.shape\n",
    "\n",
    "        token_embeddings = self.token_embedding(x)\n",
    "\n",
    "        # Build position ids on the input's device so this still works after\n",
    "        # model.to(\"cuda\"); expand() broadcasts without copying (repeat copies).\n",
    "        pos_ids = torch.arange(seq_len, device=x.device).unsqueeze(0).expand(batch_size, -1)\n",
    "        pos_embeddings = self.position_embedding(pos_ids)\n",
    "\n",
    "        embedding = token_embeddings + pos_embeddings\n",
    "        x = self.transformer(embedding)\n",
    "        # Mean-pool over the sequence dimension -> one vector per sequence.\n",
    "        x = x.mean(dim=1)\n",
    "        return x"
   ],
   "id": "fcfc570d91b9bba0",
   "outputs": [],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:30:11.597654Z",
     "start_time": "2025-08-22T05:29:02.856035Z"
    }
   },
   "cell_type": "code",
   "source": [
    "text_encoder = TextEncoder(vocab_size=len(vocab))\n",
    "result = text_encoder(torch.tensor([word2idx[word] for word in '数字5']).unsqueeze(0))\n",
    "print(result.shape)"
   ],
   "id": "ee315c50e62758bb",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 64])\n"
     ]
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:31:03.478579Z",
     "start_time": "2025-08-22T05:31:03.469577Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "\n",
    "# Author: Zhouyu (WeChat: it_zhouyu)\n",
    "\n",
    "class ImageEncoder(nn.Module):\n",
    "    \"\"\"ViT-style image encoder: patchify -> embed -> Transformer -> CLS vector.\n",
    "\n",
    "    Input:  FloatTensor [batch_size, in_channels, image_size, image_size].\n",
    "    Output: FloatTensor [batch_size, embed_dim].\n",
    "    `num_classes` is kept in the signature for compatibility but unused\n",
    "    (the classification head was removed for CLIP-style training).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, image_size=28, patch_size=7, in_channels=1, num_classes=10,\n",
    "                 embed_dim=64, num_layers=4, num_heads=8, dim_feedforward=2048):\n",
    "        super().__init__()\n",
    "        assert image_size % patch_size == 0, \"Image size must be divisible by patch size\"\n",
    "\n",
    "        self.image_size = image_size\n",
    "        self.patch_size = patch_size\n",
    "        self.embed_dim = embed_dim\n",
    "        self.num_patches = (image_size // patch_size) ** 2\n",
    "\n",
    "        # Conv that splits the image into num_patches feature maps of size\n",
    "        # patch_size x patch_size. kernel/stride were hardcoded to 4 (only\n",
    "        # valid for image_size=28, patch_size=7); derive them from the config\n",
    "        # instead — identical for the defaults, consistent for other sizes.\n",
    "        grid = image_size // patch_size\n",
    "        self.patch_conv = nn.Conv2d(in_channels, self.num_patches, kernel_size=grid, stride=grid)\n",
    "        # Each conv output map flattens to patch_size * patch_size values\n",
    "        # (the conv already consumed in_channels; 7 * 7 = 49 for the defaults).\n",
    "        self.patch_embed = nn.Linear(patch_size * patch_size, embed_dim)\n",
    "        # Must be an nn.Parameter: a plain tensor is never trained, is missing\n",
    "        # from state_dict, and does not follow model.to(device).\n",
    "        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n",
    "        self.pos_embed = nn.Embedding(self.num_patches + 1, embed_dim)\n",
    "\n",
    "        # TransformerEncoder over the CLS + patch sequence.\n",
    "        encoder_layer = nn.TransformerEncoderLayer(\n",
    "            d_model=embed_dim,\n",
    "            nhead=num_heads,\n",
    "            dim_feedforward=dim_feedforward,\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "\n",
    "        # Patchify: [B, C, 28, 28] -> [B, 16, 7, 7] with the defaults.\n",
    "        x = self.patch_conv(x)\n",
    "        x = x.view(batch_size, self.num_patches, -1)  # -> [B, 16, 49]\n",
    "\n",
    "        # Project every flattened patch to the embedding dimension.\n",
    "        x = self.patch_embed(x)  # -> [B, 16, 64]\n",
    "\n",
    "        # Prepend the learned CLS token to every sequence.\n",
    "        cls_tokens = self.cls_token.expand(batch_size, -1, -1)\n",
    "        x = torch.cat((cls_tokens, x), dim=1)  # -> [B, 17, 64]\n",
    "\n",
    "        # Learned position embeddings, built on the input's device so the\n",
    "        # module also works after model.to(\"cuda\").\n",
    "        seq_len = x.shape[1]\n",
    "        pos_ids = torch.arange(seq_len, device=x.device).unsqueeze(0).expand(batch_size, -1)\n",
    "        x = x + self.pos_embed(pos_ids)\n",
    "\n",
    "        # Contextualize with the Transformer encoder.\n",
    "        x = self.encoder(x)  # [B, 17, 64]\n",
    "\n",
    "        # The CLS position summarizes the whole image.\n",
    "        return x[:, 0]"
   ],
   "id": "341d82014dfc908e",
   "outputs": [],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:31:15.358972Z",
     "start_time": "2025-08-22T05:31:15.346495Z"
    }
   },
   "cell_type": "code",
   "source": [
    "image_encoder = ImageEncoder()\n",
    "result = image_encoder(transform(Image.open(\"数字5.png\").convert(\"L\")).unsqueeze(0))\n",
    "print(result.shape)"
   ],
   "id": "1727ab775d6064b5",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 64])\n"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:13:42.042646Z",
     "start_time": "2025-08-22T05:13:42.040040Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ZhouyuCLIP(nn.Module):\n",
    "    \"\"\"Minimal CLIP: one text encoder and one image encoder sharing a 64-d space.\"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size):\n",
    "        super().__init__()\n",
    "        self.image_encoder = ImageEncoder(embed_dim=64)\n",
    "        self.text_encoder = TextEncoder(vocab_size, embed_dim=64)\n",
    "\n",
    "    def forward(self, texts, images):\n",
    "        # Encode each modality independently; returns (text_features, image_features).\n",
    "        return self.text_encoder(texts), self.image_encoder(images)"
   ],
   "id": "80a730299ec43a6a",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:45:43.429215Z",
     "start_time": "2025-08-22T05:45:43.425622Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Symmetric contrastive loss over the image-text similarity matrix.\n",
    "def contrastive_loss(text_features, image_features):\n",
    "    \"\"\"Cross-entropy in both directions over pairwise similarities.\n",
    "\n",
    "    Row i of `logits` holds image i's similarity to every text; the matching\n",
    "    pair sits on the diagonal, so the target class for row i is i.\n",
    "    NOTE(review): full CLIP also L2-normalizes the features and divides by a\n",
    "    learnable temperature; omitted here to keep the demo minimal.\n",
    "    \"\"\"\n",
    "    logits = image_features @ text_features.T\n",
    "    # Build targets on the same device as the logits so this also runs on GPU.\n",
    "    labels = torch.arange(logits.size(0), device=logits.device)\n",
    "    loss_i2t = nn.CrossEntropyLoss()(logits, labels)\n",
    "    loss_t2i = nn.CrossEntropyLoss()(logits.T, labels)\n",
    "    return (loss_i2t + loss_t2i) / 2"
   ],
   "id": "ea7064e35faa65eb",
   "outputs": [],
   "execution_count": 32
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## 对比学习\n",
    "假如有两张图片 img1、img2，以及两段文本 text1、text2，我们期望模型学会：img1 和 text1 相似，img2 和 text2 相似。\n",
    "\n",
    "logits 如下（行是图片，列是文本）：\n",
    "\n",
    "|  | text1 | text2 |\n",
    "|---|---|---|\n",
    "| img1 | img1·text1 | img1·text2 |\n",
    "| img2 | img2·text1 | img2·text2 |\n",
    "\n",
    "logits.T 如下（行是文本，列是图片）：\n",
    "\n",
    "|  | img1 | img2 |\n",
    "|---|---|---|\n",
    "| text1 | img1·text1 | img2·text1 |\n",
    "| text2 | img1·text2 | img2·text2 |\n",
    "\n",
    "## 为什么要算两次交叉熵\n",
    "\n",
    "- loss_i2t 的作用是让 ImageEncoder 更聪明\n",
    "- loss_t2i 的作用是让 TextEncoder 更聪明"
   ],
   "id": "2528842bb662e062"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:43:11.590982Z",
     "start_time": "2025-08-22T05:43:11.586127Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Worked example: rows are images, columns are texts; the matching pair\n",
    "# sits on the diagonal, so the target class for row i is i.\n",
    "test_logits = torch.tensor([[0.7, 0.3],\n",
    "                            [0.6, 0.4]])\n",
    "\n",
    "test_labels = torch.tensor([0, 1])\n",
    "\n",
    "# Image -> text direction of the loss.\n",
    "test_loss_i2t = nn.CrossEntropyLoss()(test_logits, test_labels)\n",
    "print(test_loss_i2t)"
   ],
   "id": "9fee58f3c78f19d7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.3991)\n"
     ]
    }
   ],
   "execution_count": 30
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:43:12.927345Z",
     "start_time": "2025-08-22T05:43:12.922488Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Text -> image direction: the transpose swaps rows and columns, and the\n",
    "# diagonal targets stay the same.\n",
    "test_loss_t2i = nn.CrossEntropyLoss()(test_logits.T, test_labels)\n",
    "print(test_loss_t2i)"
   ],
   "id": "6f646c2ac31a8a0d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.3222)\n"
     ]
    }
   ],
   "execution_count": 31
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:47:30.066404Z",
     "start_time": "2025-08-22T05:47:28.753334Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model = ZhouyuCLIP(vocab_size=len(vocab))\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)\n",
    "\n",
    "# Train with the symmetric contrastive loss; report the epoch-mean loss\n",
    "# every 10 epochs.\n",
    "for epoch in range(100):\n",
    "    total_loss = 0\n",
    "    for text_batch, img_batch in dataloader:\n",
    "        optimizer.zero_grad()\n",
    "        # Encode both modalities, pull matching pairs together.\n",
    "        text_features, image_features = model(text_batch, img_batch)\n",
    "        loss = contrastive_loss(text_features, image_features)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        total_loss += loss.item()\n",
    "\n",
    "    if epoch % 10 == 0:\n",
    "        print(f\"Epoch {epoch}, Loss: {total_loss/len(dataloader):.4f}\")"
   ],
   "id": "6aba3fcfd1af67f0",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Loss: 1.0029\n",
      "Epoch 10, Loss: 0.8552\n",
      "Epoch 20, Loss: 0.5965\n",
      "Epoch 30, Loss: 1.3604\n",
      "Epoch 40, Loss: 0.7829\n",
      "Epoch 50, Loss: 0.8465\n",
      "Epoch 60, Loss: 0.7636\n",
      "Epoch 70, Loss: 0.5680\n",
      "Epoch 80, Loss: 1.1473\n",
      "Epoch 90, Loss: 0.6792\n"
     ]
    }
   ],
   "execution_count": 35
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T05:48:22.328797Z",
     "start_time": "2025-08-22T05:48:22.317682Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model.eval()  # disable dropout for deterministic inference\n",
    "# Encode both candidate images.\n",
    "img0 = transform(data[1][1]).unsqueeze(0)  # the \"数字0\" image\n",
    "img5 = transform(data[0][1]).unsqueeze(0)  # the \"数字5\" image\n",
    "\n",
    "with torch.no_grad():\n",
    "    image_features = torch.cat(\n",
    "        [model.image_encoder(img0), model.image_encoder(img5)],\n",
    "        dim=0,\n",
    "    )\n",
    "\n",
    "    # Tokenize the query caption \"数字5\" and rank the images by similarity;\n",
    "    # index 1 (img5) should win after training.\n",
    "    query_text = torch.tensor([[word2idx[\"数\"], word2idx[\"字\"], word2idx[\"5\"]]])\n",
    "    text_feature = model.text_encoder(query_text)\n",
    "    sims = text_feature @ image_features.T\n",
    "    _, idx = sims.max(dim=1)\n",
    "    print(idx)"
   ],
   "id": "d1f74fdc87e3d782",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1])\n"
     ]
    }
   ],
   "execution_count": 38
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
