{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "G:\\TEMP\\TEMP\\ipykernel_16524\\2214043317.py:238: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  clip_model.load_state_dict(torch.load('myCLIP.pth'))\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "myCLIP(\n",
       "  (visual): VisualTransformer(\n",
       "    (conv1): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16), bias=False)\n",
       "    (ln_pre): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "    (transformer): Transformer(\n",
       "      (resblocks): Sequential(\n",
       "        (0): ResidualAttentionBlock(\n",
       "          (attn): MultiheadAttention(\n",
       "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "          (mlp): Sequential(\n",
       "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
       "            (gelu): QuickGELU()\n",
       "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        )\n",
       "        (1): ResidualAttentionBlock(\n",
       "          (attn): MultiheadAttention(\n",
       "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "          (mlp): Sequential(\n",
       "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
       "            (gelu): QuickGELU()\n",
       "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        )\n",
       "        (2): ResidualAttentionBlock(\n",
       "          (attn): MultiheadAttention(\n",
       "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "          (mlp): Sequential(\n",
       "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
       "            (gelu): QuickGELU()\n",
       "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        )\n",
       "        (3): ResidualAttentionBlock(\n",
       "          (attn): MultiheadAttention(\n",
       "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "          (mlp): Sequential(\n",
       "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
       "            (gelu): QuickGELU()\n",
       "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        )\n",
       "        (4): ResidualAttentionBlock(\n",
       "          (attn): MultiheadAttention(\n",
       "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "          (mlp): Sequential(\n",
       "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
       "            (gelu): QuickGELU()\n",
       "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        )\n",
       "        (5): ResidualAttentionBlock(\n",
       "          (attn): MultiheadAttention(\n",
       "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "          (mlp): Sequential(\n",
       "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
       "            (gelu): QuickGELU()\n",
       "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          )\n",
       "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (ln_post): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from torch import nn\n",
    "import torch\n",
    "from collections import OrderedDict\n",
    "from torch.utils import checkpoint\n",
    "\n",
    "class LayerNorm(nn.LayerNorm):\n",
    "    \"\"\"Subclass torch's LayerNorm to handle fp16.\n",
    "\n",
    "    Casts the input up to float32 before normalizing, then casts the result\n",
    "    back to the caller's dtype, so half-precision inputs keep the numerical\n",
    "    stability of an fp32 normalization.\n",
    "    \"\"\"\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        orig_type = x.dtype  # remember the caller's dtype (e.g. fp16)\n",
    "        ret = super().forward(x.type(torch.float32))  # normalize in fp32\n",
    "        return ret.type(orig_type)  # cast back so downstream dtypes match\n",
    "\n",
    "\n",
    "class QuickGELU(nn.Module):\n",
    "    \"\"\"Sigmoid-based GELU approximation used by CLIP: x * sigmoid(1.702 * x).\"\"\"\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        # 1.702 is the standard QuickGELU constant from the original CLIP code.\n",
    "        return x * torch.sigmoid(1.702 * x)\n",
    "\n",
    "\n",
    "class ResidualAttentionBlock(nn.Module):\n",
    "    \"\"\"Pre-LN transformer block: x + attn(ln_1(x)) followed by x + mlp(ln_2(x)).\n",
    "\n",
    "    Args:\n",
    "        d_model: token embedding width.\n",
    "        n_head: number of attention heads.\n",
    "        attn_mask: optional additive attention mask, moved to the input's\n",
    "            dtype/device on every call.\n",
    "        use_flash_attention: selects the batch-first attention code path below.\n",
    "    \"\"\"\n",
    "    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "\n",
    "        self.attn = nn.MultiheadAttention(d_model, n_head)\n",
    "        self.ln_1 = LayerNorm(d_model)\n",
    "        # 4x expansion MLP, matching the standard transformer FFN ratio.\n",
    "        self.mlp = nn.Sequential(OrderedDict([\n",
    "            (\"c_fc\", nn.Linear(d_model, d_model * 4)),\n",
    "            (\"gelu\", QuickGELU()),\n",
    "            (\"c_proj\", nn.Linear(d_model * 4, d_model))\n",
    "        ]))\n",
    "        self.ln_2 = LayerNorm(d_model)\n",
    "        self.attn_mask = attn_mask\n",
    "        self.use_flash_attention = use_flash_attention\n",
    "\n",
    "    def attention(self, x: torch.Tensor):\n",
    "        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None\n",
    "        if self.use_flash_attention:\n",
    "            # Batch first is needed for FlashAttention. See https://github.com/HazyResearch/flash-attention/issues/84 for more information.\n",
    "            # NOTE(review): this branch passes a SINGLE tensor to self.attn and\n",
    "            # ignores attn_mask; with nn.MultiheadAttention (as constructed above)\n",
    "            # it would fail -- presumably self.attn is swapped for a flash module\n",
    "            # elsewhere when use_flash_attention=True. TODO confirm.\n",
    "            return self.attn(x.transpose(1, 0))[0].transpose(1, 0)\n",
    "        else:\n",
    "            return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        # Two residual sub-layers with pre-normalization.\n",
    "        x = x + self.attention(self.ln_1(x))\n",
    "        x = x + self.mlp(self.ln_2(x))\n",
    "        return x\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Stack of `layers` ResidualAttentionBlocks operating on LND-shaped input.\n",
    "\n",
    "    Set `grad_checkpointing = True` (via VisualTransformer.set_grad_checkpointing)\n",
    "    to trade compute for memory during training.\n",
    "    \"\"\"\n",
    "    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.width = width\n",
    "        self.layers = layers\n",
    "        self.grad_checkpointing = False\n",
    "        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask, use_flash_attention) for _ in range(layers)])\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        if self.grad_checkpointing and not torch.jit.is_scripting():\n",
    "            for r in self.resblocks:\n",
    "                # BUGFIX: `checkpoint` here is the torch.utils.checkpoint MODULE\n",
    "                # (imported via `from torch.utils import checkpoint`), so calling\n",
    "                # it directly raised \"TypeError: 'module' object is not callable\".\n",
    "                # Call the checkpoint() function inside the module instead.\n",
    "                x = checkpoint.checkpoint(r, x)\n",
    "            return x\n",
    "        return self.resblocks(x)\n",
    "\n",
    "\n",
    "class VisualTransformer(nn.Module):\n",
    "    \"\"\"CLIP-style ViT image encoder.\n",
    "\n",
    "    Pipeline: patchify with a strided conv -> prepend a learned CLS token ->\n",
    "    add positional embeddings -> (optional MAE-style token masking) ->\n",
    "    transformer -> take the CLS token -> project to `output_dim`.\n",
    "    \"\"\"\n",
    "    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.dtype = torch.float32 # without this line an error is raised -- dtype is presumably read by external code; TODO confirm where\n",
    "        self.input_resolution = input_resolution\n",
    "        self.grid_size = (self.input_resolution // patch_size, self.input_resolution // patch_size)\n",
    "        self.output_dim = output_dim\n",
    "        # Non-overlapping patch embedding: each patch_size x patch_size patch -> width dims.\n",
    "        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n",
    "\n",
    "        scale = width ** -0.5\n",
    "        self.class_embedding = nn.Parameter(scale * torch.randn(width))\n",
    "        # +1 position accounts for the prepended class token.\n",
    "        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n",
    "        self.ln_pre = LayerNorm(width)\n",
    "\n",
    "        self.transformer = Transformer(width, layers, heads, use_flash_attention=use_flash_attention)\n",
    "\n",
    "        self.ln_post = LayerNorm(width)\n",
    "        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n",
    "\n",
    "    @torch.jit.ignore\n",
    "    def set_grad_checkpointing(self, enable=True):\n",
    "        # Toggle activation checkpointing inside the transformer stack.\n",
    "        self.transformer.grad_checkpointing = enable\n",
    "\n",
    "    def random_masking(self, x, mask_ratio):\n",
    "        \"\"\"Randomly drop a fraction of patch tokens, always keeping the CLS token.\"\"\"\n",
    "        N, L, D = x.shape  # batch, length, dim\n",
    "        len_keep = int((L - 1) * (1 - mask_ratio))\n",
    "\n",
    "        noise = torch.rand(N, L - 1, device=x.device)\n",
    "        # +1 shifts the shuffled indices past position 0, so the CLS token\n",
    "        # can never be selected for gathering below.\n",
    "        ids_shuffle = torch.argsort(noise, dim=1) + torch.ones(N, L - 1, device=x.device,\n",
    "                                                               dtype=int)\n",
    "        ids_keep = ids_shuffle[:, :len_keep]\n",
    "\n",
    "        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))\n",
    "\n",
    "        # Re-attach the CLS token at the front of the kept tokens.\n",
    "        x0 = x[:, 0, :]\n",
    "        x0 = x0.reshape(N, 1, D)\n",
    "        x_masked_add = torch.cat([x0, x_masked], axis=1)\n",
    "        return x_masked_add\n",
    "\n",
    "    def forward(self, x: torch.Tensor, mask_ratio: float = 0.0):\n",
    "        x = self.conv1(x)  # shape = [*, width, grid, grid]\n",
    "        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]\n",
    "        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]\n",
    "        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]\n",
    "        x = x + self.positional_embedding.to(x.dtype)\n",
    "        if mask_ratio != 0:\n",
    "            x = self.random_masking(x, mask_ratio)\n",
    "        x = self.ln_pre(x)\n",
    "\n",
    "        x = x.permute(1, 0, 2)  # NLD -> LND\n",
    "        x = self.transformer(x)\n",
    "        x = x.permute(1, 0, 2)  # LND -> NLD\n",
    "\n",
    "        # Keep only the CLS token as the image representation.\n",
    "        x = self.ln_post(x[:, 0, :])\n",
    "\n",
    "        if self.proj is not None:\n",
    "            x = x @ self.proj  # project width -> output_dim embedding space\n",
    "\n",
    "        return x\n",
    "    \n",
    "from torch import nn\n",
    "import numpy as np\n",
    "from typing import Union\n",
    "from typing import Tuple\n",
    "def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:\n",
    "    \"\"\"Cross-entropy over a square similarity matrix; the i-th row's target is index i.\"\"\"\n",
    "    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))\n",
    "\n",
    "\n",
    "def clip_loss(similarity: torch.Tensor) -> torch.Tensor:\n",
    "    \"\"\"Symmetric CLIP loss: mean of text->image and image->text contrastive losses.\"\"\"\n",
    "    caption_loss = contrastive_loss(similarity)\n",
    "    image_loss = contrastive_loss(similarity.t())  # transpose swaps the matching direction\n",
    "    return (caption_loss + image_loss) / 2.0\n",
    "\n",
    "class myCLIP(nn.Module):\n",
    "    \"\"\"CLIP model pairing the VisualTransformer image tower with pre-computed\n",
    "    text features projected through a learned linear map.\n",
    "\n",
    "    Unlike the original CLIP, the text encoder lives outside this module: the\n",
    "    caller passes `text_features` (e.g. BERT CLS embeddings) and this class\n",
    "    only applies `text_projection` to them.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "        embed_dim: int,\n",
    "        # text\n",
    "        text_hidden_size: int, # hidden size of the text encoder's final layer\n",
    "        # vision\n",
    "        image_resolution: int,\n",
    "        vision_layers: Union[Tuple[int, int, int, int], int],\n",
    "        vision_width: int,\n",
    "        vision_patch_size: int,\n",
    "        # vision head width, added this param for ViT-H\n",
    "        vision_head_width: int = 64,\n",
    "        ):\n",
    "        super().__init__()\n",
    "        self.dtype = torch.float32 \n",
    "        vision_heads = vision_width // vision_head_width\n",
    "        self.visual = VisualTransformer(\n",
    "                input_resolution=image_resolution,\n",
    "                patch_size=vision_patch_size,\n",
    "                width=vision_width,\n",
    "                layers=vision_layers,\n",
    "                heads=vision_heads,\n",
    "                output_dim=embed_dim,\n",
    "                # use_flash_attention=use_flash_attention\n",
    "            )\n",
    "\n",
    "        # Maps external text-encoder features into the shared embedding space.\n",
    "        self.text_projection = nn.Parameter(torch.empty(text_hidden_size, embed_dim))\n",
    "        \n",
    "        # Learned temperature, initialized to log(1/0.07) as in CLIP.\n",
    "        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n",
    "        \n",
    "        if self.text_projection is not None:\n",
    "            nn.init.normal_(self.text_projection, std=text_hidden_size ** -0.5)\n",
    "\n",
    "    def encode_image(self, image, mask_ratio=0):\n",
    "        # Delegate to the vision tower; mask_ratio enables MAE-style token dropping.\n",
    "        return self.visual(image, mask_ratio)\n",
    "    \n",
    "    def forward(self, image, text_features, mask_ratio=0):\n",
    "        \"\"\"Returns (loss, image_features, text_features, logit_scale.exp()).\"\"\"\n",
    "        assert image is not None or text_features is not None, \"text_features and image cannot both be None!\"\n",
    "        \n",
    "        image_features = self.encode_image(image, mask_ratio)\n",
    "        if self.text_projection is not None:\n",
    "            text_features = text_features @ self.text_projection\n",
    "\n",
    "        # L2-normalize both modalities before the cosine-similarity logits.\n",
    "        image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n",
    "        text_features = text_features / text_features.norm(dim=-1, keepdim=True)\n",
    "        logit_scale = self.logit_scale.exp()\n",
    "        logits_per_text = logit_scale * text_features @ image_features.t()\n",
    "        loss = clip_loss(logits_per_text)\n",
    "        return loss,image_features, text_features, self.logit_scale.exp()\n",
    "    def get_similarity(self, image, text_features):\n",
    "        \"\"\"Inference helper: returns (logits_per_image, logits_per_text) without loss.\"\"\"\n",
    "        image_features = self.encode_image(image)\n",
    "        text_features = text_features @ self.text_projection\n",
    "\n",
    "        # normalized features\n",
    "        image_features = image_features / image_features.norm(dim=1, keepdim=True)\n",
    "        text_features = text_features / text_features.norm(dim=1, keepdim=True)\n",
    "\n",
    "        # cosine similarity as logits\n",
    "        logit_scale = self.logit_scale.exp()\n",
    "        logits_per_image = logit_scale * image_features @ text_features.t()\n",
    "        logits_per_text = logits_per_image.t()\n",
    "\n",
    "        # shape = [global_batch_size, global_batch_size]\n",
    "        return logits_per_image, logits_per_text   \n",
    "    \n",
    "import json\n",
    "# Vision-tower hyper-parameters, kept as a JSON string for easy editing.\n",
    "args=\"\"\"{\n",
    "    \"embed_dim\": 512,\n",
    "    \"image_resolution\": 224,\n",
    "    \"vision_layers\": 6,\n",
    "    \"vision_width\": 768,\n",
    "    \"vision_patch_size\": 16\n",
    "}\"\"\"\n",
    "args=json.loads(args)\n",
    "# 768 = hidden size of bert-base-chinese, consumed by text_projection.\n",
    "embed_dim, text_hidden_size,image_resolution,vision_layers=args[\"embed_dim\"],768,args[\"image_resolution\"],args[\"vision_layers\"]\n",
    "vision_width=args[\"vision_width\"]\n",
    "vision_patch_size=args[\"vision_patch_size\"]\n",
    "vision_head_width=64\n",
    "clip_model=myCLIP(embed_dim=embed_dim,text_hidden_size=text_hidden_size,image_resolution=image_resolution,\n",
    "             vision_layers=vision_layers,vision_width=vision_width,vision_patch_size=vision_patch_size,\n",
    "             vision_head_width=vision_head_width)\n",
    "# weights_only=True restricts unpickling to tensors/containers: safe for a plain\n",
    "# state_dict and silences the torch.load FutureWarning shown in the cell output.\n",
    "clip_model.load_state_dict(torch.load('myCLIP.pth', weights_only=True))\n",
    "\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "import torch\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os\n",
    "from PIL import Image\n",
    "from transformers import ChineseCLIPProcessor\n",
    "# NOTE(review): hardcoded absolute local paths; consider a configurable DATA_DIR.\n",
    "image_root=\"E:/AI/CLIP-Chinese-master/data/images\"\n",
    "with open(\"E:/AI/CLIP-Chinese-master/data/test1.csv\",\"r\",encoding=\"utf-8\") as f:\n",
    "    df=pd.read_csv(f)\n",
    "# Load the BERT text encoder and its tokenizer.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-chinese\")\n",
    "text_model = AutoModel.from_pretrained(\"bert-base-chinese\")\n",
    "\n",
    "images=[]\n",
    "text_embeds=[]\n",
    "for i in range(3):\n",
    "    inputs = tokenizer(df[\"text\"][i], return_tensors='pt', padding=True, truncation=True)\n",
    "    with torch.no_grad():\n",
    "        # Use the [CLS] token of the last hidden layer as the sentence embedding.\n",
    "        text_embed = text_model(**inputs).last_hidden_state[:, 0, :]\n",
    "    text_embeds.append(text_embed)\n",
    "    file=os.path.join(image_root,df[\"filename\"][i])\n",
    "    image = Image.open(file).convert('RGB')\n",
    "    images.append(image)\n",
    "text_embed=torch.concat(text_embeds)\n",
    "processor = ChineseCLIPProcessor.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n",
    "pixel_values=processor(images=images, return_tensors=\"pt\")[\"pixel_values\"]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "G:\\TEMP\\TEMP\\ipykernel_16524\\2214043317.py:109: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  if mask_ratio != 0:\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Serving './demo1.pth' at http://localhost:8080\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "('localhost', 8080)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# import torch\n",
    "# import hiddenlayer as h\n",
    "# from torchvision.models import resnet18  # using resnet18 as an example\n",
    "\n",
    "# myNet = resnet18()  # instantiate resnet18\n",
    "# x = torch.randn(16, 3, 64, 64)  # generate a random input\n",
    "# myNetGraph = h.build_graph(myNet, x)  # build the model graph\n",
    "# # myNetGraph.theme = h.graph.THEMES['blue']  # 'blue' and 'basic' themes available; optional\n",
    "# myNetGraph.save(path='./demoModel.png', format='png')  # save the graph; png, pdf, etc. are supported\n",
    "# Case: the model exists in memory but no trained .pth file has been saved yet.\n",
    "import netron\n",
    "import torch.onnx\n",
    "# from torch.autograd import Variable\n",
    "# from torchvision.models import resnet18  # using resnet18 as an example\n",
    "modelData = \"./demo1.pth\"  # path where the exported model is saved\n",
    "# modelData = \"./demo.onnx\"  # some say this should be a .onnx file, but .pth worked in my test\n",
    "torch.onnx.export(clip_model.visual, (pixel_values), modelData)  # export in ONNX format -- the file content is ONNX despite the .pth extension\n",
    "netron.start(modelData)  # serve/visualize the network structure\n",
    "\n",
    "# #  Case: a saved .pth model file already exists.\n",
    "# import netron\n",
    "\n",
    "# modelData = \"./demo.pth\"  # path where the model data is saved\n",
    "# netron.start(modelData)  # serve/visualize the network structure\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "G:\\TEMP\\TEMP\\ipykernel_16524\\2041455708.py:89: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  visual.load_state_dict(torch.load(\"./visual.pth\"))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x conv1 input torch.Size([2, 3, 224, 224])\n",
      "x conv1 output torch.Size([2, 768, 14, 14])\n",
      "x reshape output torch.Size([2, 768, 196])\n",
      "x permute output torch.Size([2, 196, 768])\n",
      "class_embedding shape torch.Size([768])\n",
      "x cat input torch.Size([2, 196, 768])\n",
      "x cat output torch.Size([2, 197, 768])\n",
      " positional_embedding shape torch.Size([197, 768])\n",
      "x LayerNorm pre input torch.Size([2, 197, 768])\n",
      "x LayerNorm pre output torch.Size([2, 197, 768])\n",
      "x transformer input torch.Size([197, 2, 768])\n",
      "x transformer output torch.Size([197, 2, 768])\n",
      "x LayerNorm post input torch.Size([2, 197, 768])\n",
      "x LayerNorm post output torch.Size([2, 768])\n",
      "proj shape torch.Size([768, 512])\n",
      "x return torch.Size([2, 512])\n",
      "torch.Size([2, 512])\n"
     ]
    }
   ],
   "source": [
    "class VisualTransformer1(nn.Module):\n",
    "    \"\"\"Debug copy of VisualTransformer that prints every intermediate tensor\n",
    "    shape in forward(); the module structure and math are otherwise identical,\n",
    "    so weights saved from VisualTransformer load directly into this class.\n",
    "    \"\"\"\n",
    "    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.dtype = torch.float32 # without this line an error is raised -- dtype is presumably read by external code; TODO confirm where\n",
    "        self.input_resolution = input_resolution\n",
    "        self.grid_size = (self.input_resolution // patch_size, self.input_resolution // patch_size)\n",
    "        self.output_dim = output_dim\n",
    "        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n",
    "\n",
    "        scale = width ** -0.5\n",
    "        self.class_embedding = nn.Parameter(scale * torch.randn(width))\n",
    "        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n",
    "        self.ln_pre = LayerNorm(width)\n",
    "\n",
    "        self.transformer = Transformer(width, layers, heads, use_flash_attention=use_flash_attention)\n",
    "\n",
    "        self.ln_post = LayerNorm(width)\n",
    "        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n",
    "\n",
    "    @torch.jit.ignore\n",
    "    def set_grad_checkpointing(self, enable=True):\n",
    "        self.transformer.grad_checkpointing = enable\n",
    "\n",
    "    def random_masking(self, x, mask_ratio):\n",
    "        \"\"\"Randomly drop a fraction of patch tokens, always keeping the CLS token.\"\"\"\n",
    "        N, L, D = x.shape  # batch, length, dim\n",
    "        len_keep = int((L - 1) * (1 - mask_ratio))\n",
    "\n",
    "        noise = torch.rand(N, L - 1, device=x.device)\n",
    "        # +1 shifts indices past position 0 so the CLS token is never gathered.\n",
    "        ids_shuffle = torch.argsort(noise, dim=1) + torch.ones(N, L - 1, device=x.device,\n",
    "                                                               dtype=int)\n",
    "        ids_keep = ids_shuffle[:, :len_keep]\n",
    "\n",
    "        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))\n",
    "\n",
    "        x0 = x[:, 0, :]\n",
    "        x0 = x0.reshape(N, 1, D)\n",
    "        x_masked_add = torch.cat([x0, x_masked], axis=1)\n",
    "        return x_masked_add\n",
    "\n",
    "    def forward(self, x: torch.Tensor, mask_ratio: float = 0.0):\n",
    "        # Same pipeline as VisualTransformer.forward, instrumented with prints.\n",
    "        print('x conv1 input',x.shape)\n",
    "        x = self.conv1(x)  # shape = [*, width, grid, grid]\n",
    "        print('x conv1 output',x.shape)\n",
    "        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]\n",
    "        print('x reshape output',x.shape)\n",
    "        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]\n",
    "        print('x permute output',x.shape)\n",
    "        print('class_embedding shape',self.class_embedding.shape)\n",
    "        print('x cat input',x.shape)\n",
    "        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]\n",
    "        print('x cat output',x.shape)\n",
    "        print(' positional_embedding shape',self.positional_embedding.shape)\n",
    "        x = x + self.positional_embedding.to(x.dtype)\n",
    "        if mask_ratio != 0:\n",
    "            x = self.random_masking(x, mask_ratio)\n",
    "        print('x LayerNorm pre input',x.shape)\n",
    "        x = self.ln_pre(x)\n",
    "        print('x LayerNorm pre output',x.shape)\n",
    "        x = x.permute(1, 0, 2)  # NLD -> LND\n",
    "        print('x transformer input',x.shape)\n",
    "        x = self.transformer(x)\n",
    "        print('x transformer output',x.shape)\n",
    "        x = x.permute(1, 0, 2)  # LND -> NLD\n",
    "        print('x LayerNorm post input',x.shape)\n",
    "        x = self.ln_post(x[:, 0, :])\n",
    "        print('x LayerNorm post output',x.shape)\n",
    "        if self.proj is not None:\n",
    "            print('proj shape',self.proj.shape)\n",
    "            x = x @ self.proj\n",
    "        print('x return',x.shape)\n",
    "        return x\n",
    "    \n",
    "\n",
    "\n",
    "# Save the weights of the visual sub-model.\n",
    "visual=clip_model.visual\n",
    "torch.save(visual.state_dict(), \"./visual.pth\")\n",
    "# Rebuild the visual model (debug variant) and load the weights back.\n",
    "vision_heads = vision_width // vision_head_width\n",
    "visual = VisualTransformer1(\n",
    "                input_resolution=image_resolution,\n",
    "                patch_size=vision_patch_size,\n",
    "                width=vision_width,\n",
    "                layers=vision_layers,\n",
    "                heads=vision_heads,\n",
    "                output_dim=embed_dim,\n",
    "                # use_flash_attention=use_flash_attention\n",
    "            )\n",
    "# weights_only=True is safe for a plain state_dict and silences the\n",
    "# torch.load FutureWarning shown in the cell output.\n",
    "visual.load_state_dict(torch.load(\"./visual.pth\", weights_only=True))\n",
    "\n",
    "print(visual(pixel_values[:2]).shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "import torch\n",
    "from collections import OrderedDict\n",
    "from torch.utils import checkpoint\n",
    "\n",
    "class LayerNorm(nn.LayerNorm):\n",
    "    \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n",
    "\n",
    "    # NOTE(review): re-definition of the identical class from the first cell;\n",
    "    # consider keeping a single definition to avoid silent divergence.\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        orig_type = x.dtype  # remember caller dtype\n",
    "        ret = super().forward(x.type(torch.float32))  # normalize in fp32\n",
    "        return ret.type(orig_type)\n",
    "\n",
    "\n",
    "class QuickGELU(nn.Module):\n",
    "    \"\"\"Sigmoid-based GELU approximation (re-definition from the first cell).\"\"\"\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        return x * torch.sigmoid(1.702 * x)\n",
    "\n",
    "\n",
    "class ResidualAttentionBlock(nn.Module):\n",
    "    \"\"\"Pre-LN transformer block (re-definition, identical to the first cell).\"\"\"\n",
    "    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "\n",
    "        self.attn = nn.MultiheadAttention(d_model, n_head)\n",
    "        self.ln_1 = LayerNorm(d_model)\n",
    "        # 4x expansion MLP, the standard transformer FFN ratio.\n",
    "        self.mlp = nn.Sequential(OrderedDict([\n",
    "            (\"c_fc\", nn.Linear(d_model, d_model * 4)),\n",
    "            (\"gelu\", QuickGELU()),\n",
    "            (\"c_proj\", nn.Linear(d_model * 4, d_model))\n",
    "        ]))\n",
    "        self.ln_2 = LayerNorm(d_model)\n",
    "        self.attn_mask = attn_mask\n",
    "        self.use_flash_attention = use_flash_attention\n",
    "\n",
    "    def attention(self, x: torch.Tensor):\n",
    "        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None\n",
    "        if self.use_flash_attention:\n",
    "            # Batch first is needed for FlashAttention. See https://github.com/HazyResearch/flash-attention/issues/84 for more information.\n",
    "            # NOTE(review): single-argument call ignores attn_mask and would fail\n",
    "            # with nn.MultiheadAttention as constructed above -- TODO confirm.\n",
    "            return self.attn(x.transpose(1, 0))[0].transpose(1, 0)\n",
    "        else:\n",
    "            return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        x = x + self.attention(self.ln_1(x))\n",
    "        x = x + self.mlp(self.ln_2(x))\n",
    "        return x\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Stack of ResidualAttentionBlocks (re-definition from the first cell).\"\"\"\n",
    "    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.width = width\n",
    "        self.layers = layers\n",
    "        self.grad_checkpointing = False\n",
    "        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask, use_flash_attention) for _ in range(layers)])\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        if self.grad_checkpointing and not torch.jit.is_scripting():\n",
    "            for r in self.resblocks:\n",
    "                # BUGFIX: `checkpoint` is the torch.utils.checkpoint MODULE (from\n",
    "                # `from torch.utils import checkpoint`), so calling it directly\n",
    "                # raised TypeError; call the function inside the module.\n",
    "                x = checkpoint.checkpoint(r, x)\n",
    "            return x\n",
    "        return self.resblocks(x)\n",
    "\n",
    "\n",
    "class VisualTransformer(nn.Module):\n",
    "    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.input_resolution = input_resolution\n",
    "        self.grid_size = (self.input_resolution // patch_size, self.input_resolution // patch_size)\n",
    "        self.output_dim = output_dim\n",
    "        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n",
    "\n",
    "        scale = width ** -0.5\n",
    "        self.class_embedding = nn.Parameter(scale * torch.randn(width))\n",
    "        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n",
    "        self.ln_pre = LayerNorm(width)\n",
    "\n",
    "        self.transformer = Transformer(width, layers, heads, use_flash_attention=use_flash_attention)\n",
    "\n",
    "        self.ln_post = LayerNorm(width)\n",
    "        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n",
    "\n",
    "    @torch.jit.ignore\n",
    "    def set_grad_checkpointing(self, enable=True):\n",
    "        self.transformer.grad_checkpointing = enable\n",
    "\n",
    "    def random_masking(self, x, mask_ratio):\n",
    "        N, L, D = x.shape  # batch, length, dim\n",
    "        len_keep = int((L - 1) * (1 - mask_ratio))\n",
    "\n",
    "        noise = torch.rand(N, L - 1, device=x.device)\n",
    "        # +1 shifts shuffled patch indices past the class token at position 0\n",
    "        ids_shuffle = torch.argsort(noise, dim=1) + 1\n",
    "        ids_keep = ids_shuffle[:, :len_keep]\n",
    "\n",
    "        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))\n",
    "\n",
    "        x0 = x[:, 0, :]\n",
    "        x0 = x0.reshape(N, 1, D)\n",
    "        x_masked_add = torch.cat([x0, x_masked], dim=1)\n",
    "        return x_masked_add\n",
    "\n",
    "    def forward(self, x: torch.Tensor, mask_ratio: float = 0.0):\n",
    "        x = self.conv1(x)  # shape = [*, width, grid, grid]\n",
    "        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]\n",
    "        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]\n",
    "        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]\n",
    "        x = x + self.positional_embedding.to(x.dtype)\n",
    "        if mask_ratio != 0:\n",
    "            x = self.random_masking(x, mask_ratio)\n",
    "        x = self.ln_pre(x)\n",
    "\n",
    "        x = x.permute(1, 0, 2)  # NLD -> LND\n",
    "        x = self.transformer(x)\n",
    "        x = x.permute(1, 0, 2)  # LND -> NLD\n",
    "\n",
    "        x = self.ln_post(x[:, 0, :])\n",
    "\n",
    "        if self.proj is not None:\n",
    "            x = x @ self.proj\n",
    "\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'clip_model' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 7\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[38;5;66;03m# myNet = resnet18()  # 实例化 resnet18\u001b[39;00m\n\u001b[0;32m      6\u001b[0m x \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mrandn(\u001b[38;5;241m16\u001b[39m, \u001b[38;5;241m3\u001b[39m, \u001b[38;5;241m224\u001b[39m, \u001b[38;5;241m224\u001b[39m)  \u001b[38;5;66;03m# 随机生成一个输入\u001b[39;00m\n\u001b[1;32m----> 7\u001b[0m myNetGraph \u001b[38;5;241m=\u001b[39m h\u001b[38;5;241m.\u001b[39mbuild_graph(\u001b[43mclip_model\u001b[49m, (pixel_values, text_embed))  \u001b[38;5;66;03m# 建立网络模型图\u001b[39;00m\n\u001b[0;32m      8\u001b[0m \u001b[38;5;66;03m# myNetGraph.theme = h.graph.THEMES['blue']  # blue 和 basic 两种颜色，可以不要\u001b[39;00m\n\u001b[0;32m      9\u001b[0m myNetGraph\u001b[38;5;241m.\u001b[39msave(path\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m./demoModel.png\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28mformat\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mpng\u001b[39m\u001b[38;5;124m'\u001b[39m)  \u001b[38;5;66;03m# 保存网络模型图，可以设置 png 和 pdf 等\u001b[39;00m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'clip_model' is not defined"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import hiddenlayer as h\n",
    "from torchvision.models import resnet18  # 以 resnet18 为例\n",
    "\n",
    "# myNet = resnet18()  # instantiate resnet18\n",
    "x = torch.randn(16, 3, 224, 224)  # generate a random input\n",
    "# NOTE: clip_model, pixel_values and text_embed must be defined by earlier cells,\n",
    "# otherwise this raises NameError (as in the recorded output above)\n",
    "myNetGraph = h.build_graph(clip_model, (pixel_values, text_embed))  # build the model graph\n",
    "# myNetGraph.theme = h.graph.THEMES['blue']  # optional theme: 'blue' or 'basic'\n",
    "myNetGraph.save(path='./demoModel.png', format='png')  # save the graph; supports png, pdf, etc.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "import numpy as np\n",
    "from typing import Union\n",
    "from typing import Tuple\n",
    "def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:\n",
    "    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))\n",
    "\n",
    "\n",
    "def clip_loss(similarity: torch.Tensor) -> torch.Tensor:\n",
    "    caption_loss = contrastive_loss(similarity)\n",
    "    image_loss = contrastive_loss(similarity.t())\n",
    "    return (caption_loss + image_loss) / 2.0\n",
    "\n",
    "class myCLIP(nn.Module):\n",
    "    def __init__(self,\n",
    "        embed_dim: int,\n",
    "        # text\n",
    "        text_hidden_size: int,  # dimension of the text encoder's final hidden layer\n",
    "        # vision\n",
    "        image_resolution: int,\n",
    "        vision_layers: Union[Tuple[int, int, int, int], int],\n",
    "        vision_width: int,\n",
    "        vision_patch_size: int,\n",
    "        # vision head width, added this param for ViT-H\n",
    "        vision_head_width: int = 64,\n",
    "        ):\n",
    "        super().__init__()\n",
    "        vision_heads = vision_width // vision_head_width\n",
    "        self.visual = VisualTransformer(\n",
    "                input_resolution=image_resolution,\n",
    "                patch_size=vision_patch_size,\n",
    "                width=vision_width,\n",
    "                layers=vision_layers,\n",
    "                heads=vision_heads,\n",
    "                output_dim=embed_dim,\n",
    "                # use_flash_attention=use_flash_attention\n",
    "            )\n",
    "        # self.visual = ModifiedResNet(\n",
    "        #         layers=vision_layers,\n",
    "        #         output_dim=embed_dim,\n",
    "        #         heads=vision_heads,\n",
    "        #         input_resolution=image_resolution,\n",
    "        #         width=vision_width\n",
    "        #     )\n",
    "        self.text_projection = nn.Parameter(torch.empty(text_hidden_size, embed_dim))\n",
    "        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n",
    "        \n",
    "        # if isinstance(self.visual, ModifiedResNet):\n",
    "        #     if self.visual.attnpool is not None:\n",
    "        #         std = self.visual.attnpool.c_proj.in_features ** -0.5\n",
    "        #         nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)\n",
    "        #         nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)\n",
    "        #         nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)\n",
    "        #         nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)\n",
    "\n",
    "        #     for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:\n",
    "        #         for name, param in resnet_block.named_parameters():\n",
    "        #             if name.endswith(\"bn3.weight\"):\n",
    "        #                 nn.init.zeros_(param)\n",
    "\n",
    "        if self.text_projection is not None:\n",
    "            nn.init.normal_(self.text_projection, std=text_hidden_size ** -0.5)\n",
    "    @torch.jit.ignore\n",
    "    def set_grad_checkpointing(self, enable=True):\n",
    "        self.visual.set_grad_checkpointing(enable)\n",
    "\n",
    "    def encode_image(self, image, mask_ratio=0):\n",
    "        # if isinstance(self.visual, ModifiedResNet):\n",
    "        #     # mask_ratio > 0 (FLIP strategy) is currently only implemented for VisualTransformer.\n",
    "        #     return self.visual(image.type(self.dtype))\n",
    "        # myCLIP defines no .dtype attribute, so self.dtype would raise AttributeError;\n",
    "        # use the visual stem's weight dtype, as the reference CLIP's dtype property does\n",
    "        return self.visual(image.type(self.visual.conv1.weight.dtype), mask_ratio)\n",
    "    \n",
    "    def forward(self, image, text_features, mask_ratio=0):\n",
    "        # both modalities are required below (encode_image and the text projection use them)\n",
    "        assert image is not None and text_features is not None, \"text_features and image must both be provided!\"\n",
    "        \n",
    "        image_features = self.encode_image(image, mask_ratio)\n",
    "        if self.text_projection is not None:\n",
    "            # text_projection is an nn.Parameter (a weight matrix), not a Module,\n",
    "            # so it must be applied with a matmul rather than called\n",
    "            text_features = text_features @ self.text_projection\n",
    "\n",
    "        image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n",
    "        text_features = text_features / text_features.norm(dim=-1, keepdim=True)\n",
    "        logit_scale = self.logit_scale.exp()\n",
    "        logits_per_text = logit_scale * text_features @ image_features.t()\n",
    "        loss = clip_loss(logits_per_text)\n",
    "        # logit_scale already holds self.logit_scale.exp() computed above\n",
    "        return loss, image_features, text_features, logit_scale\n",
    "        \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "myCLIP(\n",
      "  (visual): VisualTransformer(\n",
      "    (conv1): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16), bias=False)\n",
      "    (ln_pre): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "    (transformer): Transformer(\n",
      "      (resblocks): Sequential(\n",
      "        (0): ResidualAttentionBlock(\n",
      "          (attn): MultiheadAttention(\n",
      "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): Sequential(\n",
      "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (gelu): QuickGELU()\n",
      "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "        (1): ResidualAttentionBlock(\n",
      "          (attn): MultiheadAttention(\n",
      "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): Sequential(\n",
      "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (gelu): QuickGELU()\n",
      "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "        (2): ResidualAttentionBlock(\n",
      "          (attn): MultiheadAttention(\n",
      "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): Sequential(\n",
      "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (gelu): QuickGELU()\n",
      "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "        (3): ResidualAttentionBlock(\n",
      "          (attn): MultiheadAttention(\n",
      "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): Sequential(\n",
      "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (gelu): QuickGELU()\n",
      "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "        (4): ResidualAttentionBlock(\n",
      "          (attn): MultiheadAttention(\n",
      "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): Sequential(\n",
      "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (gelu): QuickGELU()\n",
      "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "        (5): ResidualAttentionBlock(\n",
      "          (attn): MultiheadAttention(\n",
      "            (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): Sequential(\n",
      "            (c_fc): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (gelu): QuickGELU()\n",
      "            (c_proj): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "    (ln_post): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "  )\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "args=\"\"\"{\n",
    "    \"embed_dim\": 512,\n",
    "    \"image_resolution\": 224,\n",
    "    \"vision_layers\": 6,\n",
    "    \"vision_width\": 768,\n",
    "    \"vision_patch_size\": 16\n",
    "}\"\"\"\n",
    "args=json.loads(args)\n",
    "embed_dim, text_hidden_size,image_resolution,vision_layers=args[\"embed_dim\"],768,args[\"image_resolution\"],args[\"vision_layers\"]\n",
    "vision_width=args[\"vision_width\"]\n",
    "vision_patch_size=args[\"vision_patch_size\"]\n",
    "vision_head_width=64\n",
    "model=myCLIP(embed_dim=embed_dim,text_hidden_size=text_hidden_size,image_resolution=image_resolution,\n",
    "             vision_layers=vision_layers,vision_width=vision_width,vision_patch_size=vision_patch_size,\n",
    "             vision_head_width=vision_head_width)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
