{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from datasets import load_dataset\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "from component.datacollator import CLIPCollator\n",
    "from component.dataset import CLIPDataset\n",
    "from transformers import ChineseCLIPProcessor, ChineseCLIPModel\n",
    "import os\n",
    "\n",
    "# Data locations (absolute local path; consider a configurable DATA_DIR).\n",
    "root = 'E:/AI/CLIP-Chinese-master/data'\n",
    "train_file = os.path.join(root, 'train1.csv')  # fixed: os.join does not exist\n",
    "test_file = os.path.join(root, 'test1.csv')\n",
    "\n",
    "model = ChineseCLIPModel.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n",
    "processor = ChineseCLIPProcessor.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n",
    "\n",
    "# Build dataset and collator. fixed: the collator definition was commented\n",
    "# out while the DataLoader still referenced `collator` (NameError).\n",
    "train_dataset = CLIPDataset(train_file, processor, root + '/images')\n",
    "data_collator = CLIPCollator(clip_processor=processor, max_seq_length=512)\n",
    "train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=data_collator)\n",
    "\n",
    "optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "model.to(device)  # fixed: model must live on the same device as the batches\n",
    "model.train()\n",
    "\n",
    "for epoch in range(50):\n",
    "    print(\"Epoch:\", epoch)\n",
    "    for idx, batch in enumerate(train_dataloader):\n",
    "        input_ids = batch.pop(\"input_ids\").to(device)\n",
    "        # fixed: keep pixel_values in the model's dtype (fp32); the old cast\n",
    "        # to float16 mismatched the fp32 weights.\n",
    "        pixel_values = batch.pop(\"pixel_values\").to(device)\n",
    "\n",
    "        # fixed: ChineseCLIPModel is a dual-encoder; it takes return_loss=True\n",
    "        # for the contrastive loss, accepts no `labels`, and has no generate().\n",
    "        outputs = model(input_ids=input_ids, pixel_values=pixel_values, return_loss=True)\n",
    "        loss = outputs.loss\n",
    "        print(\"Loss:\", loss.item())\n",
    "\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        optimizer.zero_grad()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "import torch\n",
    "from collections import OrderedDict\n",
    "from torch.utils import checkpoint\n",
    "\n",
    "class LayerNorm(nn.LayerNorm):\n",
    "    \"\"\"LayerNorm that always computes in float32 and casts the result back,\n",
    "    so fp16 activations stay numerically stable.\"\"\"\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        in_dtype = x.dtype\n",
    "        out = super().forward(x.type(torch.float32))\n",
    "        return out.type(in_dtype)\n",
    "\n",
    "\n",
    "class QuickGELU(nn.Module):\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        # sigmoid-based GELU approximation, as used in the original CLIP code\n",
    "        return torch.sigmoid(1.702 * x) * x\n",
    "\n",
    "\n",
    "class ResidualAttentionBlock(nn.Module):\n",
    "    \"\"\"Pre-LN transformer block: x + attn(ln_1(x)), then x + mlp(ln_2(x)).\n",
    "\n",
    "    Expects sequence-first input (L, N, D), matching nn.MultiheadAttention's\n",
    "    default layout.\n",
    "    \"\"\"\n",
    "    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "\n",
    "        self.attn = nn.MultiheadAttention(d_model, n_head)\n",
    "        self.ln_1 = LayerNorm(d_model)\n",
    "        self.mlp = nn.Sequential(OrderedDict([\n",
    "            (\"c_fc\", nn.Linear(d_model, d_model * 4)),\n",
    "            (\"gelu\", QuickGELU()),\n",
    "            (\"c_proj\", nn.Linear(d_model * 4, d_model))\n",
    "        ]))\n",
    "        self.ln_2 = LayerNorm(d_model)\n",
    "        self.attn_mask = attn_mask\n",
    "        self.use_flash_attention = use_flash_attention\n",
    "\n",
    "    def attention(self, x: torch.Tensor):\n",
    "        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None\n",
    "        # fixed: the old use_flash_attention branch called self.attn with a\n",
    "        # single batch-first tensor. nn.MultiheadAttention requires\n",
    "        # (query, key, value) — the call raised TypeError — and this module is\n",
    "        # sequence-first anyway, so the transpose would have attended over the\n",
    "        # wrong dimension. self.attn is a plain nn.MultiheadAttention here, so\n",
    "        # both paths must use the standard self-attention call.\n",
    "        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        x = x + self.attention(self.ln_1(x))\n",
    "        x = x + self.mlp(self.ln_2(x))\n",
    "        return x\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Stack of ResidualAttentionBlocks with optional gradient checkpointing.\"\"\"\n",
    "\n",
    "    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.width = width\n",
    "        self.layers = layers\n",
    "        self.grad_checkpointing = False  # toggled via VisualTransformer.set_grad_checkpointing\n",
    "        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask, use_flash_attention) for _ in range(layers)])\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        if self.grad_checkpointing and not torch.jit.is_scripting():\n",
    "            # fixed: `checkpoint` is the torch.utils.checkpoint *module* here\n",
    "            # (imported as `from torch.utils import checkpoint`), so it must be\n",
    "            # called as checkpoint.checkpoint(...); calling the module object\n",
    "            # raised \"TypeError: 'module' object is not callable\".\n",
    "            for r in self.resblocks:\n",
    "                x = checkpoint.checkpoint(r, x)\n",
    "            return x\n",
    "        return self.resblocks(x)\n",
    "\n",
    "\n",
    "class VisualTransformer(nn.Module):\n",
    "    \"\"\"ViT image encoder: patchify with a strided conv, prepend a class token,\n",
    "    add positional embeddings, run the transformer, then layer-norm the class\n",
    "    token and project it to output_dim.\"\"\"\n",
    "\n",
    "    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, use_flash_attention: bool = False):\n",
    "        super().__init__()\n",
    "        self.dtype = torch.float32 # some callers read .dtype; errors without this line\n",
    "        self.input_resolution = input_resolution\n",
    "        self.grid_size = (self.input_resolution // patch_size, self.input_resolution // patch_size)\n",
    "        self.output_dim = output_dim\n",
    "        # Patch embedding: non-overlapping patch_size x patch_size convolution.\n",
    "        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n",
    "\n",
    "        scale = width ** -0.5\n",
    "        self.class_embedding = nn.Parameter(scale * torch.randn(width))\n",
    "        # One positional embedding per patch plus one for the class token.\n",
    "        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n",
    "        self.ln_pre = LayerNorm(width)\n",
    "\n",
    "        self.transformer = Transformer(width, layers, heads, use_flash_attention=use_flash_attention)\n",
    "\n",
    "        self.ln_post = LayerNorm(width)\n",
    "        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n",
    "\n",
    "    @torch.jit.ignore\n",
    "    def set_grad_checkpointing(self, enable=True):\n",
    "        # Toggle gradient checkpointing on the inner transformer.\n",
    "        self.transformer.grad_checkpointing = enable\n",
    "\n",
    "    def random_masking(self, x, mask_ratio):\n",
    "        # FLIP-style masking: keep a random subset of patch tokens, always\n",
    "        # retaining the class token at position 0.\n",
    "        N, L, D = x.shape  # batch, length, dim\n",
    "        len_keep = int((L - 1) * (1 - mask_ratio))\n",
    "\n",
    "        # Random scores decide which patch tokens survive; the +1 offset shifts\n",
    "        # the argsort indices past the class token at index 0.\n",
    "        noise = torch.rand(N, L - 1, device=x.device)\n",
    "        ids_shuffle = torch.argsort(noise, dim=1) + torch.ones(N, L - 1, device=x.device,\n",
    "                                                               dtype=int)\n",
    "        ids_keep = ids_shuffle[:, :len_keep]\n",
    "\n",
    "        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))\n",
    "\n",
    "        # Re-attach the class token in front of the surviving patches.\n",
    "        x0 = x[:, 0, :]\n",
    "        x0 = x0.reshape(N, 1, D)\n",
    "        x_masked_add = torch.cat([x0, x_masked], axis=1)\n",
    "        return x_masked_add\n",
    "\n",
    "    def forward(self, x: torch.Tensor, mask_ratio: float = 0.0):\n",
    "        # x is assumed to be (batch, 3, input_resolution, input_resolution) — TODO confirm\n",
    "        x = self.conv1(x)  # shape = [*, width, grid, grid]\n",
    "        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]\n",
    "        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]\n",
    "        # Prepend the (broadcast) class token to every sequence in the batch.\n",
    "        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]\n",
    "        x = x + self.positional_embedding.to(x.dtype)\n",
    "        if mask_ratio != 0:\n",
    "            x = self.random_masking(x, mask_ratio)\n",
    "        x = self.ln_pre(x)\n",
    "\n",
    "        x = x.permute(1, 0, 2)  # NLD -> LND\n",
    "        x = self.transformer(x)\n",
    "        x = x.permute(1, 0, 2)  # LND -> NLD\n",
    "\n",
    "        # Class-token representation only.\n",
    "        x = self.ln_post(x[:, 0, :])\n",
    "\n",
    "        if self.proj is not None:\n",
    "            x = x @ self.proj\n",
    "\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "import numpy as np\n",
    "from typing import Union\n",
    "from typing import Tuple\n",
    "def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:\n",
    "    \"\"\"Cross-entropy where the i-th row's correct class is index i.\"\"\"\n",
    "    targets = torch.arange(len(logits), device=logits.device)\n",
    "    return nn.functional.cross_entropy(logits, targets)\n",
    "\n",
    "\n",
    "def clip_loss(similarity: torch.Tensor) -> torch.Tensor:\n",
    "    \"\"\"Symmetric InfoNCE: average of the text->image and image->text losses.\"\"\"\n",
    "    return (contrastive_loss(similarity) + contrastive_loss(similarity.t())) / 2.0\n",
    "\n",
    "class myCLIP(nn.Module):\n",
    "    \"\"\"Minimal CLIP model: a ViT image encoder plus a linear projection for\n",
    "    precomputed text-encoder features, trained with the symmetric\n",
    "    contrastive loss.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "        embed_dim: int,\n",
    "        # text\n",
    "        text_hidden_size: int,  # hidden size of the text encoder's last layer\n",
    "        # vision\n",
    "        image_resolution: int,\n",
    "        vision_layers: Union[Tuple[int, int, int, int], int],\n",
    "        vision_width: int,\n",
    "        vision_patch_size: int,\n",
    "        # vision head width, added this param for ViT-H\n",
    "        vision_head_width: int = 64,\n",
    "        ):\n",
    "        super().__init__()\n",
    "        self.dtype = torch.float32  # some callers read .dtype; keep it defined\n",
    "        vision_heads = vision_width // vision_head_width\n",
    "        self.visual = VisualTransformer(\n",
    "                input_resolution=image_resolution,\n",
    "                patch_size=vision_patch_size,\n",
    "                width=vision_width,\n",
    "                layers=vision_layers,\n",
    "                heads=vision_heads,\n",
    "                output_dim=embed_dim,\n",
    "            )\n",
    "        # Projects precomputed text-encoder features into the joint space.\n",
    "        self.text_projection = nn.Parameter(torch.empty(text_hidden_size, embed_dim))\n",
    "        # Learnable temperature, initialised to 1/0.07 as in the CLIP paper.\n",
    "        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n",
    "        # fixed: dropped the always-true `if self.text_projection is not None`\n",
    "        # guard (it is assigned a Parameter just above).\n",
    "        nn.init.normal_(self.text_projection, std=text_hidden_size ** -0.5)\n",
    "\n",
    "    def encode_image(self, image, mask_ratio=0):\n",
    "        \"\"\"Encode images; mask_ratio > 0 applies FLIP-style patch masking.\"\"\"\n",
    "        return self.visual(image, mask_ratio)\n",
    "\n",
    "    def forward(self, image, text_features, mask_ratio=0):\n",
    "        \"\"\"Return (loss, image_features, text_features, logit_scale.exp()).\"\"\"\n",
    "        # fixed: both inputs are used unconditionally below, so require both\n",
    "        # (the old `or` assert still allowed one of them to be None and crash).\n",
    "        assert image is not None and text_features is not None, \"image and text_features are both required\"\n",
    "\n",
    "        image_features = self.encode_image(image, mask_ratio)\n",
    "        text_features = text_features @ self.text_projection\n",
    "\n",
    "        # L2-normalise, then scale cosine similarity by the temperature.\n",
    "        image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n",
    "        text_features = text_features / text_features.norm(dim=-1, keepdim=True)\n",
    "        logit_scale = self.logit_scale.exp()\n",
    "        logits_per_text = logit_scale * text_features @ image_features.t()\n",
    "        loss = clip_loss(logits_per_text)\n",
    "        # fixed: reuse logit_scale instead of recomputing self.logit_scale.exp().\n",
    "        return loss, image_features, text_features, logit_scale\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "# Model hyper-parameters (ViT-B/16-style vision tower with 6 layers).\n",
    "args = json.loads(\"\"\"{\n",
    "    \"embed_dim\": 512,\n",
    "    \"image_resolution\": 224,\n",
    "    \"vision_layers\": 6,\n",
    "    \"vision_width\": 768,\n",
    "    \"vision_patch_size\": 16\n",
    "}\"\"\")\n",
    "\n",
    "embed_dim = args[\"embed_dim\"]\n",
    "text_hidden_size = 768\n",
    "image_resolution = args[\"image_resolution\"]\n",
    "vision_layers = args[\"vision_layers\"]\n",
    "vision_width = args[\"vision_width\"]\n",
    "vision_patch_size = args[\"vision_patch_size\"]\n",
    "vision_head_width = 64\n",
    "\n",
    "model = myCLIP(\n",
    "    embed_dim=embed_dim,\n",
    "    text_hidden_size=text_hidden_size,\n",
    "    image_resolution=image_resolution,\n",
    "    vision_layers=vision_layers,\n",
    "    vision_width=vision_width,\n",
    "    vision_patch_size=vision_patch_size,\n",
    "    vision_head_width=vision_head_width,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\jack\\AppData\\Roaming\\Python\\Python310\\site-packages\\torch\\nn\\functional.py:5560: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\builder\\windows\\pytorch\\aten\\src\\ATen\\native\\transformers\\cuda\\sdp_utils.cpp:555.)\n",
      "  attn_output = scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/10], Step [65/105], Loss: 3.479010581970215\n",
      "Epoch [1/10], Step [225/105], Loss: 3.4549765586853027\n",
      "Epoch [1/10], Step [385/105], Loss: 3.4799466133117676\n",
      "Epoch [1/10], Step [545/105], Loss: 3.4544925689697266\n",
      "Epoch [1/10], Step [705/105], Loss: 3.4353623390197754\n",
      "Epoch [1/10], Step [865/105], Loss: 3.4242453575134277\n",
      "Epoch [1/10], Step [1025/105], Loss: 3.44101619720459\n",
      "Epoch [1/10], Step [1185/105], Loss: 3.404916763305664\n",
      "Epoch [1/10], Step [1345/105], Loss: 3.4018609523773193\n",
      "Epoch [1/10], Step [1505/105], Loss: 3.387694835662842\n",
      "Epoch [1/10], Step [1665/105], Loss: 3.432204246520996\n",
      "Epoch [1/10], Step [1825/105], Loss: 3.28590726852417\n",
      "Epoch [1/10], Step [1985/105], Loss: 3.28888201713562\n",
      "Epoch [1/10], Step [2145/105], Loss: 3.4116430282592773\n",
      "Epoch [1/10], Step [2305/105], Loss: 3.3744735717773438\n",
      "Epoch [1/10], Step [2465/105], Loss: 3.3653292655944824\n",
      "Epoch [1/10], Step [2625/105], Loss: 3.3496878147125244\n",
      "Epoch [1/10], Step [2785/105], Loss: 3.4652857780456543\n",
      "Epoch [1/10], Step [2945/105], Loss: 3.369840145111084\n",
      "Epoch [1/10], Step [3105/105], Loss: 3.2730841636657715\n",
      "Epoch [1/10], Step [3265/105], Loss: 3.4137167930603027\n",
      "Epoch [2/10], Step [65/105], Loss: 3.3403873443603516\n",
      "Epoch [2/10], Step [225/105], Loss: 3.3489937782287598\n",
      "Epoch [2/10], Step [385/105], Loss: 3.280287981033325\n",
      "Epoch [2/10], Step [545/105], Loss: 3.205000877380371\n",
      "Epoch [2/10], Step [705/105], Loss: 3.2821125984191895\n",
      "Epoch [2/10], Step [865/105], Loss: 3.2510986328125\n",
      "Epoch [2/10], Step [1025/105], Loss: 3.2786946296691895\n",
      "Epoch [2/10], Step [1185/105], Loss: 3.2461349964141846\n",
      "Epoch [2/10], Step [1345/105], Loss: 3.262538433074951\n",
      "Epoch [2/10], Step [1505/105], Loss: 3.402519702911377\n",
      "Epoch [2/10], Step [1665/105], Loss: 3.3102831840515137\n",
      "Epoch [2/10], Step [1825/105], Loss: 3.12666654586792\n",
      "Epoch [2/10], Step [1985/105], Loss: 3.148800849914551\n",
      "Epoch [2/10], Step [2145/105], Loss: 3.329193592071533\n",
      "Epoch [2/10], Step [2305/105], Loss: 3.3968570232391357\n",
      "Epoch [2/10], Step [2465/105], Loss: 3.18587064743042\n",
      "Epoch [2/10], Step [2625/105], Loss: 3.1873679161071777\n",
      "Epoch [2/10], Step [2785/105], Loss: 3.340311050415039\n",
      "Epoch [2/10], Step [2945/105], Loss: 3.2210259437561035\n",
      "Epoch [2/10], Step [3105/105], Loss: 3.0913825035095215\n",
      "Epoch [2/10], Step [3265/105], Loss: 3.1759262084960938\n",
      "Epoch [3/10], Step [65/105], Loss: 3.2062253952026367\n",
      "Epoch [3/10], Step [225/105], Loss: 3.254777431488037\n",
      "Epoch [3/10], Step [385/105], Loss: 3.11427640914917\n",
      "Epoch [3/10], Step [545/105], Loss: 2.891247272491455\n",
      "Epoch [3/10], Step [705/105], Loss: 3.101572036743164\n",
      "Epoch [3/10], Step [865/105], Loss: 3.0306997299194336\n",
      "Epoch [3/10], Step [1025/105], Loss: 3.1289589405059814\n",
      "Epoch [3/10], Step [1185/105], Loss: 2.990884304046631\n",
      "Epoch [3/10], Step [1345/105], Loss: 3.077237606048584\n",
      "Epoch [3/10], Step [1505/105], Loss: 3.185579538345337\n",
      "Epoch [3/10], Step [1665/105], Loss: 3.029421329498291\n",
      "Epoch [3/10], Step [1825/105], Loss: 2.7599847316741943\n",
      "Epoch [3/10], Step [1985/105], Loss: 3.0533571243286133\n",
      "Epoch [3/10], Step [2145/105], Loss: 3.2292072772979736\n",
      "Epoch [3/10], Step [2305/105], Loss: 3.2358365058898926\n",
      "Epoch [3/10], Step [2465/105], Loss: 3.05833101272583\n",
      "Epoch [3/10], Step [2625/105], Loss: 3.046529769897461\n",
      "Epoch [3/10], Step [2785/105], Loss: 3.0519628524780273\n",
      "Epoch [3/10], Step [2945/105], Loss: 2.969090700149536\n",
      "Epoch [3/10], Step [3105/105], Loss: 2.743945360183716\n",
      "Epoch [3/10], Step [3265/105], Loss: 2.907327175140381\n",
      "Epoch [4/10], Step [65/105], Loss: 3.034078598022461\n",
      "Epoch [4/10], Step [225/105], Loss: 2.9434003829956055\n",
      "Epoch [4/10], Step [385/105], Loss: 2.76446795463562\n",
      "Epoch [4/10], Step [545/105], Loss: 2.700199604034424\n",
      "Epoch [4/10], Step [705/105], Loss: 2.770768642425537\n",
      "Epoch [4/10], Step [865/105], Loss: 2.716386556625366\n",
      "Epoch [4/10], Step [1025/105], Loss: 2.7538204193115234\n",
      "Epoch [4/10], Step [1185/105], Loss: 2.7165403366088867\n",
      "Epoch [4/10], Step [1345/105], Loss: 2.7417798042297363\n",
      "Epoch [4/10], Step [1505/105], Loss: 3.0600204467773438\n",
      "Epoch [4/10], Step [1665/105], Loss: 2.7489964962005615\n",
      "Epoch [4/10], Step [1825/105], Loss: 2.498720169067383\n",
      "Epoch [4/10], Step [1985/105], Loss: 2.8657965660095215\n",
      "Epoch [4/10], Step [2145/105], Loss: 3.0335469245910645\n",
      "Epoch [4/10], Step [2305/105], Loss: 3.0389912128448486\n",
      "Epoch [4/10], Step [2465/105], Loss: 2.7283132076263428\n",
      "Epoch [4/10], Step [2625/105], Loss: 2.8418526649475098\n",
      "Epoch [4/10], Step [2785/105], Loss: 2.8406572341918945\n",
      "Epoch [4/10], Step [2945/105], Loss: 2.6426148414611816\n",
      "Epoch [4/10], Step [3105/105], Loss: 2.560917854309082\n",
      "Epoch [4/10], Step [3265/105], Loss: 2.737191677093506\n",
      "Epoch [5/10], Step [65/105], Loss: 2.733316659927368\n",
      "Epoch [5/10], Step [225/105], Loss: 2.5912084579467773\n",
      "Epoch [5/10], Step [385/105], Loss: 2.3197014331817627\n",
      "Epoch [5/10], Step [545/105], Loss: 2.3137404918670654\n",
      "Epoch [5/10], Step [705/105], Loss: 2.5224103927612305\n",
      "Epoch [5/10], Step [865/105], Loss: 2.4492239952087402\n",
      "Epoch [5/10], Step [1025/105], Loss: 2.3824048042297363\n",
      "Epoch [5/10], Step [1185/105], Loss: 2.2778046131134033\n",
      "Epoch [5/10], Step [1345/105], Loss: 2.214639186859131\n",
      "Epoch [5/10], Step [1505/105], Loss: 2.717825412750244\n",
      "Epoch [5/10], Step [1665/105], Loss: 2.1711525917053223\n",
      "Epoch [5/10], Step [1825/105], Loss: 2.229917526245117\n",
      "Epoch [5/10], Step [1985/105], Loss: 2.547445774078369\n",
      "Epoch [5/10], Step [2145/105], Loss: 2.583320140838623\n",
      "Epoch [5/10], Step [2305/105], Loss: 2.803114414215088\n",
      "Epoch [5/10], Step [2465/105], Loss: 2.2967281341552734\n",
      "Epoch [5/10], Step [2625/105], Loss: 2.2228684425354004\n",
      "Epoch [5/10], Step [2785/105], Loss: 2.4695916175842285\n",
      "Epoch [5/10], Step [2945/105], Loss: 2.3920953273773193\n",
      "Epoch [5/10], Step [3105/105], Loss: 2.179622173309326\n",
      "Epoch [5/10], Step [3265/105], Loss: 2.1296324729919434\n",
      "Epoch [6/10], Step [65/105], Loss: 2.3149490356445312\n",
      "Epoch [6/10], Step [225/105], Loss: 2.3275227546691895\n",
      "Epoch [6/10], Step [385/105], Loss: 2.090639114379883\n",
      "Epoch [6/10], Step [545/105], Loss: 1.9722270965576172\n",
      "Epoch [6/10], Step [705/105], Loss: 2.0876011848449707\n",
      "Epoch [6/10], Step [865/105], Loss: 2.2193098068237305\n",
      "Epoch [6/10], Step [1025/105], Loss: 2.13908052444458\n",
      "Epoch [6/10], Step [1185/105], Loss: 1.9536511898040771\n",
      "Epoch [6/10], Step [1345/105], Loss: 1.954484224319458\n",
      "Epoch [6/10], Step [1505/105], Loss: 1.9488977193832397\n",
      "Epoch [6/10], Step [1665/105], Loss: 1.748151183128357\n",
      "Epoch [6/10], Step [1825/105], Loss: 1.6379411220550537\n",
      "Epoch [6/10], Step [1985/105], Loss: 2.115736961364746\n",
      "Epoch [6/10], Step [2145/105], Loss: 1.9584931135177612\n",
      "Epoch [6/10], Step [2305/105], Loss: 2.304429531097412\n",
      "Epoch [6/10], Step [2465/105], Loss: 2.0465259552001953\n",
      "Epoch [6/10], Step [2625/105], Loss: 1.7759525775909424\n",
      "Epoch [6/10], Step [2785/105], Loss: 1.8741917610168457\n",
      "Epoch [6/10], Step [2945/105], Loss: 1.631443977355957\n",
      "Epoch [6/10], Step [3105/105], Loss: 1.754677414894104\n",
      "Epoch [6/10], Step [3265/105], Loss: 1.5270814895629883\n",
      "Epoch [7/10], Step [65/105], Loss: 1.7803760766983032\n",
      "Epoch [7/10], Step [225/105], Loss: 1.5508954524993896\n",
      "Epoch [7/10], Step [385/105], Loss: 1.6198241710662842\n",
      "Epoch [7/10], Step [545/105], Loss: 1.3332608938217163\n",
      "Epoch [7/10], Step [705/105], Loss: 1.5233521461486816\n",
      "Epoch [7/10], Step [865/105], Loss: 1.6374332904815674\n",
      "Epoch [7/10], Step [1025/105], Loss: 1.438016414642334\n",
      "Epoch [7/10], Step [1185/105], Loss: 1.4726396799087524\n",
      "Epoch [7/10], Step [1345/105], Loss: 1.463893175125122\n",
      "Epoch [7/10], Step [1505/105], Loss: 1.22914719581604\n",
      "Epoch [7/10], Step [1665/105], Loss: 1.302115797996521\n",
      "Epoch [7/10], Step [1825/105], Loss: 1.1607544422149658\n",
      "Epoch [7/10], Step [1985/105], Loss: 1.6355705261230469\n",
      "Epoch [7/10], Step [2145/105], Loss: 1.3744840621948242\n",
      "Epoch [7/10], Step [2305/105], Loss: 1.4922807216644287\n",
      "Epoch [7/10], Step [2465/105], Loss: 1.2838420867919922\n",
      "Epoch [7/10], Step [2625/105], Loss: 1.130462884902954\n",
      "Epoch [7/10], Step [2785/105], Loss: 1.164279580116272\n",
      "Epoch [7/10], Step [2945/105], Loss: 1.1983962059020996\n",
      "Epoch [7/10], Step [3105/105], Loss: 0.9463258981704712\n",
      "Epoch [7/10], Step [3265/105], Loss: 1.084416389465332\n",
      "Epoch [8/10], Step [65/105], Loss: 1.119576334953308\n",
      "Epoch [8/10], Step [225/105], Loss: 0.9297726154327393\n",
      "Epoch [8/10], Step [385/105], Loss: 0.9458064436912537\n",
      "Epoch [8/10], Step [545/105], Loss: 0.7381664514541626\n",
      "Epoch [8/10], Step [705/105], Loss: 0.8613042831420898\n",
      "Epoch [8/10], Step [865/105], Loss: 0.8506321907043457\n",
      "Epoch [8/10], Step [1025/105], Loss: 1.045301914215088\n",
      "Epoch [8/10], Step [1185/105], Loss: 0.7994223833084106\n",
      "Epoch [8/10], Step [1345/105], Loss: 0.9589629173278809\n",
      "Epoch [8/10], Step [1505/105], Loss: 0.7892000675201416\n",
      "Epoch [8/10], Step [1665/105], Loss: 0.7410238981246948\n",
      "Epoch [8/10], Step [1825/105], Loss: 0.8075377941131592\n",
      "Epoch [8/10], Step [1985/105], Loss: 0.9082546234130859\n",
      "Epoch [8/10], Step [2145/105], Loss: 0.9330880641937256\n",
      "Epoch [8/10], Step [2305/105], Loss: 0.8238955736160278\n",
      "Epoch [8/10], Step [2465/105], Loss: 0.7797421813011169\n",
      "Epoch [8/10], Step [2625/105], Loss: 0.6061492562294006\n",
      "Epoch [8/10], Step [2785/105], Loss: 0.6823115348815918\n",
      "Epoch [8/10], Step [2945/105], Loss: 0.7547576427459717\n",
      "Epoch [8/10], Step [3105/105], Loss: 0.5133464932441711\n",
      "Epoch [8/10], Step [3265/105], Loss: 0.5336889028549194\n",
      "Epoch [9/10], Step [65/105], Loss: 0.5785513520240784\n",
      "Epoch [9/10], Step [225/105], Loss: 0.597764253616333\n",
      "Epoch [9/10], Step [385/105], Loss: 0.5332001447677612\n",
      "Epoch [9/10], Step [545/105], Loss: 0.35583990812301636\n",
      "Epoch [9/10], Step [705/105], Loss: 0.47511398792266846\n",
      "Epoch [9/10], Step [865/105], Loss: 0.4339509904384613\n",
      "Epoch [9/10], Step [1025/105], Loss: 0.5346246957778931\n",
      "Epoch [9/10], Step [1185/105], Loss: 0.4775712788105011\n",
      "Epoch [9/10], Step [1345/105], Loss: 0.45774754881858826\n",
      "Epoch [9/10], Step [1505/105], Loss: 0.46870237588882446\n",
      "Epoch [9/10], Step [1665/105], Loss: 0.3859632611274719\n",
      "Epoch [9/10], Step [1825/105], Loss: 0.35845082998275757\n",
      "Epoch [9/10], Step [1985/105], Loss: 0.48266565799713135\n",
      "Epoch [9/10], Step [2145/105], Loss: 0.30001455545425415\n",
      "Epoch [9/10], Step [2305/105], Loss: 0.3789997696876526\n",
      "Epoch [9/10], Step [2465/105], Loss: 0.4052216410636902\n",
      "Epoch [9/10], Step [2625/105], Loss: 0.2640526592731476\n",
      "Epoch [9/10], Step [2785/105], Loss: 0.3311815857887268\n",
      "Epoch [9/10], Step [2945/105], Loss: 0.2883320450782776\n",
      "Epoch [9/10], Step [3105/105], Loss: 0.2091393768787384\n",
      "Epoch [9/10], Step [3265/105], Loss: 0.15782389044761658\n",
      "Epoch [10/10], Step [65/105], Loss: 0.2886154055595398\n",
      "Epoch [10/10], Step [225/105], Loss: 0.24417191743850708\n",
      "Epoch [10/10], Step [385/105], Loss: 0.2804645299911499\n",
      "Epoch [10/10], Step [545/105], Loss: 0.20499849319458008\n",
      "Epoch [10/10], Step [705/105], Loss: 0.2589229345321655\n",
      "Epoch [10/10], Step [865/105], Loss: 0.2711346447467804\n",
      "Epoch [10/10], Step [1025/105], Loss: 0.1949523389339447\n",
      "Epoch [10/10], Step [1185/105], Loss: 0.273104727268219\n",
      "Epoch [10/10], Step [1345/105], Loss: 0.2934439778327942\n",
      "Epoch [10/10], Step [1505/105], Loss: 0.2307315170764923\n",
      "Epoch [10/10], Step [1665/105], Loss: 0.21382522583007812\n",
      "Epoch [10/10], Step [1825/105], Loss: 0.2221931517124176\n",
      "Epoch [10/10], Step [1985/105], Loss: 0.13157275319099426\n",
      "Epoch [10/10], Step [2145/105], Loss: 0.20867742598056793\n",
      "Epoch [10/10], Step [2305/105], Loss: 0.18844710290431976\n",
      "Epoch [10/10], Step [2465/105], Loss: 0.19716554880142212\n",
      "Epoch [10/10], Step [2625/105], Loss: 0.15663740038871765\n",
      "Epoch [10/10], Step [2785/105], Loss: 0.1515667736530304\n",
      "Epoch [10/10], Step [2945/105], Loss: 0.18394823372364044\n",
      "Epoch [10/10], Step [3105/105], Loss: 0.1414945274591446\n",
      "Epoch [10/10], Step [3265/105], Loss: 0.12858277559280396\n"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "import os\n",
    "from PIL import Image\n",
    "from transformers import ChineseCLIPProcessor\n",
    "\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "image_root = \"E:/AI/CLIP-Chinese-master/data/images\"\n",
    "\n",
    "# NOTE(security): pickle.load runs arbitrary code; only load trusted files.\n",
    "with open(\"train.pkl\", \"rb\") as f:\n",
    "    data = pickle.load(f)\n",
    "\n",
    "images = []\n",
    "text_features = []\n",
    "for item in data:\n",
    "    file = os.path.join(image_root, item['filename'])\n",
    "    images.append(Image.open(file).convert('RGB'))\n",
    "    text_features.append(torch.tensor(item['text_embed']))\n",
    "# assumes each text_embed is shaped (1, hidden) so concat gives (N, hidden) — TODO confirm\n",
    "text_features = torch.concat(text_features)\n",
    "\n",
    "processor = ChineseCLIPProcessor.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n",
    "pixel_values = processor(images=images, return_tensors=\"pt\")[\"pixel_values\"]\n",
    "\n",
    "# Training function\n",
    "def train(model, text_features, pixel_values, epochs=10, batch_size=32, learning_rate=1e-4):\n",
    "    \"\"\"Contrastive training over (image, text-feature) pairs in fixed-order minibatches.\"\"\"\n",
    "    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n",
    "    model.to(device)\n",
    "    model.train()  # fixed: was missing; modules should run in training mode\n",
    "    n_batches = (len(text_features) + batch_size - 1) // batch_size\n",
    "    for epoch in range(epochs):\n",
    "        for step, i in enumerate(range(0, len(text_features), batch_size), start=1):\n",
    "            # Current minibatch\n",
    "            images = pixel_values[i:i+batch_size].to(device)\n",
    "            texts = text_features[i:i+batch_size].to(device)\n",
    "            # Forward pass\n",
    "            loss, image_features1, text_features1, logit_scale_exp = model(images, texts)\n",
    "            # Backward pass and parameter update\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            # fixed: report the batch index out of the true batch count; the\n",
    "            # old message printed the sample index against a batch-count\n",
    "            # denominator (e.g. \"Step [3265/105]\").\n",
    "            if step % 5 == 0:\n",
    "                print(f\"Epoch [{epoch+1}/{epochs}], Step [{step}/{n_batches}], Loss: {loss.item()}\")\n",
    "\n",
    "train(model, text_features, pixel_values)\n",
    "# Save model weights\n",
    "torch.save(model.state_dict(), 'myCLIP.pth')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "G:\\TEMP\\TEMP\\ipykernel_5948\\1939856756.py:5: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  model1.load_state_dict(torch.load('myCLIP.pth'))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(1.0866, grad_fn=<DivBackward0>)\n"
     ]
    }
   ],
   "source": [
    "torch.save(model.state_dict(), 'myCLIP.pth')\n",
    "model1 = myCLIP(embed_dim=embed_dim, text_hidden_size=text_hidden_size, image_resolution=image_resolution,\n",
    "                vision_layers=vision_layers, vision_width=vision_width, vision_patch_size=vision_patch_size,\n",
    "                vision_head_width=vision_head_width)\n",
    "# fixed: weights_only=True avoids pickle arbitrary-code execution and\n",
    "# silences the FutureWarning seen in the recorded output.\n",
    "model1.load_state_dict(torch.load('myCLIP.pth', weights_only=True))\n",
    "model1.eval()  # fixed: evaluation should run in eval mode\n",
    "with torch.no_grad():  # fixed: no gradients needed when only reporting the loss\n",
    "    loss, image_features1, text_features1, logit_scale_exp = model1(pixel_values, text_features)\n",
    "print(loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(2.3139, grad_fn=<DivBackward0>)\n"
     ]
    }
   ],
   "source": [
    "print(loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "import requests\n",
    "from transformers import ChineseCLIPProcessor, ChineseCLIPModel\n",
    "\n",
    "model = ChineseCLIPModel.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n",
    "processor = ChineseCLIPProcessor.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n",
    "\n",
    "url = \"https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg\"\n",
    "image = Image.open(requests.get(url, stream=True).raw)\n",
    "# Squirtle, Bulbasaur, Charmander, Pikachu in English\n",
    "texts = [\"杰尼龟\", \"妙蛙种子\", \"小火龙\", \"皮卡丘\"]\n",
    "\n",
    "# image features, L2-normalised\n",
    "image_inputs = processor(images=image, return_tensors=\"pt\")\n",
    "image_features = model.get_image_features(**image_inputs)\n",
    "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
    "\n",
    "# text features, L2-normalised\n",
    "text_inputs = processor(text=texts, padding=True, return_tensors=\"pt\")\n",
    "text_features = model.get_text_features(**text_inputs)\n",
    "text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
    "\n",
    "# joint forward pass yields the image-text similarity logits\n",
    "pair_inputs = processor(text=texts, images=image, return_tensors=\"pt\", padding=True)\n",
    "outputs = model(**pair_inputs)\n",
    "logits_per_image = outputs.logits_per_image  # this is the image-text similarity score\n",
    "probs = logits_per_image.softmax(dim=1)  # probs: [[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]]"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
