{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## LLaVA\n",
    "https://arxiv.org/abs/2304.08485"
   ],
   "id": "d3790d87b1ef0dca"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:32:25.743719Z",
     "start_time": "2025-08-22T07:32:25.739691Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "# Configure Hugging Face environment variables before any HF import\n",
    "os.environ[\"HF_ENDPOINT\"] = \"https://hf-mirror.com\"  # route downloads through the HF mirror\n",
    "os.environ[\"HF_HUB_ENABLE_HF_TRANSFER\"] = \"1\"  # enable accelerated (hf_transfer) downloads"
   ],
   "id": "d343bffcd327dcd8",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:32:45.748882Z",
     "start_time": "2025-08-22T07:32:26.920709Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from transformers import ChineseCLIPModel, ChineseCLIPProcessor\n",
    "from modelscope import AutoModelForCausalLM\n",
    "from modelscope import AutoTokenizer\n",
    "\n",
    "clip_model_name = \"OFA-Sys/chinese-clip-vit-base-patch16\"\n",
    "clip_model = ChineseCLIPModel.from_pretrained(clip_model_name)\n",
    "clip_processor = ChineseCLIPProcessor.from_pretrained(clip_model_name)\n",
    "\n",
    "qwen_model_name = \"Qwen/Qwen3-0.6B\"\n",
    "qwen_model = AutoModelForCausalLM.from_pretrained(qwen_model_name)\n",
    "qwen_tokenizer = AutoTokenizer.from_pretrained(qwen_model_name)\n",
    "qwen_tokenizer.pad_token = qwen_tokenizer.eos_token"
   ],
   "id": "a844cddd7ce9ec46",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/dadudu/miniconda3/envs/mini-gpt/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /Users/dadudu/.cache/modelscope/hub/models/Qwen/Qwen3-0.6B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-22 15:32:39,695 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /Users/dadudu/.cache/modelscope/hub/models/Qwen/Qwen3-0.6B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-22 15:32:45,549 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:32:49.404874Z",
     "start_time": "2025-08-22T07:32:47.768606Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Add a special <|IMG|> placeholder token to the Qwen tokenizer; its\n",
    "# embedding slot will later be replaced by the projected image feature.\n",
    "qwen_tokenizer.add_tokens([\"<|IMG|>\"])\n",
    "# Grow the LM embedding matrix so the new token id has an embedding row\n",
    "qwen_model.resize_token_embeddings(len(qwen_tokenizer))"
   ],
   "id": "e2068ee0ebddfcc9",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Embedding(151670, 1024)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:32:51.517657Z",
     "start_time": "2025-08-22T07:32:51.515029Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Tiny toy dataset: (image_path, question + <|IMG|> + answer + <|im_end|>)\n",
    "data = [\n",
    "    (\"数字5.png\", \"这张图片里有什么<|IMG|>图片里是数字5<|im_end|>\"),\n",
    "    (\"数字0.png\", \"描述一下这张图片<|IMG|>这张图片背景是黑色的，中间是一个数字0<|im_end|>\"),\n",
    "]"
   ],
   "id": "ac8b6ac0d7ba5c5c",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:33:24.236462Z",
     "start_time": "2025-08-22T07:33:24.230978Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch.utils.data import Dataset\n",
    "from PIL import Image\n",
    "\n",
    "class LLaVADataset(Dataset):\n",
    "    \"\"\"Pairs an image with its caption for LLaVA-style training.\n",
    "\n",
    "    Each item yields (pixel_values, input_ids, attention_mask), with the\n",
    "    processors' leading batch dimension squeezed away.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, data, clip_processor, qwen_tokenizer, max_length=128):\n",
    "        self.data = data\n",
    "        self.clip_processor = clip_processor\n",
    "        self.qwen_tokenizer = qwen_tokenizer\n",
    "        self.max_length = max_length  # token cap per caption\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        image_path, caption = self.data[idx]\n",
    "\n",
    "        # Force RGB: PNGs may be grayscale/palette/RGBA, which CLIP's\n",
    "        # 3-channel preprocessing does not expect.\n",
    "        image = Image.open(image_path).convert(\"RGB\")\n",
    "        pixel_values = self.clip_processor(images=image, return_tensors=\"pt\")['pixel_values']\n",
    "\n",
    "        # padding is pointless for a single sequence, so only truncate here.\n",
    "        # NOTE(review): batch_size > 1 would still need a padding collate_fn.\n",
    "        text_tokenized = self.qwen_tokenizer(caption, return_tensors=\"pt\", truncation=True,\n",
    "                                             max_length=self.max_length)\n",
    "\n",
    "        return (pixel_values.squeeze(0),\n",
    "                text_tokenized['input_ids'].squeeze(0),\n",
    "                text_tokenized['attention_mask'].squeeze(0))\n"
   ],
   "id": "2e6c481744218ff2",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:33:25.712551Z",
     "start_time": "2025-08-22T07:33:25.676790Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch.utils.data import DataLoader\n",
    "\n",
    "dataset = LLaVADataset(data, clip_processor, qwen_tokenizer)\n",
    "dataloader = DataLoader(dataset, batch_size=1, shuffle=True)\n",
    "\n",
    "# Sanity-check the tensor shapes of a single batch\n",
    "pixel_values, text_inputs, attention_mask = next(iter(dataloader))\n",
    "print(pixel_values.shape)\n",
    "print(text_inputs.shape)\n",
    "print(attention_mask.shape)"
   ],
   "id": "48262e08b5463d9f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 3, 224, 224])\n",
      "torch.Size([1, 11])\n",
      "torch.Size([1, 11])\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:44:21.036704Z",
     "start_time": "2025-08-22T07:44:21.023114Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "\n",
    "# Author: Zhouyu (WeChat: it_zhouyu)\n",
    "\n",
    "class ZhouyuLLaVAModel(nn.Module):\n",
    "    \"\"\"Minimal LLaVA: frozen CLIP vision tower + frozen Qwen LM, bridged by\n",
    "    a single trainable linear projection. The <|IMG|> placeholder embedding\n",
    "    in the text is swapped for the projected image feature.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, clip_model, qwen_model, qwen_tokenizer):\n",
    "        super().__init__()\n",
    "        self.image_model = clip_model.vision_model\n",
    "        self.language_model = qwen_model\n",
    "        self.qwen_tokenizer = qwen_tokenizer\n",
    "\n",
    "        # Linear projection aligning vision hidden size to LM hidden size\n",
    "        self.image_proj = nn.Linear(\n",
    "            self.image_model.config.hidden_size,\n",
    "            self.language_model.config.hidden_size\n",
    "        )\n",
    "\n",
    "        # Look the placeholder id up directly; encode() can prepend extra\n",
    "        # special tokens depending on the tokenizer's template.\n",
    "        self.img_token_id = self.qwen_tokenizer.convert_tokens_to_ids(\"<|IMG|>\")\n",
    "\n",
    "        # Freeze everything except the projection layer\n",
    "        self.freeze_models()\n",
    "\n",
    "    def freeze_models(self):\n",
    "        \"\"\"Freeze the vision tower and the LM; only image_proj trains.\"\"\"\n",
    "        for param in self.image_model.parameters():\n",
    "            param.requires_grad = False\n",
    "\n",
    "        for param in self.language_model.parameters():\n",
    "            param.requires_grad = False\n",
    "\n",
    "        # The projection layer is the only trainable component\n",
    "        for param in self.image_proj.parameters():\n",
    "            param.requires_grad = True\n",
    "\n",
    "    def forward(self, pixel_values, text_input_ids, attention_mask):\n",
    "        \"\"\"Return the causal-LM loss for a batch of (image, caption) pairs.\"\"\"\n",
    "        # 1. Pooled image feature from the frozen CLIP vision tower\n",
    "        image_features = self.image_model(pixel_values=pixel_values, return_dict=True).pooler_output\n",
    "\n",
    "        # 2. Project the image feature into the LM embedding space\n",
    "        projected_image_features = self.image_proj(image_features)\n",
    "\n",
    "        # 3. Token embeddings for the text\n",
    "        text_embeddings = self.language_model.get_input_embeddings()(text_input_ids)\n",
    "\n",
    "        # 4+5. Splice the image feature over <|IMG|> and build labels in one\n",
    "        # pass (the position only needs to be located once per sample)\n",
    "        labels = text_input_ids.clone()\n",
    "        final_embeddings = []\n",
    "        for i in range(text_embeddings.shape[0]):  # iterate over the batch\n",
    "            # Position of the (single) <|IMG|> token in this sample\n",
    "            img_token_pos = (text_input_ids[i] == self.img_token_id).nonzero(as_tuple=True)[0][0]\n",
    "\n",
    "            # Replace the <|IMG|> embedding with the projected image feature\n",
    "            final_embeddings.append(torch.cat([\n",
    "                text_embeddings[i, :img_token_pos],\n",
    "                projected_image_features[i].unsqueeze(0),\n",
    "                text_embeddings[i, img_token_pos + 1:]\n",
    "            ], dim=0))\n",
    "\n",
    "            # Mask the question part; only the answer contributes to the loss\n",
    "            labels[i, :img_token_pos + 1] = -100\n",
    "        inputs_embeds = torch.stack(final_embeddings)\n",
    "\n",
    "        # Also ignore padding positions (matters once batch_size > 1)\n",
    "        labels[attention_mask == 0] = -100\n",
    "\n",
    "        # 6. Feed the fused embeddings to the LM; HF shifts labels internally\n",
    "        outputs = self.language_model(\n",
    "            inputs_embeds=inputs_embeds,\n",
    "            attention_mask=attention_mask,\n",
    "            labels=labels,\n",
    "            return_dict=True\n",
    "        )\n",
    "\n",
    "        return outputs.loss"
   ],
   "id": "55c098c79897d7e5",
   "outputs": [],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:39:41.279641Z",
     "start_time": "2025-08-22T07:39:41.276359Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "\n",
    "# Find the indices whose value equals 2:\n",
    "# positions (0,1), (1,0) and (1,1) all hold the value 2\n",
    "(torch.tensor([[1, 2, 3],\n",
    "               [2, 2, 3]]) == torch.tensor(2)).nonzero(as_tuple=True)"
   ],
   "id": "c2e34555baf7d25c",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([0, 1, 1]), tensor([1, 0, 1]))"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:40:12.926488Z",
     "start_time": "2025-08-22T07:40:12.921367Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "\n",
    "# 1-D case: [0][0] extracts the first index whose value equals 2\n",
    "# (here index 1 — the comment in the previous cell covered the 2-D case)\n",
    "(torch.tensor([1, 2, 3]) == torch.tensor(2)).nonzero(as_tuple=True)[0][0]"
   ],
   "id": "d57ac9d7fcd29289",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:45:07.616059Z",
     "start_time": "2025-08-22T07:44:24.970005Z"
    }
   },
   "cell_type": "code",
   "source": [
    "EPOCHS = 50\n",
    "\n",
    "model = ZhouyuLLaVAModel(clip_model, qwen_model, qwen_tokenizer)\n",
    "# Only the projection layer has requires_grad=True after freeze_models()\n",
    "trainable_params = [p for p in model.parameters() if p.requires_grad]\n",
    "optimizer = torch.optim.AdamW(trainable_params, lr=1e-3)\n",
    "\n",
    "for epoch in range(EPOCHS):\n",
    "    running_loss = 0.0\n",
    "    for pixel_values, input_ids, attn_mask in dataloader:\n",
    "        optimizer.zero_grad()\n",
    "        loss = model(pixel_values, input_ids, attn_mask)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        running_loss += loss.item()\n",
    "\n",
    "    print(f\"Epoch {epoch + 1} finished. Average Loss: {running_loss / len(dataloader):.4f}\")\n",
    "\n",
    "print(\"Training finished.\")"
   ],
   "id": "b34c2c4ff8bf9f62",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1 finished. Average Loss: 7.6486\n",
      "Epoch 2 finished. Average Loss: 6.0873\n",
      "Epoch 3 finished. Average Loss: 5.6330\n",
      "Epoch 4 finished. Average Loss: 5.4640\n",
      "Epoch 5 finished. Average Loss: 5.2307\n",
      "Epoch 6 finished. Average Loss: 5.0236\n",
      "Epoch 7 finished. Average Loss: 4.8705\n",
      "Epoch 8 finished. Average Loss: 4.7339\n",
      "Epoch 9 finished. Average Loss: 4.6032\n",
      "Epoch 10 finished. Average Loss: 4.4468\n",
      "Epoch 11 finished. Average Loss: 4.2724\n",
      "Epoch 12 finished. Average Loss: 4.0944\n",
      "Epoch 13 finished. Average Loss: 3.9449\n",
      "Epoch 14 finished. Average Loss: 3.7480\n",
      "Epoch 15 finished. Average Loss: 3.5399\n",
      "Epoch 16 finished. Average Loss: 3.3245\n",
      "Epoch 17 finished. Average Loss: 3.1158\n",
      "Epoch 18 finished. Average Loss: 2.9177\n",
      "Epoch 19 finished. Average Loss: 2.6748\n",
      "Epoch 20 finished. Average Loss: 2.4946\n",
      "Epoch 21 finished. Average Loss: 2.2506\n",
      "Epoch 22 finished. Average Loss: 1.9987\n",
      "Epoch 23 finished. Average Loss: 1.7650\n",
      "Epoch 24 finished. Average Loss: 1.4866\n",
      "Epoch 25 finished. Average Loss: 1.1767\n",
      "Epoch 26 finished. Average Loss: 0.9405\n",
      "Epoch 27 finished. Average Loss: 0.6826\n",
      "Epoch 28 finished. Average Loss: 0.4545\n",
      "Epoch 29 finished. Average Loss: 0.3350\n",
      "Epoch 30 finished. Average Loss: 0.2527\n",
      "Epoch 31 finished. Average Loss: 0.1953\n",
      "Epoch 32 finished. Average Loss: 0.1496\n",
      "Epoch 33 finished. Average Loss: 0.1262\n",
      "Epoch 34 finished. Average Loss: 0.0953\n",
      "Epoch 35 finished. Average Loss: 0.0803\n",
      "Epoch 36 finished. Average Loss: 0.0670\n",
      "Epoch 37 finished. Average Loss: 0.0553\n",
      "Epoch 38 finished. Average Loss: 0.0486\n",
      "Epoch 39 finished. Average Loss: 0.0391\n",
      "Epoch 40 finished. Average Loss: 0.0353\n",
      "Epoch 41 finished. Average Loss: 0.0293\n",
      "Epoch 42 finished. Average Loss: 0.0269\n",
      "Epoch 43 finished. Average Loss: 0.0238\n",
      "Epoch 44 finished. Average Loss: 0.0212\n",
      "Epoch 45 finished. Average Loss: 0.0191\n",
      "Epoch 46 finished. Average Loss: 0.0173\n",
      "Epoch 47 finished. Average Loss: 0.0158\n",
      "Epoch 48 finished. Average Loss: 0.0146\n",
      "Epoch 49 finished. Average Loss: 0.0135\n",
      "Epoch 50 finished. Average Loss: 0.0126\n",
      "Training finished.\n"
     ]
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-22T07:47:37.466243Z",
     "start_time": "2025-08-22T07:47:36.360972Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def generate(model, pixel_values, prompt_text):\n",
    "    \"\"\"Generate a caption for one image given a prompt containing <|IMG|>.\n",
    "\n",
    "    pixel_values: (1, 3, H, W) CLIP-preprocessed image tensor.\n",
    "    Returns the decoded generation as a plain string.\n",
    "    \"\"\"\n",
    "    # Inference only: run the vision tower and projection inside no_grad\n",
    "    # as well, so no autograd graph is built for them.\n",
    "    with torch.no_grad():\n",
    "        prompt_ids = model.qwen_tokenizer(prompt_text, return_tensors=\"pt\")['input_ids']\n",
    "\n",
    "        image_features = model.image_model(pixel_values=pixel_values, return_dict=True).pooler_output\n",
    "        projected_image_features = model.image_proj(image_features)\n",
    "\n",
    "        prompt_embeddings = model.language_model.get_input_embeddings()(prompt_ids)\n",
    "\n",
    "        # Replace the <|IMG|> slot with the projected image feature,\n",
    "        # mirroring the training-time fusion\n",
    "        img_token_pos = (prompt_ids[0] == model.img_token_id).nonzero(as_tuple=True)[0][0]\n",
    "        inputs_embeds = torch.cat([\n",
    "            prompt_embeddings[0, :img_token_pos],\n",
    "            projected_image_features[0].unsqueeze(0),\n",
    "            prompt_embeddings[0, img_token_pos + 1:]\n",
    "        ], dim=0).unsqueeze(0)\n",
    "\n",
    "        outputs = model.language_model.generate(\n",
    "            inputs_embeds=inputs_embeds,\n",
    "            max_new_tokens=50,\n",
    "            pad_token_id=model.qwen_tokenizer.pad_token_id,\n",
    "            eos_token_id=model.qwen_tokenizer.eos_token_id\n",
    "        )\n",
    "\n",
    "    return model.qwen_tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
    "\n",
    "\n",
    "model.eval()\n",
    "\n",
    "image_path_to_predict = \"数字0.png\"\n",
    "prompt = \"描述一下这张图片<|IMG|>\"\n",
    "\n",
    "# Force RGB for consistency with training-time preprocessing\n",
    "image = Image.open(image_path_to_predict).convert(\"RGB\")\n",
    "pixel_values = clip_processor(images=image, return_tensors=\"pt\")['pixel_values']\n",
    "\n",
    "generate(model, pixel_values, prompt)"
   ],
   "id": "6b88676fca1d747d",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'这张图片背景是黑色的，中间是一个数字0'"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 22
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
