{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "45c969ae",
   "metadata": {},
   "source": [
    "# BLIP模拟模型搭建理解"
   ]
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-08-07T02:47:14.455085Z",
     "start_time": "2025-08-07T02:41:06.663751Z"
    }
   },
   "source": [
    "import torch  # 导入PyTorch库\n",
    "import torch.nn as nn  # 导入PyTorch神经网络模块\n",
    "from transformers import BertModel, ViTModel, BertTokenizer  # 从transformers库导入预训练模型和分词器\n",
    "\n",
    "class BLIP_MED(nn.Module):  # BLIP-style Multimodal Encoder-Decoder (teaching mock-up)\n",
    "    \"\"\"Minimal BLIP MED: ViT image encoder + BERT text encoder with ITC/ITM/LM heads.\"\"\"\n",
    "\n",
    "    def __init__(self, config):  # config: BertConfig-like (hidden_size, vocab_size, num_hidden_layers, ...)\n",
    "        super().__init__()  # initialize nn.Module\n",
    "        self.image_encoder = ViTModel.from_pretrained(\"google/vit-base-patch16-224\")  # pretrained ViT image encoder\n",
    "        self.text_encoder = BertModel.from_pretrained(\"bert-base-uncased\")  # pretrained BERT text encoder\n",
    "        self.itc_head = nn.Sequential(  # Image-Text Contrastive (ITC) projection head\n",
    "            nn.Linear(config.hidden_size, config.hidden_size),  # linear projection\n",
    "            nn.LayerNorm(config.hidden_size)  # layer norm\n",
    "        )\n",
    "        self.itm_head = nn.Linear(config.hidden_size, 2)  # Image-Text Matching (ITM) head: match / no-match logits\n",
    "        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)  # LM head: hidden states -> vocabulary logits\n",
    "        self.text_decoder = ImageGroundedTextDecoder(config)  # image-grounded text decoder (defined below)\n",
    "\n",
    "    def forward(self, image, input_ids, attention_mask, decoder_input_ids=None):\n",
    "        \"\"\"Run ITC/ITM scoring and, when decoder_input_ids is given, image-grounded LM decoding.\n",
    "\n",
    "        Returns a dict with 'itc_score' [B], 'itm_logits' [B, 2], 'lm_logits' [B, T, vocab] or None.\n",
    "        \"\"\"\n",
    "        # ViT image features\n",
    "        image_outputs = self.image_encoder(pixel_values=image)  # encode image with ViT\n",
    "        image_feat = image_outputs.last_hidden_state  # [B, num_patches+1, C]\n",
    "\n",
    "        # Text encoding\n",
    "        text_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask)  # encode text with BERT\n",
    "        text_feat = text_outputs.last_hidden_state  # per-token text features\n",
    "        cls_feat = text_feat[:, 0]  # [CLS] token feature\n",
    "\n",
    "        # ITC: cosine similarity between projected text [CLS] and image [CLS].\n",
    "        # NOTE: real BLIP uses a separate projection per modality; a shared head is kept here for simplicity.\n",
    "        image_cls = image_feat[:, 0]  # image [CLS] feature\n",
    "        itc_score = torch.cosine_similarity(self.itc_head(cls_feat), self.itc_head(image_cls))  # per-sample similarity [B]\n",
    "\n",
    "        # ITM: real BLIP scores the cross-attended multimodal [CLS]; using the text-only [CLS] is a simplification\n",
    "        itm_logits = self.itm_head(cls_feat)  # [B, 2] match / no-match logits\n",
    "\n",
    "        # LM decoding only when decoder inputs are provided\n",
    "        if decoder_input_ids is not None:\n",
    "            lm_outputs = self.text_decoder(decoder_input_ids, image_feat)  # decode text grounded on image features\n",
    "            lm_logits = self.lm_head(lm_outputs)  # [B, T, vocab_size]\n",
    "        else:\n",
    "            lm_logits = None  # no decoding requested\n",
    "\n",
    "        return {  # result dict\n",
    "            \"itc_score\": itc_score,  # image-text contrastive similarity\n",
    "            \"itm_logits\": itm_logits,  # image-text matching logits\n",
    "            \"lm_logits\": lm_logits  # language-model logits (or None)\n",
    "        }\n",
    "\n",
    "class ImageGroundedTextDecoder(nn.Module):  # transformer decoder that cross-attends to image features\n",
    "    def __init__(self, config):  # config: BertConfig-like (vocab_size, hidden_size, num_hidden_layers, max_position_embeddings)\n",
    "        super().__init__()  # initialize nn.Module\n",
    "        self.layers = nn.ModuleList([  # stack of decoder blocks\n",
    "            TransformerDecoderBlock(config) for _ in range(config.num_hidden_layers)  # one block per configured layer\n",
    "        ])\n",
    "        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)  # token embedding table\n",
    "        self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)  # learned position embeddings\n",
    "        self.layernorm = nn.LayerNorm(config.hidden_size)  # norm applied to the summed input embeddings\n",
    "\n",
    "    def forward(self, decoder_input_ids, image_feats):  # decoder_input_ids: [B, T]; image_feats: [B, P, C]\n",
    "        bsz, seq_len = decoder_input_ids.shape  # batch size and target sequence length\n",
    "        pos_ids = torch.arange(0, seq_len, dtype=torch.long, device=decoder_input_ids.device)  # position ids 0..T-1\n",
    "        pos_ids = pos_ids.unsqueeze(0).expand_as(decoder_input_ids)  # broadcast to [B, T]\n",
    "\n",
    "        x = self.embedding(decoder_input_ids) + self.position_embedding(pos_ids)  # token + position embeddings\n",
    "        x = self.layernorm(x)  # normalize embeddings\n",
    "\n",
    "        for layer in self.layers:  # run the decoder blocks in order\n",
    "            x = layer(x, image_feats)  # each block: causal self-attn, then cross-attn over image features\n",
    "\n",
    "        return x  # decoded hidden states [B, T, C]\n",
    "\n",
    "class TransformerDecoderBlock(nn.Module):  # one decoder layer: causal self-attn -> cross-attn -> FFN\n",
    "    def __init__(self, config):  # config: BertConfig-like (hidden_size, num_attention_heads, intermediate_size)\n",
    "        super().__init__()  # initialize nn.Module\n",
    "        self.self_attn = nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)  # causal self-attention\n",
    "        self.cross_attn = nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)  # cross-attention over image features\n",
    "        self.feed_forward = nn.Sequential(  # position-wise feed-forward network\n",
    "            nn.Linear(config.hidden_size, config.intermediate_size),  # expand\n",
    "            nn.GELU(),  # GELU activation\n",
    "            nn.Linear(config.intermediate_size, config.hidden_size)  # project back\n",
    "        )\n",
    "        self.norm1 = nn.LayerNorm(config.hidden_size)  # post-self-attention norm\n",
    "        self.norm2 = nn.LayerNorm(config.hidden_size)  # post-cross-attention norm\n",
    "        self.norm3 = nn.LayerNorm(config.hidden_size)  # post-FFN norm\n",
    "\n",
    "    def forward(self, x, visual_feats):  # x: [B, T, C] text states; visual_feats: [B, P, C] image features\n",
    "        # Causal mask\n",
    "        seq_len = x.size(1)  # target sequence length\n",
    "        attn_mask = torch.triu(torch.ones(seq_len, seq_len, device=x.device), diagonal=1).bool()  # look-ahead mask: True entries are blocked\n",
    "        \n",
    "        # causal self-attention over the target sequence\n",
    "        x2, _ = self.self_attn(x, x, x, attn_mask=attn_mask)  # attend only to current and earlier positions\n",
    "        x = self.norm1(x + x2)  # residual + norm (post-norm style)\n",
    "        \n",
    "        x2, _ = self.cross_attn(query=x,  # cross-attention: text states as queries\n",
    "                               key=visual_feats,  # image features as keys\n",
    "                               value=visual_feats)  # image features as values\n",
    "        x = self.norm2(x + x2)  # residual + norm\n",
    "\n",
    "        x = self.norm3(x + self.feed_forward(x))  # FFN with residual + norm\n",
    "        return x  # [B, T, C]\n",
    "\n",
    "# ---------- Inference demo ----------\n",
    "if __name__ == '__main__':  # script entry point\n",
    "    from transformers import BertConfig  # supplies hidden_size / vocab_size / layer counts\n",
    "    config = BertConfig()  # default bert-base-style configuration\n",
    "    model = BLIP_MED(config)  # build the model (downloads pretrained encoders on first run)\n",
    "    tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")  # tokenizer matching the text encoder\n",
    "\n",
    "    # Build inputs\n",
    "    dummy_image = torch.randn(2, 3, 224, 224)  # random batch of 2; ViT-base expects 224x224 RGB\n",
    "    text = [\"a girl holding a kitten\", \"a man riding a horse\"]  # sample captions\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\", padding=True, truncation=True)  # tokenize with padding\n",
    "\n",
    "    decoder_input = tokenizer([\"a little\",\"a small\"], return_tensors=\"pt\", padding=True).input_ids  # decoder prompt ids\n",
    "    print(decoder_input.shape)\n",
    "    # Forward pass\n",
    "    outputs = model(dummy_image, inputs['input_ids'], inputs['attention_mask'], decoder_input)  # run inference\n",
    "    print(\"ITC score:\", outputs['itc_score'].shape)  # expect [B]\n",
    "    print(\"ITM logits:\", outputs['itm_logits'].shape)  # expect [B, 2]\n",
    "    print(\"LM logits:\", outputs['lm_logits'].shape if outputs['lm_logits'] is not None else None)  # expect [B, T, vocab]\n"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of ViTModel were not initialized from the model checkpoint at google/vit-base-patch16-224 and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
      "Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed. Falling back to regular HTTP download. For better performance, install the package with: `pip install huggingface_hub[hf_xet]` or `pip install hf_xet`\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "model.safetensors:   7%|6         | 31.5M/472M [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "b1c7a39f2f3748eb89e1fd178cdf3a02"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YuBin\\AppData\\Roaming\\Python\\Python312\\site-packages\\huggingface_hub\\file_download.py:143: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in C:\\Users\\YuBin\\.cache\\huggingface\\hub\\models--bert-base-uncased. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.\n",
      "To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development\n",
      "  warnings.warn(message)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tokenizer_config.json:   0%|          | 0.00/48.0 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "9c3e1b856bd548fa82369a7a027b2bc5"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "vocab.txt:   0%|          | 0.00/232k [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "e9ff0b110d6f4fcfbe8bcdd629bd937b"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tokenizer.json:   0%|          | 0.00/466k [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "f1425d0143d34dff840628f6183080a7"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 4])\n",
      "cls_feat: torch.Size([2, 768])\n",
      "ITC score: torch.Size([2])\n",
      "ITM logits: torch.Size([2, 2])\n",
      "LM logits: torch.Size([2, 4, 30522])\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "cell_type": "markdown",
   "id": "fd2c971f",
   "metadata": {},
   "source": "# 理解Image Captioning"
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e7c77f31",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Package versions this notebook was developed against\n",
    "# torch                                    2.7.1\n",
    "# transformers                             4.53.2"
   ]
  },
  {
   "cell_type": "code",
   "id": "a58f4dbd",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-07T03:25:01.359056Z",
     "start_time": "2025-08-07T02:51:49.019055Z"
    }
   },
   "source": [
    "from PIL import Image\n",
    "import torch\n",
    "from transformers import (\n",
    "    BlipProcessor, BlipForConditionalGeneration,\n",
    "    BlipForQuestionAnswering, BlipForImageTextRetrieval\n",
    ")\n",
    "import requests\n",
    "from io import BytesIO\n",
    "import warnings\n",
    "\n",
    "# Silence warnings that do not affect the demos\n",
    "warnings.filterwarnings(\"ignore\", message=\"Some weights of BlipModel were not initialized\")\n",
    "warnings.filterwarnings(\"ignore\", message=\"`BlipModel` is going to be deprecated\")\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(f\"Using device: {device}\")\n",
    "\n",
    "def load_demo_image():\n",
    "    \"\"\"Load the demo image: prefer a local demo.jpg, fall back to the BLIP demo URL.\n",
    "\n",
    "    Returns a PIL.Image in RGB mode.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        # Try a local file first\n",
    "        image = Image.open('demo.jpg').convert('RGB')\n",
    "        print(\"Loaded local image: demo.jpg\")\n",
    "    except FileNotFoundError:\n",
    "        # Fall back to the official BLIP demo image\n",
    "        img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'\n",
    "        response = requests.get(img_url, timeout=30)  # bounded wait: never hang the notebook on a dead connection\n",
    "        response.raise_for_status()  # fail loudly instead of decoding an HTML error page as an image\n",
    "        image = Image.open(BytesIO(response.content)).convert('RGB')\n",
    "        print(\"Loaded image from URL\")\n",
    "    \n",
    "    print(f\"Image size: {image.size}\")\n",
    "    return image\n",
    "\n",
    "# =============================================================================\n",
    "# 1. Image Captioning\n",
    "# =============================================================================\n",
    "def image_captioning_demo():\n",
    "    \"\"\"Caption the demo image with BLIP, comparing beam search and nucleus sampling.\"\"\"\n",
    "    print(\"\\n\" + \"=\"*50)\n",
    "    print(\"1. IMAGE CAPTIONING\")\n",
    "    print(\"=\"*50)\n",
    "    \n",
    "    # Load the image\n",
    "    image = load_demo_image()\n",
    "    \n",
    "    # Load model and processor\n",
    "    processor = BlipProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n",
    "    model = BlipForConditionalGeneration.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n",
    "    print('-'*100)\n",
    "    print(model)\n",
    "    print('-'*100)\n",
    "    model = model.to(device)\n",
    "    \n",
    "    print(f\"Model parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)/1e6:.2f}M\")\n",
    "    \n",
    "    # Preprocess the image\n",
    "    inputs = processor(image, return_tensors=\"pt\").to(device)\n",
    "    \n",
    "    # Generate captions\n",
    "    with torch.no_grad():\n",
    "        # Beam search (deterministic, keeps top-k beams)\n",
    "        print(\"\\nBeam Search Caption:\")\n",
    "        out = model.generate(**inputs, max_length=50, num_beams=3, early_stopping=True)\n",
    "        caption_beam = processor.decode(out[0], skip_special_tokens=True)\n",
    "        print(f\"Caption: {caption_beam}\")\n",
    "        \n",
    "        # Nucleus sampling (stochastic top-p decoding)\n",
    "        print(\"\\nNucleus Sampling Caption:\")\n",
    "        out = model.generate(**inputs, max_length=50, do_sample=True, top_p=0.9, temperature=0.7)\n",
    "        caption_sample = processor.decode(out[0], skip_special_tokens=True)\n",
    "        print(f\"Caption: {caption_sample}\")\n",
    "\n",
    "# =============================================================================\n",
    "# 2. Visual Question Answering\n",
    "# =============================================================================\n",
    "def visual_question_answering_demo():\n",
    "    \"\"\"Answer a list of free-form questions about the demo image with BLIP VQA.\"\"\"\n",
    "    print(\"\\n\" + \"=\"*50)\n",
    "    print(\"2. VISUAL QUESTION ANSWERING\")\n",
    "    print(\"=\"*50)\n",
    "    \n",
    "    # Load the image\n",
    "    image = load_demo_image()\n",
    "    \n",
    "    # Load the VQA model\n",
    "    processor = BlipProcessor.from_pretrained(\"Salesforce/blip-vqa-base\")\n",
    "    model = BlipForQuestionAnswering.from_pretrained(\"Salesforce/blip-vqa-base\")\n",
    "    model = model.to(device)\n",
    "    \n",
    "    print(f\"Model parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)/1e6:.2f}M\")\n",
    "    \n",
    "    # Questions to ask about the image\n",
    "    questions = [\n",
    "        \"where is the woman sitting?\",\n",
    "        \"what is the woman doing?\",\n",
    "        \"what animal is in the image?\",\n",
    "        \"what is the weather like?\",\n",
    "        \"what color is the dog?\"\n",
    "    ]\n",
    "    \n",
    "    for question in questions:\n",
    "        # Preprocess image + question together\n",
    "        inputs = processor(image, question, return_tensors=\"pt\").to(device)\n",
    "        \n",
    "        # Generate the answer\n",
    "        with torch.no_grad():\n",
    "            out = model.generate(**inputs, max_length=50)\n",
    "            answer = processor.decode(out[0], skip_special_tokens=True)\n",
    "            print(f\"Q: {question}\")\n",
    "            print(f\"A: {answer}\\n\")\n",
    "\n",
    "# =============================================================================\n",
    "# 3. Feature Extraction\n",
    "# =============================================================================\n",
    "def feature_extraction_demo():\n",
    "    \"\"\"Extract joint and separate image/text embeddings with the BLIP ITM model.\n",
    "\n",
    "    NOTE(review): this demo is disabled in run_all_demos(); the output attributes and\n",
    "    feature-extraction helpers used below are unverified against the installed\n",
    "    transformers version -- confirm before re-enabling.\n",
    "    \"\"\"\n",
    "    print(\"\\n\" + \"=\"*50)\n",
    "    print(\"3. FEATURE EXTRACTION\")\n",
    "    print(\"=\"*50)\n",
    "    \n",
    "    # Load the image\n",
    "    image = load_demo_image()\n",
    "    \n",
    "    # BlipForImageTextRetrieval is purpose-built for image-text retrieval\n",
    "    from transformers import BlipForImageTextRetrieval\n",
    "    \n",
    "    processor = BlipProcessor.from_pretrained(\"Salesforce/blip-itm-base-coco\")\n",
    "    model = BlipForImageTextRetrieval.from_pretrained(\"Salesforce/blip-itm-base-coco\")\n",
    "    model = model.to(device)\n",
    "    \n",
    "    print(f\"Model parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)/1e6:.2f}M\")\n",
    "    \n",
    "    text = \"a woman sitting on the beach with a dog\"\n",
    "    \n",
    "    # Preprocess the joint image+text input\n",
    "    inputs = processor(images=image, text=text, return_tensors=\"pt\", padding=True).to(device)\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        outputs = model(**inputs)\n",
    "        \n",
    "        # Pull out the different feature types\n",
    "        # Image features -- NOTE(review): presumably present on the output; verify attribute name\n",
    "        image_embeds = outputs.image_embeds  # [batch_size, embed_dim]\n",
    "        print(f\"Image embeddings shape: {image_embeds.shape}\")\n",
    "        \n",
    "        # Text features\n",
    "        text_embeds = outputs.text_embeds    # [batch_size, embed_dim]\n",
    "        print(f\"Text embeddings shape: {text_embeds.shape}\")\n",
    "        \n",
    "        # Image-text cosine similarity\n",
    "        similarity = torch.cosine_similarity(image_embeds, text_embeds, dim=1)\n",
    "        print(f\"Image-Text cosine similarity: {similarity.item():.4f}\")\n",
    "        \n",
    "        # Extract per-modality features separately\n",
    "        print(\"\\n--- Separate Feature Extraction ---\")\n",
    "        \n",
    "        # Image-only features -- NOTE(review): get_image_features/get_text_features may not\n",
    "        # exist on BlipForImageTextRetrieval; confirm against the installed transformers\n",
    "        image_inputs = processor(images=image, return_tensors=\"pt\").to(device)\n",
    "        image_outputs = model.get_image_features(**image_inputs)\n",
    "        print(f\"Pure image features shape: {image_outputs.shape}\")\n",
    "        \n",
    "        # Text-only features\n",
    "        text_inputs = processor(text=text, return_tensors=\"pt\", padding=True).to(device)\n",
    "        text_outputs = model.get_text_features(**text_inputs)\n",
    "        print(f\"Pure text features shape: {text_outputs.shape}\")\n",
    "        \n",
    "        # Similarity on L2-normalized features\n",
    "        image_features_norm = image_outputs / image_outputs.norm(dim=1, keepdim=True)\n",
    "        text_features_norm = text_outputs / text_outputs.norm(dim=1, keepdim=True)\n",
    "        similarity_score = torch.matmul(image_features_norm, text_features_norm.T).squeeze()\n",
    "        print(f\"Normalized similarity score: {similarity_score.item():.4f}\")\n",
    "        \n",
    "        print(f\"Feature dimension: {image_outputs.shape[1]}\")\n",
    "        print(f\"Image feature norm: {image_outputs.norm(dim=1).item():.4f}\")\n",
    "        print(f\"Text feature norm: {text_outputs.norm(dim=1).item():.4f}\")\n",
    "\n",
    "# =============================================================================\n",
    "# 4. Image-Text Matching\n",
    "# =============================================================================\n",
    "def image_text_matching_demo():\n",
    "    \"\"\"Score how well several candidate texts match the demo image (ITM + cosine).\n",
    "\n",
    "    NOTE(review): this demo is disabled in run_all_demos(); the output attributes\n",
    "    read below (image_embeds/text_embeds) are unverified against the installed\n",
    "    transformers version -- confirm before re-enabling.\n",
    "    \"\"\"\n",
    "    print(\"\\n\" + \"=\"*50)\n",
    "    print(\"4. IMAGE-TEXT MATCHING\")\n",
    "    print(\"=\"*50)\n",
    "    \n",
    "    # Load the image\n",
    "    image = load_demo_image()\n",
    "    \n",
    "    # Use the dedicated image-text matching model\n",
    "    from transformers import BlipForImageTextRetrieval\n",
    "    \n",
    "    processor = BlipProcessor.from_pretrained(\"Salesforce/blip-itm-base-coco\")\n",
    "    model = BlipForImageTextRetrieval.from_pretrained(\"Salesforce/blip-itm-base-coco\")\n",
    "    model = model.to(device)\n",
    "    \n",
    "    # Candidate descriptions to score\n",
    "    texts = [\n",
    "        \"a woman sitting on the beach with a dog\",  # matching description\n",
    "        \"a man playing football in the park\",       # non-matching description\n",
    "        \"a person relaxing by the water\",           # partially matching description\n",
    "        \"a dog running on the beach\",               # partially matching description\n",
    "        \"a beautiful sunset over the ocean\"         # non-matching description\n",
    "    ]\n",
    "    \n",
    "    print(f\"Computing similarity scores for different texts:\")\n",
    "    print(\"-\" * 60)\n",
    "    \n",
    "    for text in texts:\n",
    "        # Preprocess the joint input\n",
    "        inputs = processor(images=image, text=text, return_tensors=\"pt\", padding=True).to(device)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # Method 1: ITM head score (Image-Text Matching)\n",
    "            outputs = model(**inputs)\n",
    "            itm_score = torch.nn.functional.softmax(outputs.itm_score, dim=1)\n",
    "            match_probability = itm_score[0, 1].item()  # index 1 = \"match\" class\n",
    "            \n",
    "            # Method 2: cosine similarity of the embeddings\n",
    "            image_embeds = outputs.image_embeds\n",
    "            text_embeds = outputs.text_embeds\n",
    "            \n",
    "            # L2-normalize the embeddings\n",
    "            image_embeds_norm = image_embeds / image_embeds.norm(dim=1, keepdim=True)\n",
    "            text_embeds_norm = text_embeds / text_embeds.norm(dim=1, keepdim=True)\n",
    "            \n",
    "            # Cosine similarity\n",
    "            cosine_similarity = torch.matmul(image_embeds_norm, text_embeds_norm.T).squeeze().item()\n",
    "            \n",
    "            print(f\"Text: '{text}'\")\n",
    "            print(f\"ITM Match Probability: {match_probability:.4f}\")\n",
    "            print(f\"Cosine Similarity: {cosine_similarity:.4f}\")\n",
    "            print(\"-\" * 60)\n",
    "\n",
    "# =============================================================================\n",
    "# 5. Demo runner\n",
    "# =============================================================================\n",
    "def run_all_demos():\n",
    "    \"\"\"Run the enabled demos in sequence; report any failure instead of crashing.\"\"\"\n",
    "    print(\"BLIP Model Demonstrations using Transformers Library\")\n",
    "    print(\"=\" * 60)\n",
    "    \n",
    "    try:\n",
    "        # 1. Image captioning\n",
    "        image_captioning_demo()\n",
    "        \n",
    "        # 2. Visual question answering\n",
    "        visual_question_answering_demo()\n",
    "        \n",
    "        # 3. Feature extraction (disabled by default)\n",
    "        # feature_extraction_demo()\n",
    "        \n",
    "        # 4. Image-text matching (disabled by default)\n",
    "        # image_text_matching_demo()\n",
    "        \n",
    "        print(\"\\n\" + \"=\"*60)\n",
    "        print(\"All demonstrations completed successfully!\")\n",
    "        print(\"=\"*60)\n",
    "        \n",
    "    except Exception as e:  # broad on purpose: keep the notebook usable and print an install hint\n",
    "        print(f\"Error occurred: {str(e)}\")\n",
    "        print(\"Please make sure you have the required dependencies installed:\")\n",
    "        print(\"pip install transformers torch torchvision pillow requests\")\n",
    "\n",
    "# =============================================================================\n",
    "# Entry point\n",
    "# =============================================================================\n",
    "if __name__ == \"__main__\":\n",
    "    # Verify dependencies before running anything heavy\n",
    "    try:\n",
    "        import transformers\n",
    "        print(f\"Transformers version: {transformers.__version__}\")\n",
    "        print(f\"PyTorch version: {torch.__version__}\")\n",
    "        print(f\"Device: {device}\")\n",
    "        print(\"-\" * 60)\n",
    "        \n",
    "        # Run the demos\n",
    "        run_all_demos()\n",
    "        \n",
    "    except ImportError as e:\n",
    "        print(f\"Missing dependency: {e}\")\n",
    "        print(\"Please install required packages:\")\n",
    "        print(\"pip install transformers torch torchvision pillow requests\")\n",
    "\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using device: cuda\n",
      "Transformers version: 4.53.3\n",
      "PyTorch version: 2.7.1+cu126\n",
      "Device: cuda\n",
      "------------------------------------------------------------\n",
      "BLIP Model Demonstrations using Transformers Library\n",
      "============================================================\n",
      "\n",
      "==================================================\n",
      "1. IMAGE CAPTIONING\n",
      "==================================================\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loaded image from URL\n",
      "Image size: (2048, 1365)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "preprocessor_config.json:   0%|          | 0.00/287 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "98946a6b45ac4417a4f7c3ae5221612d"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YuBin\\AppData\\Roaming\\Python\\Python312\\site-packages\\huggingface_hub\\file_download.py:143: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in C:\\Users\\YuBin\\.cache\\huggingface\\hub\\models--Salesforce--blip-image-captioning-base. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.\n",
      "To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development\n",
      "  warnings.warn(message)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tokenizer_config.json:   0%|          | 0.00/506 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "aa1570c05be94990bbfda0b569ada15e"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "vocab.txt: 0.00B [00:00, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "5f700edc7dcf499196799b87de26b15a"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tokenizer.json: 0.00B [00:00, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "41bc4b4129b54be7ba7160460d07cde3"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "special_tokens_map.json:   0%|          | 0.00/125 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "f1b05b7966f44258b2ca0f1e7411b3f7"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "config.json: 0.00B [00:00, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "1061fc8044724b388025b4cf54ae20e5"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "pytorch_model.bin:   0%|          | 0.00/990M [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "007a19309b2548aaad51b1918279a6a3"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----------------------------------------------------------------------------------------------------\n",
      "BlipForConditionalGeneration(\n",
      "  (vision_model): BlipVisionModel(\n",
      "    (embeddings): BlipVisionEmbeddings(\n",
      "      (patch_embedding): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))\n",
      "    )\n",
      "    (encoder): BlipEncoder(\n",
      "      (layers): ModuleList(\n",
      "        (0-11): 12 x BlipEncoderLayer(\n",
      "          (self_attn): BlipAttention(\n",
      "            (dropout): Dropout(p=0.0, inplace=False)\n",
      "            (qkv): Linear(in_features=768, out_features=2304, bias=True)\n",
      "            (projection): Linear(in_features=768, out_features=768, bias=True)\n",
      "          )\n",
      "          (layer_norm1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "          (mlp): BlipMLP(\n",
      "            (activation_fn): GELUActivation()\n",
      "            (fc1): Linear(in_features=768, out_features=3072, bias=True)\n",
      "            (fc2): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          )\n",
      "          (layer_norm2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "    (post_layernorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "  )\n",
      "  (text_decoder): BlipTextLMHeadModel(\n",
      "    (bert): BlipTextModel(\n",
      "      (embeddings): BlipTextEmbeddings(\n",
      "        (word_embeddings): Embedding(30524, 768, padding_idx=0)\n",
      "        (position_embeddings): Embedding(512, 768)\n",
      "        (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "        (dropout): Dropout(p=0.0, inplace=False)\n",
      "      )\n",
      "      (encoder): BlipTextEncoder(\n",
      "        (layer): ModuleList(\n",
      "          (0-11): 12 x BlipTextLayer(\n",
      "            (attention): BlipTextAttention(\n",
      "              (self): BlipTextSelfAttention(\n",
      "                (query): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (key): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (value): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (dropout): Dropout(p=0.0, inplace=False)\n",
      "              )\n",
      "              (output): BlipTextSelfOutput(\n",
      "                (dense): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "                (dropout): Dropout(p=0.0, inplace=False)\n",
      "              )\n",
      "            )\n",
      "            (crossattention): BlipTextAttention(\n",
      "              (self): BlipTextSelfAttention(\n",
      "                (query): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (key): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (value): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (dropout): Dropout(p=0.0, inplace=False)\n",
      "              )\n",
      "              (output): BlipTextSelfOutput(\n",
      "                (dense): Linear(in_features=768, out_features=768, bias=True)\n",
      "                (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "                (dropout): Dropout(p=0.0, inplace=False)\n",
      "              )\n",
      "            )\n",
      "            (intermediate): BlipTextIntermediate(\n",
      "              (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
      "              (intermediate_act_fn): GELUActivation()\n",
      "            )\n",
      "            (output): BlipTextOutput(\n",
      "              (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
      "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "              (dropout): Dropout(p=0.0, inplace=False)\n",
      "            )\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "    (cls): BlipTextOnlyMLMHead(\n",
      "      (predictions): BlipTextLMPredictionHead(\n",
      "        (transform): BlipTextPredictionHeadTransform(\n",
      "          (dense): Linear(in_features=768, out_features=768, bias=True)\n",
      "          (transform_act_fn): GELUActivation()\n",
      "          (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "        )\n",
      "        (decoder): Linear(in_features=768, out_features=30524, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      ")\n",
      "----------------------------------------------------------------------------------------------------\n",
      "Model parameters: 247.41M\n",
      "\n",
      "Beam Search Caption:\n",
      "Caption: a woman sitting on the beach with her dog\n",
      "\n",
      "Nucleus Sampling Caption:\n",
      "Caption: a dog sitting on the sand of a beach\n",
      "\n",
      "==================================================\n",
      "2. VISUAL QUESTION ANSWERING\n",
      "==================================================\n",
      "Loaded image from URL\n",
      "Image size: (2048, 1365)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "preprocessor_config.json:   0%|          | 0.00/445 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "a429c4c492f64d9e8f3d853a53eb4f6d"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YuBin\\AppData\\Roaming\\Python\\Python312\\site-packages\\huggingface_hub\\file_download.py:143: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in C:\\Users\\YuBin\\.cache\\huggingface\\hub\\models--Salesforce--blip-vqa-base. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.\n",
      "To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development\n",
      "  warnings.warn(message)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tokenizer_config.json:   0%|          | 0.00/592 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "fadb382168624e09bad9e9ed61c579ae"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "vocab.txt: 0.00B [00:00, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "8d3b58d22b404d66989ce7ce4979c83a"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "tokenizer.json: 0.00B [00:00, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "da5c102720354601af72b11a8856b15c"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "special_tokens_map.json:   0%|          | 0.00/125 [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "d40ed1079b934463830254d60c06ba93"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "config.json: 0.00B [00:00, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "a4c9a9e925414a3f82535f2ac4daf22b"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "model.safetensors:   0%|          | 0.00/990M [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "1ae765be6ceb42f8abf258ab152fcbce"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "model.safetensors:   0%|          | 0.00/1.54G [00:00<?, ?B/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "df2cbcdc5fe14d9f945a73fda7e45906"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model parameters: 384.67M\n",
      "Q: where is the woman sitting?\n",
      "A: on beach\n",
      "\n",
      "Q: what is the woman doing?\n",
      "A: petting dog\n",
      "\n",
      "Q: what animal is in the image?\n",
      "A: dog\n",
      "\n",
      "Q: what is the weather like?\n",
      "A: sunny\n",
      "\n",
      "Q: what color is the dog?\n",
      "A: tan\n",
      "\n",
      "\n",
      "============================================================\n",
      "All demonstrations completed successfully!\n",
      "============================================================\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "markdown",
   "id": "373b5f3e",
   "metadata": {},
   "source": [
    "# 学习 Salesforce/blip-image-captioning-base 模型结构\n",
    "vision_model是ViT"
   ]
  },
  {
   "cell_type": "raw",
   "id": "70051768",
   "metadata": {},
   "source": [
    "BlipForConditionalGeneration(\n",
    "  (vision_model): BlipVisionModel(\n",
    "    (embeddings): BlipVisionEmbeddings(\n",
    "      (patch_embedding): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))\n",
    "    )\n",
    "    (encoder): BlipEncoder(\n",
    "      (layers): ModuleList(\n",
    "        (0-11): 12 x BlipEncoderLayer(\n",
    "          (self_attn): BlipAttention(\n",
    "            (dropout): Dropout(p=0.0, inplace=False)\n",
    "            (qkv): Linear(in_features=768, out_features=2304, bias=True)\n",
    "            (projection): Linear(in_features=768, out_features=768, bias=True)\n",
    "          )\n",
    "          (layer_norm1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
    "          (mlp): BlipMLP(\n",
    "            (activation_fn): GELUActivation()\n",
    "            (fc1): Linear(in_features=768, out_features=3072, bias=True)\n",
    "            (fc2): Linear(in_features=3072, out_features=768, bias=True)\n",
    "          )\n",
    "          (layer_norm2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (post_layernorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
    "  )\n",
    "  (text_decoder): BlipTextLMHeadModel(\n",
    "    (bert): BlipTextModel(\n",
    "      (embeddings): BlipTextEmbeddings(\n",
    "        (word_embeddings): Embedding(30524, 768, padding_idx=0)\n",
    "        (position_embeddings): Embedding(512, 768)\n",
    "        (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
    "        (dropout): Dropout(p=0.0, inplace=False)\n",
    "      )\n",
    "      (encoder): BlipTextEncoder(\n",
    "        (layer): ModuleList(\n",
    "          (0-11): 12 x BlipTextLayer(\n",
    "            (attention): BlipTextAttention(\n",
    "              (self): BlipTextSelfAttention(\n",
    "                (query): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (key): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (value): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (dropout): Dropout(p=0.0, inplace=False)\n",
    "              )\n",
    "              (output): BlipTextSelfOutput(\n",
    "                (dense): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
    "                (dropout): Dropout(p=0.0, inplace=False)\n",
    "              )\n",
    "            )\n",
    "            (crossattention): BlipTextAttention(\n",
    "              (self): BlipTextSelfAttention(\n",
    "                (query): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (key): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (value): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (dropout): Dropout(p=0.0, inplace=False)\n",
    "              )\n",
    "              (output): BlipTextSelfOutput(\n",
    "                (dense): Linear(in_features=768, out_features=768, bias=True)\n",
    "                (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
    "                (dropout): Dropout(p=0.0, inplace=False)\n",
    "              )\n",
    "            )\n",
    "            (intermediate): BlipTextIntermediate(\n",
    "              (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
    "              (intermediate_act_fn): GELUActivation()\n",
    "            )\n",
    "            (output): BlipTextOutput(\n",
    "              (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
    "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
    "              (dropout): Dropout(p=0.0, inplace=False)\n",
    "            )\n",
    "          )\n",
    "        )\n",
    "      )\n",
    "    )\n",
    "    (cls): BlipTextOnlyMLMHead(\n",
    "      (predictions): BlipTextLMPredictionHead(\n",
    "        (transform): BlipTextPredictionHeadTransform(\n",
    "          (dense): Linear(in_features=768, out_features=768, bias=True)\n",
    "          (transform_act_fn): GELUActivation()\n",
    "          (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
    "        )\n",
    "        (decoder): Linear(in_features=768, out_features=30524, bias=True)\n",
    "      )\n",
    "    )\n",
    "  )\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
