{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 9,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000,
          "referenced_widgets": [
            "07a35e1bc22346f79a262ed0a5fd73b9",
            "b90ca558327e4c57be376cfae69223e1",
            "39f81e354e78460b92d829f985cb395d",
            "8b230f62b0214ddc9b856b07030a5569",
            "1d0e1c510b674fc9ac28d209851c7784",
            "d616796df73f4f0bba52bd4248022881",
            "dc0b503a0f3f4f0ab2ea857aae470496",
            "7dc2d9a18d55424b97305026f6f3ed0a",
            "68b6e05932844140ab526d56aa152ab3",
            "e351888a8a8748a690e8eb3a0e59f4ae",
            "03a4fa7f8564488586dc94993bcd73e5"
          ]
        },
        "id": "vZdtzBHn9U2u",
        "outputId": "ad6395da-6a65-41d9-c2d6-0a7965070af9"
      },
      "outputs": [
        {
          "data": {
            "application/vnd.jupyter.widget-view+json": {
              "model_id": "e2bdc659e318475d8f0582afd11190be",
              "version_major": 2,
              "version_minor": 0
            },
            "text/plain": [
              "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
            ]
          },
          "metadata": {},
          "output_type": "display_data"
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Blip2Processor:\n",
            "- image_processor: BlipImageProcessor {\n",
            "  \"do_convert_rgb\": true,\n",
            "  \"do_normalize\": true,\n",
            "  \"do_rescale\": true,\n",
            "  \"do_resize\": true,\n",
            "  \"image_mean\": [\n",
            "    0.48145466,\n",
            "    0.4578275,\n",
            "    0.40821073\n",
            "  ],\n",
            "  \"image_processor_type\": \"BlipImageProcessor\",\n",
            "  \"image_std\": [\n",
            "    0.26862954,\n",
            "    0.26130258,\n",
            "    0.27577711\n",
            "  ],\n",
            "  \"processor_class\": \"Blip2Processor\",\n",
            "  \"resample\": 3,\n",
            "  \"rescale_factor\": 0.00392156862745098,\n",
            "  \"size\": {\n",
            "    \"height\": 224,\n",
            "    \"width\": 224\n",
            "  }\n",
            "}\n",
            "\n",
            "- tokenizer: GPT2TokenizerFast(name_or_path='Salesforce/blip2-opt-2.7b', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=False, added_tokens_decoder={\n",
            "\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
            "\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
            "\t50265: AddedToken(\"<image>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
            "}\n",
            ")\n",
            "\n",
            "{\n",
            "  \"num_query_tokens\": 32,\n",
            "  \"processor_class\": \"Blip2Processor\"\n",
            "}\n",
            "\n",
            "----------------------------------------------------------------------------------------------------\n",
            "torch.Size([1, 3, 224, 224])\n",
            "----------------------------------------------------------------------------------------------------\n",
            "Blip2ForConditionalGeneration(\n",
            "  (vision_model): Blip2VisionModel(\n",
            "    (embeddings): Blip2VisionEmbeddings(\n",
            "      (patch_embedding): Conv2d(3, 1408, kernel_size=(14, 14), stride=(14, 14))\n",
            "    )\n",
            "    (encoder): Blip2Encoder(\n",
            "      (layers): ModuleList(\n",
            "        (0-38): 39 x Blip2EncoderLayer(\n",
            "          (self_attn): Blip2Attention(\n",
            "            (qkv): Linear(in_features=1408, out_features=4224, bias=True)\n",
            "            (projection): Linear(in_features=1408, out_features=1408, bias=True)\n",
            "          )\n",
            "          (layer_norm1): LayerNorm((1408,), eps=1e-06, elementwise_affine=True)\n",
            "          (mlp): Blip2MLP(\n",
            "            (activation_fn): GELUActivation()\n",
            "            (fc1): Linear(in_features=1408, out_features=6144, bias=True)\n",
            "            (fc2): Linear(in_features=6144, out_features=1408, bias=True)\n",
            "          )\n",
            "          (layer_norm2): LayerNorm((1408,), eps=1e-06, elementwise_affine=True)\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "    (post_layernorm): LayerNorm((1408,), eps=1e-06, elementwise_affine=True)\n",
            "  )\n",
            "  (qformer): Blip2QFormerModel(\n",
            "    (layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "    (dropout): Dropout(p=0.1, inplace=False)\n",
            "    (encoder): Blip2QFormerEncoder(\n",
            "      (layer): ModuleList(\n",
            "        (0): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (crossattention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (1): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (2): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (crossattention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (3): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (4): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (crossattention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (5): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (6): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (crossattention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (7): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (8): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (crossattention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (9): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (10): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (crossattention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "        (11): Blip2QFormerLayer(\n",
            "          (attention): Blip2QFormerAttention(\n",
            "            (attention): Blip2QFormerMultiHeadAttention(\n",
            "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "            (output): Blip2QFormerSelfOutput(\n",
            "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
            "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "              (dropout): Dropout(p=0.1, inplace=False)\n",
            "            )\n",
            "          )\n",
            "          (intermediate_query): Blip2QFormerIntermediate(\n",
            "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
            "            (intermediate_act_fn): GELUActivation()\n",
            "          )\n",
            "          (output_query): Blip2QFormerOutput(\n",
            "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
            "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
            "            (dropout): Dropout(p=0.1, inplace=False)\n",
            "          )\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "  )\n",
            "  (language_projection): Linear(in_features=768, out_features=2560, bias=True)\n",
            "  (language_model): OPTForCausalLM(\n",
            "    (model): OPTModel(\n",
            "      (decoder): OPTDecoder(\n",
            "        (embed_tokens): Embedding(50304, 2560, padding_idx=1)\n",
            "        (embed_positions): OPTLearnedPositionalEmbedding(2050, 2560)\n",
            "        (final_layer_norm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
            "        (layers): ModuleList(\n",
            "          (0-31): 32 x OPTDecoderLayer(\n",
            "            (self_attn): OPTAttention(\n",
            "              (k_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
            "              (v_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
            "              (q_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
            "              (out_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
            "            )\n",
            "            (activation_fn): ReLU()\n",
            "            (self_attn_layer_norm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
            "            (fc1): Linear(in_features=2560, out_features=10240, bias=True)\n",
            "            (fc2): Linear(in_features=10240, out_features=2560, bias=True)\n",
            "            (final_layer_norm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
            "          )\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "    (lm_head): Linear(in_features=2560, out_features=50304, bias=False)\n",
            "  )\n",
            ")\n",
            "two cats laying on a couch\n"
          ]
        }
      ],
      "source": [
        "from PIL import Image  # PIL's Image module, used to open/handle the input image\n",
        "import requests  # HTTP client; only needed for the commented-out URL download below\n",
        "from transformers import Blip2Processor, Blip2ForConditionalGeneration  # BLIP-2 processor + conditional-generation model\n",
        "import torch  # PyTorch (device selection, dtypes)\n",
        "\n",
        "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"  # prefer GPU when available, otherwise fall back to CPU\n",
        "\n",
        "processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")  # load the BLIP-2 processor (image preprocessor + tokenizer)\n",
        "model = Blip2ForConditionalGeneration.from_pretrained(\n",
        "    \"Salesforce/blip2-opt-2.7b\", torch_dtype=torch.float16  # load the BLIP-2 model in float16 to reduce memory use\n",
        ")\n",
        "\n",
        "\n",
        "model.to(device)  # move model weights to the selected device (GPU or CPU)\n",
        "# url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"  # original COCO val2017 image URL (download path disabled)\n",
        "image = Image.open('000000039769.jpg')  # open the local copy of the image (NOT downloaded here — the URL fetch above is commented out)\n",
        "print(processor)  # show the processor configuration\n",
        "print('-'*100)  # separator line\n",
        "inputs = processor(images=image, return_tensors=\"pt\").to(device, torch.float16)  # preprocess the image into a tensor; move to device as float16 to match model dtype\n",
        "print(inputs['pixel_values'].shape)  # shape of the preprocessed image tensor\n",
        "generated_ids = model.generate(**inputs)  # generate caption token IDs (no text prompt -> plain image captioning)\n",
        "print('-'*100)  # separator line\n",
        "print(model)  # show the full model architecture\n",
        "generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()  # decode generated token IDs into text\n",
        "print(generated_text)  # print the generated caption"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Using device: cpu\n",
            "\n",
            "============================================================\n",
            "Alternative Method: Using Forward Hooks\n",
            "============================================================\n"
          ]
        },
        {
          "data": {
            "application/vnd.jupyter.widget-view+json": {
              "model_id": "80745faab2eb4f2cbee46cc4f86ba1ca",
              "version_major": 2,
              "version_minor": 0
            },
            "text/plain": [
              "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
            ]
          },
          "metadata": {},
          "output_type": "display_data"
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Using forward hooks method...\n",
            "Q-former attention input shape: torch.Size([1, 32, 768])\n",
            "Q-former crossattention input shape: torch.Size([1, 32, 768])\n",
            "Encoder hidden states shape: torch.Size([1, 257, 1408])\n",
            "Generated caption (using hooks): two cats laying on a couch\n",
            "Hooks removed successfully\n"
          ]
        }
      ],
      "source": [
        "from PIL import Image  # PIL Image module for loading images\n",
        "import requests  # HTTP client (not used in this cell; kept from the original version)\n",
        "from transformers import Blip2Processor, Blip2ForConditionalGeneration  # BLIP-2 processor and model\n",
        "import torch  # PyTorch\n",
        "import types  # NOTE(review): imported for method binding but never used in this cell — consider removing\n",
        "\n",
        "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"  # prefer GPU when available\n",
        "image = Image.open('000000039769.jpg')\n",
        "print(f\"Using device: {device}\")\n",
        "# =============================================================================\n",
        "# Method 2: safer monitoring via forward hooks (recommended)\n",
        "# =============================================================================\n",
        "print(\"\\n\" + \"=\"*60)\n",
        "print(\"Alternative Method: Using Forward Hooks\")\n",
        "print(\"=\"*60)\n",
        "# Load the BLIP-2 processor; the model itself is loaded fresh inside the function below\n",
        "processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")  # image/text pre- and post-processing\n",
        "def reset_model_and_use_hooks():\n",
        "    \"\"\"Reload a clean BLIP-2 model and monitor Q-Former activations with forward hooks.\n",
        "\n",
        "    Registers hooks on the first Q-Former layer's self-attention and cross-attention\n",
        "    modules, runs caption generation once so the hooks print input shapes, then\n",
        "    removes the hooks in a finally block.\n",
        "    \"\"\"\n",
        "    \n",
        "    # Reload a clean copy of the model (no leftover patched methods from earlier cells)\n",
        "    model_clean = Blip2ForConditionalGeneration.from_pretrained(\n",
        "        \"Salesforce/blip2-opt-2.7b\", \n",
        "        torch_dtype=torch.float16\n",
        "    )\n",
        "    model_clean.to(device)\n",
        "    \n",
        "    # Hook callbacks: print the shapes of positional inputs as the modules run\n",
        "    def attention_hook(module, input, output):\n",
        "        if len(input) > 0:\n",
        "            print(f\"Q-former attention input shape: {input[0].shape}\")\n",
        "    \n",
        "    def crossattention_hook(module, input, output):\n",
        "        if len(input) > 0:\n",
        "            print(f\"Q-former crossattention input shape: {input[0].shape}\")\n",
        "            if len(input) > 3 and input[3] is not None:  # positional arg 3 is encoder_hidden_states (vision features)\n",
        "                print(f\"Encoder hidden states shape: {input[3].shape}\")\n",
        "    \n",
        "    # Register forward hooks on the first Q-Former layer only\n",
        "    attention_handle = model_clean.qformer.encoder.layer[0].attention.register_forward_hook(attention_hook)\n",
        "    crossattention_handle = model_clean.qformer.encoder.layer[0].crossattention.register_forward_hook(crossattention_hook)\n",
        "    \n",
        "    try:\n",
        "        print(\"Using forward hooks method...\")\n",
        "        \n",
        "        # Preprocess the image into a pixel_values tensor on the target device (fp16)\n",
        "        inputs = processor(images=image, return_tensors=\"pt\").to(device, torch.float16)\n",
        "        \n",
        "        # Generate the caption; the hooks fire during the forward passes\n",
        "        with torch.no_grad():\n",
        "            generated_ids = model_clean.generate(**inputs, max_length=50)\n",
        "        \n",
        "        # Decode the generated token ids to text\n",
        "        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n",
        "        print(f\"Generated caption (using hooks): {generated_text}\")\n",
        "        \n",
        "    except Exception as e:\n",
        "        print(f\"Hook method error: {str(e)}\")\n",
        "    \n",
        "    finally:\n",
        "        # Always detach the hooks, even if generation failed\n",
        "        attention_handle.remove()\n",
        "        crossattention_handle.remove()\n",
        "        print(\"Hooks removed successfully\")\n",
        "\n",
        "# Run the hook-based method\n",
        "try:\n",
        "    reset_model_and_use_hooks()\n",
        "except Exception as e:\n",
        "    print(f\"Hook method failed: {str(e)}\")\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "Blip2ForConditionalGeneration(\n",
        "  (vision_model): Blip2VisionModel(\n",
        "    (embeddings): Blip2VisionEmbeddings(\n",
        "      (patch_embedding): Conv2d(3, 1408, kernel_size=(14, 14), stride=(14, 14))\n",
        "    )\n",
        "    (encoder): Blip2Encoder(\n",
        "      (layers): ModuleList(\n",
        "        (0-38): 39 x Blip2EncoderLayer(\n",
        "          (self_attn): Blip2Attention(\n",
        "            (dropout): Dropout(p=0.0, inplace=False)\n",
        "            (qkv): Linear(in_features=1408, out_features=4224, bias=True)\n",
        "            (projection): Linear(in_features=1408, out_features=1408, bias=True)\n",
        "          )\n",
        "          (layer_norm1): LayerNorm((1408,), eps=1e-06, elementwise_affine=True)\n",
        "          (mlp): Blip2MLP(\n",
        "            (activation_fn): GELUActivation()\n",
        "            (fc1): Linear(in_features=1408, out_features=6144, bias=True)\n",
        "            (fc2): Linear(in_features=6144, out_features=1408, bias=True)\n",
        "          )\n",
        "          (layer_norm2): LayerNorm((1408,), eps=1e-06, elementwise_affine=True)\n",
        "        )\n",
        "      )\n",
        "    )\n",
        "    (post_layernorm): LayerNorm((1408,), eps=1e-06, elementwise_affine=True)\n",
        "  )\n",
        "  (qformer): Blip2QFormerModel(\n",
        "    (layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "    (dropout): Dropout(p=0.1, inplace=False)\n",
        "    (encoder): Blip2QFormerEncoder(\n",
        "      (layer): ModuleList(\n",
        "        (0): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (crossattention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (1): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (2): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (crossattention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (3): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (4): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (crossattention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (5): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (6): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (crossattention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (7): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (8): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (crossattention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (9): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (10): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (crossattention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=1408, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "        (11): Blip2QFormerLayer(\n",
        "          (attention): Blip2QFormerAttention(\n",
        "            (attention): Blip2QFormerMultiHeadAttention(\n",
        "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "            (output): Blip2QFormerSelfOutput(\n",
        "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
        "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "              (dropout): Dropout(p=0.1, inplace=False)\n",
        "            )\n",
        "          )\n",
        "          (intermediate_query): Blip2QFormerIntermediate(\n",
        "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
        "            (intermediate_act_fn): GELUActivation()\n",
        "          )\n",
        "          (output_query): Blip2QFormerOutput(\n",
        "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
        "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
        "            (dropout): Dropout(p=0.1, inplace=False)\n",
        "          )\n",
        "        )\n",
        "      )\n",
        "    )\n",
        "  )\n",
        "  (language_projection): Linear(in_features=768, out_features=2560, bias=True)\n",
        "  (language_model): OPTForCausalLM(\n",
        "    (model): OPTModel(\n",
        "      (decoder): OPTDecoder(\n",
        "        (embed_tokens): Embedding(50304, 2560, padding_idx=1)\n",
        "        (embed_positions): OPTLearnedPositionalEmbedding(2050, 2560)\n",
        "        (final_layer_norm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
        "        (layers): ModuleList(\n",
        "          (0-31): 32 x OPTDecoderLayer(\n",
        "            (self_attn): OPTSdpaAttention(\n",
        "              (k_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
        "              (v_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
        "              (q_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
        "              (out_proj): Linear(in_features=2560, out_features=2560, bias=True)\n",
        "            )\n",
        "            (activation_fn): ReLU()\n",
        "            (self_attn_layer_norm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
        "            (fc1): Linear(in_features=2560, out_features=10240, bias=True)\n",
        "            (fc2): Linear(in_features=10240, out_features=2560, bias=True)\n",
        "            (final_layer_norm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
        "          )\n",
        "        )\n",
        "      )\n",
        "    )\n",
        "    (lm_head): Linear(in_features=2560, out_features=50304, bias=False)\n",
        "  )\n",
        ")"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "gpuType": "L4",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.3"
    },
    "widgets": {
      "application/vnd.jupyter.widget-state+json": {
        "03a4fa7f8564488586dc94993bcd73e5": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "DescriptionStyleModel",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "07a35e1bc22346f79a262ed0a5fd73b9": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "HBoxModel",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HBoxModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HBoxView",
            "box_style": "",
            "children": [
              "IPY_MODEL_b90ca558327e4c57be376cfae69223e1",
              "IPY_MODEL_39f81e354e78460b92d829f985cb395d",
              "IPY_MODEL_8b230f62b0214ddc9b856b07030a5569"
            ],
            "layout": "IPY_MODEL_1d0e1c510b674fc9ac28d209851c7784"
          }
        },
        "1d0e1c510b674fc9ac28d209851c7784": {
          "model_module": "@jupyter-widgets/base",
          "model_module_version": "1.2.0",
          "model_name": "LayoutModel",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "39f81e354e78460b92d829f985cb395d": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "FloatProgressModel",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "FloatProgressModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "ProgressView",
            "bar_style": "success",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_7dc2d9a18d55424b97305026f6f3ed0a",
            "max": 2,
            "min": 0,
            "orientation": "horizontal",
            "style": "IPY_MODEL_68b6e05932844140ab526d56aa152ab3",
            "value": 2
          }
        },
        "68b6e05932844140ab526d56aa152ab3": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "ProgressStyleModel",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "ProgressStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "bar_color": null,
            "description_width": ""
          }
        },
        "7dc2d9a18d55424b97305026f6f3ed0a": {
          "model_module": "@jupyter-widgets/base",
          "model_module_version": "1.2.0",
          "model_name": "LayoutModel",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "8b230f62b0214ddc9b856b07030a5569": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "HTMLModel",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_e351888a8a8748a690e8eb3a0e59f4ae",
            "placeholder": "​",
            "style": "IPY_MODEL_03a4fa7f8564488586dc94993bcd73e5",
            "value": " 2/2 [00:01&lt;00:00,  1.11it/s]"
          }
        },
        "b90ca558327e4c57be376cfae69223e1": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "HTMLModel",
          "state": {
            "_dom_classes": [],
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "HTMLModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/controls",
            "_view_module_version": "1.5.0",
            "_view_name": "HTMLView",
            "description": "",
            "description_tooltip": null,
            "layout": "IPY_MODEL_d616796df73f4f0bba52bd4248022881",
            "placeholder": "​",
            "style": "IPY_MODEL_dc0b503a0f3f4f0ab2ea857aae470496",
            "value": "Loading checkpoint shards: 100%"
          }
        },
        "d616796df73f4f0bba52bd4248022881": {
          "model_module": "@jupyter-widgets/base",
          "model_module_version": "1.2.0",
          "model_name": "LayoutModel",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        },
        "dc0b503a0f3f4f0ab2ea857aae470496": {
          "model_module": "@jupyter-widgets/controls",
          "model_module_version": "1.5.0",
          "model_name": "DescriptionStyleModel",
          "state": {
            "_model_module": "@jupyter-widgets/controls",
            "_model_module_version": "1.5.0",
            "_model_name": "DescriptionStyleModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "StyleView",
            "description_width": ""
          }
        },
        "e351888a8a8748a690e8eb3a0e59f4ae": {
          "model_module": "@jupyter-widgets/base",
          "model_module_version": "1.2.0",
          "model_name": "LayoutModel",
          "state": {
            "_model_module": "@jupyter-widgets/base",
            "_model_module_version": "1.2.0",
            "_model_name": "LayoutModel",
            "_view_count": null,
            "_view_module": "@jupyter-widgets/base",
            "_view_module_version": "1.2.0",
            "_view_name": "LayoutView",
            "align_content": null,
            "align_items": null,
            "align_self": null,
            "border": null,
            "bottom": null,
            "display": null,
            "flex": null,
            "flex_flow": null,
            "grid_area": null,
            "grid_auto_columns": null,
            "grid_auto_flow": null,
            "grid_auto_rows": null,
            "grid_column": null,
            "grid_gap": null,
            "grid_row": null,
            "grid_template_areas": null,
            "grid_template_columns": null,
            "grid_template_rows": null,
            "height": null,
            "justify_content": null,
            "justify_items": null,
            "left": null,
            "margin": null,
            "max_height": null,
            "max_width": null,
            "min_height": null,
            "min_width": null,
            "object_fit": null,
            "object_position": null,
            "order": null,
            "overflow": null,
            "overflow_x": null,
            "overflow_y": null,
            "padding": null,
            "right": null,
            "top": null,
            "visibility": null,
            "width": null
          }
        }
      }
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
