{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "\n",
    "sys.path.append(\"/home/wenhongli/workspace/openvla_q\")\n",
    "\n",
    "from quantize import build_vla, patch_model_for_train\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from pathlib import Path\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_path = Path(\n",
    "    r\"/home/wenhongli/openvla/checkpoints/pick_banana_state_diff_224/model/openvla-7b+casia_franka+b16+lr-0.0005+lora-r32+dropout-0.0--image_aug/\"\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "processor, vla, inputs, img, action_org, vits = build_vla(model_path)\n",
    "vla\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "vla = patch_model_for_train(vla, **inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "vla.config.image_sizes"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "deepseek",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}