{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The default implementation runs in 4368.545 microseconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_1281754/3640729754.py:36: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  attn_w = torch.nn.functional.softmax(attn_w,dtype=torch.float32).to(q.dtype)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The tc implementation runs in 43834.311 microseconds\n",
      "The flex_attention implementation runs in 93869.371 microseconds\n",
      "The math implementation runs in 80661.202 microseconds\n",
      "The cudnn implementation runs in 4647.196 microseconds\n",
      "The flash attention implementation runs in 4388.902 microseconds\n",
      "The memory efficient implementation runs in 7805.385 microseconds\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from pathlib import Path\n",
    "import numpy as np\n",
    "\n",
    "# Lets define a helpful benchmarking function:\n",
    "import torch.utils.benchmark as benchmark\n",
    "\n",
    "# Lets explore the speed of each of the 3 implementations\n",
    "from torch.nn.attention import SDPBackend, sdpa_kernel\n",
    "from torch.nn.attention.flex_attention import flex_attention, create_block_mask\n",
    "\n",
    "device = \"cuda\"\n",
    "\n",
    "\n",
    "def benchmark_torch_function_in_microseconds(f, *args, **kwargs):\n",
    "    # Warm-up phase (run several times without timing)\n",
    "    for _ in range(10):\n",
    "        f(*args, **kwargs)\n",
    "\n",
    "    # Synchronize if using CUDA\n",
    "    if torch.cuda.is_available():\n",
    "        torch.cuda.synchronize()\n",
    "    \n",
    "    t0 = benchmark.Timer(\n",
    "        stmt=\"f(*args, **kwargs)\",\n",
    "        globals={\"args\": args, \"kwargs\": kwargs, \"f\": f},\n",
    "        sub_label=f\"{f.__name__}\",\n",
    "    )\n",
    "    return t0.blocked_autorange().mean * 1e6\n",
    "\n",
    "def sdpa(q,k,v,is_causal=None):\n",
    "    attn_w =  torch.matmul(q,k.transpose(-1,-2))\n",
    "    attn_w = attn_w / q.shape[-1] ** 0.5\n",
    "    attn_w = torch.nn.functional.softmax(attn_w,dtype=torch.float32).to(q.dtype)\n",
    "    \n",
    "    return torch.matmul(attn_w,v)\n",
    "\n",
    "# Lets define the hyper-parameters of our input\n",
    "batch_size = 12\n",
    "max_sequence_len = 2048\n",
    "num_heads = 32\n",
    "embed_dimension = 128\n",
    "\n",
    "dtype = torch.bfloat16\n",
    "\n",
    "query = torch.rand(\n",
    "    batch_size, num_heads, max_sequence_len, embed_dimension, device=device, dtype=dtype\n",
    ")\n",
    "key = torch.rand(\n",
    "    batch_size, num_heads, max_sequence_len, embed_dimension, device=device, dtype=dtype\n",
    ")\n",
    "value = torch.rand(\n",
    "    batch_size, num_heads, max_sequence_len, embed_dimension, device=device, dtype=dtype\n",
    ")\n",
    "\n",
    "print(\n",
    "    f\"The default implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds\"\n",
    ")\n",
    "\n",
    "\n",
    "def noop(score, b, h, q_idx, kv_idx):\n",
    "    return score\n",
    "\n",
    "\n",
    "def causal(b, h, q_idx, kv_idx):\n",
    "    return q_idx >= kv_idx\n",
    "\n",
    "\n",
    "\n",
    "SLIDING_WINDOW = 512\n",
    "# from torch.nn.attention import and_masks\n",
    "\n",
    "# def sliding_window(b, h, q_idx, kv_idx):\n",
    "#     return q_idx - kv_idx <= SLIDING_WINDOW\n",
    "\n",
    "# sliding_window_causal = and_masks(causal, sliding_window)\n",
    "\n",
    "def sliding_window_causal(b, h, q_idx, kv_idx):\n",
    "    causal_mask = q_idx >= kv_idx\n",
    "    window_mask = q_idx - kv_idx <= SLIDING_WINDOW\n",
    "    return causal_mask & window_mask\n",
    "\n",
    "\n",
    "block_mask = create_block_mask(\n",
    "    sliding_window_causal,\n",
    "    B=None,\n",
    "    H=None,\n",
    "    Q_LEN=max_sequence_len,\n",
    "    KV_LEN=max_sequence_len,\n",
    ")\n",
    "\n",
    "tc_time = benchmark_torch_function_in_microseconds(\n",
    "    sdpa,\n",
    "    query,\n",
    "    key,\n",
    "    value,\n",
    "    # score_mod=noop,\n",
    "    # block_mask=block_mask,\n",
    ")\n",
    "print(f\"The tc implementation runs in {tc_time:.3f} microseconds\")\n",
    "\n",
    "\n",
    "flex_time = benchmark_torch_function_in_microseconds(\n",
    "    flex_attention,\n",
    "    query,\n",
    "    key,\n",
    "    value,\n",
    "    # score_mod=noop,\n",
    "    # block_mask=block_mask,\n",
    ")\n",
    "print(f\"The flex_attention implementation runs in {flex_time:.3f} microseconds\")\n",
    "\n",
    "\n",
    "with sdpa_kernel(SDPBackend.MATH):\n",
    "    math_time = benchmark_torch_function_in_microseconds(\n",
    "        F.scaled_dot_product_attention, query, key, value\n",
    "    )\n",
    "    print(f\"The math implementation runs in {math_time:.3f} microseconds\")\n",
    "\n",
    "with sdpa_kernel(SDPBackend.CUDNN_ATTENTION):\n",
    "    math_time = benchmark_torch_function_in_microseconds(\n",
    "        F.scaled_dot_product_attention, query, key, value\n",
    "    )\n",
    "    print(f\"The cudnn implementation runs in {math_time:.3f} microseconds\")\n",
    "\n",
    "with sdpa_kernel(SDPBackend.FLASH_ATTENTION):\n",
    "    try:\n",
    "        flash_time = benchmark_torch_function_in_microseconds(\n",
    "            F.scaled_dot_product_attention, query, key, value\n",
    "        )\n",
    "        print(\n",
    "            f\"The flash attention implementation runs in {flash_time:.3f} microseconds\"\n",
    "        )\n",
    "    except RuntimeError:\n",
    "        print(\"FlashAttention is not supported. See warnings for reasons.\")\n",
    "\n",
    "with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION):\n",
    "    try:\n",
    "        efficient_time = benchmark_torch_function_in_microseconds(\n",
    "            F.scaled_dot_product_attention, query, key, value\n",
    "        )\n",
    "        print(\n",
    "            f\"The memory efficient implementation runs in {efficient_time:.3f} microseconds\"\n",
    "        )\n",
    "    except RuntimeError:\n",
    "        print(\"EfficientAttention is not supported. See warnings for reasons.\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "for _ in range(1000):\n",
    "    flex_attention(\n",
    "        query,\n",
    "        key,\n",
    "        value,\n",
    "        # score_mod=noop,\n",
    "        # block_mask=block_mask,\n",
    "    )\n",
    "torch.cuda.synchronize()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "for _ in range(1000):\n",
    "    with sdpa_kernel(SDPBackend.CUDNN_ATTENTION):\n",
    "        F.scaled_dot_product_attention(query, key, value, is_causal=True)\n",
    "torch.cuda.synchronize()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_from_camera():\n",
    "    from PIL import Image, ImageDraw, ImageFont\n",
    "\n",
    "    # 创建一个空白图像，背景为白色\n",
    "    image = Image.new('RGB', (100, 100), color = (255, 255, 255))\n",
    "\n",
    "    # 创建一个绘图对象\n",
    "    draw = ImageDraw.Draw(image)\n",
    "\n",
    "    # 加载字体（可以使用系统自带的字体，或者指定字体文件路径）\n",
    "    try:\n",
    "        font = ImageFont.truetype(\"arial.ttf\", 80)\n",
    "    except IOError:\n",
    "        font = ImageFont.load_default()\n",
    "\n",
    "    # 在图像上绘制字母 \"W\"\n",
    "    draw.text((10, 10), \"W\", font=font, fill=(0, 0, 0))\n",
    "\n",
    "    return image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.48, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n",
      "Expected `transformers==4.40.1` and `tokenizers==0.19.1` but got `transformers==4.48.3` and `tokenizers==0.21.0`; there might be inference-time regressions due to dependency changes. If in doubt, pleaseuse the above versions.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8654da9d84f544059258ac8ff93c1f0d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/4 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[    1,   512, 29901,  1724,  3158,   881,   278, 19964,  2125,   304,\n",
      "           426, 29966,  1177, 10810, 29965,  9838, 29958, 29913, 29973,    13,\n",
      "          3744, 29901]], device='cuda:7'), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],\n",
      "       device='cuda:7'), 'pixel_values': tensor([[[[2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          ...,\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500]],\n",
      "\n",
      "         [[2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          ...,\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375]],\n",
      "\n",
      "         [[2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          ...,\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406]],\n",
      "\n",
      "         [[1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          ...,\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000]],\n",
      "\n",
      "         [[1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          ...,\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000]],\n",
      "\n",
      "         [[1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          ...,\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16)}\n",
      "PrismaticCausalLMOutputWithPast(loss=None, logits=tensor([[[  2.5625,   4.3750,   0.4766,  ...,   4.1875,   4.0625,   3.0156],\n",
      "         [ -4.5625,  -6.3125,   1.0625,  ...,  -3.3906,  -3.8594,  -4.4688],\n",
      "         [ -2.3594,  -4.8438,  -1.7578,  ...,  -3.8125,   1.1719,  -2.3906],\n",
      "         ...,\n",
      "         [  2.4688,   3.8125,   0.5859,  ...,   3.9844,   3.9375,   2.9062],\n",
      "         [ -5.5938,  -6.5625,   0.1465,  ...,  -6.6250,  -6.2500,  -8.6250],\n",
      "         [ -8.9375,  -6.6562,  -2.3594,  ...,  -8.8750,  -9.6250, -11.3750]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16), past_key_values=DynamicCache(), hidden_states=None, attentions=None, projector_features=tensor([[[ 0.1157,  0.2119, -0.2051,  ..., -0.1465,  0.0869,  0.2812],\n",
      "         [ 0.0374,  0.1260, -0.4297,  ...,  0.2773, -0.1562,  0.0762],\n",
      "         [ 0.0157,  0.2441,  0.0295,  ...,  0.0508,  0.2158,  0.0967],\n",
      "         ...,\n",
      "         [ 0.2520, -0.1006, -0.2002,  ...,  0.0007,  0.2383, -0.3066],\n",
      "         [ 0.2412,  0.1118, -0.1855,  ..., -0.1260,  0.0227, -0.1118],\n",
      "         [ 0.1147,  0.2109, -0.3496,  ..., -0.4883, -0.4648, -0.3086]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16))\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoModelForVision2Seq, AutoProcessor\n",
    "from PIL import Image\n",
    "\n",
    "\n",
    "\n",
    "from pathlib import Path\n",
    "\n",
    "import torch\n",
    "model_path = Path(\n",
    "    r\"/home/wenhongli/openvla/checkpoints/pick_banana_state_diff_224/model/openvla-7b+casia_franka+b16+lr-0.0005+lora-r32+dropout-0.0--image_aug/\"\n",
    ")\n",
    "\n",
    "# Load Processor & VLA\n",
    "processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)\n",
    "vla = AutoModelForVision2Seq.from_pretrained(\n",
    "    model_path,\n",
    "    attn_implementation=\"sdpa\",  # [Optional] Requires `flash_attn`\n",
    "    torch_dtype=torch.bfloat16,\n",
    "    low_cpu_mem_usage=True,\n",
    "    trust_remote_code=True,\n",
    "    device_map=\"cuda:7\"\n",
    ")\n",
    "vla = vla.eval()\n",
    "\n",
    "# # Grab image input & format prompt\n",
    "image: Image.Image = get_from_camera()\n",
    "prompt = \"In: What action should the robot take to {<INSTRUCTION>}?\\nOut:\"\n",
    "\n",
    "# # Predict Action (7-DoF; un-normalize for BridgeV2)\n",
    "with torch.inference_mode():\n",
    "    inputs = processor(prompt, image).to(device=vla.device, dtype=torch.bfloat16)\n",
    "    img = inputs.pixel_values\n",
    "    print(inputs)\n",
    "    print(vla(**inputs))\n",
    "#     action_org = vla.predict_action(\n",
    "#         **inputs, unnorm_key=\"bridge_orig\", do_sample=False, use_cache=False\n",
    "#     )\n",
    "\n",
    "# vits = (vla.vision_backbone.featurizer, vla.vision_backbone.fused_featurizer)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[    1,   512, 29901,  1724,  3158,   881,   278, 19964,  2125,   304,\n",
      "           426, 29966,  1177, 10810, 29965,  9838, 29958, 29913, 29973,    13,\n",
      "          3744, 29901]], device='cuda:7'), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],\n",
      "       device='cuda:7'), 'pixel_values': tensor([[[[2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          ...,\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500],\n",
      "          [2.2500, 2.2500, 2.2500,  ..., 2.2500, 2.2500, 2.2500]],\n",
      "\n",
      "         [[2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          ...,\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375],\n",
      "          [2.4375, 2.4375, 2.4375,  ..., 2.4375, 2.4375, 2.4375]],\n",
      "\n",
      "         [[2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          ...,\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406],\n",
      "          [2.6406, 2.6406, 2.6406,  ..., 2.6406, 2.6406, 2.6406]],\n",
      "\n",
      "         [[1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          ...,\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000]],\n",
      "\n",
      "         [[1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          ...,\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000]],\n",
      "\n",
      "         [[1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          ...,\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000],\n",
      "          [1.0000, 1.0000, 1.0000,  ..., 1.0000, 1.0000, 1.0000]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16)}\n",
      "torch.Size([1, 278, 32064])\n"
     ]
    }
   ],
   "source": [
    "with torch.inference_mode():\n",
    "    inputs = processor(prompt, image).to(device=vla.device, dtype=torch.bfloat16)\n",
    "    img = inputs.pixel_values\n",
    "    print(inputs)\n",
    "    print(vla(**inputs).logits.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "OpenVLAForActionPrediction(\n",
       "  (vision_backbone): PrismaticVisionBackbone(\n",
       "    (featurizer): ViT(\n",
       "      (patch_embed): PatchEmbed(\n",
       "        (proj): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))\n",
       "      )\n",
       "      (blocks): Sequential(\n",
       "        (0): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (2): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (3): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (4): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (5): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (6): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (7): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (8): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (9): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (10): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (11): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (12): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (13): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (14): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (15): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (16): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (17): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (18): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (19): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (20): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (21): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (22): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (23): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1024, out_features=3072, bias=True)\n",
       "            (proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "      )\n",
       "      (norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)\n",
       "    )\n",
       "    (fused_featurizer): ViT(\n",
       "      (patch_embed): PatchEmbed(\n",
       "        (proj): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14))\n",
       "      )\n",
       "      (blocks): Sequential(\n",
       "        (0): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (2): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (3): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (4): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (5): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (6): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (7): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (8): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (9): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (10): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (11): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (12): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (13): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (14): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (15): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (16): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (17): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (18): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (19): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (20): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (21): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (22): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (23): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (24): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (25): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (26): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (attn): Attntion(\n",
       "            (qkv): Linear(in_features=1152, out_features=3456, bias=True)\n",
       "            (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "          )\n",
       "          (norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "          (mlp): MLP(\n",
       "            (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "            (act): GELU(approximate='none')\n",
       "            (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "      )\n",
       "      (norm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "      (attn_pool): AttentionPoolLatent(\n",
       "        (q): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "        (kv): Linear(in_features=1152, out_features=2304, bias=True)\n",
       "        (proj): Linear(in_features=1152, out_features=1152, bias=True)\n",
       "        (mlp): MLP(\n",
       "          (fc1): Linear(in_features=1152, out_features=4304, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (fc2): Linear(in_features=4304, out_features=1152, bias=True)\n",
       "        )\n",
       "        (norm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (projector): VLAProjector(\n",
       "    (fc1): Linear(in_features=2176, out_features=8704, bias=True)\n",
       "    (fc2): Linear(in_features=8704, out_features=4096, bias=True)\n",
       "    (fc3): Linear(in_features=4096, out_features=4096, bias=True)\n",
       "    (act_fn1): GELU(approximate='none')\n",
       "    (act_fn2): GELU(approximate='none')\n",
       "  )\n",
       "  (language_model): LlamaForCausalLM(\n",
       "    (model): LlamaModel(\n",
       "      (embed_tokens): Embedding(32064, 4096, padding_idx=32000)\n",
       "      (layers): ModuleList(\n",
       "        (0-31): 32 x LlamaDecoderLayer(\n",
       "          (self_attn): LlamaAttention(\n",
       "            (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
       "            (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
       "            (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
       "            (o_proj): Linear(in_features=4096, out_features=4096, bias=False)\n",
       "          )\n",
       "          (mlp): LlamaMLP(\n",
       "            (gate_proj): Linear(in_features=4096, out_features=11008, bias=False)\n",
       "            (up_proj): Linear(in_features=4096, out_features=11008, bias=False)\n",
       "            (down_proj): Linear(in_features=11008, out_features=4096, bias=False)\n",
       "            (act_fn): SiLU()\n",
       "          )\n",
       "          (input_layernorm): LlamaRMSNorm((4096,), eps=1e-06)\n",
       "          (post_attention_layernorm): LlamaRMSNorm((4096,), eps=1e-06)\n",
       "        )\n",
       "      )\n",
       "      (norm): LlamaRMSNorm((4096,), eps=1e-06)\n",
       "      (rotary_emb): LlamaRotaryEmbedding()\n",
       "    )\n",
       "    (lm_head): Linear(in_features=4096, out_features=32064, bias=False)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import sys\n",
    "\n",
    "sys.path.append(\"/home/wenhongli/workspace/openvla_q\")\n",
    "# from quantize.blocks import quant_llama_model\n",
    "# from quantize.config import QuantizationArgs\n",
    "# from quantize.quantizer import Quantizer\n",
    "from quantize import (\n",
    "    Quantizer,\n",
    "    QuantizationArgs,\n",
    "    patch_model_for_train,\n",
    "    disable_params,\n",
    "    set_module_names,\n",
    ")\n",
    "from quantize.utils.modeling_vla import vit1, vit2,proj\n",
    "\n",
    "\n",
    "v1 = vit1()\n",
    "v2 = vit2()\n",
    "p = proj()\n",
    "\n",
    "v1 = v1.to(vla.device).bfloat16()\n",
    "v1.load_state_dict(vits[0].state_dict())\n",
    "v2 = v2.to(vla.device).bfloat16()\n",
    "v2.load_state_dict(vits[1].state_dict())\n",
    "p.to(vla.device).bfloat16()\n",
    "p.load_state_dict(vla.projector.state_dict())\n",
    "v1.patch_for_vla()\n",
    "v2.patch_for_vla()\n",
    "\n",
    "vla.vision_backbone.featurizer = v1\n",
    "vla.vision_backbone.fused_featurizer = v2\n",
    "vla.projector = p\n",
    "\n",
    "vla"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "with torch.inference_mode():\n",
    "    action = vla.predict_action(**inputs, unnorm_key=\"bridge_orig\", do_sample=False)\n",
    "    torch.nn.functional.mse_loss(*(torch.from_numpy(i) for i in (action, action_org)))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "OpenVLAForActionPrediction(\n",
       "  (vision_backbone): PrismaticVisionBackbone(\n",
       "    (featurizer): ViT(\n",
       "      (patch_embed): PatchEmbed(\n",
       "        (proj): QuantConv2d(\n",
       "          (weight_quantizer): LSQPlusQuantizer()\n",
       "          (act_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "      )\n",
       "      (blocks): Sequential(\n",
       "        (0): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (2): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (3): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (4): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (5): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (6): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (7): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (8): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (9): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (10): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (11): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (12): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (13): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (14): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (15): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (16): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (17): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (18): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (19): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (20): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (21): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (22): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "        (23): Block(\n",
       "          (ls1): LayerScale()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): LayerScale()\n",
       "        )\n",
       "      )\n",
       "      (norm): QuantLayerNorm(\n",
       "        (in_quantizer): LSQPlusQuantizer()\n",
       "      )\n",
       "    )\n",
       "    (fused_featurizer): ViT(\n",
       "      (patch_embed): PatchEmbed(\n",
       "        (proj): QuantConv2d(\n",
       "          (weight_quantizer): LSQPlusQuantizer()\n",
       "          (act_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "      )\n",
       "      (blocks): Sequential(\n",
       "        (0): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (2): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (3): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (4): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (5): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (6): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (7): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (8): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (9): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (10): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (11): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (12): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (13): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (14): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (15): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (16): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (17): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (18): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (19): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (20): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (21): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (22): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (23): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (24): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (25): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "        (26): Block(\n",
       "          (ls1): Identity()\n",
       "          (norm1): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (attn): QuantVitAttn(\n",
       "            (qkv): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (norm2): QuantLayerNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (mlp): MLP(\n",
       "            (fc1): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (act): QuantGELU(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fc2): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (ls2): Identity()\n",
       "        )\n",
       "      )\n",
       "      (norm): QuantLayerNorm(\n",
       "        (in_quantizer): LSQPlusQuantizer()\n",
       "      )\n",
       "      (attn_pool): AttentionPoolLatent(\n",
       "        (q): QuantLinear(\n",
       "          (weight_quantizer): LSQPlusQuantizer()\n",
       "          (act_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "        (kv): QuantLinear(\n",
       "          (weight_quantizer): LSQPlusQuantizer()\n",
       "          (act_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "        (proj): QuantLinear(\n",
       "          (weight_quantizer): LSQPlusQuantizer()\n",
       "          (act_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "        (mlp): MLP(\n",
       "          (fc1): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (act): QuantGELU(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (fc2): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "        )\n",
       "        (norm): QuantLayerNorm(\n",
       "          (in_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (projector): VLAProjector(\n",
       "    (fc1): QuantLinear(\n",
       "      (weight_quantizer): LSQPlusQuantizer()\n",
       "      (act_quantizer): LSQPlusQuantizer()\n",
       "    )\n",
       "    (fc2): QuantLinear(\n",
       "      (weight_quantizer): LSQPlusQuantizer()\n",
       "      (act_quantizer): LSQPlusQuantizer()\n",
       "    )\n",
       "    (fc3): QuantLinear(\n",
       "      (weight_quantizer): LSQPlusQuantizer()\n",
       "      (act_quantizer): LSQPlusQuantizer()\n",
       "    )\n",
       "    (act_fn1): QuantGELU(\n",
       "      (in_quantizer): LSQPlusQuantizer()\n",
       "    )\n",
       "    (act_fn2): QuantGELU(\n",
       "      (in_quantizer): LSQPlusQuantizer()\n",
       "    )\n",
       "  )\n",
       "  (language_model): LlamaForCausalLM(\n",
       "    (model): LlamaModel(\n",
       "      (embed_tokens): Embedding(32064, 4096, padding_idx=32000)\n",
       "      (layers): ModuleList(\n",
       "        (0-31): 32 x QuantLlamaDecoderLayer(\n",
       "          (self_attn): QuantLlamaAttn(\n",
       "            (q_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (k_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (qkt_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (softmax): QuantSoftmax(\n",
       "              (in_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (v_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (pv_mm): QuantMatMul(\n",
       "              (x1_quantizer): LSQPlusQuantizer()\n",
       "              (x2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (o_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (apply_rotary_pos_emb): QuantApplyRotaryPosEmb(\n",
       "              (query_quantizer): LSQPlusQuantizer()\n",
       "              (key_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "          (mlp): QuantLlamaMLP(\n",
       "            (gate_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (up_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (down_proj): QuantLinear(\n",
       "              (weight_quantizer): LSQPlusQuantizer()\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (fused_silu): QuantFusedSiLU(\n",
       "              (sigmoid): QuantSigmoid(\n",
       "                (act_quantizer): LSQPlusQuantizer()\n",
       "              )\n",
       "              (hadamard1): QuantHadamardProduct(\n",
       "                (act1_quantizer): LSQPlusQuantizer()\n",
       "                (act2_quantizer): LSQPlusQuantizer()\n",
       "              )\n",
       "              (hadamard2): QuantHadamardProduct(\n",
       "                (act1_quantizer): LSQPlusQuantizer()\n",
       "                (act2_quantizer): LSQPlusQuantizer()\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "          (input_layernorm): QuantRMSNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (post_attention_layernorm): QuantRMSNorm(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (norm): QuantRMSNorm(\n",
       "        (in_quantizer): LSQPlusQuantizer()\n",
       "        (out_quantizer): LSQPlusQuantizer()\n",
       "      )\n",
       "      (rotary_emb): LlamaRotaryEmbedding()\n",
       "    )\n",
       "    (lm_head): Linear(in_features=4096, out_features=32064, bias=False)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Patch the VLA model in place for quantization-aware training: per the repr\n",
     "# below, Linear/LayerNorm/Softmax/GELU/matmul ops are replaced by Quant*\n",
     "# wrappers carrying LSQPlusQuantizer (LSQ+) fake-quantizers.\n",
     "from quantize import patch_model_for_train\n",
     "\n",
     "patch_model_for_train(vla)\n",
     "# Display the patched module tree to confirm the replacement took effect.\n",
     "vla"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LlamaForCausalLM(\n",
       "  (model): LlamaModel(\n",
       "    (embed_tokens): Embedding(32064, 4096, padding_idx=32000)\n",
       "    (layers): ModuleList(\n",
       "      (0-31): 32 x QuantLlamaDecoderLayer(\n",
       "        (self_attn): QuantLlamaAttn(\n",
       "          (q_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (k_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (qkt_mm): QuantMatMul(\n",
       "            (x1_quantizer): LSQPlusQuantizer()\n",
       "            (x2_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (softmax): QuantSoftmax(\n",
       "            (in_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (v_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (pv_mm): QuantMatMul(\n",
       "            (x1_quantizer): LSQPlusQuantizer()\n",
       "            (x2_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (o_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (apply_rotary_pos_emb): QuantApplyRotaryPosEmb(\n",
       "            (query_quantizer): LSQPlusQuantizer()\n",
       "            (key_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "        )\n",
       "        (mlp): QuantLlamaMLP(\n",
       "          (gate_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (up_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (down_proj): QuantLinear(\n",
       "            (weight_quantizer): LSQPlusQuantizer()\n",
       "            (act_quantizer): LSQPlusQuantizer()\n",
       "          )\n",
       "          (fused_silu): QuantFusedSiLU(\n",
       "            (sigmoid): QuantSigmoid(\n",
       "              (act_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (hadamard1): QuantHadamardProduct(\n",
       "              (act1_quantizer): LSQPlusQuantizer()\n",
       "              (act2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "            (hadamard2): QuantHadamardProduct(\n",
       "              (act1_quantizer): LSQPlusQuantizer()\n",
       "              (act2_quantizer): LSQPlusQuantizer()\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (input_layernorm): QuantRMSNorm(\n",
       "          (in_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "        (post_attention_layernorm): QuantRMSNorm(\n",
       "          (in_quantizer): LSQPlusQuantizer()\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (norm): QuantRMSNorm(\n",
       "      (in_quantizer): LSQPlusQuantizer()\n",
       "      (out_quantizer): LSQPlusQuantizer()\n",
       "    )\n",
       "    (rotary_emb): LlamaRotaryEmbedding()\n",
       "  )\n",
       "  (lm_head): Linear(in_features=4096, out_features=32064, bias=False)\n",
       ")"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inspect just the language-model submodule (LlamaForCausalLM): all 32 decoder\n",
     "# layers are now QuantLlamaDecoderLayer; note lm_head stays a plain Linear.\n",
     "vla.language_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0., dtype=torch.float64)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Run the patched (fake-quantized) model once, with sampling and KV-cache\n",
     "# disabled for a deterministic, fully re-computed forward pass.\n",
     "with torch.inference_mode():\n",
     "    action_q = vla.predict_action(\n",
     "        **inputs, unnorm_key=\"bridge_orig\", do_sample=False, use_cache=False\n",
     "    )\n",
     "# Compare against the reference action_org (presumably the unpatched model's\n",
     "# output, computed earlier — confirm upstream). Both are numpy arrays, hence\n",
     "# the from_numpy conversion; an MSE of 0 means the predictions are identical.\n",
     "torch.nn.functional.mse_loss(*(torch.from_numpy(i) for i in (action_q, action_org)))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(array([ 0.00448837,  0.01479259, -0.00569757,  0.00752425, -0.00546965,\n",
       "         0.09019398,  0.99607843]),\n",
       " array([ 0.00448837,  0.01479259, -0.00569757,  0.00752425, -0.00546965,\n",
       "         0.09019398,  0.99607843]))"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Show both action vectors side by side for a direct element-wise comparison.\n",
     "action_q, action_org\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "language_model.model.layers.0.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[0.1523]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0126],\n",
      "        [0.0272],\n",
      "        [0.0311],\n",
      "        ...,\n",
      "        [0.0195],\n",
      "        [0.0195],\n",
      "        [0.0177]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0269]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0311],\n",
      "        [0.0396],\n",
      "        [0.0352],\n",
      "        ...,\n",
      "        [0.0320],\n",
      "        [0.0381],\n",
      "        [0.0223]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0269]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0041],\n",
      "        [0.0049],\n",
      "        [0.0042],\n",
      "        ...,\n",
      "        [0.0040],\n",
      "        [0.0041],\n",
      "        [0.0044]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0269]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1138]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.0962]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0522]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.0430]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1123]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0018, 0.0016, 0.0029, 0.0033, 0.0007, 0.0010, 0.0023, 0.0007,\n",
      "           0.0019, 0.0018, 0.0018, 0.0026, 0.0015, 0.0031, 0.0008, 0.0014,\n",
      "           0.0032, 0.0019, 0.0007, 0.0026, 0.0022, 0.0007, 0.0006, 0.0010,\n",
      "           0.0035, 0.0019, 0.0024, 0.0011, 0.0009, 0.0007, 0.0015, 0.0010,\n",
      "           0.0015, 0.0021, 0.0016, 0.0006, 0.0009, 0.0017, 0.0025, 0.0027,\n",
      "           0.0020, 0.0022, 0.0025, 0.0025, 0.0025, 0.0041, 0.0021, 0.0011,\n",
      "           0.0014, 0.0008, 0.0024, 0.0031, 0.0008, 0.0031, 0.0018, 0.0007,\n",
      "           0.0015, 0.0016, 0.0014, 0.0014, 0.0016, 0.0025, 0.0011, 0.0016,\n",
      "           0.0019, 0.0010, 0.0014, 0.0009, 0.0014, 0.0026, 0.0007, 0.0012,\n",
      "           0.0015, 0.0013, 0.0033, 0.0025, 0.0011, 0.0014, 0.0032, 0.0006,\n",
      "           0.0009, 0.0008, 0.0006, 0.0031, 0.0009, 0.0009, 0.0048, 0.0021,\n",
      "           0.0006, 0.0018, 0.0021, 0.0036, 0.0015, 0.0006, 0.0015, 0.0009,\n",
      "           0.0021, 0.0007, 0.0011, 0.0007, 0.0026, 0.0007, 0.0019, 0.0008,\n",
      "           0.0009, 0.0020, 0.0028, 0.0009, 0.0007, 0.0009, 0.0033, 0.0016,\n",
      "           0.0006, 0.0007, 0.0007, 0.0022, 0.0025, 0.0010, 0.0009, 0.0023,\n",
      "           0.0012, 0.0014, 0.0050, 0.0006, 0.0023, 0.0019, 0.0010, 0.0018]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0063],\n",
      "        [0.0064],\n",
      "        [0.0055],\n",
      "        ...,\n",
      "        [0.0074],\n",
      "        [0.0056],\n",
      "        [0.0058]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0050]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[0.1650]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0098],\n",
      "        [0.0082],\n",
      "        [0.0105],\n",
      "        ...,\n",
      "        [0.0083],\n",
      "        [0.0090],\n",
      "        [0.0091]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0004, 0.0003, 0.0003,  ..., 0.0004, 0.0005, 0.0004]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0092],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0093],\n",
      "        [0.0093],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0004, 0.0003, 0.0003,  ..., 0.0004, 0.0005, 0.0004]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0320]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0043, 0.0041, 0.0042,  ..., 0.0041, 0.0042, 0.0041]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0320]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0011, 0.0008, 0.0009,  ..., 0.0010, 0.0010, 0.0011]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0008, 0.0004, 0.0005,  ..., 0.0003, 0.0005, 0.0004]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0119],\n",
      "        [0.0103],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0095],\n",
      "        [0.0111],\n",
      "        [0.0097]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.0.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[3.2902e-05, 1.5974e-05, 3.7670e-05,  ..., 2.5511e-05,\n",
      "          3.8147e-05, 2.4319e-05]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.1.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[0.1680]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0253],\n",
      "        [0.0215],\n",
      "        [0.0330],\n",
      "        ...,\n",
      "        [0.0135],\n",
      "        [0.0143],\n",
      "        [0.0192]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0457]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0204],\n",
      "        [0.0215],\n",
      "        [0.0222],\n",
      "        ...,\n",
      "        [0.0135],\n",
      "        [0.0139],\n",
      "        [0.0100]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0457]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0081],\n",
      "        [0.0086],\n",
      "        [0.0073],\n",
      "        ...,\n",
      "        [0.0036],\n",
      "        [0.0038],\n",
      "        [0.0036]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0457]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.0767]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.0854]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0349]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.0410]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.0620]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0022, 0.0019, 0.0029, 0.0017, 0.0033, 0.0037, 0.0016, 0.0015,\n",
      "           0.0032, 0.0019, 0.0027, 0.0020, 0.0017, 0.0021, 0.0031, 0.0019,\n",
      "           0.0019, 0.0026, 0.0020, 0.0027, 0.0018, 0.0018, 0.0071, 0.0019,\n",
      "           0.0024, 0.0016, 0.0042, 0.0020, 0.0014, 0.0021, 0.0017, 0.0014,\n",
      "           0.0029, 0.0022, 0.0019, 0.0024, 0.0033, 0.0027, 0.0038, 0.0019,\n",
      "           0.0022, 0.0018, 0.0027, 0.0016, 0.0020, 0.0017, 0.0021, 0.0020,\n",
      "           0.0073, 0.0017, 0.0021, 0.0062, 0.0039, 0.0018, 0.0022, 0.0051,\n",
      "           0.0020, 0.0052, 0.0018, 0.0017, 0.0056, 0.0042, 0.0052, 0.0042,\n",
      "           0.0019, 0.0021, 0.0042, 0.0020, 0.0030, 0.0021, 0.0021, 0.0021,\n",
      "           0.0022, 0.0018, 0.0016, 0.0019, 0.0050, 0.0016, 0.0016, 0.0058,\n",
      "           0.0022, 0.0030, 0.0024, 0.0034, 0.0041, 0.0020, 0.0084, 0.0019,\n",
      "           0.0018, 0.0024, 0.0025, 0.0028, 0.0025, 0.0017, 0.0049, 0.0027,\n",
      "           0.0031, 0.0020, 0.0016, 0.0020, 0.0014, 0.0018, 0.0026, 0.0016,\n",
      "           0.0019, 0.0020, 0.0018, 0.0022, 0.0024, 0.0025, 0.0015, 0.0015,\n",
      "           0.0022, 0.0022, 0.0015, 0.0020, 0.0022, 0.0020, 0.0016, 0.0026,\n",
      "           0.0039, 0.0014, 0.0018, 0.0025, 0.0021, 0.0020, 0.0053, 0.0018]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0069],\n",
      "        [0.0069],\n",
      "        [0.0078],\n",
      "        ...,\n",
      "        [0.0074],\n",
      "        [0.0071],\n",
      "        [0.0073]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0101]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[0.1885]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0096],\n",
      "        [0.0114],\n",
      "        ...,\n",
      "        [0.0095],\n",
      "        [0.0098],\n",
      "        [0.0096]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0011, 0.0012, 0.0008,  ..., 0.0013, 0.0011, 0.0014]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0107],\n",
      "        [0.0101],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0089],\n",
      "        [0.0097]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0011, 0.0012, 0.0008,  ..., 0.0013, 0.0011, 0.0014]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.6680]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0048, 0.0044,  ..., 0.0047, 0.0046, 0.0046]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.6680]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0044, 0.0027, 0.0032,  ..., 0.0035, 0.0023, 0.0023]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0031, 0.0019, 0.0012,  ..., 0.0017, 0.0015, 0.0016]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0106],\n",
      "        [0.0111],\n",
      "        [0.0111],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0114],\n",
      "        [0.0107]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.1.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0007, 0.0002, 0.0003,  ..., 0.0003, 0.0001, 0.0003]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0157],\n",
      "        [0.0131],\n",
      "        [0.0134],\n",
      "        ...,\n",
      "        [0.0194],\n",
      "        [0.0270],\n",
      "        [0.0190]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0547]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0194],\n",
      "        [0.0104],\n",
      "        [0.0128],\n",
      "        ...,\n",
      "        [0.0214],\n",
      "        [0.0154],\n",
      "        [0.0190]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0547]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0084],\n",
      "        [0.0092],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0079],\n",
      "        [0.0087],\n",
      "        [0.0082]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0547]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1533]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.1943]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0703]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.0967]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1641]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0074, 0.0087, 0.0049, 0.0093, 0.0095, 0.0058, 0.0057, 0.0059,\n",
      "           0.0105, 0.0068, 0.0082, 0.0055, 0.0065, 0.0066, 0.0052, 0.0065,\n",
      "           0.0054, 0.0065, 0.0070, 0.0055, 0.0074, 0.0053, 0.0066, 0.0085,\n",
      "           0.0066, 0.0071, 0.0058, 0.0084, 0.0060, 0.0087, 0.0090, 0.0064,\n",
      "           0.0067, 0.0076, 0.0089, 0.0075, 0.0060, 0.0089, 0.0075, 0.0072,\n",
      "           0.0055, 0.0071, 0.0055, 0.0066, 0.0056, 0.0060, 0.0059, 0.0061,\n",
      "           0.0056, 0.0065, 0.0078, 0.0075, 0.0052, 0.0059, 0.0079, 0.0089,\n",
      "           0.0090, 0.0067, 0.0054, 0.0064, 0.0049, 0.0062, 0.0075, 0.0055,\n",
      "           0.0067, 0.0079, 0.0065, 0.0064, 0.0149, 0.0064, 0.0062, 0.0059,\n",
      "           0.0066, 0.0060, 0.0068, 0.0085, 0.0056, 0.0062, 0.0090, 0.0049,\n",
      "           0.0120, 0.0062, 0.0070, 0.0072, 0.0057, 0.0067, 0.0057, 0.0052,\n",
      "           0.0068, 0.0062, 0.0063, 0.0081, 0.0060, 0.0068, 0.0063, 0.0055,\n",
      "           0.0063, 0.0066, 0.0081, 0.0072, 0.0054, 0.0107, 0.0064, 0.0066,\n",
      "           0.0055, 0.0066, 0.0070, 0.0060, 0.0070, 0.0059, 0.0068, 0.0061,\n",
      "           0.0129, 0.0062, 0.0064, 0.0054, 0.0059, 0.0080, 0.0108, 0.0059,\n",
      "           0.0061, 0.0060, 0.0062, 0.0060, 0.0085, 0.0059, 0.0087, 0.0064]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0079],\n",
      "        [0.0074],\n",
      "        [0.0085],\n",
      "        ...,\n",
      "        [0.0082],\n",
      "        [0.0086],\n",
      "        [0.0089]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0117]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0106],\n",
      "        [0.0096],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0098],\n",
      "        [0.0104],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0014, 0.0015, 0.0012,  ..., 0.0016, 0.0018, 0.0016]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0101],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0107],\n",
      "        [0.0096]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0014, 0.0015, 0.0012,  ..., 0.0016, 0.0018, 0.0016]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0171]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0046, 0.0042, 0.0042,  ..., 0.0045, 0.0048, 0.0046]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0171]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0041, 0.0052, 0.0033,  ..., 0.0025, 0.0025, 0.0039]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0016, 0.0014, 0.0013,  ..., 0.0012, 0.0020, 0.0016]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0110],\n",
      "        [0.0112],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0108],\n",
      "        [0.0108],\n",
      "        [0.0108]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.2.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0003, 0.0004, 0.0004,  ..., 0.0002, 0.0003, 0.0005]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0126],\n",
      "        [0.0125],\n",
      "        ...,\n",
      "        [0.0264],\n",
      "        [0.0260],\n",
      "        [0.0227]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0747]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0120],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0227],\n",
      "        [0.0223],\n",
      "        [0.0216]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0747]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0092],\n",
      "        [0.0084],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0049],\n",
      "        [0.0057],\n",
      "        [0.0049]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0747]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1768]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2539]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0762]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1240]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1533]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0091, 0.0103, 0.0096, 0.0085, 0.0077, 0.0075, 0.0096, 0.0084,\n",
      "           0.0075, 0.0087, 0.0107, 0.0095, 0.0076, 0.0092, 0.0098, 0.0094,\n",
      "           0.0122, 0.0084, 0.0073, 0.0081, 0.0097, 0.0097, 0.0085, 0.0085,\n",
      "           0.0074, 0.0132, 0.0092, 0.0088, 0.0080, 0.0101, 0.0095, 0.0090,\n",
      "           0.0106, 0.0107, 0.0079, 0.0078, 0.0092, 0.0094, 0.0070, 0.0093,\n",
      "           0.0085, 0.0075, 0.0134, 0.0085, 0.0082, 0.0087, 0.0082, 0.0081,\n",
      "           0.0095, 0.0089, 0.0100, 0.0110, 0.0086, 0.0095, 0.0098, 0.0079,\n",
      "           0.0089, 0.0091, 0.0108, 0.0089, 0.0101, 0.0085, 0.0089, 0.0097,\n",
      "           0.0079, 0.0078, 0.0081, 0.0082, 0.0078, 0.0103, 0.0076, 0.0087,\n",
      "           0.0114, 0.0089, 0.0082, 0.0085, 0.0096, 0.0087, 0.0092, 0.0094,\n",
      "           0.0092, 0.0098, 0.0079, 0.0093, 0.0143, 0.0083, 0.0079, 0.0082,\n",
      "           0.0085, 0.0082, 0.0104, 0.0074, 0.0084, 0.0097, 0.0103, 0.0089,\n",
      "           0.0086, 0.0104, 0.0106, 0.0092, 0.0089, 0.0085, 0.0087, 0.0090,\n",
      "           0.0079, 0.0109, 0.0084, 0.0097, 0.0086, 0.0083, 0.0087, 0.0101,\n",
      "           0.0088, 0.0089, 0.0109, 0.0096, 0.0094, 0.0099, 0.0104, 0.0086,\n",
      "           0.0080, 0.0115, 0.0074, 0.0093, 0.0121, 0.0103, 0.0085, 0.0080]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0078],\n",
      "        [0.0081],\n",
      "        [0.0079],\n",
      "        ...,\n",
      "        [0.0089],\n",
      "        [0.0087],\n",
      "        [0.0079]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0089]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0089],\n",
      "        [0.0093],\n",
      "        [0.0097],\n",
      "        ...,\n",
      "        [0.0103],\n",
      "        [0.0133],\n",
      "        [0.0108]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0020, 0.0019, 0.0014,  ..., 0.0018, 0.0023, 0.0018]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0110],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0109],\n",
      "        [0.0288],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0020, 0.0019, 0.0014,  ..., 0.0018, 0.0023, 0.0018]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0193]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0049, 0.0049, 0.0045,  ..., 0.0050, 0.0046, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0193]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0064, 0.0053, 0.0032,  ..., 0.0060, 0.0036, 0.0065]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0023, 0.0021,  ..., 0.0026, 0.0015, 0.0016]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0121],\n",
      "        [0.0109],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0109],\n",
      "        [0.0105],\n",
      "        [0.0113]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.3.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0006, 0.0010, 0.0005,  ..., 0.0007, 0.0002, 0.0009]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0092],\n",
      "        [0.0125],\n",
      "        [0.0123],\n",
      "        ...,\n",
      "        [0.0183],\n",
      "        [0.0187],\n",
      "        [0.0177]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0815]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0175],\n",
      "        [0.0113],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0172],\n",
      "        [0.0192],\n",
      "        [0.0205]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0815]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0087],\n",
      "        [0.0087],\n",
      "        [0.0083],\n",
      "        ...,\n",
      "        [0.0082],\n",
      "        [0.0087],\n",
      "        [0.0097]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0815]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1709]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2539]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0781]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1221]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1641]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0081, 0.0070, 0.0118, 0.0068, 0.0082, 0.0137, 0.0081, 0.0088,\n",
      "           0.0077, 0.0088, 0.0071, 0.0067, 0.0090, 0.0128, 0.0074, 0.0093,\n",
      "           0.0101, 0.0089, 0.0081, 0.0095, 0.0092, 0.0106, 0.0083, 0.0091,\n",
      "           0.0088, 0.0079, 0.0074, 0.0093, 0.0081, 0.0083, 0.0069, 0.0096,\n",
      "           0.0090, 0.0085, 0.0124, 0.0080, 0.0079, 0.0074, 0.0092, 0.0089,\n",
      "           0.0081, 0.0070, 0.0081, 0.0087, 0.0091, 0.0079, 0.0076, 0.0076,\n",
      "           0.0082, 0.0085, 0.0075, 0.0084, 0.0102, 0.0083, 0.0070, 0.0070,\n",
      "           0.0073, 0.0090, 0.0084, 0.0085, 0.0073, 0.0087, 0.0084, 0.0071,\n",
      "           0.0088, 0.0087, 0.0073, 0.0072, 0.0081, 0.0092, 0.0073, 0.0098,\n",
      "           0.0093, 0.0084, 0.0104, 0.0119, 0.0087, 0.0113, 0.0089, 0.0093,\n",
      "           0.0099, 0.0106, 0.0084, 0.0083, 0.0080, 0.0082, 0.0070, 0.0081,\n",
      "           0.0104, 0.0081, 0.0091, 0.0079, 0.0095, 0.0079, 0.0084, 0.0085,\n",
      "           0.0075, 0.0074, 0.0093, 0.0093, 0.0068, 0.0139, 0.0122, 0.0101,\n",
      "           0.0075, 0.0078, 0.0077, 0.0083, 0.0088, 0.0090, 0.0066, 0.0089,\n",
      "           0.0096, 0.0078, 0.0070, 0.0072, 0.0096, 0.0079, 0.0082, 0.0092,\n",
      "           0.0079, 0.0098, 0.0073, 0.0087, 0.0084, 0.0085, 0.0075, 0.0081]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0081],\n",
      "        [0.0084],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0086],\n",
      "        [0.0083],\n",
      "        [0.0084]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0125]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0090],\n",
      "        [0.0103],\n",
      "        [0.0112],\n",
      "        ...,\n",
      "        [0.0098],\n",
      "        [0.0097],\n",
      "        [0.0110]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0020, 0.0025, 0.0017,  ..., 0.0020, 0.0024, 0.0021]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0100],\n",
      "        [0.0102],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0108],\n",
      "        [0.0106],\n",
      "        [0.0107]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0020, 0.0025, 0.0017,  ..., 0.0020, 0.0024, 0.0021]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0309]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0046, 0.0055,  ..., 0.0048, 0.0048, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0309]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0051, 0.0045,  ..., 0.0038, 0.0061, 0.0044]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0025, 0.0017, 0.0045,  ..., 0.0020, 0.0020, 0.0027]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0111],\n",
      "        [0.0106],\n",
      "        [0.0112],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0108],\n",
      "        [0.0108]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.4.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0006, 0.0005, 0.0013,  ..., 0.0006, 0.0010, 0.0006]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0139],\n",
      "        [0.0104],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0393],\n",
      "        [0.0176],\n",
      "        [0.0317]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0952]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0192],\n",
      "        [0.0092],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0262],\n",
      "        [0.0221],\n",
      "        [0.0190]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0952]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0090],\n",
      "        [0.0087],\n",
      "        [0.0084],\n",
      "        ...,\n",
      "        [0.0081],\n",
      "        [0.0082],\n",
      "        [0.0079]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0952]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1680]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2676]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0781]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1348]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1650]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0112, 0.0089, 0.0073, 0.0093, 0.0087, 0.0090, 0.0066, 0.0086,\n",
      "           0.0102, 0.0069, 0.0085, 0.0112, 0.0095, 0.0076, 0.0082, 0.0082,\n",
      "           0.0093, 0.0087, 0.0094, 0.0076, 0.0081, 0.0126, 0.0104, 0.0098,\n",
      "           0.0077, 0.0092, 0.0092, 0.0093, 0.0099, 0.0084, 0.0084, 0.0098,\n",
      "           0.0075, 0.0072, 0.0087, 0.0087, 0.0082, 0.0089, 0.0082, 0.0090,\n",
      "           0.0076, 0.0085, 0.0074, 0.0085, 0.0078, 0.0095, 0.0078, 0.0074,\n",
      "           0.0072, 0.0107, 0.0085, 0.0095, 0.0086, 0.0091, 0.0072, 0.0089,\n",
      "           0.0099, 0.0106, 0.0079, 0.0085, 0.0084, 0.0087, 0.0102, 0.0109,\n",
      "           0.0092, 0.0081, 0.0081, 0.0118, 0.0111, 0.0093, 0.0088, 0.0074,\n",
      "           0.0086, 0.0077, 0.0092, 0.0106, 0.0070, 0.0087, 0.0088, 0.0088,\n",
      "           0.0115, 0.0085, 0.0095, 0.0099, 0.0114, 0.0065, 0.0077, 0.0089,\n",
      "           0.0079, 0.0086, 0.0079, 0.0090, 0.0115, 0.0087, 0.0082, 0.0083,\n",
      "           0.0076, 0.0086, 0.0087, 0.0080, 0.0094, 0.0086, 0.0100, 0.0085,\n",
      "           0.0089, 0.0077, 0.0094, 0.0084, 0.0108, 0.0099, 0.0080, 0.0077,\n",
      "           0.0095, 0.0098, 0.0099, 0.0106, 0.0085, 0.0104, 0.0102, 0.0068,\n",
      "           0.0073, 0.0099, 0.0115, 0.0092, 0.0090, 0.0076, 0.0083, 0.0082]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0090],\n",
      "        [0.0077],\n",
      "        [0.0086],\n",
      "        ...,\n",
      "        [0.0087],\n",
      "        [0.0088],\n",
      "        [0.0087]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0092]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0099],\n",
      "        [0.0097],\n",
      "        ...,\n",
      "        [0.0101],\n",
      "        [0.0098],\n",
      "        [0.0103]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0021, 0.0020, 0.0020,  ..., 0.0021, 0.0022, 0.0021]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0098],\n",
      "        [0.0114],\n",
      "        [0.0100],\n",
      "        ...,\n",
      "        [0.0105],\n",
      "        [0.0101],\n",
      "        [0.0097]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0021, 0.0020, 0.0020,  ..., 0.0021, 0.0022, 0.0021]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0204]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0049, 0.0045, 0.0048,  ..., 0.0048, 0.0058, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0204]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0038, 0.0054,  ..., 0.0062, 0.0043, 0.0047]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0024, 0.0018, 0.0020,  ..., 0.0021, 0.0057, 0.0024]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0103],\n",
      "        [0.0104],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0109],\n",
      "        [0.0109],\n",
      "        [0.0112]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.5.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0006, 0.0004, 0.0008,  ..., 0.0008, 0.0015, 0.0005]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0125],\n",
      "        [0.0112],\n",
      "        [0.0108],\n",
      "        ...,\n",
      "        [0.0171],\n",
      "        [0.0183],\n",
      "        [0.0195]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1123]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0102],\n",
      "        [0.0115],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0155],\n",
      "        [0.0175],\n",
      "        [0.0175]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1123]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0096],\n",
      "        [0.0090],\n",
      "        [0.0090],\n",
      "        ...,\n",
      "        [0.0079],\n",
      "        [0.0089],\n",
      "        [0.0095]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1123]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1855]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2793]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0845]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1357]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1650]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0125, 0.0112, 0.0080, 0.0083, 0.0097, 0.0103, 0.0095, 0.0087,\n",
      "           0.0114, 0.0090, 0.0082, 0.0107, 0.0109, 0.0112, 0.0095, 0.0088,\n",
      "           0.0109, 0.0092, 0.0099, 0.0098, 0.0096, 0.0098, 0.0090, 0.0114,\n",
      "           0.0092, 0.0084, 0.0090, 0.0086, 0.0099, 0.0087, 0.0106, 0.0120,\n",
      "           0.0113, 0.0114, 0.0114, 0.0104, 0.0092, 0.0097, 0.0099, 0.0092,\n",
      "           0.0123, 0.0090, 0.0119, 0.0103, 0.0106, 0.0083, 0.0106, 0.0111,\n",
      "           0.0086, 0.0111, 0.0112, 0.0086, 0.0110, 0.0104, 0.0116, 0.0088,\n",
      "           0.0110, 0.0084, 0.0085, 0.0110, 0.0111, 0.0094, 0.0092, 0.0118,\n",
      "           0.0114, 0.0095, 0.0131, 0.0121, 0.0089, 0.0091, 0.0137, 0.0137,\n",
      "           0.0097, 0.0104, 0.0092, 0.0107, 0.0092, 0.0086, 0.0087, 0.0104,\n",
      "           0.0132, 0.0115, 0.0095, 0.0142, 0.0095, 0.0116, 0.0130, 0.0110,\n",
      "           0.0125, 0.0101, 0.0132, 0.0093, 0.0103, 0.0113, 0.0096, 0.0100,\n",
      "           0.0107, 0.0092, 0.0091, 0.0139, 0.0104, 0.0107, 0.0099, 0.0090,\n",
      "           0.0104, 0.0103, 0.0098, 0.0091, 0.0087, 0.0106, 0.0097, 0.0095,\n",
      "           0.0099, 0.0120, 0.0085, 0.0086, 0.0100, 0.0091, 0.0112, 0.0121,\n",
      "           0.0092, 0.0099, 0.0107, 0.0091, 0.0101, 0.0090, 0.0103, 0.0109]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0087],\n",
      "        [0.0087],\n",
      "        [0.0082],\n",
      "        ...,\n",
      "        [0.0082],\n",
      "        [0.0079],\n",
      "        [0.0084]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0117]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0103],\n",
      "        [0.0100],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0093],\n",
      "        [0.0099],\n",
      "        [0.0095]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0024, 0.0024, 0.0025,  ..., 0.0025, 0.0023, 0.0022]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0094],\n",
      "        [0.0092],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0110],\n",
      "        [0.0122]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0024, 0.0024, 0.0025,  ..., 0.0025, 0.0023, 0.0022]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0203]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0055, 0.0051,  ..., 0.0050, 0.0052, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0203]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0042, 0.0035,  ..., 0.0054, 0.0036, 0.0039]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0025, 0.0043, 0.0029,  ..., 0.0025, 0.0033, 0.0025]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0109],\n",
      "        [0.0115],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0101],\n",
      "        [0.0098],\n",
      "        [0.0098]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.6.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0007, 0.0008, 0.0005,  ..., 0.0006, 0.0006, 0.0006]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0082],\n",
      "        [0.0073],\n",
      "        [0.0076],\n",
      "        ...,\n",
      "        [0.0275],\n",
      "        [0.0222],\n",
      "        [0.0239]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1250]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0079],\n",
      "        [0.0070],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0247],\n",
      "        [0.0192],\n",
      "        [0.0219]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1250]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0087],\n",
      "        [0.0091],\n",
      "        ...,\n",
      "        [0.0073],\n",
      "        [0.0074],\n",
      "        [0.0084]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1250]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1738]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2773]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0820]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1406]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1494]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0115, 0.0139, 0.0090, 0.0129, 0.0130, 0.0131, 0.0117, 0.0115,\n",
      "           0.0104, 0.0137, 0.0135, 0.0122, 0.0133, 0.0121, 0.0118, 0.0101,\n",
      "           0.0118, 0.0115, 0.0149, 0.0104, 0.0128, 0.0103, 0.0107, 0.0112,\n",
      "           0.0149, 0.0115, 0.0116, 0.0128, 0.0127, 0.0126, 0.0138, 0.0112,\n",
      "           0.0167, 0.0150, 0.0085, 0.0099, 0.0157, 0.0100, 0.0105, 0.0134,\n",
      "           0.0150, 0.0115, 0.0115, 0.0097, 0.0095, 0.0115, 0.0104, 0.0095,\n",
      "           0.0131, 0.0099, 0.0117, 0.0112, 0.0137, 0.0098, 0.0101, 0.0130,\n",
      "           0.0098, 0.0171, 0.0106, 0.0106, 0.0107, 0.0102, 0.0193, 0.0135,\n",
      "           0.0156, 0.0132, 0.0083, 0.0117, 0.0100, 0.0106, 0.0104, 0.0109,\n",
      "           0.0149, 0.0159, 0.0117, 0.0129, 0.0137, 0.0112, 0.0119, 0.0178,\n",
      "           0.0112, 0.0092, 0.0117, 0.0108, 0.0121, 0.0113, 0.0104, 0.0139,\n",
      "           0.0135, 0.0120, 0.0104, 0.0125, 0.0125, 0.0103, 0.0096, 0.0109,\n",
      "           0.0123, 0.0111, 0.0112, 0.0101, 0.0107, 0.0103, 0.0099, 0.0109,\n",
      "           0.0145, 0.0099, 0.0117, 0.0117, 0.0103, 0.0145, 0.0123, 0.0104,\n",
      "           0.0107, 0.0098, 0.0117, 0.0125, 0.0119, 0.0096, 0.0096, 0.0095,\n",
      "           0.0148, 0.0099, 0.0109, 0.0103, 0.0131, 0.0119, 0.0114, 0.0112]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0082],\n",
      "        [0.0084],\n",
      "        [0.0078],\n",
      "        ...,\n",
      "        [0.0084],\n",
      "        [0.0086],\n",
      "        [0.0083]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0123]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0099],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0096],\n",
      "        [0.0104],\n",
      "        [0.0086]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0024, 0.0026,  ..., 0.0028, 0.0029, 0.0028]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0090],\n",
      "        [0.0128],\n",
      "        [0.0120],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0087],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0024, 0.0026,  ..., 0.0028, 0.0029, 0.0028]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0280]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0056, 0.0053,  ..., 0.0054, 0.0056, 0.0047]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0280]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0047, 0.0071, 0.0042,  ..., 0.0070, 0.0055, 0.0052]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0025, 0.0047, 0.0036,  ..., 0.0040, 0.0049, 0.0019]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0102],\n",
      "        [0.0121],\n",
      "        [0.0125],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0100],\n",
      "        [0.0113]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.7.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0008, 0.0013, 0.0008,  ..., 0.0020, 0.0010, 0.0006]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0093],\n",
      "        [0.0109],\n",
      "        [0.0107],\n",
      "        ...,\n",
      "        [0.0347],\n",
      "        [0.0266],\n",
      "        [0.0286]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1592]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0106],\n",
      "        [0.0101],\n",
      "        [0.0090],\n",
      "        ...,\n",
      "        [0.0245],\n",
      "        [0.0216],\n",
      "        [0.0214]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1592]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0095],\n",
      "        [0.0087],\n",
      "        [0.0085],\n",
      "        ...,\n",
      "        [0.0085],\n",
      "        [0.0074],\n",
      "        [0.0087]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1592]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1689]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2891]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0806]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1416]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1748]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0095, 0.0170, 0.0092, 0.0108, 0.0085, 0.0121, 0.0238, 0.0118,\n",
      "           0.0143, 0.0106, 0.0101, 0.0144, 0.0110, 0.0100, 0.0131, 0.0110,\n",
      "           0.0115, 0.0109, 0.0092, 0.0106, 0.0117, 0.0204, 0.0160, 0.0103,\n",
      "           0.0101, 0.0087, 0.0123, 0.0104, 0.0126, 0.0130, 0.0093, 0.0175,\n",
      "           0.0156, 0.0109, 0.0165, 0.0109, 0.0111, 0.0114, 0.0135, 0.0197,\n",
      "           0.0144, 0.0121, 0.0099, 0.0104, 0.0139, 0.0145, 0.0096, 0.0131,\n",
      "           0.0101, 0.0137, 0.0178, 0.0093, 0.0104, 0.0104, 0.0119, 0.0159,\n",
      "           0.0115, 0.0176, 0.0120, 0.0095, 0.0125, 0.0095, 0.0166, 0.0101,\n",
      "           0.0168, 0.0177, 0.0113, 0.0156, 0.0103, 0.0096, 0.0210, 0.0102,\n",
      "           0.0129, 0.0128, 0.0121, 0.0106, 0.0121, 0.0105, 0.0114, 0.0126,\n",
      "           0.0109, 0.0153, 0.0117, 0.0096, 0.0121, 0.0104, 0.0151, 0.0217,\n",
      "           0.0142, 0.0161, 0.0102, 0.0114, 0.0118, 0.0110, 0.0102, 0.0133,\n",
      "           0.0150, 0.0193, 0.0161, 0.0095, 0.0151, 0.0105, 0.0119, 0.0145,\n",
      "           0.0085, 0.0211, 0.0120, 0.0124, 0.0107, 0.0104, 0.0131, 0.0148,\n",
      "           0.0210, 0.0113, 0.0153, 0.0138, 0.0123, 0.0113, 0.0120, 0.0103,\n",
      "           0.0144, 0.0120, 0.0138, 0.0220, 0.0103, 0.0133, 0.0141, 0.0120]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0086],\n",
      "        [0.0084],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0086],\n",
      "        [0.0081],\n",
      "        [0.0082]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0132]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0096],\n",
      "        [0.0111],\n",
      "        [0.0112],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0104],\n",
      "        [0.0120]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0026, 0.0030, 0.0029,  ..., 0.0031, 0.0033, 0.0022]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0124],\n",
      "        [0.0109],\n",
      "        [0.0105],\n",
      "        ...,\n",
      "        [0.0098],\n",
      "        [0.0100],\n",
      "        [0.0093]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0026, 0.0030, 0.0029,  ..., 0.0031, 0.0033, 0.0022]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0334]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0046, 0.0049,  ..., 0.0051, 0.0056, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0334]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0040, 0.0062, 0.0049,  ..., 0.0072, 0.0089, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0025, 0.0022, 0.0023,  ..., 0.0028, 0.0051, 0.0022]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0108],\n",
      "        [0.0102],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0120],\n",
      "        [0.0107],\n",
      "        [0.0103]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.8.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0008, 0.0007, 0.0006,  ..., 0.0007, 0.0011, 0.0006]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0105],\n",
      "        [0.0105],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0192],\n",
      "        [0.0161],\n",
      "        [0.0193]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1787]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0124],\n",
      "        [0.0101],\n",
      "        [0.0111],\n",
      "        ...,\n",
      "        [0.0188],\n",
      "        [0.0168],\n",
      "        [0.0172]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1787]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0094],\n",
      "        [0.0088],\n",
      "        [0.0101],\n",
      "        ...,\n",
      "        [0.0077],\n",
      "        [0.0080],\n",
      "        [0.0078]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1787]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1660]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2793]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0771]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1387]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1484]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0137, 0.0183, 0.0253, 0.0119, 0.0154, 0.0309, 0.0121, 0.0154,\n",
      "           0.0132, 0.0266, 0.0173, 0.0118, 0.0141, 0.0126, 0.0118, 0.0188,\n",
      "           0.0133, 0.0266, 0.0144, 0.0126, 0.0111, 0.0126, 0.0194, 0.0117,\n",
      "           0.0124, 0.0134, 0.0147, 0.0193, 0.0215, 0.0119, 0.0184, 0.0122,\n",
      "           0.0217, 0.0176, 0.0164, 0.0116, 0.0184, 0.0130, 0.0144, 0.0127,\n",
      "           0.0137, 0.0165, 0.0121, 0.0112, 0.0182, 0.0144, 0.0149, 0.0167,\n",
      "           0.0115, 0.0153, 0.0130, 0.0109, 0.0164, 0.0146, 0.0133, 0.0139,\n",
      "           0.0194, 0.0153, 0.0184, 0.0155, 0.0131, 0.0117, 0.0147, 0.0160,\n",
      "           0.0172, 0.0176, 0.0114, 0.0112, 0.0157, 0.0167, 0.0139, 0.0121,\n",
      "           0.0193, 0.0153, 0.0141, 0.0181, 0.0154, 0.0236, 0.0130, 0.0134,\n",
      "           0.0131, 0.0148, 0.0164, 0.0156, 0.0131, 0.0129, 0.0120, 0.0131,\n",
      "           0.0184, 0.0245, 0.0110, 0.0154, 0.0128, 0.0126, 0.0139, 0.0137,\n",
      "           0.0126, 0.0138, 0.0114, 0.0101, 0.0300, 0.0231, 0.0134, 0.0198,\n",
      "           0.0197, 0.0105, 0.0181, 0.0276, 0.0138, 0.0146, 0.0148, 0.0121,\n",
      "           0.0130, 0.0192, 0.0121, 0.0139, 0.0110, 0.0144, 0.0143, 0.0131,\n",
      "           0.0148, 0.0134, 0.0104, 0.0117, 0.0127, 0.0147, 0.0237, 0.0139]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0092],\n",
      "        [0.0091],\n",
      "        [0.0079],\n",
      "        ...,\n",
      "        [0.0087],\n",
      "        [0.0090],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0120]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0101],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0121],\n",
      "        [0.0098],\n",
      "        [0.0093]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0025, 0.0029, 0.0033,  ..., 0.0028, 0.0035, 0.0026]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0112],\n",
      "        [0.0100],\n",
      "        ...,\n",
      "        [0.0102],\n",
      "        [0.0090],\n",
      "        [0.0090]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0025, 0.0029, 0.0033,  ..., 0.0028, 0.0035, 0.0026]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0393]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0052, 0.0047,  ..., 0.0046, 0.0050, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0393]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0096, 0.0074, 0.0049,  ..., 0.0082, 0.0061, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0026, 0.0035, 0.0017,  ..., 0.0020, 0.0026, 0.0020]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0111],\n",
      "        [0.0106],\n",
      "        [0.0126],\n",
      "        ...,\n",
      "        [0.0105],\n",
      "        [0.0109],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.9.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0015, 0.0004,  ..., 0.0010, 0.0006, 0.0009]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0084],\n",
      "        [0.0091],\n",
      "        [0.0087],\n",
      "        ...,\n",
      "        [0.0173],\n",
      "        [0.0183],\n",
      "        [0.0161]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.2012]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0076],\n",
      "        [0.0096],\n",
      "        [0.0090],\n",
      "        ...,\n",
      "        [0.0186],\n",
      "        [0.0165],\n",
      "        [0.0179]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.2012]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0095],\n",
      "        [0.0103],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0094],\n",
      "        [0.0090],\n",
      "        [0.0085]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.2012]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1660]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2715]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0776]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1357]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1504]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0171, 0.0142, 0.0267, 0.0272, 0.0204, 0.0214, 0.0168, 0.0171,\n",
      "           0.0211, 0.0135, 0.0119, 0.0176, 0.0131, 0.0140, 0.0165, 0.0332,\n",
      "           0.0203, 0.0287, 0.0201, 0.0183, 0.0139, 0.0142, 0.0154, 0.0153,\n",
      "           0.0197, 0.0325, 0.0311, 0.0164, 0.0228, 0.0173, 0.0114, 0.0157,\n",
      "           0.0172, 0.0173, 0.0145, 0.0171, 0.0132, 0.0149, 0.0147, 0.0166,\n",
      "           0.0339, 0.0132, 0.0211, 0.0233, 0.0225, 0.0128, 0.0211, 0.0176,\n",
      "           0.0179, 0.0170, 0.0280, 0.0178, 0.0249, 0.0135, 0.0121, 0.0195,\n",
      "           0.0134, 0.0258, 0.0223, 0.0152, 0.0160, 0.0156, 0.0137, 0.0282,\n",
      "           0.0150, 0.0134, 0.0165, 0.0186, 0.0175, 0.0142, 0.0201, 0.0155,\n",
      "           0.0129, 0.0133, 0.0184, 0.0154, 0.0136, 0.0199, 0.0143, 0.0162,\n",
      "           0.0354, 0.0210, 0.0139, 0.0215, 0.0200, 0.0139, 0.0189, 0.0157,\n",
      "           0.0198, 0.0193, 0.0135, 0.0186, 0.0193, 0.0188, 0.0206, 0.0165,\n",
      "           0.0215, 0.0148, 0.0181, 0.0167, 0.0187, 0.0168, 0.0148, 0.0134,\n",
      "           0.0150, 0.0143, 0.0173, 0.0176, 0.0222, 0.0141, 0.0156, 0.0188,\n",
      "           0.0232, 0.0212, 0.0184, 0.0155, 0.0258, 0.0157, 0.0149, 0.0134,\n",
      "           0.0248, 0.0222, 0.0190, 0.0144, 0.0206, 0.0280, 0.0194, 0.0146]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0081],\n",
      "        [0.0079],\n",
      "        [0.0079],\n",
      "        ...,\n",
      "        [0.0086],\n",
      "        [0.0085],\n",
      "        [0.0085]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0094]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0116],\n",
      "        [0.0110],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0093],\n",
      "        [0.0088],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0028, 0.0030, 0.0035,  ..., 0.0032, 0.0031, 0.0026]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0110],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0091],\n",
      "        [0.0077],\n",
      "        [0.0106]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0028, 0.0030, 0.0035,  ..., 0.0032, 0.0031, 0.0026]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0284]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0054, 0.0047,  ..., 0.0056, 0.0045, 0.0052]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0284]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0117, 0.0061, 0.0070,  ..., 0.0047, 0.0063, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0045, 0.0040, 0.0019,  ..., 0.0048, 0.0019, 0.0035]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0107],\n",
      "        [0.0116],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0103],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.10.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0019, 0.0015, 0.0009,  ..., 0.0012, 0.0006, 0.0010]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0104],\n",
      "        [0.0088],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0168],\n",
      "        [0.0161],\n",
      "        [0.0157]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0098],\n",
      "        [0.0084],\n",
      "        [0.0090],\n",
      "        ...,\n",
      "        [0.0162],\n",
      "        [0.0165],\n",
      "        [0.0140]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0095],\n",
      "        [0.0082],\n",
      "        [0.0087],\n",
      "        ...,\n",
      "        [0.0095],\n",
      "        [0.0095],\n",
      "        [0.0096]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1582]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2715]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0757]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1338]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1641]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0140, 0.0132, 0.0150, 0.0193, 0.0294, 0.0170, 0.0150, 0.0192,\n",
      "           0.0132, 0.0156, 0.0145, 0.0166, 0.0156, 0.0192, 0.0161, 0.0175,\n",
      "           0.0140, 0.0157, 0.0186, 0.0215, 0.0173, 0.0166, 0.0327, 0.0148,\n",
      "           0.0167, 0.0134, 0.0234, 0.0130, 0.0208, 0.0167, 0.0150, 0.0238,\n",
      "           0.0138, 0.0226, 0.0160, 0.0171, 0.0137, 0.0173, 0.0305, 0.0145,\n",
      "           0.0147, 0.0310, 0.0194, 0.0171, 0.0177, 0.0162, 0.0204, 0.0201,\n",
      "           0.0187, 0.0145, 0.0200, 0.0201, 0.0192, 0.0227, 0.0135, 0.0147,\n",
      "           0.0131, 0.0176, 0.0171, 0.0128, 0.0172, 0.0143, 0.0160, 0.0143,\n",
      "           0.0181, 0.0153, 0.0138, 0.0204, 0.0211, 0.0157, 0.0189, 0.0153,\n",
      "           0.0143, 0.0176, 0.0128, 0.0134, 0.0205, 0.0176, 0.0165, 0.0136,\n",
      "           0.0327, 0.0140, 0.0170, 0.0146, 0.0187, 0.0161, 0.0157, 0.0173,\n",
      "           0.0182, 0.0194, 0.0204, 0.0178, 0.0254, 0.0157, 0.0173, 0.0146,\n",
      "           0.0201, 0.0153, 0.0157, 0.0160, 0.0178, 0.0162, 0.0146, 0.0162,\n",
      "           0.0189, 0.0192, 0.0146, 0.0170, 0.0237, 0.0209, 0.0262, 0.0177,\n",
      "           0.0238, 0.0190, 0.0182, 0.0161, 0.0153, 0.0176, 0.0145, 0.0217,\n",
      "           0.0225, 0.0145, 0.0215, 0.0137, 0.0253, 0.0164, 0.0189, 0.0172]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0090],\n",
      "        [0.0090],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0082],\n",
      "        [0.0103],\n",
      "        [0.0090]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0123]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0106],\n",
      "        [0.0107],\n",
      "        ...,\n",
      "        [0.0096],\n",
      "        [0.0095],\n",
      "        [0.0092]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0034, 0.0028, 0.0040,  ..., 0.0033, 0.0029, 0.0035]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0112],\n",
      "        [0.0098],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0103],\n",
      "        [0.0096],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0034, 0.0028, 0.0040,  ..., 0.0033, 0.0029, 0.0035]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0444]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0051, 0.0047,  ..., 0.0057, 0.0054, 0.0052]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0444]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0064, 0.0073, 0.0045,  ..., 0.0070, 0.0046, 0.0072]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0030, 0.0019,  ..., 0.0053, 0.0039, 0.0032]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0117],\n",
      "        [0.0113],\n",
      "        [0.0107],\n",
      "        ...,\n",
      "        [0.0113],\n",
      "        [0.0118],\n",
      "        [0.0120]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.11.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0019, 0.0011, 0.0006,  ..., 0.0015, 0.0012, 0.0013]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0092],\n",
      "        [0.0084],\n",
      "        [0.0094],\n",
      "        ...,\n",
      "        [0.0150],\n",
      "        [0.0125],\n",
      "        [0.0199]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0117],\n",
      "        [0.0091],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0151],\n",
      "        [0.0145],\n",
      "        [0.0135]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0081],\n",
      "        [0.0085],\n",
      "        [0.0087],\n",
      "        ...,\n",
      "        [0.0079],\n",
      "        [0.0075],\n",
      "        [0.0093]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1992]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2871]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0894]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1416]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1738]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0195, 0.0152, 0.0142, 0.0236, 0.0201, 0.0167, 0.0197, 0.0171,\n",
      "           0.0150, 0.0223, 0.0176, 0.0204, 0.0157, 0.0197, 0.0195, 0.0137,\n",
      "           0.0182, 0.0155, 0.0136, 0.0171, 0.0166, 0.0170, 0.0179, 0.0153,\n",
      "           0.0166, 0.0208, 0.0173, 0.0226, 0.0125, 0.0145, 0.0137, 0.0144,\n",
      "           0.0203, 0.0156, 0.0159, 0.0162, 0.0206, 0.0148, 0.0160, 0.0176,\n",
      "           0.0161, 0.0156, 0.0173, 0.0212, 0.0190, 0.0182, 0.0151, 0.0161,\n",
      "           0.0154, 0.0277, 0.0154, 0.0200, 0.0142, 0.0142, 0.0154, 0.0144,\n",
      "           0.0170, 0.0165, 0.0161, 0.0272, 0.0161, 0.0154, 0.0215, 0.0188,\n",
      "           0.0208, 0.0195, 0.0151, 0.0142, 0.0178, 0.0153, 0.0157, 0.0145,\n",
      "           0.0245, 0.0182, 0.0162, 0.0269, 0.0201, 0.0197, 0.0198, 0.0133,\n",
      "           0.0197, 0.0139, 0.0154, 0.0167, 0.0131, 0.0193, 0.0167, 0.0161,\n",
      "           0.0177, 0.0161, 0.0172, 0.0217, 0.0154, 0.0159, 0.0146, 0.0259,\n",
      "           0.0214, 0.0181, 0.0240, 0.0189, 0.0166, 0.0150, 0.0259, 0.0303,\n",
      "           0.0234, 0.0151, 0.0215, 0.0189, 0.0194, 0.0172, 0.0197, 0.0171,\n",
      "           0.0170, 0.0132, 0.0164, 0.0188, 0.0153, 0.0193, 0.0160, 0.0208,\n",
      "           0.0184, 0.0147, 0.0197, 0.0187, 0.0188, 0.0288, 0.0131, 0.0186]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0087],\n",
      "        [0.0084],\n",
      "        [0.0086],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0084],\n",
      "        [0.0084]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0164]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0106],\n",
      "        [0.0104],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0097],\n",
      "        [0.0099],\n",
      "        [0.0103]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0027, 0.0034, 0.0040,  ..., 0.0034, 0.0034, 0.0037]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0094],\n",
      "        [0.0122],\n",
      "        ...,\n",
      "        [0.0121],\n",
      "        [0.0098],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0027, 0.0034, 0.0040,  ..., 0.0034, 0.0034, 0.0037]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0361]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0048, 0.0048,  ..., 0.0054, 0.0058, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0361]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0065, 0.0047, 0.0067,  ..., 0.0079, 0.0061, 0.0055]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0033, 0.0021, 0.0022,  ..., 0.0039, 0.0056, 0.0027]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0109],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0120],\n",
      "        [0.0112],\n",
      "        [0.0114]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.12.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0010, 0.0008, 0.0011,  ..., 0.0017, 0.0028, 0.0013]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0153],\n",
      "        [0.0096],\n",
      "        [0.0098],\n",
      "        ...,\n",
      "        [0.0145],\n",
      "        [0.0149],\n",
      "        [0.0157]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1855]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0188],\n",
      "        [0.0103],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0168],\n",
      "        [0.0162],\n",
      "        [0.0291]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1855]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0079],\n",
      "        [0.0083],\n",
      "        [0.0148],\n",
      "        ...,\n",
      "        [0.0087],\n",
      "        [0.0079],\n",
      "        [0.0086]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1855]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1865]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3262]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0854]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1553]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1738]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0157, 0.0151, 0.0199, 0.0187, 0.0142, 0.0155, 0.0127, 0.0133,\n",
      "           0.0138, 0.0170, 0.0188, 0.0170, 0.0159, 0.0243, 0.0177, 0.0172,\n",
      "           0.0128, 0.0148, 0.0171, 0.0142, 0.0153, 0.0129, 0.0137, 0.0208,\n",
      "           0.0179, 0.0159, 0.0148, 0.0130, 0.0170, 0.0199, 0.0140, 0.0152,\n",
      "           0.0142, 0.0160, 0.0132, 0.0142, 0.0176, 0.0125, 0.0123, 0.0164,\n",
      "           0.0201, 0.0140, 0.0144, 0.0188, 0.0173, 0.0148, 0.0186, 0.0151,\n",
      "           0.0149, 0.0126, 0.0148, 0.0143, 0.0129, 0.0147, 0.0145, 0.0189,\n",
      "           0.0139, 0.0142, 0.0139, 0.0145, 0.0175, 0.0203, 0.0150, 0.0161,\n",
      "           0.0145, 0.0244, 0.0157, 0.0173, 0.0275, 0.0161, 0.0156, 0.0147,\n",
      "           0.0164, 0.0148, 0.0166, 0.0137, 0.0161, 0.0162, 0.0137, 0.0143,\n",
      "           0.0137, 0.0166, 0.0146, 0.0121, 0.0130, 0.0145, 0.0138, 0.0132,\n",
      "           0.0233, 0.0160, 0.0159, 0.0165, 0.0186, 0.0157, 0.0152, 0.0160,\n",
      "           0.0222, 0.0132, 0.0152, 0.0166, 0.0183, 0.0137, 0.0265, 0.0134,\n",
      "           0.0134, 0.0161, 0.0146, 0.0149, 0.0149, 0.0164, 0.0334, 0.0132,\n",
      "           0.0139, 0.0131, 0.0166, 0.0147, 0.0141, 0.0135, 0.0135, 0.0166,\n",
      "           0.0134, 0.0139, 0.0135, 0.0126, 0.0195, 0.0250, 0.0148, 0.0208]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0089],\n",
      "        [0.0087],\n",
      "        ...,\n",
      "        [0.0088],\n",
      "        [0.0085],\n",
      "        [0.0092]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0120]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0099],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0102],\n",
      "        [0.0095],\n",
      "        [0.0106]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0038, 0.0032, 0.0044,  ..., 0.0042, 0.0042, 0.0040]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0086],\n",
      "        [0.0115],\n",
      "        [0.0090],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0096],\n",
      "        [0.0107]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0038, 0.0032, 0.0044,  ..., 0.0042, 0.0042, 0.0040]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0493]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0052, 0.0059,  ..., 0.0058, 0.0060, 0.0052]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0493]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0080, 0.0061, 0.0089,  ..., 0.0088, 0.0096, 0.0074]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0045, 0.0033, 0.0061,  ..., 0.0059, 0.0067, 0.0035]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0104],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0113],\n",
      "        [0.0116],\n",
      "        [0.0111]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.13.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0026, 0.0013, 0.0033,  ..., 0.0031, 0.0022, 0.0015]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0103],\n",
      "        [0.0118],\n",
      "        [0.0120],\n",
      "        ...,\n",
      "        [0.0135],\n",
      "        [0.0139],\n",
      "        [0.0135]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1855]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0131],\n",
      "        [0.0128],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0135],\n",
      "        [0.0137],\n",
      "        [0.0145]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1855]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0080],\n",
      "        [0.0082],\n",
      "        [0.0078],\n",
      "        ...,\n",
      "        [0.0091],\n",
      "        [0.0103],\n",
      "        [0.0090]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1855]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1797]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2988]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0850]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1426]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1641]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0178, 0.0177, 0.0159, 0.0168, 0.0166, 0.0147, 0.0157, 0.0150,\n",
      "           0.0177, 0.0184, 0.0127, 0.0161, 0.0137, 0.0144, 0.0173, 0.0176,\n",
      "           0.0172, 0.0244, 0.0201, 0.0197, 0.0147, 0.0164, 0.0161, 0.0160,\n",
      "           0.0166, 0.0220, 0.0124, 0.0139, 0.0153, 0.0166, 0.0197, 0.0167,\n",
      "           0.0150, 0.0156, 0.0172, 0.0161, 0.0153, 0.0198, 0.0156, 0.0128,\n",
      "           0.0190, 0.0188, 0.0140, 0.0193, 0.0167, 0.0167, 0.0137, 0.0259,\n",
      "           0.0150, 0.0164, 0.0162, 0.0148, 0.0220, 0.0182, 0.0145, 0.0162,\n",
      "           0.0154, 0.0143, 0.0193, 0.0156, 0.0192, 0.0135, 0.0187, 0.0167,\n",
      "           0.0181, 0.0184, 0.0201, 0.0195, 0.0183, 0.0369, 0.0170, 0.0145,\n",
      "           0.0165, 0.0173, 0.0172, 0.0162, 0.0145, 0.0141, 0.0177, 0.0190,\n",
      "           0.0150, 0.0178, 0.0209, 0.0260, 0.0184, 0.0222, 0.0154, 0.0173,\n",
      "           0.0244, 0.0165, 0.0152, 0.0203, 0.0192, 0.0271, 0.0187, 0.0164,\n",
      "           0.0200, 0.0178, 0.0166, 0.0173, 0.0209, 0.0173, 0.0134, 0.0221,\n",
      "           0.0137, 0.0173, 0.0179, 0.0150, 0.0138, 0.0225, 0.0220, 0.0149,\n",
      "           0.0155, 0.0212, 0.0179, 0.0190, 0.0134, 0.0182, 0.0156, 0.0175,\n",
      "           0.0168, 0.0214, 0.0192, 0.0205, 0.0199, 0.0182, 0.0192, 0.0161]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0087],\n",
      "        [0.0093],\n",
      "        [0.0079],\n",
      "        ...,\n",
      "        [0.0090],\n",
      "        [0.0090],\n",
      "        [0.0091]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0154]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0120],\n",
      "        [0.0107],\n",
      "        [0.0100],\n",
      "        ...,\n",
      "        [0.0099],\n",
      "        [0.0101],\n",
      "        [0.0095]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0041, 0.0035, 0.0039,  ..., 0.0040, 0.0045, 0.0044]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0120],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0098],\n",
      "        [0.0093],\n",
      "        [0.0092]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0041, 0.0035, 0.0039,  ..., 0.0040, 0.0045, 0.0044]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0325]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0056, 0.0047, 0.0053,  ..., 0.0057, 0.0051, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0325]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0070, 0.0073, 0.0095,  ..., 0.0072, 0.0060, 0.0067]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0047, 0.0021, 0.0037,  ..., 0.0055, 0.0029, 0.0025]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0126],\n",
      "        [0.0115],\n",
      "        [0.0108],\n",
      "        ...,\n",
      "        [0.0107],\n",
      "        [0.0115],\n",
      "        [0.0123]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.14.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0015, 0.0013,  ..., 0.0013, 0.0012, 0.0009]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0084],\n",
      "        [0.0106],\n",
      "        [0.0094],\n",
      "        ...,\n",
      "        [0.0153],\n",
      "        [0.0177],\n",
      "        [0.0165]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1914]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0090],\n",
      "        [0.0094],\n",
      "        [0.0092],\n",
      "        ...,\n",
      "        [0.0217],\n",
      "        [0.0200],\n",
      "        [0.0177]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1914]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0089],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0084],\n",
      "        [0.0090],\n",
      "        [0.0090]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1914]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1943]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2773]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0908]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1436]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.1865]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0139, 0.0200, 0.0154, 0.0149, 0.0139, 0.0168, 0.0181, 0.0153,\n",
      "           0.0183, 0.0181, 0.0123, 0.0168, 0.0172, 0.0164, 0.0181, 0.0269,\n",
      "           0.0150, 0.0182, 0.0167, 0.0171, 0.0161, 0.0165, 0.0146, 0.0210,\n",
      "           0.0183, 0.0167, 0.0168, 0.0151, 0.0183, 0.0165, 0.0147, 0.0160,\n",
      "           0.0172, 0.0136, 0.0244, 0.0194, 0.0199, 0.0146, 0.0187, 0.0140,\n",
      "           0.0210, 0.0198, 0.0159, 0.0172, 0.0166, 0.0143, 0.0199, 0.0173,\n",
      "           0.0248, 0.0193, 0.0189, 0.0186, 0.0192, 0.0145, 0.0155, 0.0146,\n",
      "           0.0146, 0.0159, 0.0147, 0.0146, 0.0153, 0.0222, 0.0159, 0.0150,\n",
      "           0.0150, 0.0143, 0.0176, 0.0178, 0.0156, 0.0149, 0.0184, 0.0179,\n",
      "           0.0203, 0.0177, 0.0155, 0.0153, 0.0143, 0.0150, 0.0143, 0.0143,\n",
      "           0.0197, 0.0150, 0.0139, 0.0176, 0.0143, 0.0160, 0.0144, 0.0171,\n",
      "           0.0164, 0.0140, 0.0210, 0.0162, 0.0160, 0.0194, 0.0156, 0.0148,\n",
      "           0.0148, 0.0184, 0.0183, 0.0175, 0.0161, 0.0159, 0.0176, 0.0166,\n",
      "           0.0166, 0.0229, 0.0155, 0.0168, 0.0152, 0.0156, 0.0190, 0.0141,\n",
      "           0.0155, 0.0201, 0.0151, 0.0156, 0.0183, 0.0173, 0.0172, 0.0152,\n",
      "           0.0142, 0.0168, 0.0168, 0.0175, 0.0164, 0.0162, 0.0303, 0.0184]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0099],\n",
      "        [0.0085],\n",
      "        ...,\n",
      "        [0.0089],\n",
      "        [0.0091],\n",
      "        [0.0090]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0177]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0110],\n",
      "        [0.0110],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0095],\n",
      "        [0.0095],\n",
      "        [0.0097]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0042, 0.0036, 0.0041,  ..., 0.0040, 0.0046, 0.0039]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0106],\n",
      "        [0.0109],\n",
      "        [0.0111],\n",
      "        ...,\n",
      "        [0.0107],\n",
      "        [0.0104],\n",
      "        [0.0093]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0042, 0.0036, 0.0041,  ..., 0.0040, 0.0046, 0.0039]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0515]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0068, 0.0059, 0.0059,  ..., 0.0060, 0.0068, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0515]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0093, 0.0101, 0.0099,  ..., 0.0073, 0.0093, 0.0140]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0121, 0.0064, 0.0060,  ..., 0.0070, 0.0120, 0.0026]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0115],\n",
      "        [0.0105],\n",
      "        ...,\n",
      "        [0.0115],\n",
      "        [0.0123],\n",
      "        [0.0118]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.15.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0068, 0.0036, 0.0030,  ..., 0.0023, 0.0084, 0.0024]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0110],\n",
      "        [0.0119],\n",
      "        ...,\n",
      "        [0.0137],\n",
      "        [0.0135],\n",
      "        [0.0134]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1777]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0105],\n",
      "        [0.0107],\n",
      "        [0.0119],\n",
      "        ...,\n",
      "        [0.0165],\n",
      "        [0.0212],\n",
      "        [0.0168]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1777]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0121],\n",
      "        [0.0117],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0098],\n",
      "        [0.0092],\n",
      "        [0.0094]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1777]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1943]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3125]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0923]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1562]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2070]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0151, 0.0172, 0.0143, 0.0170, 0.0142, 0.0154, 0.0148, 0.0149,\n",
      "           0.0134, 0.0146, 0.0139, 0.0216, 0.0153, 0.0167, 0.0154, 0.0152,\n",
      "           0.0156, 0.0166, 0.0146, 0.0204, 0.0150, 0.0156, 0.0195, 0.0208,\n",
      "           0.0165, 0.0151, 0.0311, 0.0145, 0.0139, 0.0194, 0.0151, 0.0145,\n",
      "           0.0129, 0.0205, 0.0132, 0.0154, 0.0193, 0.0127, 0.0166, 0.0228,\n",
      "           0.0146, 0.0156, 0.0189, 0.0156, 0.0178, 0.0222, 0.0139, 0.0192,\n",
      "           0.0140, 0.0177, 0.0166, 0.0146, 0.0152, 0.0171, 0.0168, 0.0146,\n",
      "           0.0170, 0.0140, 0.0172, 0.0128, 0.0149, 0.0179, 0.0161, 0.0179,\n",
      "           0.0175, 0.0134, 0.0146, 0.0161, 0.0159, 0.0199, 0.0222, 0.0165,\n",
      "           0.0152, 0.0135, 0.0177, 0.0127, 0.0147, 0.0151, 0.0142, 0.0155,\n",
      "           0.0137, 0.0193, 0.0143, 0.0151, 0.0164, 0.0128, 0.0152, 0.0153,\n",
      "           0.0160, 0.0164, 0.0155, 0.0168, 0.0138, 0.0144, 0.0248, 0.0166,\n",
      "           0.0140, 0.0181, 0.0187, 0.0166, 0.0132, 0.0156, 0.0186, 0.0199,\n",
      "           0.0156, 0.0164, 0.0146, 0.0152, 0.0219, 0.0140, 0.0195, 0.0159,\n",
      "           0.0134, 0.0167, 0.0167, 0.0142, 0.0159, 0.0168, 0.0132, 0.0157,\n",
      "           0.0177, 0.0147, 0.0161, 0.0248, 0.0178, 0.0156, 0.0159, 0.0170]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0098],\n",
      "        [0.0084],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0111],\n",
      "        [0.0082],\n",
      "        [0.0090]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0148]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0097],\n",
      "        [0.0103],\n",
      "        ...,\n",
      "        [0.0111],\n",
      "        [0.0090],\n",
      "        [0.0119]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0045, 0.0039, 0.0039,  ..., 0.0046, 0.0043, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0097],\n",
      "        [0.0092],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0092],\n",
      "        [0.0118],\n",
      "        [0.0146]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0045, 0.0039, 0.0039,  ..., 0.0046, 0.0043, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0400]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0060, 0.0060, 0.0051,  ..., 0.0060, 0.0048, 0.0053]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0400]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0067, 0.0083, 0.0081,  ..., 0.0087, 0.0075, 0.0078]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0065, 0.0069, 0.0030,  ..., 0.0069, 0.0021, 0.0038]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0102],\n",
      "        [0.0117],\n",
      "        ...,\n",
      "        [0.0116],\n",
      "        [0.0124],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.16.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0021, 0.0026, 0.0013,  ..., 0.0040, 0.0009, 0.0017]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0086],\n",
      "        [0.0078],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0183],\n",
      "        [0.0264],\n",
      "        [0.0150]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1650]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0087],\n",
      "        [0.0085],\n",
      "        [0.0087],\n",
      "        ...,\n",
      "        [0.0238],\n",
      "        [0.0243],\n",
      "        [0.0223]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1650]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0084],\n",
      "        [0.0157],\n",
      "        [0.0098],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0091],\n",
      "        [0.0096]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1650]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2021]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.2988]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0933]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1514]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2002]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0167, 0.0150, 0.0170, 0.0159, 0.0142, 0.0171, 0.0161, 0.0135,\n",
      "           0.0138, 0.0140, 0.0195, 0.0146, 0.0157, 0.0143, 0.0148, 0.0148,\n",
      "           0.0133, 0.0148, 0.0147, 0.0142, 0.0132, 0.0170, 0.0123, 0.0164,\n",
      "           0.0179, 0.0173, 0.0151, 0.0139, 0.0148, 0.0142, 0.0145, 0.0148,\n",
      "           0.0164, 0.0150, 0.0142, 0.0155, 0.0140, 0.0137, 0.0137, 0.0181,\n",
      "           0.0197, 0.0161, 0.0151, 0.0164, 0.0140, 0.0186, 0.0145, 0.0134,\n",
      "           0.0157, 0.0168, 0.0144, 0.0178, 0.0139, 0.0147, 0.0137, 0.0130,\n",
      "           0.0148, 0.0153, 0.0145, 0.0130, 0.0170, 0.0160, 0.0139, 0.0173,\n",
      "           0.0148, 0.0137, 0.0128, 0.0132, 0.0140, 0.0159, 0.0148, 0.0143,\n",
      "           0.0135, 0.0143, 0.0148, 0.0122, 0.0140, 0.0145, 0.0134, 0.0170,\n",
      "           0.0164, 0.0178, 0.0143, 0.0157, 0.0136, 0.0153, 0.0144, 0.0156,\n",
      "           0.0142, 0.0136, 0.0145, 0.0140, 0.0155, 0.0190, 0.0154, 0.0172,\n",
      "           0.0143, 0.0143, 0.0145, 0.0129, 0.0165, 0.0177, 0.0161, 0.0133,\n",
      "           0.0153, 0.0182, 0.0176, 0.0141, 0.0154, 0.0150, 0.0176, 0.0165,\n",
      "           0.0160, 0.0139, 0.0148, 0.0135, 0.0145, 0.0162, 0.0154, 0.0161,\n",
      "           0.0131, 0.0164, 0.0176, 0.0166, 0.0164, 0.0144, 0.0162, 0.0157]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0087],\n",
      "        [0.0091],\n",
      "        ...,\n",
      "        [0.0090],\n",
      "        [0.0092],\n",
      "        [0.0096]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0135]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0112],\n",
      "        [0.0101],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0109],\n",
      "        [0.0103],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0059, 0.0047, 0.0046,  ..., 0.0042, 0.0045, 0.0044]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0102],\n",
      "        [0.0098],\n",
      "        [0.0114],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0112],\n",
      "        [0.0106]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0059, 0.0047, 0.0046,  ..., 0.0042, 0.0045, 0.0044]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0300]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0049, 0.0060,  ..., 0.0052, 0.0055, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0300]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0104, 0.0081, 0.0081,  ..., 0.0077, 0.0095, 0.0069]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0034, 0.0023, 0.0066,  ..., 0.0033, 0.0043, 0.0028]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0108],\n",
      "        [0.0114],\n",
      "        [0.0121],\n",
      "        ...,\n",
      "        [0.0106],\n",
      "        [0.0118],\n",
      "        [0.0112]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.17.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0023, 0.0016, 0.0015,  ..., 0.0019, 0.0017, 0.0011]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0095],\n",
      "        [0.0114],\n",
      "        [0.0114],\n",
      "        ...,\n",
      "        [0.0167],\n",
      "        [0.0164],\n",
      "        [0.0154]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0110],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0239],\n",
      "        [0.0200],\n",
      "        [0.0197]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0101],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0095],\n",
      "        [0.0083],\n",
      "        [0.0091]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2119]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3262]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1006]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1592]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2168]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0150, 0.0168, 0.0160, 0.0205, 0.0150, 0.0154, 0.0165, 0.0151,\n",
      "           0.0179, 0.0154, 0.0153, 0.0173, 0.0162, 0.0140, 0.0267, 0.0146,\n",
      "           0.0175, 0.0139, 0.0184, 0.0170, 0.0162, 0.0145, 0.0150, 0.0164,\n",
      "           0.0177, 0.0161, 0.0160, 0.0182, 0.0143, 0.0134, 0.0173, 0.0145,\n",
      "           0.0173, 0.0183, 0.0157, 0.0197, 0.0153, 0.0164, 0.0161, 0.0140,\n",
      "           0.0165, 0.0161, 0.0156, 0.0162, 0.0150, 0.0168, 0.0171, 0.0164,\n",
      "           0.0184, 0.0159, 0.0164, 0.0164, 0.0160, 0.0151, 0.0160, 0.0143,\n",
      "           0.0190, 0.0210, 0.0192, 0.0165, 0.0155, 0.0150, 0.0143, 0.0215,\n",
      "           0.0154, 0.0153, 0.0146, 0.0200, 0.0157, 0.0177, 0.0199, 0.0177,\n",
      "           0.0173, 0.0141, 0.0142, 0.0160, 0.0157, 0.0166, 0.0167, 0.0151,\n",
      "           0.0187, 0.0170, 0.0186, 0.0148, 0.0166, 0.0146, 0.0149, 0.0197,\n",
      "           0.0159, 0.0149, 0.0159, 0.0171, 0.0166, 0.0137, 0.0160, 0.0199,\n",
      "           0.0149, 0.0170, 0.0146, 0.0153, 0.0178, 0.0184, 0.0164, 0.0167,\n",
      "           0.0182, 0.0178, 0.0148, 0.0157, 0.0150, 0.0153, 0.0138, 0.0162,\n",
      "           0.0167, 0.0150, 0.0151, 0.0153, 0.0164, 0.0168, 0.0200, 0.0150,\n",
      "           0.0205, 0.0181, 0.0170, 0.0173, 0.0160, 0.0182, 0.0155, 0.0172]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0087],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0093],\n",
      "        [0.0089],\n",
      "        [0.0101]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0211]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0104],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0098],\n",
      "        [0.0107]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0064, 0.0056, 0.0053,  ..., 0.0050, 0.0044, 0.0046]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0104],\n",
      "        [0.0107],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0129],\n",
      "        [0.0111],\n",
      "        [0.0107]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0064, 0.0056, 0.0053,  ..., 0.0050, 0.0044, 0.0046]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0393]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0050, 0.0053, 0.0059,  ..., 0.0054, 0.0063, 0.0062]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0393]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0111, 0.0076, 0.0077,  ..., 0.0090, 0.0077, 0.0084]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0027, 0.0036, 0.0061,  ..., 0.0040, 0.0089, 0.0078]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0118],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0105],\n",
      "        [0.0128],\n",
      "        [0.0118]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.18.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0014, 0.0019,  ..., 0.0027, 0.0028, 0.0015]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0099],\n",
      "        [0.0097],\n",
      "        ...,\n",
      "        [0.0178],\n",
      "        [0.0142],\n",
      "        [0.0160]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0139],\n",
      "        [0.0103],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0183],\n",
      "        [0.0178],\n",
      "        [0.0192]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0097],\n",
      "        [0.0101],\n",
      "        ...,\n",
      "        [0.0103],\n",
      "        [0.0101],\n",
      "        [0.0098]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2100]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3555]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0991]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1641]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2168]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0182, 0.0149, 0.0171, 0.0160, 0.0186, 0.0148, 0.0166, 0.0145,\n",
      "           0.0179, 0.0150, 0.0155, 0.0142, 0.0162, 0.0165, 0.0178, 0.0188,\n",
      "           0.0161, 0.0154, 0.0170, 0.0153, 0.0193, 0.0155, 0.0149, 0.0144,\n",
      "           0.0156, 0.0170, 0.0176, 0.0152, 0.0153, 0.0166, 0.0181, 0.0151,\n",
      "           0.0146, 0.0157, 0.0160, 0.0165, 0.0155, 0.0152, 0.0153, 0.0200,\n",
      "           0.0162, 0.0171, 0.0168, 0.0153, 0.0149, 0.0179, 0.0187, 0.0159,\n",
      "           0.0140, 0.0234, 0.0164, 0.0177, 0.0157, 0.0171, 0.0146, 0.0150,\n",
      "           0.0161, 0.0141, 0.0198, 0.0162, 0.0161, 0.0161, 0.0153, 0.0157,\n",
      "           0.0160, 0.0137, 0.0164, 0.0172, 0.0170, 0.0149, 0.0183, 0.0153,\n",
      "           0.0184, 0.0142, 0.0147, 0.0151, 0.0150, 0.0160, 0.0175, 0.0161,\n",
      "           0.0137, 0.0160, 0.0141, 0.0153, 0.0166, 0.0193, 0.0134, 0.0157,\n",
      "           0.0168, 0.0170, 0.0166, 0.0155, 0.0149, 0.0165, 0.0162, 0.0172,\n",
      "           0.0162, 0.0157, 0.0145, 0.0165, 0.0145, 0.0161, 0.0150, 0.0159,\n",
      "           0.0184, 0.0157, 0.0154, 0.0190, 0.0179, 0.0172, 0.0143, 0.0165,\n",
      "           0.0150, 0.0153, 0.0165, 0.0162, 0.0164, 0.0153, 0.0146, 0.0153,\n",
      "           0.0131, 0.0164, 0.0199, 0.0166, 0.0153, 0.0170, 0.0181, 0.0164]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0096],\n",
      "        [0.0125],\n",
      "        [0.0105],\n",
      "        ...,\n",
      "        [0.0093],\n",
      "        [0.0101],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0117]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0554],\n",
      "        [0.0095],\n",
      "        [0.0108],\n",
      "        ...,\n",
      "        [0.0103],\n",
      "        [0.0095],\n",
      "        [0.0105]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0056, 0.0059, 0.0059,  ..., 0.0058, 0.0048, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0201],\n",
      "        [0.0106],\n",
      "        [0.0113],\n",
      "        ...,\n",
      "        [0.0112],\n",
      "        [0.0099],\n",
      "        [0.0097]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0056, 0.0059, 0.0059,  ..., 0.0058, 0.0048, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0366]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0071, 0.0052, 0.0067,  ..., 0.0062, 0.0057, 0.0056]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0366]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0074, 0.0088, 0.0109,  ..., 0.0083, 0.0108, 0.0095]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0148, 0.0034, 0.0113,  ..., 0.0079, 0.0055, 0.0048]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0107],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0119],\n",
      "        [0.0118],\n",
      "        [0.0113]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.19.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0025, 0.0031,  ..., 0.0025, 0.0019, 0.0039]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0074],\n",
      "        [0.0081],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0155],\n",
      "        [0.0149],\n",
      "        [0.0238]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0074],\n",
      "        [0.0070],\n",
      "        [0.0075],\n",
      "        ...,\n",
      "        [0.0208],\n",
      "        [0.0148],\n",
      "        [0.0178]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0107],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0106],\n",
      "        [0.0092],\n",
      "        [0.0093]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2207]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3730]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1035]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1777]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2344]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0176, 0.0179, 0.0162, 0.0156, 0.0146, 0.0147, 0.0165, 0.0147,\n",
      "           0.0150, 0.0164, 0.0156, 0.0142, 0.0192, 0.0171, 0.0178, 0.0178,\n",
      "           0.0156, 0.0172, 0.0150, 0.0150, 0.0143, 0.0137, 0.0167, 0.0165,\n",
      "           0.0172, 0.0165, 0.0143, 0.0188, 0.0179, 0.0154, 0.0160, 0.0141,\n",
      "           0.0151, 0.0168, 0.0177, 0.0162, 0.0147, 0.0159, 0.0154, 0.0166,\n",
      "           0.0160, 0.0157, 0.0160, 0.0167, 0.0140, 0.0157, 0.0142, 0.0160,\n",
      "           0.0173, 0.0161, 0.0166, 0.0146, 0.0175, 0.0156, 0.0161, 0.0153,\n",
      "           0.0162, 0.0148, 0.0137, 0.0156, 0.0156, 0.0159, 0.0173, 0.0157,\n",
      "           0.0166, 0.0149, 0.0153, 0.0167, 0.0155, 0.0156, 0.0164, 0.0172,\n",
      "           0.0194, 0.0160, 0.0141, 0.0171, 0.0157, 0.0153, 0.0153, 0.0171,\n",
      "           0.0153, 0.0186, 0.0143, 0.0138, 0.0160, 0.0177, 0.0162, 0.0203,\n",
      "           0.0167, 0.0164, 0.0194, 0.0172, 0.0170, 0.0181, 0.0178, 0.0162,\n",
      "           0.0144, 0.0156, 0.0161, 0.0165, 0.0154, 0.0165, 0.0172, 0.0176,\n",
      "           0.0157, 0.0159, 0.0141, 0.0146, 0.0186, 0.0172, 0.0152, 0.0160,\n",
      "           0.0157, 0.0178, 0.0178, 0.0159, 0.0148, 0.0155, 0.0161, 0.0176,\n",
      "           0.0189, 0.0154, 0.0204, 0.0151, 0.0155, 0.0152, 0.0154, 0.0145]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0098],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0095],\n",
      "        [0.0108]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0125]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0105],\n",
      "        [0.0114],\n",
      "        ...,\n",
      "        [0.0114],\n",
      "        [0.0109],\n",
      "        [0.0101]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0065, 0.0057, 0.0055,  ..., 0.0058, 0.0045, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0108],\n",
      "        [0.0098],\n",
      "        ...,\n",
      "        [0.0120],\n",
      "        [0.0095],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0065, 0.0057, 0.0055,  ..., 0.0058, 0.0045, 0.0045]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0337]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0058, 0.0052, 0.0054,  ..., 0.0058, 0.0063, 0.0059]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0337]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0101, 0.0095, 0.0127,  ..., 0.0103, 0.0115, 0.0114]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0056, 0.0035, 0.0041,  ..., 0.0060, 0.0086, 0.0062]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0121],\n",
      "        [0.0115],\n",
      "        [0.0105],\n",
      "        ...,\n",
      "        [0.0121],\n",
      "        [0.0114],\n",
      "        [0.0110]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.20.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0026, 0.0029, 0.0017,  ..., 0.0031, 0.0033, 0.0041]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0124],\n",
      "        [0.0095],\n",
      "        [0.0129],\n",
      "        ...,\n",
      "        [0.0145],\n",
      "        [0.0248],\n",
      "        [0.0157]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1807]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0119],\n",
      "        [0.0091],\n",
      "        [0.0097],\n",
      "        ...,\n",
      "        [0.0208],\n",
      "        [0.0236],\n",
      "        [0.0201]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1807]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0096],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0103],\n",
      "        [0.0099],\n",
      "        [0.0096]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1807]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1973]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3555]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0952]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1768]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2441]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0198, 0.0152, 0.0175, 0.0194, 0.0172, 0.0192, 0.0183, 0.0186,\n",
      "           0.0159, 0.0172, 0.0159, 0.0172, 0.0157, 0.0195, 0.0155, 0.0237,\n",
      "           0.0190, 0.0166, 0.0168, 0.0178, 0.0168, 0.0161, 0.0165, 0.0166,\n",
      "           0.0152, 0.0170, 0.0175, 0.0215, 0.0175, 0.0186, 0.0160, 0.0175,\n",
      "           0.0148, 0.0170, 0.0181, 0.0187, 0.0181, 0.0170, 0.0164, 0.0178,\n",
      "           0.0189, 0.0166, 0.0179, 0.0216, 0.0161, 0.0198, 0.0173, 0.0188,\n",
      "           0.0168, 0.0184, 0.0166, 0.0175, 0.0156, 0.0157, 0.0187, 0.0155,\n",
      "           0.0161, 0.0176, 0.0208, 0.0178, 0.0170, 0.0160, 0.0175, 0.0167,\n",
      "           0.0245, 0.0188, 0.0166, 0.0183, 0.0189, 0.0178, 0.0145, 0.0176,\n",
      "           0.0165, 0.0168, 0.0175, 0.0204, 0.0199, 0.0171, 0.0172, 0.0187,\n",
      "           0.0166, 0.0195, 0.0210, 0.0150, 0.0183, 0.0159, 0.0157, 0.0187,\n",
      "           0.0195, 0.0167, 0.0165, 0.0198, 0.0153, 0.0182, 0.0182, 0.0155,\n",
      "           0.0171, 0.0164, 0.0164, 0.0173, 0.0157, 0.0155, 0.0175, 0.0176,\n",
      "           0.0170, 0.0162, 0.0170, 0.0178, 0.0181, 0.0178, 0.0154, 0.0182,\n",
      "           0.0228, 0.0156, 0.0168, 0.0194, 0.0157, 0.0162, 0.0172, 0.0184,\n",
      "           0.0159, 0.0183, 0.0160, 0.0160, 0.0175, 0.0179, 0.0181, 0.0295]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0124],\n",
      "        [0.0111],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0101],\n",
      "        [0.0120],\n",
      "        [0.0100]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0248]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0100],\n",
      "        [0.0101],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0101],\n",
      "        [0.0104],\n",
      "        [0.0103]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0068, 0.0054, 0.0060,  ..., 0.0068, 0.0041, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0107],\n",
      "        [0.0134],\n",
      "        ...,\n",
      "        [0.0140],\n",
      "        [0.0119],\n",
      "        [0.0112]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0068, 0.0054, 0.0060,  ..., 0.0068, 0.0041, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0405]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0054, 0.0055,  ..., 0.0053, 0.0063, 0.0058]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0405]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0087, 0.0108, 0.0077,  ..., 0.0108, 0.0165, 0.0114]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0042, 0.0043,  ..., 0.0037, 0.0084, 0.0059]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0118],\n",
      "        [0.0132],\n",
      "        ...,\n",
      "        [0.0115],\n",
      "        [0.0110],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.21.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0029, 0.0018, 0.0019,  ..., 0.0025, 0.0117, 0.0042]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0125],\n",
      "        [0.0121],\n",
      "        [0.0120],\n",
      "        ...,\n",
      "        [0.0435],\n",
      "        [0.0206],\n",
      "        [0.0173]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1826]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0140],\n",
      "        [0.0137],\n",
      "        ...,\n",
      "        [0.0234],\n",
      "        [0.0143],\n",
      "        [0.0155]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1826]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0101],\n",
      "        [0.0109],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0099],\n",
      "        [0.0106],\n",
      "        [0.0111]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1826]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2314]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3945]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1108]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1885]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2471]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0166, 0.0175, 0.0200, 0.0164, 0.0167, 0.0177, 0.0194, 0.0151,\n",
      "           0.0168, 0.0175, 0.0161, 0.0203, 0.0178, 0.0145, 0.0166, 0.0159,\n",
      "           0.0159, 0.0162, 0.0173, 0.0168, 0.0153, 0.0162, 0.0160, 0.0193,\n",
      "           0.0168, 0.0182, 0.0143, 0.0171, 0.0183, 0.0178, 0.0175, 0.0166,\n",
      "           0.0183, 0.0157, 0.0212, 0.0164, 0.0167, 0.0184, 0.0165, 0.0173,\n",
      "           0.0168, 0.0164, 0.0176, 0.0182, 0.0168, 0.0164, 0.0182, 0.0194,\n",
      "           0.0170, 0.0215, 0.0153, 0.0187, 0.0190, 0.0190, 0.0161, 0.0172,\n",
      "           0.0164, 0.0177, 0.0157, 0.0165, 0.0175, 0.0208, 0.0172, 0.0145,\n",
      "           0.0167, 0.0182, 0.0201, 0.0181, 0.0160, 0.0171, 0.0172, 0.0193,\n",
      "           0.0176, 0.0165, 0.0161, 0.0155, 0.0155, 0.0172, 0.0176, 0.0184,\n",
      "           0.0151, 0.0166, 0.0170, 0.0155, 0.0183, 0.0162, 0.0173, 0.0192,\n",
      "           0.0161, 0.0182, 0.0156, 0.0189, 0.0164, 0.0204, 0.0162, 0.0157,\n",
      "           0.0184, 0.0232, 0.0221, 0.0168, 0.0157, 0.0178, 0.0182, 0.0161,\n",
      "           0.0175, 0.0157, 0.0156, 0.0170, 0.0177, 0.0171, 0.0179, 0.0150,\n",
      "           0.0176, 0.0179, 0.0143, 0.0173, 0.0167, 0.0172, 0.0171, 0.0156,\n",
      "           0.0178, 0.0170, 0.0164, 0.0194, 0.0151, 0.0151, 0.0161, 0.0164]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0097],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0098],\n",
      "        [0.0096],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0208]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0104],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0109],\n",
      "        [0.0108],\n",
      "        [0.0105]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0051, 0.0065, 0.0052,  ..., 0.0060, 0.0042, 0.0053]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0111],\n",
      "        [0.0106],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0107],\n",
      "        [0.0115],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0051, 0.0065, 0.0052,  ..., 0.0060, 0.0042, 0.0053]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0381]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0058, 0.0056, 0.0061,  ..., 0.0055, 0.0052, 0.0055]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0381]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0101, 0.0098, 0.0091,  ..., 0.0098, 0.0079, 0.0092]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0058, 0.0048, 0.0075,  ..., 0.0045, 0.0034, 0.0043]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0119],\n",
      "        [0.0114],\n",
      "        [0.0123],\n",
      "        ...,\n",
      "        [0.0117],\n",
      "        [0.0115],\n",
      "        [0.0119]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.22.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0031, 0.0027, 0.0029,  ..., 0.0021, 0.0019, 0.0020]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0091],\n",
      "        [0.0083],\n",
      "        [0.0080],\n",
      "        ...,\n",
      "        [0.0140],\n",
      "        [0.0277],\n",
      "        [0.0156]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0082],\n",
      "        [0.0087],\n",
      "        [0.0079],\n",
      "        ...,\n",
      "        [0.0222],\n",
      "        [0.0278],\n",
      "        [0.0229]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0093],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0099],\n",
      "        [0.0091],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2471]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3711]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1147]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1846]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2578]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0203, 0.0167, 0.0171, 0.0190, 0.0178, 0.0212, 0.0165, 0.0205,\n",
      "           0.0175, 0.0222, 0.0172, 0.0200, 0.0183, 0.0182, 0.0217, 0.0184,\n",
      "           0.0173, 0.0186, 0.0176, 0.0229, 0.0181, 0.0189, 0.0215, 0.0229,\n",
      "           0.0165, 0.0198, 0.0194, 0.0175, 0.0182, 0.0160, 0.0175, 0.0181,\n",
      "           0.0179, 0.0192, 0.0177, 0.0182, 0.0168, 0.0194, 0.0177, 0.0188,\n",
      "           0.0176, 0.0200, 0.0187, 0.0193, 0.0190, 0.0178, 0.0205, 0.0212,\n",
      "           0.0183, 0.0172, 0.0162, 0.0187, 0.0195, 0.0190, 0.0175, 0.0198,\n",
      "           0.0187, 0.0177, 0.0193, 0.0189, 0.0192, 0.0195, 0.0229, 0.0171,\n",
      "           0.0175, 0.0210, 0.0197, 0.0167, 0.0198, 0.0190, 0.0173, 0.0176,\n",
      "           0.0171, 0.0181, 0.0175, 0.0200, 0.0199, 0.0173, 0.0215, 0.0188,\n",
      "           0.0226, 0.0167, 0.0184, 0.0203, 0.0171, 0.0181, 0.0181, 0.0168,\n",
      "           0.0197, 0.0188, 0.0187, 0.0198, 0.0172, 0.0177, 0.0167, 0.0184,\n",
      "           0.0176, 0.0198, 0.0183, 0.0178, 0.0189, 0.0179, 0.0167, 0.0161,\n",
      "           0.0171, 0.0208, 0.0176, 0.0179, 0.0156, 0.0184, 0.0203, 0.0227,\n",
      "           0.0183, 0.0168, 0.0183, 0.0178, 0.0175, 0.0221, 0.0165, 0.0187,\n",
      "           0.0179, 0.0168, 0.0195, 0.0205, 0.0181, 0.0211, 0.0181, 0.0200]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0103],\n",
      "        [0.0101],\n",
      "        [0.0092],\n",
      "        ...,\n",
      "        [0.0118],\n",
      "        [0.0120],\n",
      "        [0.0106]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0144]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0104],\n",
      "        [0.0111],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0096],\n",
      "        [0.0109],\n",
      "        [0.0114]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0056, 0.0057,  ..., 0.0052, 0.0049, 0.0058]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0110],\n",
      "        [0.0107],\n",
      "        ...,\n",
      "        [0.0114],\n",
      "        [0.0104],\n",
      "        [0.0116]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0056, 0.0057,  ..., 0.0052, 0.0049, 0.0058]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0337]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0053, 0.0059,  ..., 0.0054, 0.0053, 0.0061]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0337]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0132, 0.0108, 0.0146,  ..., 0.0122, 0.0116, 0.0101]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0034, 0.0036, 0.0064,  ..., 0.0039, 0.0038, 0.0074]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0114],\n",
      "        [0.0112],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0112],\n",
      "        [0.0109],\n",
      "        [0.0114]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.23.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0024, 0.0025, 0.0062,  ..., 0.0030, 0.0029, 0.0031]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0121],\n",
      "        [0.0114],\n",
      "        [0.0129],\n",
      "        ...,\n",
      "        [0.0126],\n",
      "        [0.0148],\n",
      "        [0.0134]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1953]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0110],\n",
      "        [0.0111],\n",
      "        [0.0126],\n",
      "        ...,\n",
      "        [0.0155],\n",
      "        [0.0135],\n",
      "        [0.0139]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1953]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0112],\n",
      "        [0.0109],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0096],\n",
      "        [0.0104],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1953]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2266]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.4336]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1094]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1943]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2754]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0176, 0.0184, 0.0187, 0.0198, 0.0167, 0.0203, 0.0192, 0.0173,\n",
      "           0.0195, 0.0187, 0.0182, 0.0186, 0.0186, 0.0165, 0.0175, 0.0177,\n",
      "           0.0194, 0.0166, 0.0184, 0.0186, 0.0201, 0.0168, 0.0178, 0.0206,\n",
      "           0.0190, 0.0262, 0.0197, 0.0162, 0.0205, 0.0175, 0.0187, 0.0156,\n",
      "           0.0170, 0.0198, 0.0186, 0.0167, 0.0172, 0.0223, 0.0201, 0.0177,\n",
      "           0.0179, 0.0177, 0.0209, 0.0206, 0.0199, 0.0192, 0.0176, 0.0228,\n",
      "           0.0178, 0.0162, 0.0200, 0.0176, 0.0208, 0.0249, 0.0203, 0.0182,\n",
      "           0.0181, 0.0216, 0.0206, 0.0171, 0.0193, 0.0192, 0.0184, 0.0188,\n",
      "           0.0187, 0.0179, 0.0177, 0.0201, 0.0201, 0.0198, 0.0193, 0.0194,\n",
      "           0.0170, 0.0194, 0.0187, 0.0172, 0.0187, 0.0175, 0.0177, 0.0189,\n",
      "           0.0194, 0.0156, 0.0195, 0.0188, 0.0220, 0.0187, 0.0179, 0.0205,\n",
      "           0.0178, 0.0195, 0.0179, 0.0189, 0.0166, 0.0200, 0.0203, 0.0195,\n",
      "           0.0181, 0.0187, 0.0187, 0.0170, 0.0162, 0.0161, 0.0161, 0.0183,\n",
      "           0.0182, 0.0179, 0.0200, 0.0189, 0.0173, 0.0186, 0.0167, 0.0225,\n",
      "           0.0186, 0.0222, 0.0187, 0.0200, 0.0186, 0.0175, 0.0203, 0.0208,\n",
      "           0.0179, 0.0181, 0.0209, 0.0182, 0.0203, 0.0189, 0.0188, 0.0170]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0100],\n",
      "        [0.0106],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0121],\n",
      "        [0.0112]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0205]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0097],\n",
      "        [0.0101],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0117],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0054, 0.0058, 0.0051,  ..., 0.0071, 0.0056, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0108],\n",
      "        [0.0110],\n",
      "        [0.0113],\n",
      "        ...,\n",
      "        [0.0119],\n",
      "        [0.0117],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0054, 0.0058, 0.0051,  ..., 0.0071, 0.0056, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0320]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0059, 0.0051,  ..., 0.0059, 0.0053, 0.0054]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0320]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0096, 0.0093, 0.0131,  ..., 0.0132, 0.0085, 0.0106]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0064, 0.0028,  ..., 0.0064, 0.0039, 0.0040]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0114],\n",
      "        [0.0107],\n",
      "        ...,\n",
      "        [0.0111],\n",
      "        [0.0113],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.24.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0026, 0.0027, 0.0026,  ..., 0.0045, 0.0021, 0.0027]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0084],\n",
      "        [0.0084],\n",
      "        [0.0070],\n",
      "        ...,\n",
      "        [0.0131],\n",
      "        [0.0195],\n",
      "        [0.0209]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1885]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0087],\n",
      "        [0.0081],\n",
      "        [0.0067],\n",
      "        ...,\n",
      "        [0.0137],\n",
      "        [0.0129],\n",
      "        [0.0128]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1885]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0110],\n",
      "        [0.0129],\n",
      "        [0.0119],\n",
      "        ...,\n",
      "        [0.0117],\n",
      "        [0.0126],\n",
      "        [0.0122]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1885]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2314]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3867]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1094]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1865]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.2988]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0203, 0.0182, 0.0204, 0.0194, 0.0200, 0.0183, 0.0187, 0.0177,\n",
      "           0.0184, 0.0200, 0.0181, 0.0179, 0.0238, 0.0228, 0.0182, 0.0211,\n",
      "           0.0211, 0.0179, 0.0177, 0.0210, 0.0201, 0.0195, 0.0175, 0.0182,\n",
      "           0.0195, 0.0219, 0.0212, 0.0184, 0.0182, 0.0212, 0.0159, 0.0198,\n",
      "           0.0200, 0.0186, 0.0197, 0.0183, 0.0195, 0.0178, 0.0181, 0.0183,\n",
      "           0.0168, 0.0233, 0.0193, 0.0211, 0.0208, 0.0204, 0.0225, 0.0188,\n",
      "           0.0175, 0.0189, 0.0204, 0.0199, 0.0203, 0.0179, 0.0197, 0.0183,\n",
      "           0.0219, 0.0192, 0.0195, 0.0206, 0.0194, 0.0211, 0.0188, 0.0173,\n",
      "           0.0198, 0.0199, 0.0199, 0.0187, 0.0184, 0.0178, 0.0198, 0.0228,\n",
      "           0.0172, 0.0194, 0.0190, 0.0195, 0.0195, 0.0179, 0.0187, 0.0183,\n",
      "           0.0187, 0.0189, 0.0210, 0.0206, 0.0232, 0.0181, 0.0188, 0.0176,\n",
      "           0.0186, 0.0203, 0.0201, 0.0187, 0.0178, 0.0203, 0.0210, 0.0194,\n",
      "           0.0172, 0.0200, 0.0168, 0.0181, 0.0227, 0.0215, 0.0212, 0.0229,\n",
      "           0.0189, 0.0203, 0.0269, 0.0208, 0.0189, 0.0201, 0.0214, 0.0217,\n",
      "           0.0203, 0.0193, 0.0205, 0.0189, 0.0179, 0.0179, 0.0183, 0.0211,\n",
      "           0.0205, 0.0194, 0.0212, 0.0221, 0.0255, 0.0201, 0.0206, 0.0193]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0104],\n",
      "        [0.0120],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0115],\n",
      "        [0.0104]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0165]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0111],\n",
      "        [0.0095],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0115],\n",
      "        [0.0106],\n",
      "        [0.0101]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0048, 0.0063, 0.0054,  ..., 0.0063, 0.0058, 0.0056]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0112],\n",
      "        [0.0121],\n",
      "        [0.0127],\n",
      "        ...,\n",
      "        [0.0106],\n",
      "        [0.0120],\n",
      "        [0.0106]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0048, 0.0063, 0.0054,  ..., 0.0063, 0.0058, 0.0056]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0439]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0056, 0.0058, 0.0050,  ..., 0.0057, 0.0062, 0.0051]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0439]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0102, 0.0099, 0.0104,  ..., 0.0095, 0.0104, 0.0121]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0051, 0.0058, 0.0025,  ..., 0.0052, 0.0077, 0.0028]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0120],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0118],\n",
      "        [0.0121],\n",
      "        [0.0111]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.25.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0034, 0.0038, 0.0018,  ..., 0.0027, 0.0022, 0.0027]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0104],\n",
      "        [0.0112],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0292],\n",
      "        [0.0154],\n",
      "        [0.0245]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.2090]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0110],\n",
      "        [0.0122],\n",
      "        [0.0098],\n",
      "        ...,\n",
      "        [0.0327],\n",
      "        [0.0139],\n",
      "        [0.0168]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.2090]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0128],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0104],\n",
      "        [0.0110]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.2090]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2832]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.4141]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1289]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.2021]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.3496]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0192, 0.0198, 0.0172, 0.0199, 0.0188, 0.0192, 0.0181, 0.0209,\n",
      "           0.0208, 0.0183, 0.0192, 0.0217, 0.0192, 0.0181, 0.0198, 0.0217,\n",
      "           0.0186, 0.0192, 0.0200, 0.0198, 0.0181, 0.0195, 0.0198, 0.0206,\n",
      "           0.0210, 0.0192, 0.0188, 0.0193, 0.0211, 0.0189, 0.0206, 0.0187,\n",
      "           0.0183, 0.0189, 0.0209, 0.0205, 0.0208, 0.0186, 0.0225, 0.0189,\n",
      "           0.0181, 0.0188, 0.0187, 0.0186, 0.0206, 0.0210, 0.0192, 0.0237,\n",
      "           0.0162, 0.0225, 0.0225, 0.0192, 0.0184, 0.0187, 0.0211, 0.0210,\n",
      "           0.0186, 0.0198, 0.0188, 0.0178, 0.0214, 0.0201, 0.0182, 0.0287,\n",
      "           0.0186, 0.0220, 0.0206, 0.0208, 0.0175, 0.0175, 0.0178, 0.0225,\n",
      "           0.0199, 0.0190, 0.0195, 0.0199, 0.0183, 0.0190, 0.0192, 0.0232,\n",
      "           0.0208, 0.0236, 0.0201, 0.0182, 0.0219, 0.0182, 0.0193, 0.0192,\n",
      "           0.0195, 0.0267, 0.0197, 0.0178, 0.0226, 0.0205, 0.0186, 0.0201,\n",
      "           0.0225, 0.0250, 0.0217, 0.0184, 0.0219, 0.0204, 0.0205, 0.0181,\n",
      "           0.0178, 0.0187, 0.0220, 0.0225, 0.0189, 0.0201, 0.0225, 0.0195,\n",
      "           0.0190, 0.0177, 0.0211, 0.0195, 0.0203, 0.0219, 0.0204, 0.0193,\n",
      "           0.0193, 0.0192, 0.0215, 0.0184, 0.0208, 0.0227, 0.0217, 0.0201]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0126],\n",
      "        [0.0128],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0113],\n",
      "        [0.0129],\n",
      "        [0.0143]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0259]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0107],\n",
      "        [0.0095],\n",
      "        ...,\n",
      "        [0.0106],\n",
      "        [0.0110],\n",
      "        [0.0099]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0053, 0.0068, 0.0059,  ..., 0.0071, 0.0053, 0.0064]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0112],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0105],\n",
      "        [0.0108],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0053, 0.0068, 0.0059,  ..., 0.0071, 0.0053, 0.0064]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0364]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0070, 0.0055,  ..., 0.0056, 0.0059, 0.0060]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0364]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0099, 0.0093, 0.0123,  ..., 0.0126, 0.0172, 0.0120]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0143, 0.0043,  ..., 0.0050, 0.0063, 0.0071]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0124],\n",
      "        [0.0104],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0112],\n",
      "        [0.0112],\n",
      "        [0.0131]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.26.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0024, 0.0066, 0.0028,  ..., 0.0029, 0.0029, 0.0018]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0128],\n",
      "        [0.0110],\n",
      "        [0.0114],\n",
      "        ...,\n",
      "        [0.0125],\n",
      "        [0.0179],\n",
      "        [0.0271]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1641]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0117],\n",
      "        [0.0121],\n",
      "        ...,\n",
      "        [0.0119],\n",
      "        [0.0118],\n",
      "        [0.0195]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1641]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0120],\n",
      "        [0.0096],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0117],\n",
      "        [0.0125],\n",
      "        [0.0125]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1641]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2773]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.4141]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1309]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.2012]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.3770]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0184, 0.0195, 0.0189, 0.0171, 0.0200, 0.0188, 0.0186, 0.0187,\n",
      "           0.0181, 0.0192, 0.0186, 0.0289, 0.0217, 0.0189, 0.0204, 0.0205,\n",
      "           0.0175, 0.0179, 0.0206, 0.0286, 0.0192, 0.0184, 0.0200, 0.0201,\n",
      "           0.0190, 0.0188, 0.0188, 0.0199, 0.0178, 0.0205, 0.0197, 0.0211,\n",
      "           0.0190, 0.0221, 0.0176, 0.0203, 0.0212, 0.0204, 0.0176, 0.0206,\n",
      "           0.0199, 0.0187, 0.0204, 0.0254, 0.0195, 0.0199, 0.0176, 0.0182,\n",
      "           0.0203, 0.0206, 0.0192, 0.0204, 0.0192, 0.0184, 0.0223, 0.0199,\n",
      "           0.0187, 0.0198, 0.0192, 0.0209, 0.0199, 0.0197, 0.0181, 0.0183,\n",
      "           0.0197, 0.0228, 0.0225, 0.0223, 0.0187, 0.0199, 0.0175, 0.0171,\n",
      "           0.0195, 0.0179, 0.0234, 0.0175, 0.0209, 0.0293, 0.0184, 0.0197,\n",
      "           0.0189, 0.0188, 0.0205, 0.0201, 0.0245, 0.0171, 0.0205, 0.0200,\n",
      "           0.0190, 0.0183, 0.0186, 0.0194, 0.0182, 0.0190, 0.0183, 0.0194,\n",
      "           0.0197, 0.0187, 0.0200, 0.0233, 0.0186, 0.0225, 0.0220, 0.0184,\n",
      "           0.0181, 0.0199, 0.0168, 0.0187, 0.0223, 0.0204, 0.0210, 0.0186,\n",
      "           0.0198, 0.0187, 0.0201, 0.0186, 0.0189, 0.0206, 0.0195, 0.0188,\n",
      "           0.0193, 0.0225, 0.0172, 0.0182, 0.0219, 0.0182, 0.0212, 0.0192]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0104],\n",
      "        [0.0112],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0113],\n",
      "        [0.0110],\n",
      "        [0.0120]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0238]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0102],\n",
      "        [0.0112],\n",
      "        [0.0118],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0104],\n",
      "        [0.0109]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0066, 0.0056,  ..., 0.0078, 0.0060, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0109],\n",
      "        [0.0101],\n",
      "        [0.0110],\n",
      "        ...,\n",
      "        [0.0129],\n",
      "        [0.0106],\n",
      "        [0.0110]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0055, 0.0066, 0.0056,  ..., 0.0078, 0.0060, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0564]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0065, 0.0066,  ..., 0.0058, 0.0051, 0.0063]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0564]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0090, 0.0162, 0.0094,  ..., 0.0092, 0.0121, 0.0148]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0097, 0.0106,  ..., 0.0059, 0.0030, 0.0088]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0120],\n",
      "        [0.0117],\n",
      "        [0.0108],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0129],\n",
      "        [0.0125]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.27.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0030, 0.0063, 0.0064,  ..., 0.0042, 0.0024, 0.0082]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0112],\n",
      "        [0.0216],\n",
      "        [0.0265],\n",
      "        ...,\n",
      "        [0.0137],\n",
      "        [0.0154],\n",
      "        [0.0148]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1699]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0095],\n",
      "        [0.0082],\n",
      "        [0.0081],\n",
      "        ...,\n",
      "        [0.0129],\n",
      "        [0.0137],\n",
      "        [0.0140]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1699]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0114],\n",
      "        [0.0120],\n",
      "        ...,\n",
      "        [0.0121],\n",
      "        [0.0113],\n",
      "        [0.0114]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1699]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2617]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.4316]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1260]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.2080]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.3711]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0184, 0.0201, 0.0217, 0.0214, 0.0219, 0.0206, 0.0200, 0.0210,\n",
      "           0.0217, 0.0208, 0.0186, 0.0253, 0.0201, 0.0193, 0.0193, 0.0192,\n",
      "           0.0231, 0.0249, 0.0210, 0.0276, 0.0221, 0.0222, 0.0226, 0.0187,\n",
      "           0.0228, 0.0195, 0.0195, 0.0219, 0.0211, 0.0208, 0.0223, 0.0209,\n",
      "           0.0186, 0.0204, 0.0197, 0.0243, 0.0250, 0.0220, 0.0195, 0.0219,\n",
      "           0.0181, 0.0190, 0.0271, 0.0245, 0.0236, 0.0188, 0.0210, 0.0217,\n",
      "           0.0194, 0.0203, 0.0276, 0.0189, 0.0194, 0.0243, 0.0244, 0.0211,\n",
      "           0.0271, 0.0205, 0.0223, 0.0188, 0.0242, 0.0194, 0.0198, 0.0199,\n",
      "           0.0188, 0.0186, 0.0195, 0.0216, 0.0197, 0.0189, 0.0219, 0.0184,\n",
      "           0.0198, 0.0210, 0.0200, 0.0203, 0.0171, 0.0204, 0.0206, 0.0209,\n",
      "           0.0219, 0.0233, 0.0209, 0.0194, 0.0209, 0.0203, 0.0176, 0.0187,\n",
      "           0.0188, 0.0205, 0.0266, 0.0209, 0.0211, 0.0267, 0.0206, 0.0219,\n",
      "           0.0215, 0.0238, 0.0204, 0.0239, 0.0190, 0.0238, 0.0200, 0.0200,\n",
      "           0.0214, 0.0220, 0.0212, 0.0197, 0.0222, 0.0233, 0.0210, 0.0215,\n",
      "           0.0238, 0.0205, 0.0209, 0.0216, 0.0219, 0.0198, 0.0184, 0.0205,\n",
      "           0.0216, 0.0232, 0.0211, 0.0212, 0.0205, 0.0186, 0.0222, 0.0184]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0123],\n",
      "        [0.0117],\n",
      "        [0.0113],\n",
      "        ...,\n",
      "        [0.0114],\n",
      "        [0.0116],\n",
      "        [0.0113]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0194]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0110],\n",
      "        [0.0114],\n",
      "        [0.0108],\n",
      "        ...,\n",
      "        [0.0109],\n",
      "        [0.0116],\n",
      "        [0.0102]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0066, 0.0068, 0.0066,  ..., 0.0066, 0.0061, 0.0061]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0105],\n",
      "        [0.0111],\n",
      "        [0.0116],\n",
      "        ...,\n",
      "        [0.0115],\n",
      "        [0.0106],\n",
      "        [0.0101]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0066, 0.0068, 0.0066,  ..., 0.0066, 0.0061, 0.0061]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0537]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0062, 0.0061, 0.0055,  ..., 0.0064, 0.0051, 0.0057]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0537]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0127, 0.0120, 0.0120,  ..., 0.0154, 0.0110, 0.0120]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0081, 0.0072, 0.0043,  ..., 0.0093, 0.0030, 0.0055]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0107],\n",
      "        [0.0120],\n",
      "        [0.0131],\n",
      "        ...,\n",
      "        [0.0123],\n",
      "        [0.0122],\n",
      "        [0.0118]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.28.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0052, 0.0053, 0.0020,  ..., 0.0052, 0.0029, 0.0031]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0092],\n",
      "        [0.0091],\n",
      "        ...,\n",
      "        [0.0270],\n",
      "        [0.0187],\n",
      "        [0.0308]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1934]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0098],\n",
      "        [0.0096],\n",
      "        [0.0096],\n",
      "        ...,\n",
      "        [0.0152],\n",
      "        [0.0138],\n",
      "        [0.0146]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1934]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0104],\n",
      "        [0.0112],\n",
      "        ...,\n",
      "        [0.0106],\n",
      "        [0.0127],\n",
      "        [0.0107]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1934]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2256]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.4121]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1025]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.2051]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.3340]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0226, 0.0183, 0.0211, 0.0199, 0.0204, 0.0208, 0.0203, 0.0223,\n",
      "           0.0184, 0.0214, 0.0200, 0.0204, 0.0239, 0.0198, 0.0208, 0.0190,\n",
      "           0.0197, 0.0222, 0.0190, 0.0192, 0.0201, 0.0211, 0.0208, 0.0214,\n",
      "           0.0211, 0.0219, 0.0211, 0.0210, 0.0194, 0.0234, 0.0203, 0.0203,\n",
      "           0.0190, 0.0187, 0.0210, 0.0203, 0.0232, 0.0220, 0.0190, 0.0234,\n",
      "           0.0223, 0.0200, 0.0203, 0.0206, 0.0206, 0.0193, 0.0182, 0.0228,\n",
      "           0.0215, 0.0195, 0.0198, 0.0214, 0.0197, 0.0192, 0.0192, 0.0195,\n",
      "           0.0215, 0.0190, 0.0203, 0.0198, 0.0211, 0.0193, 0.0211, 0.0228,\n",
      "           0.0184, 0.0216, 0.0192, 0.0227, 0.0211, 0.0182, 0.0219, 0.0187,\n",
      "           0.0216, 0.0214, 0.0200, 0.0176, 0.0197, 0.0201, 0.0232, 0.0192,\n",
      "           0.0206, 0.0194, 0.0221, 0.0206, 0.0192, 0.0210, 0.0210, 0.0195,\n",
      "           0.0197, 0.0201, 0.0200, 0.0198, 0.0220, 0.0211, 0.0179, 0.0211,\n",
      "           0.0228, 0.0203, 0.0200, 0.0197, 0.0216, 0.0176, 0.0203, 0.0189,\n",
      "           0.0217, 0.0171, 0.0214, 0.0197, 0.0177, 0.0186, 0.0177, 0.0204,\n",
      "           0.0237, 0.0195, 0.0208, 0.0187, 0.0192, 0.0189, 0.0221, 0.0220,\n",
      "           0.0189, 0.0237, 0.0214, 0.0186, 0.0200, 0.0210, 0.0183, 0.0210]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0135],\n",
      "        [0.0120],\n",
      "        [0.0133],\n",
      "        ...,\n",
      "        [0.0124],\n",
      "        [0.0118],\n",
      "        [0.0121]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0215]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0109],\n",
      "        [0.0120],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0100],\n",
      "        [0.0113],\n",
      "        [0.0112]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0065, 0.0069, 0.0070,  ..., 0.0066, 0.0064, 0.0066]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0105],\n",
      "        [0.0105],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0101],\n",
      "        [0.0116],\n",
      "        [0.0098]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0065, 0.0069, 0.0070,  ..., 0.0066, 0.0064, 0.0066]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0718]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0057, 0.0064, 0.0059,  ..., 0.0056, 0.0055, 0.0059]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0718]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0134, 0.0147, 0.0134,  ..., 0.0119, 0.0132, 0.0152]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0053, 0.0092, 0.0061,  ..., 0.0050, 0.0043, 0.0060]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0120],\n",
      "        [0.0114],\n",
      "        [0.0114],\n",
      "        ...,\n",
      "        [0.0116],\n",
      "        [0.0116],\n",
      "        [0.0115]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.29.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0041, 0.0050, 0.0029,  ..., 0.0034, 0.0026, 0.0074]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0223],\n",
      "        [0.0203],\n",
      "        [0.0233],\n",
      "        ...,\n",
      "        [0.0121],\n",
      "        [0.0233],\n",
      "        [0.0239]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1602]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0099],\n",
      "        [0.0093],\n",
      "        [0.0099],\n",
      "        ...,\n",
      "        [0.0123],\n",
      "        [0.0143],\n",
      "        [0.0172]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1602]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0129],\n",
      "        [0.0110],\n",
      "        [0.0104],\n",
      "        ...,\n",
      "        [0.0121],\n",
      "        [0.0145],\n",
      "        [0.0140]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1602]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.2363]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3828]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.1074]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1924]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.3613]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0206, 0.0222, 0.0233, 0.0231, 0.0190, 0.0228, 0.0205, 0.0229,\n",
      "           0.0227, 0.0197, 0.0253, 0.0186, 0.0190, 0.0193, 0.0242, 0.0208,\n",
      "           0.0188, 0.0226, 0.0215, 0.0276, 0.0216, 0.0205, 0.0288, 0.0192,\n",
      "           0.0195, 0.0209, 0.0201, 0.0222, 0.0193, 0.0211, 0.0226, 0.0210,\n",
      "           0.0219, 0.0215, 0.0232, 0.0215, 0.0216, 0.0203, 0.0201, 0.0190,\n",
      "           0.0239, 0.0190, 0.0209, 0.0203, 0.0197, 0.0214, 0.0242, 0.0209,\n",
      "           0.0352, 0.0254, 0.0232, 0.0190, 0.0222, 0.0189, 0.0195, 0.0259,\n",
      "           0.0201, 0.0214, 0.0199, 0.0178, 0.0209, 0.0201, 0.0221, 0.0226,\n",
      "           0.0189, 0.0217, 0.0201, 0.0232, 0.0190, 0.0232, 0.0283, 0.0332,\n",
      "           0.0184, 0.0195, 0.0199, 0.0192, 0.0225, 0.0233, 0.0210, 0.0205,\n",
      "           0.0209, 0.0211, 0.0199, 0.0194, 0.0211, 0.0206, 0.0194, 0.0201,\n",
      "           0.0182, 0.0236, 0.0221, 0.0211, 0.0193, 0.0220, 0.0234, 0.0188,\n",
      "           0.0194, 0.0189, 0.0217, 0.0208, 0.0203, 0.0183, 0.0215, 0.0199,\n",
      "           0.0186, 0.0188, 0.0183, 0.0253, 0.0206, 0.0186, 0.0271, 0.0232,\n",
      "           0.0277, 0.0193, 0.0203, 0.0216, 0.0210, 0.0226, 0.0238, 0.0190,\n",
      "           0.0203, 0.0182, 0.0209, 0.0266, 0.0216, 0.0200, 0.0220, 0.0197]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0109],\n",
      "        [0.0123],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0126],\n",
      "        [0.0115],\n",
      "        [0.0117]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0251]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.5000]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0112],\n",
      "        [0.0110],\n",
      "        [0.0111],\n",
      "        ...,\n",
      "        [0.0104],\n",
      "        [0.0115],\n",
      "        [0.0113]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0073, 0.0073, 0.0073,  ..., 0.0066, 0.0073, 0.0060]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0106],\n",
      "        [0.0100],\n",
      "        [0.0112],\n",
      "        ...,\n",
      "        [0.0113],\n",
      "        [0.0119],\n",
      "        [0.0111]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0073, 0.0073, 0.0073,  ..., 0.0066, 0.0073, 0.0060]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.3613]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0063, 0.0049, 0.0056,  ..., 0.0069, 0.0066, 0.0062]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.3613]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0170, 0.0135, 0.0137,  ..., 0.0159, 0.0167, 0.0164]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0087, 0.0025, 0.0048,  ..., 0.0131, 0.0103, 0.0077]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0116],\n",
      "        [0.0110],\n",
      "        [0.0115],\n",
      "        ...,\n",
      "        [0.0111],\n",
      "        [0.0123],\n",
      "        [0.0113]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.30.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0022, 0.0025, 0.0024,  ..., 0.0106, 0.0073, 0.0066]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.input_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.q_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0206],\n",
      "        [0.0108],\n",
      "        [0.0105],\n",
      "        ...,\n",
      "        [0.0129],\n",
      "        [0.0142],\n",
      "        [0.0151]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.q_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1011]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.k_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0154],\n",
      "        [0.0105],\n",
      "        [0.0098],\n",
      "        ...,\n",
      "        [0.0140],\n",
      "        [0.0170],\n",
      "        [0.0125]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.k_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1011]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.v_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0118],\n",
      "        [0.0102],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0097],\n",
      "        [0.0112],\n",
      "        [0.0101]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.v_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.1011]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.apply_rotary_pos_emb.query_quantizer Parameter containing:\n",
      "tensor([[[[0.1670]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.apply_rotary_pos_emb.key_quantizer Parameter containing:\n",
      "tensor([[[[0.3320]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.qkt_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[0.0747]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.qkt_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[0.1650]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.softmax.in_quantizer Parameter containing:\n",
      "tensor([[[[0.3203]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.pv_mm.x1_quantizer Parameter containing:\n",
      "tensor([[[[0.0039]]]], device='cuda:7', dtype=torch.bfloat16,\n",
      "       requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.pv_mm.x2_quantizer Parameter containing:\n",
      "tensor([[[[0.0187, 0.0212, 0.0150, 0.0219, 0.0176, 0.0199, 0.0245, 0.0161,\n",
      "           0.0205, 0.0262, 0.0177, 0.0160, 0.0167, 0.0181, 0.0238, 0.0200,\n",
      "           0.0190, 0.0178, 0.0153, 0.0173, 0.0178, 0.0143, 0.0156, 0.0154,\n",
      "           0.0160, 0.0177, 0.0175, 0.0165, 0.0266, 0.0181, 0.0233, 0.0144,\n",
      "           0.0245, 0.0150, 0.0200, 0.0164, 0.0176, 0.0179, 0.0177, 0.0160,\n",
      "           0.0304, 0.0184, 0.0192, 0.0152, 0.0200, 0.0166, 0.0152, 0.0172,\n",
      "           0.0156, 0.0270, 0.0162, 0.0171, 0.0173, 0.0192, 0.0154, 0.0173,\n",
      "           0.0194, 0.0215, 0.0157, 0.0204, 0.0190, 0.0208, 0.0217, 0.0154,\n",
      "           0.0192, 0.0151, 0.0156, 0.0188, 0.0193, 0.0200, 0.0159, 0.0199,\n",
      "           0.0199, 0.0204, 0.0172, 0.0192, 0.0171, 0.0206, 0.0234, 0.0187,\n",
      "           0.0210, 0.0181, 0.0198, 0.0179, 0.0165, 0.0199, 0.0164, 0.0178,\n",
      "           0.0183, 0.0182, 0.0179, 0.0190, 0.0201, 0.0198, 0.0172, 0.0197,\n",
      "           0.0139, 0.0156, 0.0167, 0.0188, 0.0142, 0.0161, 0.0167, 0.0186,\n",
      "           0.0253, 0.0183, 0.0160, 0.0186, 0.0201, 0.0177, 0.0167, 0.0173,\n",
      "           0.0147, 0.0208, 0.0186, 0.0187, 0.0155, 0.0181, 0.0166, 0.0155,\n",
      "           0.0167, 0.0148, 0.0166, 0.0159, 0.0159, 0.0181, 0.0156, 0.0205]]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.o_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0100],\n",
      "        [0.0117],\n",
      "        [0.0106],\n",
      "        ...,\n",
      "        [0.0103],\n",
      "        [0.0113],\n",
      "        [0.0108]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.self_attn.o_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0306]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.post_attention_layernorm.in_quantizer Parameter containing:\n",
      "tensor([[[115.]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.up_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0116],\n",
      "        [0.0112],\n",
      "        [0.0102],\n",
      "        ...,\n",
      "        [0.0107],\n",
      "        [0.0105],\n",
      "        [0.0193]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.up_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0053, 0.0052, 0.0056,  ..., 0.0058, 0.0052, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.gate_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0111],\n",
      "        [0.0109],\n",
      "        ...,\n",
      "        [0.0110],\n",
      "        [0.0177],\n",
      "        [0.0103]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.gate_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0053, 0.0052, 0.0056,  ..., 0.0058, 0.0052, 0.0049]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0618]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.fused_silu.hadamard1.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0063, 0.0060, 0.0057,  ..., 0.0051, 0.0064, 0.0050]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.fused_silu.sigmoid.act_quantizer Parameter containing:\n",
      "tensor([[[0.0618]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.fused_silu.hadamard2.act1_quantizer Parameter containing:\n",
      "tensor([[[0.0129, 0.0120, 0.0139,  ..., 0.0105, 0.0103, 0.0090]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.fused_silu.hadamard2.act2_quantizer Parameter containing:\n",
      "tensor([[[0.0086, 0.0068, 0.0053,  ..., 0.0029, 0.0094, 0.0027]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.down_proj.weight_quantizer Parameter containing:\n",
      "tensor([[0.0115],\n",
      "        [0.0126],\n",
      "        [0.0124],\n",
      "        ...,\n",
      "        [0.0111],\n",
      "        [0.0109],\n",
      "        [0.0120]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.layers.31.mlp.down_proj.act_quantizer Parameter containing:\n",
      "tensor([[[0.0049, 0.0029, 0.0029,  ..., 0.0019, 0.0024, 0.0020]]],\n",
      "       device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.norm.in_quantizer Parameter containing:\n",
      "tensor([[[115.]]], device='cuda:7', dtype=torch.bfloat16, requires_grad=True) True False\n",
      "language_model.model.norm.out_quantizer Parameter containing:\n",
      "tensor([[[0.5973]]], device='cuda:7', requires_grad=True) True False\n"
     ]
    }
   ],
   "source": [
    "# Run one calibration forward pass so every quantizer observes activations\n",
    "# and initializes its scale parameters; inference_mode skips autograd state.\n",
    "with torch.inference_mode():\n",
    "    Quantizer.calibration_quantizer(vla, **inputs)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.0041, dtype=torch.float64)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Predict an action with the (freshly calibrated) quantized model and\n",
    "# compare it against the full-precision reference `action_org` via MSE.\n",
    "with torch.inference_mode():\n",
    "    action_q_init = vla.predict_action(\n",
    "        **inputs, unnorm_key=\"bridge_orig\", do_sample=False, use_cache=False\n",
    "    )\n",
    "torch.nn.functional.mse_loss(\n",
    "    *(torch.from_numpy(i) for i in (action_q_init, action_org))\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(array([0.02819784, 0.04069337, 0.04003167, 0.08160118, 0.07759357,\n",
       "        0.20301985, 0.99607843]),\n",
       " array([ 0.00448837,  0.01479259, -0.00569757,  0.00752425, -0.00546965,\n",
       "         0.09019398,  0.99607843]))"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "action_q_init, action_org\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "ename": "RuntimeError",
     "evalue": "Given groups=1, weight of size [1024, 3, 14, 14], expected input[1, 6, 224, 224] to have 3 channels, but got 6 channels instead",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[10], line 2\u001b[0m\n\u001b[1;32m      1\u001b[0m [\n\u001b[0;32m----> 2\u001b[0m     \u001b[43mvits\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mshape,\n\u001b[1;32m      3\u001b[0m     vits[\u001b[38;5;241m1\u001b[39m](img)\u001b[38;5;241m.\u001b[39mshape,\n\u001b[1;32m      4\u001b[0m     v1\u001b[38;5;241m.\u001b[39mvla_forward(img)\u001b[38;5;241m.\u001b[39mshape,\n\u001b[1;32m      5\u001b[0m     v2\u001b[38;5;241m.\u001b[39mvla_forward(img)\u001b[38;5;241m.\u001b[39mshape,\n\u001b[1;32m      6\u001b[0m ]\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/module.py:1739\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1737\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1738\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1739\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/module.py:1750\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1745\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1746\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1747\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1748\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1749\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1750\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1752\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1753\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/.cache/huggingface/modules/transformers_modules/openvla-7b+casia_franka+b16+lr-0.0005+lora-r32+dropout-0.0--image_aug/modeling_prismatic.py:43\u001b[0m, in \u001b[0;36munpack_tuple.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m     42\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mwrapper\u001b[39m(\u001b[38;5;241m*\u001b[39margs: Any, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[0;32m---> 43\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     44\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m result[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(result, \u001b[38;5;28mtuple\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m result\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/timm/models/vision_transformer.py:671\u001b[0m, in \u001b[0;36mVisionTransformer.get_intermediate_layers\u001b[0;34m(self, x, n, reshape, return_prefix_tokens, norm)\u001b[0m\n\u001b[1;32m    667\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\" Intermediate layer accessor (NOTE: This is a WIP experiment).\u001b[39;00m\n\u001b[1;32m    668\u001b[0m \u001b[38;5;124;03mInspired by DINO / DINOv2 interface\u001b[39;00m\n\u001b[1;32m    669\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m    670\u001b[0m \u001b[38;5;66;03m# take last n blocks if n is an int, if in is a sequence, select by matching indices\u001b[39;00m\n\u001b[0;32m--> 671\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_intermediate_layers\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    672\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m norm:\n\u001b[1;32m    673\u001b[0m     outputs \u001b[38;5;241m=\u001b[39m [\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnorm(out) \u001b[38;5;28;01mfor\u001b[39;00m out \u001b[38;5;129;01min\u001b[39;00m outputs]\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/timm/models/vision_transformer.py:649\u001b[0m, in \u001b[0;36mVisionTransformer._intermediate_layers\u001b[0;34m(self, x, n)\u001b[0m\n\u001b[1;32m    646\u001b[0m take_indices \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m(\u001b[38;5;28mrange\u001b[39m(num_blocks \u001b[38;5;241m-\u001b[39m n, num_blocks) \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(n, \u001b[38;5;28mint\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m n)\n\u001b[1;32m    648\u001b[0m \u001b[38;5;66;03m# forward pass\u001b[39;00m\n\u001b[0;32m--> 649\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpatch_embed\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    650\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pos_embed(x)\n\u001b[1;32m    651\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpatch_drop(x)\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/module.py:1739\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1737\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1738\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1739\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/module.py:1750\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1745\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1746\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1747\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1748\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1749\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1750\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1752\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1753\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/timm/layers/patch_embed.py:87\u001b[0m, in \u001b[0;36mPatchEmbed.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m     85\u001b[0m     pad_w \u001b[38;5;241m=\u001b[39m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpatch_size[\u001b[38;5;241m1\u001b[39m] \u001b[38;5;241m-\u001b[39m W \u001b[38;5;241m%\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpatch_size[\u001b[38;5;241m1\u001b[39m]) \u001b[38;5;241m%\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpatch_size[\u001b[38;5;241m1\u001b[39m]\n\u001b[1;32m     86\u001b[0m     x \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mpad(x, (\u001b[38;5;241m0\u001b[39m, pad_w, \u001b[38;5;241m0\u001b[39m, pad_h))\n\u001b[0;32m---> 87\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mproj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     88\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mflatten:\n\u001b[1;32m     89\u001b[0m     x \u001b[38;5;241m=\u001b[39m x\u001b[38;5;241m.\u001b[39mflatten(\u001b[38;5;241m2\u001b[39m)\u001b[38;5;241m.\u001b[39mtranspose(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m)  \u001b[38;5;66;03m# NCHW -> NLC\u001b[39;00m\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/module.py:1739\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1737\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m   1738\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1739\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/module.py:1750\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1745\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1746\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1747\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m   1748\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1749\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1750\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1752\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m   1753\u001b[0m called_always_called_hooks \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mset\u001b[39m()\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/conv.py:554\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m    553\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 554\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniforge3/envs/deepseek/lib/python3.10/site-packages/torch/nn/modules/conv.py:549\u001b[0m, in \u001b[0;36mConv2d._conv_forward\u001b[0;34m(self, input, weight, bias)\u001b[0m\n\u001b[1;32m    537\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m    538\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(\n\u001b[1;32m    539\u001b[0m         F\u001b[38;5;241m.\u001b[39mpad(\n\u001b[1;32m    540\u001b[0m             \u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    547\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups,\n\u001b[1;32m    548\u001b[0m     )\n\u001b[0;32m--> 549\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    550\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\n\u001b[1;32m    551\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mRuntimeError\u001b[0m: Given groups=1, weight of size [1024, 3, 14, 14], expected input[1, 6, 224, 224] to have 3 channels, but got 6 channels instead"
     ]
    }
   ],
   "source": [
    "# Compare output shapes of the original timm ViTs vs. the wrapped forwards.\n",
    "# NOTE(review): this cell currently fails -- the traceback shows `img` has\n",
    "# 6 channels while the patch embed expects 3; confirm how `img` was built.\n",
    "[\n",
    "    vits[0](img).shape,\n",
    "    vits[1](img).shape,\n",
    "    v1.vla_forward(img).shape,\n",
    "    v2.vla_forward(img).shape,\n",
    "]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "v2.norm.bias\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "vits[1]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Verify the wrapped forward is numerically identical to the original ViT.\n",
    "# (Fixed: a trailing comma previously wrapped the bool in a 1-tuple.)\n",
    "torch.allclose(v2.vla_forward(img), vits[1](img))\n",
    "# torch.nn.functional.mse_loss(v1.vla_forward(img), vits[0](img))\n",
    "# (v1.vla_forward(img) - vits[0](img)).abs().max()\n",
    "# v2.vla_forward(img), vits[1](img)\n",
    "# "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "vits[1].attn_pool.pool\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "vits[0].num_prefix_tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Distinct module classes that make up the vision featurizer.\n",
    "s = {type(m) for m in vla.vision_backbone.featurizer.modules()}\n",
    "s\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dig out the source of the function the featurizer's forward wraps:\n",
    "# the forward is a closure, so retrieve the wrapped `fn` from its nonlocals.\n",
    "import inspect\n",
    "import torch.nn.modules.linear\n",
    "\n",
    "torch.nn.modules.linear.Identity\n",
    "\n",
    "# print(next(vla.children()).__class__)\n",
    "# inspect.getsourcefile(vla.vision_backbone.featurizer.forward.__dict__)\n",
    "inspect.getsource(\n",
    "    inspect.getclosurevars(vla.vision_backbone.featurizer.forward).nonlocals[\"fn\"].func\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a dummy batch of 200 random token ids and run the bare language\n",
    "# model once to capture a reference output `o_r` for later comparison.\n",
    "fake_input = dict(\n",
    "    input_ids=torch.randint(1,10000,\n",
    "        [1, 200], dtype=torch.int64, device=vla.language_model.model.device\n",
    "    )\n",
    ")\n",
    "o_r = vla.language_model.model(**fake_input)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Patch the language model for training, tag its submodules with names,\n",
    "# then calibrate its quantizers on the dummy input.\n",
    "patch_model_for_train(vla.language_model)\n",
    "set_module_names(vla.language_model.model)\n",
    "Quantizer.calibration_quantizer(vla.language_model.model,**fake_input)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "list(vla.children())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "disable_params(vla.language_model.model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dict(vla.language_model.model.named_parameters())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-run the (now quantized) language model on the same dummy input and\n",
    "# report the mean absolute deviation from the reference output `o_r`.\n",
    "with torch.no_grad():\n",
    "    torch.cuda.empty_cache()\n",
    "    o = vla.language_model.model(**fake_input)\n",
    "\n",
    "\n",
    "(o.last_hidden_state - o_r.last_hidden_state).abs().mean()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.all(o == 0)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the OpenVLA config from disk and inspect its text (LLM) sub-config.\n",
    "# The commented block below was an earlier attempt to build the LLM from a\n",
    "# hand-written config dict.\n",
    "from transformers import AutoModelForCausalLM,AutoConfig\n",
    "\n",
    "# llm = AutoModelForCausalLM.from_config(\n",
    "#     {\n",
    "#         \"model_type\": \"llama\",\n",
    "#         \"pad_token_id\": 32000,\n",
    "#         \"torch_dtype\": \"bfloat16\",\n",
    "#         \"vocab_size\": 32064,\n",
    "#     }\n",
    "\n",
    "# )\n",
    "# llm\n",
    "AutoConfig.from_pretrained(\"../model/openvla\").text_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "deepseek",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
