{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "ori_ckpt_path = \"/mnt/sfs/asr/ckpt/epoch_3.pt\"\n",
    "new_ckpt_path = \"/mnt/obs/ckpt/um/qwen2_multi_task_4_fa/epoch_0_with_speechp/step_0/mp_rank_00_model_states.pt\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Key: model_state_dict\n",
      "    Key: layer1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3, 3])\n",
      "    Key: layer1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3])\n",
      "    Key: layer2\n",
      "        Key: weight\n",
      "            Type: <class 'torch.Tensor'>, Shape: torch.Size([2, 2])\n",
      "        Key: bias\n",
      "            Type: <class 'torch.Tensor'>, Shape: torch.Size([2])\n",
      "Key: optimizer_state_dict\n",
      "    Key: state\n",
      "        Key: param1\n",
      "            Type: <class 'torch.Tensor'>, Shape: torch.Size([1])\n",
      "        Key: param2\n",
      "            Type: <class 'torch.Tensor'>, Shape: torch.Size([1])\n",
      "    Key: param_groups\n",
      "        Type: <class 'list'>, Value: [{'lr': 0.001}]\n",
      "Key: epoch\n",
      "    Type: <class 'int'>, Value: 10\n",
      "Key: loss\n",
      "    Type: <class 'float'>, Value: 0.5\n"
     ]
    }
   ],
   "source": [
    "def print_ckpt_structure(ckpt, indent=0, max_depth=None, current_depth=0):\n",
    "    \"\"\"\n",
    "    打印 PyTorch 模型 checkpoint 的结构。\n",
    "\n",
    "    参数:\n",
    "        ckpt (dict): 模型 checkpoint 的字典。\n",
    "        indent (int): 当前层级的缩进空格数。\n",
    "        max_depth (int): 最大打印深度。如果为 None，则打印所有层级。\n",
    "        current_depth (int): 当前递归深度。\n",
    "    \"\"\"\n",
    "    if max_depth is not None and current_depth > max_depth:\n",
    "        return\n",
    "\n",
    "    for key, value in ckpt.items():\n",
    "        # 打印当前键\n",
    "        print(\" \" * indent + f\"Key: {key}\")\n",
    "\n",
    "        # 如果值是字典，递归打印\n",
    "        if isinstance(value, dict):\n",
    "            print_ckpt_structure(value, indent + 4, max_depth, current_depth + 1)\n",
    "        else:\n",
    "            # 打印值的类型和形状（如果是张量）\n",
    "            if isinstance(value, torch.Tensor):\n",
    "                print(\" \" * (indent + 4) + f\"Type: {type(value)}, Shape: {value.shape}\")\n",
    "            else:\n",
    "                print(\" \" * (indent + 4) + f\"Type: {type(value)}, Value: {value}\")\n",
    "# 假设有一个嵌套的 checkpoint 字典\n",
    "ckpt = {\n",
    "    \"model_state_dict\": {\n",
    "        \"layer1.weight\": torch.randn(3, 3),\n",
    "        \"layer1.bias\": torch.randn(3),\n",
    "        \"layer2\": {\n",
    "            \"weight\": torch.randn(2, 2),\n",
    "            \"bias\": torch.randn(2),\n",
    "        },\n",
    "    },\n",
    "    \"optimizer_state_dict\": {\n",
    "        \"state\": {\n",
    "            \"param1\": torch.randn(1),\n",
    "            \"param2\": torch.randn(1),\n",
    "        },\n",
    "        \"param_groups\": [{\"lr\": 0.001}],\n",
    "    },\n",
    "    \"epoch\": 10,\n",
    "    \"loss\": 0.5,\n",
    "}\n",
    "\n",
    "# 打印 checkpoint 结构\n",
    "print_ckpt_structure(ckpt, max_depth=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "ori_ckpt = torch.load(ori_ckpt_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Key: encoder.embed.pos_enc.pe\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1, 1500, 1024])\n",
      "Key: speech_transformer.embed.pos_enc.pe\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1, 5000, 1024])\n",
      "Key: llama_model.base_model.model.model.embed_tokens.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.0.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.1.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.2.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.3.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.4.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.5.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.6.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.7.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.8.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.9.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.10.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.11.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.12.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.13.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.14.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.15.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.16.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.17.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.18.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.19.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.20.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.21.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.22.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.23.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.24.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.25.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.26.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.q_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.q_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.k_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.k_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.v_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.v_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([512])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.o_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.gate_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.up_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.down_proj.base_layer.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.27.input_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.post_attention_layernorm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.model.norm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: llama_model.base_model.model.lm_head.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "Key: encoder.embed.conv.0.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 80, 3])\n",
      "Key: encoder.embed.conv.0.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.embed.conv.2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "Key: encoder.embed.conv.2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.after_norm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.after_norm.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.0.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.0.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.0.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.0.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.0.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.1.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.1.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.1.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.1.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.1.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.2.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.2.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.2.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.2.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.2.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.3.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.3.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.3.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.3.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.3.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.4.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.4.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.4.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.4.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.4.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.5.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.5.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.5.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.5.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.5.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.6.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.6.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.6.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.6.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.6.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.7.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.7.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.7.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.7.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.7.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.8.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.8.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.8.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.8.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.8.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.9.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.9.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.9.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.9.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.9.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.10.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.10.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.10.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.10.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.10.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.11.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.11.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.11.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.11.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.11.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.12.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.12.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.12.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.12.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.12.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.13.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.13.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.13.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.13.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.13.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.14.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.14.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.14.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.14.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.14.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.15.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.15.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.15.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.15.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.15.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.16.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.16.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.16.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.16.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.16.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.17.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.17.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.17.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.17.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.17.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.18.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.18.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.18.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.18.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.18.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.19.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.19.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.19.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.19.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.19.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.20.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.20.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.20.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.20.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.20.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.21.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.21.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.21.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.21.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.21.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.22.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.22.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.22.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.22.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.22.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: encoder.encoders.23.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "Key: encoder.encoders.23.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "Key: encoder.encoders.23.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "Key: encoder.encoders.23.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: encoder.encoders.23.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: ln_speech.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: ln_speech.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.embed.out.0.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.embed.out.0.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.embed.out.1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.embed.out.1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.after_norm.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.after_norm.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_k.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.0.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "Key: speech_transformer.encoders.0.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "Key: speech_transformer.encoders.0.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "Key: speech_transformer.encoders.0.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.0.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_k.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.1.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "Key: speech_transformer.encoders.1.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "Key: speech_transformer.encoders.1.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "Key: speech_transformer.encoders.1.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.1.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_k.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.2.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "Key: speech_transformer.encoders.2.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "Key: speech_transformer.encoders.2.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "Key: speech_transformer.encoders.2.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.2.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_q.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_q.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_k.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_k.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_v.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_v.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_out.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "Key: speech_transformer.encoders.3.self_attn.linear_out.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.feed_forward.w_1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "Key: speech_transformer.encoders.3.feed_forward.w_1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "Key: speech_transformer.encoders.3.feed_forward.w_2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "Key: speech_transformer.encoders.3.feed_forward.w_2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.norm1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.norm1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.norm2.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_transformer.encoders.3.norm2.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "Key: llama_model.base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "Key: down_sample_2.conv.1.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "Key: down_sample_2.conv.1.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: down_sample_2.conv.4.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "Key: down_sample_2.conv.4.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: down_sample_2.conv.7.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "Key: down_sample_2.conv.7.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "Key: speech_llama_proj.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 1024])\n",
      "Key: speech_llama_proj.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "Key: speech_token_emded.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4099, 3584])\n",
      "Key: speaker_head.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4097, 3584])\n",
      "Key: speaker_head.bias\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([4097])\n",
      "Key: embed_tokens.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "Key: lm_head.weight\n",
      "    Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n"
     ]
    }
   ],
   "source": [
    "# Recursively print the key hierarchy and tensor shapes of the original checkpoint\n",
    "print_ckpt_structure(ori_ckpt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the DeepSpeed model-states checkpoint onto CPU.\n",
    "# NOTE(review): torch>=2.6 defaults to weights_only=True, which can fail on\n",
    "# checkpoints containing non-tensor objects (this file holds a nested 'module'\n",
    "# dict and framework metadata). We trust this locally-produced file, so be\n",
    "# explicit. Never use weights_only=False on untrusted checkpoints (arbitrary\n",
    "# code execution via pickle).\n",
    "new_ckpt = torch.load(new_ckpt_path, map_location=\"cpu\", weights_only=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Key: module\n",
      "    Key: encoder.embed.conv.0.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 80, 3])\n",
      "    Key: encoder.embed.conv.0.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.embed.conv.2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "    Key: encoder.embed.conv.2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.embed.pos_enc.pe\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1, 1500, 1024])\n",
      "    Key: encoder.after_norm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.after_norm.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.0.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.0.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.0.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.0.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.0.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.1.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.1.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.1.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.1.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.1.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.2.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.2.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.2.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.2.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.2.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.3.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.3.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.3.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.3.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.3.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.4.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.4.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.4.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.4.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.4.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.5.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.5.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.5.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.5.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.5.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.6.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.6.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.6.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.6.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.6.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.7.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.7.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.7.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.7.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.7.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.8.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.8.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.8.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.8.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.8.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.9.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.9.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.9.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.9.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.9.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.10.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.10.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.10.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.10.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.10.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.11.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.11.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.11.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.11.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.11.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.12.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.12.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.12.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.12.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.12.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.13.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.13.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.13.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.13.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.13.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.14.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.14.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.14.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.14.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.14.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.15.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.15.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.15.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.15.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.15.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.16.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.16.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.16.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.16.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.16.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.17.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.17.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.17.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.17.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.17.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.18.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.18.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.18.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.18.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.18.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.19.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.19.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.19.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.19.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.19.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.20.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.20.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.20.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.20.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.20.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.21.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.21.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.21.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.21.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.21.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.22.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.22.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.22.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.22.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.22.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: encoder.encoders.23.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096, 1024])\n",
      "    Key: encoder.encoders.23.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4096])\n",
      "    Key: encoder.encoders.23.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 4096])\n",
      "    Key: encoder.encoders.23.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: encoder.encoders.23.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: ln_speech.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: ln_speech.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.embed.out.0.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.embed.out.0.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.embed.out.1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.embed.out.1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.embed.pos_enc.pe\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1, 5000, 1024])\n",
      "    Key: speech_transformer.after_norm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.after_norm.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_k.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.0.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "    Key: speech_transformer.encoders.0.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "    Key: speech_transformer.encoders.0.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "    Key: speech_transformer.encoders.0.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.0.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_k.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.1.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "    Key: speech_transformer.encoders.1.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "    Key: speech_transformer.encoders.1.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "    Key: speech_transformer.encoders.1.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.1.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_k.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.2.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "    Key: speech_transformer.encoders.2.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "    Key: speech_transformer.encoders.2.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "    Key: speech_transformer.encoders.2.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.2.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_q.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_q.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_k.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_k.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_v.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_v.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_out.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024])\n",
      "    Key: speech_transformer.encoders.3.self_attn.linear_out.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.feed_forward.w_1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560, 1024])\n",
      "    Key: speech_transformer.encoders.3.feed_forward.w_1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([2560])\n",
      "    Key: speech_transformer.encoders.3.feed_forward.w_2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 2560])\n",
      "    Key: speech_transformer.encoders.3.feed_forward.w_2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.norm1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.norm1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.norm2.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_transformer.encoders.3.norm2.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: llama_model.base_model.model.model.embed_tokens.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.0.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.0.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.0.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.1.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.1.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.1.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.2.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.2.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.2.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.3.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.3.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.3.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.4.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.4.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.4.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.5.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.5.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.5.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.6.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.6.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.6.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.7.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.7.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.7.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.8.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.8.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.8.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.9.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.9.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.9.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.10.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.10.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.10.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.11.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.11.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.11.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.12.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.12.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.12.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.13.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.13.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.13.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.14.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.14.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.14.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.15.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.15.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.15.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.16.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.16.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.16.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.17.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.17.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.17.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.18.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.18.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.18.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.19.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.19.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.19.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.20.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.20.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.20.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.21.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.21.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.21.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.22.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.22.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.22.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.23.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.23.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.23.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.24.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.24.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.24.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.25.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.25.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.25.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.26.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.26.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.26.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.self_attn.w_pack.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.self_attn.w_pack.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4608])\n",
      "    Key: llama_model.base_model.model.model.layers.27.self_attn.o_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.gate_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.up_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([18944, 3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.down_proj.base_layer.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([8, 18944])\n",
      "    Key: llama_model.base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 8])\n",
      "    Key: llama_model.base_model.model.model.layers.27.input_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.layers.27.post_attention_layernorm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.model.norm.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: llama_model.base_model.model.lm_head.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "    Key: down_sample_2.conv.1.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "    Key: down_sample_2.conv.1.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: down_sample_2.conv.4.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "    Key: down_sample_2.conv.4.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: down_sample_2.conv.7.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024, 1024, 3])\n",
      "    Key: down_sample_2.conv.7.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([1024])\n",
      "    Key: speech_llama_proj.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584, 1024])\n",
      "    Key: speech_llama_proj.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([3584])\n",
      "    Key: embed_tokens.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "    Key: lm_head.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([152064, 3584])\n",
      "    Key: speech_token_emded.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4099, 3584])\n",
      "    Key: speaker_head.weight\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4097, 3584])\n",
      "    Key: speaker_head.bias\n",
      "        Type: <class 'torch.Tensor'>, Shape: torch.Size([4097])\n",
      "Key: buffer_names\n",
      "    Type: <class 'list'>, Value: ['encoder.embed.pos_enc.pe', 'speech_transformer.embed.pos_enc.pe']\n",
      "Key: optimizer\n",
      "    Type: <class 'NoneType'>, Value: None\n",
      "Key: param_shapes\n",
      "    Type: <class 'list'>, Value: [OrderedDict([('encoder.embed.conv.0.weight', torch.Size([1024, 80, 3])), ('encoder.embed.conv.0.bias', torch.Size([1024])), ('encoder.embed.conv.2.weight', torch.Size([1024, 1024, 3])), ('encoder.embed.conv.2.bias', torch.Size([1024])), ('encoder.after_norm.weight', torch.Size([1024])), ('encoder.after_norm.bias', torch.Size([1024])), ('encoder.encoders.0.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.0.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.0.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.0.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.0.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.0.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.0.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.0.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.0.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.0.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.0.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.0.norm1.weight', torch.Size([1024])), ('encoder.encoders.0.norm1.bias', torch.Size([1024])), ('encoder.encoders.0.norm2.weight', torch.Size([1024])), ('encoder.encoders.0.norm2.bias', torch.Size([1024])), ('encoder.encoders.1.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.1.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.1.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.1.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.1.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.1.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.1.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.1.feed_forward.w_1.weight', torch.Size([4096, 1024])), 
('encoder.encoders.1.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.1.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.1.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.1.norm1.weight', torch.Size([1024])), ('encoder.encoders.1.norm1.bias', torch.Size([1024])), ('encoder.encoders.1.norm2.weight', torch.Size([1024])), ('encoder.encoders.1.norm2.bias', torch.Size([1024])), ('encoder.encoders.2.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.2.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.2.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.2.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.2.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.2.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.2.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.2.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.2.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.2.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.2.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.2.norm1.weight', torch.Size([1024])), ('encoder.encoders.2.norm1.bias', torch.Size([1024])), ('encoder.encoders.2.norm2.weight', torch.Size([1024])), ('encoder.encoders.2.norm2.bias', torch.Size([1024])), ('encoder.encoders.3.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.3.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.3.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.3.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.3.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.3.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.3.self_attn.linear_out.bias', torch.Size([1024])), 
('encoder.encoders.3.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.3.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.3.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.3.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.3.norm1.weight', torch.Size([1024])), ('encoder.encoders.3.norm1.bias', torch.Size([1024])), ('encoder.encoders.3.norm2.weight', torch.Size([1024])), ('encoder.encoders.3.norm2.bias', torch.Size([1024])), ('encoder.encoders.4.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.4.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.4.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.4.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.4.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.4.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.4.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.4.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.4.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.4.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.4.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.4.norm1.weight', torch.Size([1024])), ('encoder.encoders.4.norm1.bias', torch.Size([1024])), ('encoder.encoders.4.norm2.weight', torch.Size([1024])), ('encoder.encoders.4.norm2.bias', torch.Size([1024])), ('encoder.encoders.5.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.5.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.5.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.5.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.5.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.5.self_attn.linear_out.weight', torch.Size([1024, 1024])), 
('encoder.encoders.5.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.5.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.5.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.5.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.5.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.5.norm1.weight', torch.Size([1024])), ('encoder.encoders.5.norm1.bias', torch.Size([1024])), ('encoder.encoders.5.norm2.weight', torch.Size([1024])), ('encoder.encoders.5.norm2.bias', torch.Size([1024])), ('encoder.encoders.6.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.6.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.6.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.6.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.6.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.6.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.6.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.6.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.6.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.6.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.6.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.6.norm1.weight', torch.Size([1024])), ('encoder.encoders.6.norm1.bias', torch.Size([1024])), ('encoder.encoders.6.norm2.weight', torch.Size([1024])), ('encoder.encoders.6.norm2.bias', torch.Size([1024])), ('encoder.encoders.7.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.7.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.7.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.7.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.7.self_attn.linear_v.bias', torch.Size([1024])), 
('encoder.encoders.7.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.7.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.7.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.7.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.7.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.7.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.7.norm1.weight', torch.Size([1024])), ('encoder.encoders.7.norm1.bias', torch.Size([1024])), ('encoder.encoders.7.norm2.weight', torch.Size([1024])), ('encoder.encoders.7.norm2.bias', torch.Size([1024])), ('encoder.encoders.8.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.8.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.8.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.8.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.8.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.8.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.8.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.8.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.8.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.8.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.8.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.8.norm1.weight', torch.Size([1024])), ('encoder.encoders.8.norm1.bias', torch.Size([1024])), ('encoder.encoders.8.norm2.weight', torch.Size([1024])), ('encoder.encoders.8.norm2.bias', torch.Size([1024])), ('encoder.encoders.9.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.9.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.9.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.9.self_attn.linear_v.weight', torch.Size([1024, 1024])), 
('encoder.encoders.9.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.9.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.9.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.9.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.9.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.9.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.9.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.9.norm1.weight', torch.Size([1024])), ('encoder.encoders.9.norm1.bias', torch.Size([1024])), ('encoder.encoders.9.norm2.weight', torch.Size([1024])), ('encoder.encoders.9.norm2.bias', torch.Size([1024])), ('encoder.encoders.10.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.10.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.10.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.10.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.10.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.10.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.10.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.10.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.10.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.10.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.10.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.10.norm1.weight', torch.Size([1024])), ('encoder.encoders.10.norm1.bias', torch.Size([1024])), ('encoder.encoders.10.norm2.weight', torch.Size([1024])), ('encoder.encoders.10.norm2.bias', torch.Size([1024])), ('encoder.encoders.11.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.11.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.11.self_attn.linear_k.weight', torch.Size([1024, 1024])), 
('encoder.encoders.11.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.11.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.11.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.11.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.11.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.11.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.11.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.11.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.11.norm1.weight', torch.Size([1024])), ('encoder.encoders.11.norm1.bias', torch.Size([1024])), ('encoder.encoders.11.norm2.weight', torch.Size([1024])), ('encoder.encoders.11.norm2.bias', torch.Size([1024])), ('encoder.encoders.12.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.12.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.12.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.12.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.12.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.12.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.12.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.12.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.12.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.12.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.12.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.12.norm1.weight', torch.Size([1024])), ('encoder.encoders.12.norm1.bias', torch.Size([1024])), ('encoder.encoders.12.norm2.weight', torch.Size([1024])), ('encoder.encoders.12.norm2.bias', torch.Size([1024])), ('encoder.encoders.13.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.13.self_attn.linear_q.bias', torch.Size([1024])), 
('encoder.encoders.13.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.13.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.13.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.13.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.13.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.13.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.13.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.13.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.13.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.13.norm1.weight', torch.Size([1024])), ('encoder.encoders.13.norm1.bias', torch.Size([1024])), ('encoder.encoders.13.norm2.weight', torch.Size([1024])), ('encoder.encoders.13.norm2.bias', torch.Size([1024])), ('encoder.encoders.14.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.14.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.14.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.14.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.14.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.14.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.14.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.14.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.14.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.14.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.14.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.14.norm1.weight', torch.Size([1024])), ('encoder.encoders.14.norm1.bias', torch.Size([1024])), ('encoder.encoders.14.norm2.weight', torch.Size([1024])), ('encoder.encoders.14.norm2.bias', torch.Size([1024])), ('encoder.encoders.15.self_attn.linear_q.weight', torch.Size([1024, 1024])), 
('encoder.encoders.15.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.15.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.15.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.15.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.15.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.15.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.15.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.15.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.15.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.15.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.15.norm1.weight', torch.Size([1024])), ('encoder.encoders.15.norm1.bias', torch.Size([1024])), ('encoder.encoders.15.norm2.weight', torch.Size([1024])), ('encoder.encoders.15.norm2.bias', torch.Size([1024])), ('encoder.encoders.16.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.16.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.16.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.16.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.16.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.16.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.16.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.16.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.16.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.16.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.16.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.16.norm1.weight', torch.Size([1024])), ('encoder.encoders.16.norm1.bias', torch.Size([1024])), ('encoder.encoders.16.norm2.weight', torch.Size([1024])), ('encoder.encoders.16.norm2.bias', torch.Size([1024])), 
('encoder.encoders.17.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.17.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.17.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.17.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.17.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.17.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.17.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.17.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.17.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.17.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.17.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.17.norm1.weight', torch.Size([1024])), ('encoder.encoders.17.norm1.bias', torch.Size([1024])), ('encoder.encoders.17.norm2.weight', torch.Size([1024])), ('encoder.encoders.17.norm2.bias', torch.Size([1024])), ('encoder.encoders.18.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.18.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.18.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.18.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.18.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.18.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.18.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.18.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.18.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.18.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.18.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.18.norm1.weight', torch.Size([1024])), ('encoder.encoders.18.norm1.bias', torch.Size([1024])), ('encoder.encoders.18.norm2.weight', 
torch.Size([1024])), ('encoder.encoders.18.norm2.bias', torch.Size([1024])), ('encoder.encoders.19.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.19.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.19.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.19.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.19.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.19.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.19.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.19.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.19.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.19.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.19.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.19.norm1.weight', torch.Size([1024])), ('encoder.encoders.19.norm1.bias', torch.Size([1024])), ('encoder.encoders.19.norm2.weight', torch.Size([1024])), ('encoder.encoders.19.norm2.bias', torch.Size([1024])), ('encoder.encoders.20.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.20.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.20.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.20.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.20.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.20.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.20.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.20.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.20.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.20.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.20.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.20.norm1.weight', torch.Size([1024])), 
('encoder.encoders.20.norm1.bias', torch.Size([1024])), ('encoder.encoders.20.norm2.weight', torch.Size([1024])), ('encoder.encoders.20.norm2.bias', torch.Size([1024])), ('encoder.encoders.21.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.21.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.21.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.21.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.21.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.21.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.21.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.21.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.21.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.21.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.21.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.21.norm1.weight', torch.Size([1024])), ('encoder.encoders.21.norm1.bias', torch.Size([1024])), ('encoder.encoders.21.norm2.weight', torch.Size([1024])), ('encoder.encoders.21.norm2.bias', torch.Size([1024])), ('encoder.encoders.22.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.22.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.22.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.22.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.22.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.22.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.22.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.22.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.22.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.22.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.22.feed_forward.w_2.bias', 
torch.Size([1024])), ('encoder.encoders.22.norm1.weight', torch.Size([1024])), ('encoder.encoders.22.norm1.bias', torch.Size([1024])), ('encoder.encoders.22.norm2.weight', torch.Size([1024])), ('encoder.encoders.22.norm2.bias', torch.Size([1024])), ('encoder.encoders.23.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('encoder.encoders.23.self_attn.linear_q.bias', torch.Size([1024])), ('encoder.encoders.23.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('encoder.encoders.23.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('encoder.encoders.23.self_attn.linear_v.bias', torch.Size([1024])), ('encoder.encoders.23.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('encoder.encoders.23.self_attn.linear_out.bias', torch.Size([1024])), ('encoder.encoders.23.feed_forward.w_1.weight', torch.Size([4096, 1024])), ('encoder.encoders.23.feed_forward.w_1.bias', torch.Size([4096])), ('encoder.encoders.23.feed_forward.w_2.weight', torch.Size([1024, 4096])), ('encoder.encoders.23.feed_forward.w_2.bias', torch.Size([1024])), ('encoder.encoders.23.norm1.weight', torch.Size([1024])), ('encoder.encoders.23.norm1.bias', torch.Size([1024])), ('encoder.encoders.23.norm2.weight', torch.Size([1024])), ('encoder.encoders.23.norm2.bias', torch.Size([1024])), ('ln_speech.weight', torch.Size([1024])), ('ln_speech.bias', torch.Size([1024])), ('speech_transformer.embed.out.0.weight', torch.Size([1024, 1024])), ('speech_transformer.embed.out.0.bias', torch.Size([1024])), ('speech_transformer.embed.out.1.weight', torch.Size([1024])), ('speech_transformer.embed.out.1.bias', torch.Size([1024])), ('speech_transformer.after_norm.weight', torch.Size([1024])), ('speech_transformer.after_norm.bias', torch.Size([1024])), ('speech_transformer.encoders.0.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.0.self_attn.linear_q.bias', torch.Size([1024])), ('speech_transformer.encoders.0.self_attn.linear_k.weight', torch.Size([1024, 1024])), 
('speech_transformer.encoders.0.self_attn.linear_k.bias', torch.Size([1024])), ('speech_transformer.encoders.0.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.0.self_attn.linear_v.bias', torch.Size([1024])), ('speech_transformer.encoders.0.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.0.self_attn.linear_out.bias', torch.Size([1024])), ('speech_transformer.encoders.0.feed_forward.w_1.weight', torch.Size([2560, 1024])), ('speech_transformer.encoders.0.feed_forward.w_1.bias', torch.Size([2560])), ('speech_transformer.encoders.0.feed_forward.w_2.weight', torch.Size([1024, 2560])), ('speech_transformer.encoders.0.feed_forward.w_2.bias', torch.Size([1024])), ('speech_transformer.encoders.0.norm1.weight', torch.Size([1024])), ('speech_transformer.encoders.0.norm1.bias', torch.Size([1024])), ('speech_transformer.encoders.0.norm2.weight', torch.Size([1024])), ('speech_transformer.encoders.0.norm2.bias', torch.Size([1024])), ('speech_transformer.encoders.1.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.1.self_attn.linear_q.bias', torch.Size([1024])), ('speech_transformer.encoders.1.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.1.self_attn.linear_k.bias', torch.Size([1024])), ('speech_transformer.encoders.1.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.1.self_attn.linear_v.bias', torch.Size([1024])), ('speech_transformer.encoders.1.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.1.self_attn.linear_out.bias', torch.Size([1024])), ('speech_transformer.encoders.1.feed_forward.w_1.weight', torch.Size([2560, 1024])), ('speech_transformer.encoders.1.feed_forward.w_1.bias', torch.Size([2560])), ('speech_transformer.encoders.1.feed_forward.w_2.weight', torch.Size([1024, 2560])), ('speech_transformer.encoders.1.feed_forward.w_2.bias', torch.Size([1024])), 
('speech_transformer.encoders.1.norm1.weight', torch.Size([1024])), ('speech_transformer.encoders.1.norm1.bias', torch.Size([1024])), ('speech_transformer.encoders.1.norm2.weight', torch.Size([1024])), ('speech_transformer.encoders.1.norm2.bias', torch.Size([1024])), ('speech_transformer.encoders.2.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.2.self_attn.linear_q.bias', torch.Size([1024])), ('speech_transformer.encoders.2.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.2.self_attn.linear_k.bias', torch.Size([1024])), ('speech_transformer.encoders.2.self_attn.linear_v.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.2.self_attn.linear_v.bias', torch.Size([1024])), ('speech_transformer.encoders.2.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.2.self_attn.linear_out.bias', torch.Size([1024])), ('speech_transformer.encoders.2.feed_forward.w_1.weight', torch.Size([2560, 1024])), ('speech_transformer.encoders.2.feed_forward.w_1.bias', torch.Size([2560])), ('speech_transformer.encoders.2.feed_forward.w_2.weight', torch.Size([1024, 2560])), ('speech_transformer.encoders.2.feed_forward.w_2.bias', torch.Size([1024])), ('speech_transformer.encoders.2.norm1.weight', torch.Size([1024])), ('speech_transformer.encoders.2.norm1.bias', torch.Size([1024])), ('speech_transformer.encoders.2.norm2.weight', torch.Size([1024])), ('speech_transformer.encoders.2.norm2.bias', torch.Size([1024])), ('speech_transformer.encoders.3.self_attn.linear_q.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.3.self_attn.linear_q.bias', torch.Size([1024])), ('speech_transformer.encoders.3.self_attn.linear_k.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.3.self_attn.linear_k.bias', torch.Size([1024])), ('speech_transformer.encoders.3.self_attn.linear_v.weight', torch.Size([1024, 1024])), 
('speech_transformer.encoders.3.self_attn.linear_v.bias', torch.Size([1024])), ('speech_transformer.encoders.3.self_attn.linear_out.weight', torch.Size([1024, 1024])), ('speech_transformer.encoders.3.self_attn.linear_out.bias', torch.Size([1024])), ('speech_transformer.encoders.3.feed_forward.w_1.weight', torch.Size([2560, 1024])), ('speech_transformer.encoders.3.feed_forward.w_1.bias', torch.Size([2560])), ('speech_transformer.encoders.3.feed_forward.w_2.weight', torch.Size([1024, 2560])), ('speech_transformer.encoders.3.feed_forward.w_2.bias', torch.Size([1024])), ('speech_transformer.encoders.3.norm1.weight', torch.Size([1024])), ('speech_transformer.encoders.3.norm1.bias', torch.Size([1024])), ('speech_transformer.encoders.3.norm2.weight', torch.Size([1024])), ('speech_transformer.encoders.3.norm2.bias', torch.Size([1024])), ('llama_model.base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), 
('llama_model.base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight', torch.Size([3584, 8])), ('llama_model.base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight', torch.Size([8, 3584])), ('llama_model.base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight', torch.Size([18944, 8])), ('llama_model.base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight', torch.Size([8, 18944])), ('llama_model.base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight', torch.Size([3584, 8])), ('down_sample_2.conv.1.weight', torch.Size([1024, 1024, 3])), ('down_sample_2.conv.1.bias', torch.Size([1024])), ('down_sample_2.conv.4.weight', torch.Size([1024, 1024, 3])), ('down_sample_2.conv.4.bias', torch.Size([1024])), ('down_sample_2.conv.7.weight', torch.Size([1024, 1024, 3])), ('down_sample_2.conv.7.bias', torch.Size([1024])), ('speech_llama_proj.weight', torch.Size([3584, 1024])), ('speech_llama_proj.bias', torch.Size([3584])), 
('speech_token_emded.weight', torch.Size([4099, 3584])), ('speaker_head.weight', torch.Size([4097, 3584])), ('speaker_head.bias', torch.Size([4097]))])]\n",
      "Key: frozen_param_shapes\n",
      "    Type: <class 'NoneType'>, Value: None\n",
      "Key: shared_params\n",
      "    Key: embed_tokens.weight\n",
      "        Type: <class 'str'>, Value: llama_model.base_model.model.model.embed_tokens.weight\n",
      "    Key: lm_head.weight\n",
      "        Type: <class 'str'>, Value: llama_model.base_model.model.lm_head.weight\n",
      "Key: frozen_param_fragments\n",
      "    Type: <class 'NoneType'>, Value: None\n",
      "Key: lr_scheduler\n",
      "    Key: warmup_steps\n",
      "        Type: <class 'int'>, Value: 8000\n",
      "    Key: base_lrs\n",
      "        Type: <class 'list'>, Value: [5e-05]\n",
      "    Key: last_epoch\n",
      "        Type: <class 'int'>, Value: -1\n",
      "    Key: verbose\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: _step_count\n",
      "        Type: <class 'int'>, Value: 1\n",
      "    Key: _get_lr_called_within_step\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: _last_lr\n",
      "        Type: <class 'list'>, Value: [6.2500000000000005e-09]\n",
      "Key: data_sampler\n",
      "    Type: <class 'NoneType'>, Value: None\n",
      "Key: random_ltd\n",
      "    Type: <class 'NoneType'>, Value: None\n",
      "Key: sparse_tensor_module_names\n",
      "    Type: <class 'set'>, Value: set()\n",
      "Key: skipped_steps\n",
      "    Type: <class 'int'>, Value: 0\n",
      "Key: global_steps\n",
      "    Type: <class 'int'>, Value: 0\n",
      "Key: global_samples\n",
      "    Type: <class 'int'>, Value: 0\n",
      "Key: dp_world_size\n",
      "    Type: <class 'int'>, Value: 2\n",
      "Key: mp_world_size\n",
      "    Type: <class 'int'>, Value: 1\n",
      "Key: ds_config\n",
      "    Type: <class 'str'>, Value: conf/ds_stage2.json\n",
      "Key: ds_version\n",
      "    Type: <class 'str'>, Value: 0.16.2\n",
      "Key: model\n",
      "    Type: <class 'str'>, Value: llmasr\n",
      "Key: tokenizer\n",
      "    Type: <class 'str'>, Value: huggingface\n",
      "Key: tokenizer_conf\n",
      "    Key: llm_path\n",
      "        Type: <class 'str'>, Value: /mnt/sfs/.cache/huggingface/hub/models--Qwen--Qwen2-7B/snapshots/453ed1575b739b5b03ce3758b23befdb0967f40e\n",
      "Key: use_lora\n",
      "    Type: <class 'bool'>, Value: True\n",
      "Key: lora_alpha\n",
      "    Type: <class 'int'>, Value: 32\n",
      "Key: lora_rank\n",
      "    Type: <class 'int'>, Value: 8\n",
      "Key: lora_dropout\n",
      "    Type: <class 'float'>, Value: 0.1\n",
      "Key: speech_token_num\n",
      "    Type: <class 'int'>, Value: 4097\n",
      "Key: fire_module\n",
      "    Type: <class 'str'>, Value: link_and_encoder_and_lora\n",
      "Key: downsample_rate\n",
      "    Type: <class 'int'>, Value: 4\n",
      "Key: adapter_type\n",
      "    Type: <class 'str'>, Value: gxl\n",
      "Key: llm_path\n",
      "    Type: <class 'str'>, Value: /mnt/sfs/.cache/huggingface/hub/models--Qwen--Qwen2-7B/snapshots/453ed1575b739b5b03ce3758b23befdb0967f40e\n",
      "Key: optim\n",
      "    Type: <class 'str'>, Value: adamw\n",
      "Key: optim_conf\n",
      "    Key: betas\n",
      "        Type: <class 'list'>, Value: [0.9, 0.99]\n",
      "    Key: eps\n",
      "        Type: <class 'float'>, Value: 1e-06\n",
      "    Key: lr\n",
      "        Type: <class 'float'>, Value: 5e-05\n",
      "    Key: weight_decay\n",
      "        Type: <class 'float'>, Value: 0.01\n",
      "Key: scheduler\n",
      "    Type: <class 'str'>, Value: warmuplr\n",
      "Key: scheduler_conf\n",
      "    Key: warmup_steps\n",
      "        Type: <class 'int'>, Value: 8000\n",
      "Key: cmvn\n",
      "    Type: <class 'NoneType'>, Value: None\n",
      "Key: cmvn_conf\n",
      "    Key: cmvn_file\n",
      "        Type: <class 'NoneType'>, Value: None\n",
      "    Key: is_json_cmvn\n",
      "        Type: <class 'NoneType'>, Value: None\n",
      "Key: ctc_conf\n",
      "    Key: ctc_blank_id\n",
      "        Type: <class 'int'>, Value: 50362\n",
      "Key: dataset\n",
      "    Type: <class 'str'>, Value: asr\n",
      "Key: dataset_conf\n",
      "    Key: emotion_en2zh_dict\n",
      "        Type: <class 'str'>, Value: conf/en2zh4emotion.json\n",
      "    Key: batch_conf\n",
      "        Key: batch_size\n",
      "            Type: <class 'int'>, Value: 26\n",
      "        Key: batch_type\n",
      "            Type: <class 'str'>, Value: dynamic\n",
      "        Key: max_frames_in_batch\n",
      "            Type: <class 'int'>, Value: 2000\n",
      "        Key: max_seq_in_batch\n",
      "            Type: <class 'int'>, Value: 1500\n",
      "    Key: feats_type\n",
      "        Type: <class 'str'>, Value: log_mel_spectrogram\n",
      "    Key: filter_conf\n",
      "        Key: max_length\n",
      "            Type: <class 'int'>, Value: 1000\n",
      "        Key: min_length\n",
      "            Type: <class 'int'>, Value: 0\n",
      "        Key: token_max_length\n",
      "            Type: <class 'int'>, Value: 200\n",
      "        Key: token_min_length\n",
      "            Type: <class 'int'>, Value: 1\n",
      "        Key: filter_no_extra_info\n",
      "            Type: <class 'bool'>, Value: True\n",
      "        Key: max_seq_len\n",
      "            Type: <class 'int'>, Value: 360\n",
      "    Key: language_conf\n",
      "        Key: limited_langs\n",
      "            Type: <class 'list'>, Value: ['zh']\n",
      "    Key: log_mel_spectrogram_conf\n",
      "        Key: hop_length\n",
      "            Type: <class 'int'>, Value: 160\n",
      "        Key: n_fft\n",
      "            Type: <class 'int'>, Value: 400\n",
      "        Key: num_mel_bins\n",
      "            Type: <class 'int'>, Value: 80\n",
      "        Key: padding\n",
      "            Type: <class 'int'>, Value: 0\n",
      "    Key: resample_conf\n",
      "        Key: resample_rate\n",
      "            Type: <class 'int'>, Value: 16000\n",
      "    Key: shuffle\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: shuffle_conf\n",
      "        Key: shuffle_size\n",
      "            Type: <class 'int'>, Value: 1500\n",
      "    Key: sort\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: sort_conf\n",
      "        Key: sort_size\n",
      "            Type: <class 'int'>, Value: 500\n",
      "    Key: spec_aug\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: spec_aug_conf\n",
      "        Key: max_f\n",
      "            Type: <class 'int'>, Value: 10\n",
      "        Key: max_t\n",
      "            Type: <class 'int'>, Value: 50\n",
      "        Key: num_f_mask\n",
      "            Type: <class 'int'>, Value: 2\n",
      "        Key: num_t_mask\n",
      "            Type: <class 'int'>, Value: 2\n",
      "    Key: spec_sub\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: spec_sub_conf\n",
      "        Key: max_t\n",
      "            Type: <class 'int'>, Value: 30\n",
      "        Key: num_t_sub\n",
      "            Type: <class 'int'>, Value: 3\n",
      "    Key: spec_trim\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: speed_perturb\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: eod_id\n",
      "        Type: <class 'int'>, Value: 151643\n",
      "    Key: split_num\n",
      "        Type: <class 'int'>, Value: 1\n",
      "    Key: multi_num\n",
      "        Type: <class 'int'>, Value: 1\n",
      "    Key: cycle\n",
      "        Type: <class 'int'>, Value: 100\n",
      "Key: decoder\n",
      "    Type: <class 'str'>, Value: transformer\n",
      "Key: decoder_conf\n",
      "    Key: activation_type\n",
      "        Type: <class 'str'>, Value: gelu\n",
      "    Key: attention_heads\n",
      "        Type: <class 'int'>, Value: 16\n",
      "    Key: dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.1\n",
      "    Key: gradient_checkpointing\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: input_layer\n",
      "        Type: <class 'str'>, Value: embed_learnable_pe\n",
      "    Key: key_bias\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: linear_units\n",
      "        Type: <class 'int'>, Value: 4096\n",
      "    Key: normalize_before\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: num_blocks\n",
      "        Type: <class 'int'>, Value: 24\n",
      "    Key: positional_dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.0\n",
      "    Key: self_attention_dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.0\n",
      "    Key: src_attention\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: src_attention_dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.0\n",
      "    Key: tie_word_embedding\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: use_output_layer\n",
      "        Type: <class 'bool'>, Value: True\n",
      "Key: encoder\n",
      "    Type: <class 'str'>, Value: transformer\n",
      "Key: encoder_conf\n",
      "    Key: selfattention_layer_type\n",
      "        Type: <class 'str'>, Value: npu_fusion_attention\n",
      "    Key: activation_type\n",
      "        Type: <class 'str'>, Value: gelu\n",
      "    Key: attention_dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.0\n",
      "    Key: attention_heads\n",
      "        Type: <class 'int'>, Value: 16\n",
      "    Key: dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.1\n",
      "    Key: gradient_checkpointing\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: input_layer\n",
      "        Type: <class 'str'>, Value: conv1d2\n",
      "    Key: key_bias\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: linear_units\n",
      "        Type: <class 'int'>, Value: 4096\n",
      "    Key: normalize_before\n",
      "        Type: <class 'bool'>, Value: True\n",
      "    Key: num_blocks\n",
      "        Type: <class 'int'>, Value: 24\n",
      "    Key: output_size\n",
      "        Type: <class 'int'>, Value: 1024\n",
      "    Key: pos_enc_layer_type\n",
      "        Type: <class 'str'>, Value: abs_pos_whisper\n",
      "    Key: positional_dropout_rate\n",
      "        Type: <class 'float'>, Value: 0.1\n",
      "    Key: static_chunk_size\n",
      "        Type: <class 'int'>, Value: -1\n",
      "    Key: use_dynamic_chunk\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: use_dynamic_left_chunk\n",
      "        Type: <class 'bool'>, Value: False\n",
      "Key: grad_clip\n",
      "    Type: <class 'int'>, Value: 5\n",
      "Key: accum_grad\n",
      "    Type: <class 'int'>, Value: 1\n",
      "Key: input_dim\n",
      "    Type: <class 'int'>, Value: 80\n",
      "Key: log_interval\n",
      "    Type: <class 'int'>, Value: 1\n",
      "Key: save_interval\n",
      "    Type: <class 'int'>, Value: 10000\n",
      "Key: max_epoch\n",
      "    Type: <class 'int'>, Value: 100\n",
      "Key: model_conf\n",
      "    Key: ctc_weight\n",
      "        Type: <class 'int'>, Value: 0\n",
      "    Key: length_normalized_loss\n",
      "        Type: <class 'bool'>, Value: False\n",
      "    Key: lsm_weight\n",
      "        Type: <class 'float'>, Value: 0.1\n",
      "Key: init_step\n",
      "    Type: <class 'bool'>, Value: True\n",
      "Key: vocab_size\n",
      "    Type: <class 'int'>, Value: 151646\n",
      "Key: dtype\n",
      "    Type: <class 'str'>, Value: bf16\n",
      "Key: output_dim\n",
      "    Type: <class 'int'>, Value: 151646\n",
      "Key: train_engine\n",
      "    Type: <class 'str'>, Value: deepspeed\n",
      "Key: use_amp\n",
      "    Type: <class 'bool'>, Value: True\n",
      "Key: model_dir\n",
      "    Type: <class 'str'>, Value: /mnt/obs/ckpt/um/qwen2_multi_task_4_fa/epoch_0_with_speechp\n",
      "Key: save_states\n",
      "    Type: <class 'str'>, Value: model+optimizer\n",
      "Key: epoch\n",
      "    Type: <class 'int'>, Value: 0\n",
      "Key: tag\n",
      "    Type: <class 'str'>, Value: step_0\n",
      "Key: loss_dict\n",
      "    Key: loss\n",
      "        Type: <class 'int'>, Value: 999\n",
      "    Key: acc\n",
      "        Type: <class 'int'>, Value: 999\n",
      "Key: save_time\n",
      "    Type: <class 'str'>, Value: 15/01/2025 07:46:01\n",
      "Key: lrs\n",
      "    Type: <class 'list'>, Value: [6.2500000000000005e-09]\n"
     ]
    }
   ],
   "source": [
    "# Dump the full nested structure of the DeepSpeed checkpoint loaded above\n",
    "# (keys, types, tensor shapes) -- the long output below comes from this call.\n",
    "print_ckpt_structure(new_ckpt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Merge\n",
    "### encoder\n",
    "> 这个暂时不能改，whisper encoder的 k没有bias，但是q和v有bias，因此bias不能合并\n",
    "- `encoder.encoders.1.self_attn.linear_qkv.weight`: torch.Size([3072, 1024])\n",
    "    - `encoder.encoders.1.self_attn.linear_q.weight`: torch.Size([1024, 1024])\n",
    "    - `encoder.encoders.1.self_attn.linear_k.weight`: torch.Size([1024, 1024])\n",
    "    - `encoder.encoders.1.self_attn.linear_v.weight`: torch.Size([1024, 1024])\n",
    "- `encoder.encoders.1.self_attn.linear_qkv.bias`: torch.Size([3072])\n",
    "    - `encoder.encoders.1.self_attn.linear_q.bias`: torch.Size([1024])\n",
    "    - `encoder.encoders.1.self_attn.linear_k.bias`: torch.Size([1024])\n",
    "    - `encoder.encoders.1.self_attn.linear_v.bias`: torch.Size([1024])\n",
    "### llama\n",
    "- `llama_model.base_model.model.model.layers.0.self_attn.w_pack.weight`: torch.Size([4608, 3584])\n",
    "    - `llama_model.base_model.model.model.layers.0.self_attn.q_proj.weight`: torch.Size([3584, 3584])\n",
    "    - `llama_model.base_model.model.model.layers.0.self_attn.k_proj.weight`: torch.Size([512, 3584])\n",
    "    - `llama_model.base_model.model.model.layers.0.self_attn.v_proj.weight`: torch.Size([512, 3584])\n",
    "- `llama_model.base_model.model.model.layers.0.self_attn.w_pack.bias`: torch.Size([4608])\n",
    "    - `llama_model.base_model.model.model.layers.0.self_attn.q_proj.bias`: torch.Size([3584])\n",
    "    - `llama_model.base_model.model.model.layers.0.self_attn.k_proj.bias`: torch.Size([512])\n",
    "    - `llama_model.base_model.model.model.layers.0.self_attn.v_proj.bias`: torch.Size([512])\n",
    "\n",
    "合并 qkv 之后，还需要把 `param_shapes` 中对应参数的 shape 同步调整（见下方 `update_shape`）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _cat_qkv(state, template, layer_idx):\n",
    "    \"\"\"Concatenate one layer's q/k/v tensors (dim 0) into a single packed tensor.\n",
    "\n",
    "    `state` maps parameter names to tensors; `template` is a key template with\n",
    "    {layer_idx} and {layer_name} placeholders.\n",
    "    \"\"\"\n",
    "    return torch.cat(\n",
    "        [state[template.format(layer_idx=layer_idx, layer_name=name)]\n",
    "         for name in ('q_proj', 'k_proj', 'v_proj')],\n",
    "        dim=0,\n",
    "    ).contiguous()\n",
    "\n",
    "# Key templates for the (LoRA-wrapped) llama/Qwen2 attention projections.\n",
    "_LLAMA_W = 'llama_model.base_model.model.model.layers.{layer_idx}.self_attn.{layer_name}.weight'\n",
    "_LLAMA_B = 'llama_model.base_model.model.model.layers.{layer_idx}.self_attn.{layer_name}.bias'\n",
    "\n",
    "def merge(target, source, num_layers=28):\n",
    "    \"\"\"Fill target's pre-existing w_pack weight/bias with q/k/v packed from source.\n",
    "\n",
    "    NOTE: the whisper encoder qkv is intentionally NOT merged here: linear_k has\n",
    "    no bias while linear_q / linear_v do, so the encoder biases cannot be packed\n",
    "    (see the markdown cell above).\n",
    "\n",
    "    Args:\n",
    "        target: state dict that already contains the w_pack keys.\n",
    "        source: state dict holding the separate q/k/v projections.\n",
    "        num_layers: number of decoder layers to process (28 for Qwen2-7B).\n",
    "    \"\"\"\n",
    "    for i in range(num_layers):\n",
    "        layer_idx = str(i)\n",
    "        w_key = _LLAMA_W.format(layer_idx=layer_idx, layer_name='w_pack')\n",
    "        b_key = _LLAMA_B.format(layer_idx=layer_idx, layer_name='w_pack')\n",
    "        assert w_key in target, f'missing {w_key} in target'\n",
    "        assert b_key in target, f'missing {b_key} in target'\n",
    "        target[w_key] = _cat_qkv(source, _LLAMA_W, layer_idx)\n",
    "        target[b_key] = _cat_qkv(source, _LLAMA_B, layer_idx)\n",
    "\n",
    "def merge_v2(source, num_layers=28):\n",
    "    \"\"\"In-place variant: add the packed w_pack keys into source itself.\n",
    "\n",
    "    The separate q/k/v keys are kept; delete them only once the consumer of the\n",
    "    merged checkpoint no longer expects them.\n",
    "    \"\"\"\n",
    "    for i in range(num_layers):\n",
    "        layer_idx = str(i)\n",
    "        source[_LLAMA_W.format(layer_idx=layer_idx, layer_name='w_pack')] = _cat_qkv(source, _LLAMA_W, layer_idx)\n",
    "        source[_LLAMA_B.format(layer_idx=layer_idx, layer_name='w_pack')] = _cat_qkv(source, _LLAMA_B, layer_idx)\n",
    "\n",
    "def update_shape(ckpt):\n",
    "    \"\"\"Sync ckpt['param_shapes'][0] with the (merged) tensor shapes in ckpt['module'].\"\"\"\n",
    "    for k, v in ckpt['module'].items():\n",
    "        ckpt['param_shapes'][0][k] = v.size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n",
      "torch.Size([4608, 3584])\n",
      "torch.Size([4608])\n"
     ]
    }
   ],
   "source": [
    "# Pack q/k/v into w_pack keys in-place; ori_ckpt is presumably a flat\n",
    "# name -> tensor state dict (loaded from ori_ckpt_path) -- TODO confirm.\n",
    "merge_v2(ori_ckpt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): disabled -- update_shape would sync param_shapes after the\n",
    "# merge; presumably not needed since this run saves ori_ckpt, not new_ckpt.\n",
    "# Confirm before deleting.\n",
    "# update_shape(new_ckpt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the qkv-merged checkpoint alongside the original DeepSpeed shard.\n",
    "ckpt_output_path = \"/mnt/obs/ckpt/um/qwen2_multi_task_4_fa/epoch_0_with_speechp/step_0/mp_rank_00_model_states_merge_llama_qkv.pt\"\n",
    "torch.save(ori_ckpt, ckpt_output_path)"
   ]
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "gxl_base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
