{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9bd6dcef",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/lib/python3/dist-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.4\n",
      "  warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion}\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-28 22:40:39,426] [INFO] [real_accelerator.py:239:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/bin/ld: cannot find -laio: No such file or directory\n",
      "collect2: error: ld returned 1 exit status\n",
      "/usr/bin/ld: cannot find -laio: No such file or directory\n",
      "collect2: error: ld returned 1 exit status\n"
     ]
    }
   ],
   "source": [
    "from transformers import AddedToken, AutoModel, AutoTokenizer, AutoProcessor, Qwen2ForCausalLM, AutoConfig\n",
    "from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer\n",
    "from transformers import WhisperPreTrainedModel, WhisperConfig\n",
    "from transformers.modeling_outputs import BaseModelOutput\n",
    "from datasets import Audio\n",
    "import math\n",
    "import torch\n",
    "from torch import nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1eb7637d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class WhisperEncoder(WhisperPreTrainedModel):\n",
    "    \"\"\"Whisper audio encoder: conv frontend + positional embeddings + transformer layers.\n",
    "\n",
    "    Adapted from the `transformers` WhisperEncoder, with two changes:\n",
    "    - `range_max_source_positions` is a non-persistent buffer so it follows the\n",
    "      module across `.to()` / `.cuda()` calls (a plain tensor attribute stays on\n",
    "      CPU and breaks the positional-embedding lookup on GPU).\n",
    "    - the optional `attention_mask` is actually forwarded to the encoder layers\n",
    "      (the stock WhisperEncoder ignores it); `Model.forward` below relies on this\n",
    "      to mask padded audio positions.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config: WhisperConfig):\n",
    "        super().__init__(config)\n",
    "        self.dropout = config.dropout\n",
    "        self.layerdrop = config.encoder_layerdrop\n",
    "\n",
    "        embed_dim = config.d_model\n",
    "        self.num_mel_bins = config.num_mel_bins\n",
    "        self.padding_idx = config.pad_token_id\n",
    "        self.max_source_positions = config.max_source_positions\n",
    "        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n",
    "\n",
    "        self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)\n",
    "        self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)\n",
    "\n",
    "        # Non-persistent: moves with the module on .to()/.cuda() but is excluded\n",
    "        # from the state dict, so pretrained checkpoints still load cleanly.\n",
    "        self.register_buffer(\n",
    "            'range_max_source_positions',\n",
    "            torch.arange(self.max_source_positions),\n",
    "            persistent=False,\n",
    "        )\n",
    "\n",
    "        self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim)\n",
    "        self.embed_positions.requires_grad_(False)\n",
    "\n",
    "        self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)])\n",
    "        self.layer_norm = nn.LayerNorm(config.d_model)\n",
    "\n",
    "        self.gradient_checkpointing = False\n",
    "        self.post_init()\n",
    "\n",
    "    def _freeze_parameters(self):\n",
    "        # Freeze the whole audio tower, e.g. while training only projection/LLM.\n",
    "        for param in self.parameters():\n",
    "            param.requires_grad = False\n",
    "        self._requires_grad = False\n",
    "\n",
    "    def get_input_embeddings(self) -> nn.Module:\n",
    "        return self.conv1\n",
    "\n",
    "    def set_input_embeddings(self, value: nn.Module):\n",
    "        self.conv1 = value\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        input_features,\n",
    "        attention_mask=None,\n",
    "        head_mask=None,\n",
    "        output_attentions=None,\n",
    "        output_hidden_states=None,\n",
    "        return_dict=None,\n",
    "    ):\n",
    "        \"\"\"Encode log-mel `input_features` of shape (batch, num_mel_bins, time).\n",
    "\n",
    "        `attention_mask`, if given, is an additive mask broadcastable to\n",
    "        (batch, 1, seq, seq) and is passed unchanged to every encoder layer.\n",
    "        \"\"\"\n",
    "        expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0]\n",
    "        if input_features.shape[-1] != expected_seq_length:\n",
    "            raise ValueError(\n",
    "                f\"Whisper expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}.\"\n",
    "            )\n",
    "\n",
    "        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n",
    "        output_hidden_states = (\n",
    "            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n",
    "        )\n",
    "        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n",
    "        inputs_embeds = nn.functional.gelu(self.conv1(input_features))\n",
    "        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))\n",
    "\n",
    "        inputs_embeds = inputs_embeds.permute(0, 2, 1)\n",
    "        embed_pos = self.embed_positions(self.range_max_source_positions)\n",
    "\n",
    "        hidden_states = inputs_embeds + embed_pos\n",
    "        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n",
    "\n",
    "        encoder_states = () if output_hidden_states else None\n",
    "        all_attentions = () if output_attentions else None\n",
    "\n",
    "        # check if head_mask has a correct number of layers specified if desired\n",
    "        if head_mask is not None and head_mask.size()[0] != len(self.layers):\n",
    "            # raise instead of assert: asserts are stripped under `python -O`\n",
    "            raise ValueError(\n",
    "                f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n",
    "            )\n",
    "\n",
    "        for idx, encoder_layer in enumerate(self.layers):\n",
    "            if output_hidden_states:\n",
    "                encoder_states = encoder_states + (hidden_states,)\n",
    "            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n",
    "            to_drop = False\n",
    "            if self.training:\n",
    "                dropout_probability = torch.rand([])\n",
    "                if dropout_probability < self.layerdrop:  # skip the layer\n",
    "                    to_drop = True\n",
    "\n",
    "            if to_drop:\n",
    "                layer_outputs = (None, None)\n",
    "            else:\n",
    "                if self.gradient_checkpointing and self.training:\n",
    "                    layer_outputs = self._gradient_checkpointing_func(\n",
    "                        encoder_layer.__call__,\n",
    "                        hidden_states,\n",
    "                        attention_mask,\n",
    "                        (head_mask[idx] if head_mask is not None else None),\n",
    "                        output_attentions,\n",
    "                    )\n",
    "                else:\n",
    "                    layer_outputs = encoder_layer(\n",
    "                        hidden_states,\n",
    "                        attention_mask,\n",
    "                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n",
    "                        output_attentions=output_attentions,\n",
    "                    )\n",
    "\n",
    "                hidden_states = layer_outputs[0]\n",
    "\n",
    "            if output_attentions:\n",
    "                all_attentions = all_attentions + (layer_outputs[1],)\n",
    "\n",
    "        hidden_states = self.layer_norm(hidden_states)\n",
    "        if output_hidden_states:\n",
    "            encoder_states = encoder_states + (hidden_states,)\n",
    "\n",
    "        if not return_dict:\n",
    "            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n",
    "        return BaseModelOutput(\n",
    "            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1c33de39",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model(Qwen2ForCausalLM):\n",
    "    \"\"\"Qwen2 LLM with a Whisper audio encoder and a linear projection head.\n",
    "\n",
    "    Audio is encoded, projected to the LLM hidden size, and written into the\n",
    "    token embeddings at positions holding `config.audio_token_index`.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        super().__init__(config)\n",
    "        self.encoder = WhisperEncoder(config.audio_encoder_config)\n",
    "        self.projection = nn.Linear(self.encoder.config.d_model, self.config.hidden_size, bias=True)\n",
    "\n",
    "    def forward(\n",
    "        self, \n",
    "        input_ids, \n",
    "        attention_mask, \n",
    "        input_features = None, \n",
    "        feature_attention_mask = None, \n",
    "        labels = None, \n",
    "        **kwargs,\n",
    "    ):\n",
    "        inputs_embeds = self.get_input_embeddings()(input_ids)\n",
    "        if input_features is not None:\n",
    "            batch_size, _, max_mel_seq_len = input_features.shape\n",
    "            # sequence length after the encoder's stride-2 conv frontend\n",
    "            max_seq_len = (max_mel_seq_len - 2) // 2 + 1\n",
    "            audio_feat_lengths = self.encoder._get_feat_extract_output_lengths(feature_attention_mask.sum(-1))\n",
    "            seq_range = (\n",
    "                torch.arange(0, max_seq_len, dtype=audio_feat_lengths.dtype, device=audio_feat_lengths.device)\n",
    "                .unsqueeze(0)\n",
    "                .expand(batch_size, max_seq_len)\n",
    "            )\n",
    "            lengths_expand = audio_feat_lengths.unsqueeze(1).expand(batch_size, max_seq_len)\n",
    "            padding_mask = seq_range >= lengths_expand\n",
    "\n",
    "            # additive 4D mask: -inf on padded key positions, 0 elsewhere\n",
    "            audio_attention_mask_ = padding_mask.view(batch_size, 1, 1, max_seq_len).expand(\n",
    "                batch_size, 1, max_seq_len, max_seq_len\n",
    "            )\n",
    "            audio_attention_mask = audio_attention_mask_.to(\n",
    "                dtype=self.encoder.conv1.weight.dtype, device=self.encoder.conv1.weight.device\n",
    "            )\n",
    "            audio_attention_mask[audio_attention_mask_] = float(\"-inf\")\n",
    "            audio_outputs = self.encoder(input_features, attention_mask=audio_attention_mask)\n",
    "            selected_audio_feature = audio_outputs.last_hidden_state\n",
    "            audio_features = self.projection(selected_audio_feature)\n",
    "            num_audio_tokens = audio_feat_lengths\n",
    "            num_audios, max_audio_tokens, embed_dim = audio_features.shape\n",
    "            audio_features_mask = torch.arange(max_audio_tokens).expand(num_audios, max_audio_tokens).to(\n",
    "                num_audio_tokens.device\n",
    "            ) < num_audio_tokens.unsqueeze(1)\n",
    "            masked_audio_features = audio_features[audio_features_mask].view(-1, embed_dim)\n",
    "            # bug fix: use self.config, not the notebook-global `model` —\n",
    "            # the class must not depend on a module-level variable name\n",
    "            inputs_embeds[input_ids == self.config.audio_token_index] = masked_audio_features.contiguous()\n",
    "\n",
    "        super_out = self.model.forward(\n",
    "            inputs_embeds = inputs_embeds, \n",
    "            attention_mask = attention_mask,\n",
    "            output_hidden_states = True,\n",
    "        )\n",
    "        return super_out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b0a9a2d3",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Text tokenizer from the base LLM; audio feature extractor/processor from Whisper.\n",
    "tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen2.5-7B-Instruct')\n",
    "processor = AutoProcessor.from_pretrained('openai/whisper-large-v3')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "bb06885b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chat template with audio support: each audio content entry renders as\n",
    "# 'Audio N: <|audio_bos|><|file_sep|><|audio_eos|>' — <|file_sep|> is reused\n",
    "# as the single audio placeholder token (see audio_token below).\n",
    "chat_template = \"{% set audio_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n{% endif %}<|im_start|>{{ message['role'] }}\\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\\n{% else %}{% for content in message['content'] %}{% if 'audio' in content or 'audio_url' in content or message['type'] == 'audio' %}{% set audio_count.value = audio_count.value + 1 %}Audio {{ audio_count.value }}: <|audio_bos|><|file_sep|><|audio_eos|>\\n{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\\n{% endif %}\"\n",
    "tokenizer.chat_template = chat_template"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "f4a25a4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Base LLM config plus the Whisper encoder config (attached to it below).\n",
    "config = AutoConfig.from_pretrained('Qwen/Qwen2.5-7B-Instruct')\n",
    "audio_encoder_config = AutoConfig.from_pretrained('huseinzol05/whisper-large-v3-encoder')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ab46f97c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Nest the audio encoder config inside the LLM config so Model.__init__ can read it.\n",
    "config.audio_encoder_config = audio_encoder_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "80439fa0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sliding Window Attention is enabled but not implemented for `sdpa`; unexpected results may be encountered.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5e38b1c3a28f4ef5a45997bab929fe41",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/4 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of Model were not initialized from the model checkpoint at Qwen/Qwen2.5-7B-Instruct and are newly initialized: ['encoder.conv1.bias', 'encoder.conv1.weight', 'encoder.conv2.bias', 'encoder.conv2.weight', 'encoder.embed_positions.weight', 'encoder.layer_norm.bias', 'encoder.layer_norm.weight', 'encoder.layers.0.fc1.bias', 'encoder.layers.0.fc1.weight', 'encoder.layers.0.fc2.bias', 'encoder.layers.0.fc2.weight', 'encoder.layers.0.final_layer_norm.bias', 'encoder.layers.0.final_layer_norm.weight', 'encoder.layers.0.self_attn.k_proj.weight', 'encoder.layers.0.self_attn.out_proj.bias', 'encoder.layers.0.self_attn.out_proj.weight', 'encoder.layers.0.self_attn.q_proj.bias', 'encoder.layers.0.self_attn.q_proj.weight', 'encoder.layers.0.self_attn.v_proj.bias', 'encoder.layers.0.self_attn.v_proj.weight', 'encoder.layers.0.self_attn_layer_norm.bias', 'encoder.layers.0.self_attn_layer_norm.weight', 'encoder.layers.1.fc1.bias', 'encoder.layers.1.fc1.weight', 'encoder.layers.1.fc2.bias', 'encoder.layers.1.fc2.weight', 'encoder.layers.1.final_layer_norm.bias', 'encoder.layers.1.final_layer_norm.weight', 'encoder.layers.1.self_attn.k_proj.weight', 'encoder.layers.1.self_attn.out_proj.bias', 'encoder.layers.1.self_attn.out_proj.weight', 'encoder.layers.1.self_attn.q_proj.bias', 'encoder.layers.1.self_attn.q_proj.weight', 'encoder.layers.1.self_attn.v_proj.bias', 'encoder.layers.1.self_attn.v_proj.weight', 'encoder.layers.1.self_attn_layer_norm.bias', 'encoder.layers.1.self_attn_layer_norm.weight', 'encoder.layers.10.fc1.bias', 'encoder.layers.10.fc1.weight', 'encoder.layers.10.fc2.bias', 'encoder.layers.10.fc2.weight', 'encoder.layers.10.final_layer_norm.bias', 'encoder.layers.10.final_layer_norm.weight', 'encoder.layers.10.self_attn.k_proj.weight', 'encoder.layers.10.self_attn.out_proj.bias', 'encoder.layers.10.self_attn.out_proj.weight', 'encoder.layers.10.self_attn.q_proj.bias', 'encoder.layers.10.self_attn.q_proj.weight', 
'encoder.layers.10.self_attn.v_proj.bias', 'encoder.layers.10.self_attn.v_proj.weight', 'encoder.layers.10.self_attn_layer_norm.bias', 'encoder.layers.10.self_attn_layer_norm.weight', 'encoder.layers.11.fc1.bias', 'encoder.layers.11.fc1.weight', 'encoder.layers.11.fc2.bias', 'encoder.layers.11.fc2.weight', 'encoder.layers.11.final_layer_norm.bias', 'encoder.layers.11.final_layer_norm.weight', 'encoder.layers.11.self_attn.k_proj.weight', 'encoder.layers.11.self_attn.out_proj.bias', 'encoder.layers.11.self_attn.out_proj.weight', 'encoder.layers.11.self_attn.q_proj.bias', 'encoder.layers.11.self_attn.q_proj.weight', 'encoder.layers.11.self_attn.v_proj.bias', 'encoder.layers.11.self_attn.v_proj.weight', 'encoder.layers.11.self_attn_layer_norm.bias', 'encoder.layers.11.self_attn_layer_norm.weight', 'encoder.layers.12.fc1.bias', 'encoder.layers.12.fc1.weight', 'encoder.layers.12.fc2.bias', 'encoder.layers.12.fc2.weight', 'encoder.layers.12.final_layer_norm.bias', 'encoder.layers.12.final_layer_norm.weight', 'encoder.layers.12.self_attn.k_proj.weight', 'encoder.layers.12.self_attn.out_proj.bias', 'encoder.layers.12.self_attn.out_proj.weight', 'encoder.layers.12.self_attn.q_proj.bias', 'encoder.layers.12.self_attn.q_proj.weight', 'encoder.layers.12.self_attn.v_proj.bias', 'encoder.layers.12.self_attn.v_proj.weight', 'encoder.layers.12.self_attn_layer_norm.bias', 'encoder.layers.12.self_attn_layer_norm.weight', 'encoder.layers.13.fc1.bias', 'encoder.layers.13.fc1.weight', 'encoder.layers.13.fc2.bias', 'encoder.layers.13.fc2.weight', 'encoder.layers.13.final_layer_norm.bias', 'encoder.layers.13.final_layer_norm.weight', 'encoder.layers.13.self_attn.k_proj.weight', 'encoder.layers.13.self_attn.out_proj.bias', 'encoder.layers.13.self_attn.out_proj.weight', 'encoder.layers.13.self_attn.q_proj.bias', 'encoder.layers.13.self_attn.q_proj.weight', 'encoder.layers.13.self_attn.v_proj.bias', 'encoder.layers.13.self_attn.v_proj.weight', 'encoder.layers.13.self_attn_layer_norm.bias', 
'encoder.layers.13.self_attn_layer_norm.weight', 'encoder.layers.14.fc1.bias', 'encoder.layers.14.fc1.weight', 'encoder.layers.14.fc2.bias', 'encoder.layers.14.fc2.weight', 'encoder.layers.14.final_layer_norm.bias', 'encoder.layers.14.final_layer_norm.weight', 'encoder.layers.14.self_attn.k_proj.weight', 'encoder.layers.14.self_attn.out_proj.bias', 'encoder.layers.14.self_attn.out_proj.weight', 'encoder.layers.14.self_attn.q_proj.bias', 'encoder.layers.14.self_attn.q_proj.weight', 'encoder.layers.14.self_attn.v_proj.bias', 'encoder.layers.14.self_attn.v_proj.weight', 'encoder.layers.14.self_attn_layer_norm.bias', 'encoder.layers.14.self_attn_layer_norm.weight', 'encoder.layers.15.fc1.bias', 'encoder.layers.15.fc1.weight', 'encoder.layers.15.fc2.bias', 'encoder.layers.15.fc2.weight', 'encoder.layers.15.final_layer_norm.bias', 'encoder.layers.15.final_layer_norm.weight', 'encoder.layers.15.self_attn.k_proj.weight', 'encoder.layers.15.self_attn.out_proj.bias', 'encoder.layers.15.self_attn.out_proj.weight', 'encoder.layers.15.self_attn.q_proj.bias', 'encoder.layers.15.self_attn.q_proj.weight', 'encoder.layers.15.self_attn.v_proj.bias', 'encoder.layers.15.self_attn.v_proj.weight', 'encoder.layers.15.self_attn_layer_norm.bias', 'encoder.layers.15.self_attn_layer_norm.weight', 'encoder.layers.16.fc1.bias', 'encoder.layers.16.fc1.weight', 'encoder.layers.16.fc2.bias', 'encoder.layers.16.fc2.weight', 'encoder.layers.16.final_layer_norm.bias', 'encoder.layers.16.final_layer_norm.weight', 'encoder.layers.16.self_attn.k_proj.weight', 'encoder.layers.16.self_attn.out_proj.bias', 'encoder.layers.16.self_attn.out_proj.weight', 'encoder.layers.16.self_attn.q_proj.bias', 'encoder.layers.16.self_attn.q_proj.weight', 'encoder.layers.16.self_attn.v_proj.bias', 'encoder.layers.16.self_attn.v_proj.weight', 'encoder.layers.16.self_attn_layer_norm.bias', 'encoder.layers.16.self_attn_layer_norm.weight', 'encoder.layers.17.fc1.bias', 'encoder.layers.17.fc1.weight', 
'encoder.layers.17.fc2.bias', 'encoder.layers.17.fc2.weight', 'encoder.layers.17.final_layer_norm.bias', 'encoder.layers.17.final_layer_norm.weight', 'encoder.layers.17.self_attn.k_proj.weight', 'encoder.layers.17.self_attn.out_proj.bias', 'encoder.layers.17.self_attn.out_proj.weight', 'encoder.layers.17.self_attn.q_proj.bias', 'encoder.layers.17.self_attn.q_proj.weight', 'encoder.layers.17.self_attn.v_proj.bias', 'encoder.layers.17.self_attn.v_proj.weight', 'encoder.layers.17.self_attn_layer_norm.bias', 'encoder.layers.17.self_attn_layer_norm.weight', 'encoder.layers.18.fc1.bias', 'encoder.layers.18.fc1.weight', 'encoder.layers.18.fc2.bias', 'encoder.layers.18.fc2.weight', 'encoder.layers.18.final_layer_norm.bias', 'encoder.layers.18.final_layer_norm.weight', 'encoder.layers.18.self_attn.k_proj.weight', 'encoder.layers.18.self_attn.out_proj.bias', 'encoder.layers.18.self_attn.out_proj.weight', 'encoder.layers.18.self_attn.q_proj.bias', 'encoder.layers.18.self_attn.q_proj.weight', 'encoder.layers.18.self_attn.v_proj.bias', 'encoder.layers.18.self_attn.v_proj.weight', 'encoder.layers.18.self_attn_layer_norm.bias', 'encoder.layers.18.self_attn_layer_norm.weight', 'encoder.layers.19.fc1.bias', 'encoder.layers.19.fc1.weight', 'encoder.layers.19.fc2.bias', 'encoder.layers.19.fc2.weight', 'encoder.layers.19.final_layer_norm.bias', 'encoder.layers.19.final_layer_norm.weight', 'encoder.layers.19.self_attn.k_proj.weight', 'encoder.layers.19.self_attn.out_proj.bias', 'encoder.layers.19.self_attn.out_proj.weight', 'encoder.layers.19.self_attn.q_proj.bias', 'encoder.layers.19.self_attn.q_proj.weight', 'encoder.layers.19.self_attn.v_proj.bias', 'encoder.layers.19.self_attn.v_proj.weight', 'encoder.layers.19.self_attn_layer_norm.bias', 'encoder.layers.19.self_attn_layer_norm.weight', 'encoder.layers.2.fc1.bias', 'encoder.layers.2.fc1.weight', 'encoder.layers.2.fc2.bias', 'encoder.layers.2.fc2.weight', 'encoder.layers.2.final_layer_norm.bias', 
'encoder.layers.2.final_layer_norm.weight', 'encoder.layers.2.self_attn.k_proj.weight', 'encoder.layers.2.self_attn.out_proj.bias', 'encoder.layers.2.self_attn.out_proj.weight', 'encoder.layers.2.self_attn.q_proj.bias', 'encoder.layers.2.self_attn.q_proj.weight', 'encoder.layers.2.self_attn.v_proj.bias', 'encoder.layers.2.self_attn.v_proj.weight', 'encoder.layers.2.self_attn_layer_norm.bias', 'encoder.layers.2.self_attn_layer_norm.weight', 'encoder.layers.20.fc1.bias', 'encoder.layers.20.fc1.weight', 'encoder.layers.20.fc2.bias', 'encoder.layers.20.fc2.weight', 'encoder.layers.20.final_layer_norm.bias', 'encoder.layers.20.final_layer_norm.weight', 'encoder.layers.20.self_attn.k_proj.weight', 'encoder.layers.20.self_attn.out_proj.bias', 'encoder.layers.20.self_attn.out_proj.weight', 'encoder.layers.20.self_attn.q_proj.bias', 'encoder.layers.20.self_attn.q_proj.weight', 'encoder.layers.20.self_attn.v_proj.bias', 'encoder.layers.20.self_attn.v_proj.weight', 'encoder.layers.20.self_attn_layer_norm.bias', 'encoder.layers.20.self_attn_layer_norm.weight', 'encoder.layers.21.fc1.bias', 'encoder.layers.21.fc1.weight', 'encoder.layers.21.fc2.bias', 'encoder.layers.21.fc2.weight', 'encoder.layers.21.final_layer_norm.bias', 'encoder.layers.21.final_layer_norm.weight', 'encoder.layers.21.self_attn.k_proj.weight', 'encoder.layers.21.self_attn.out_proj.bias', 'encoder.layers.21.self_attn.out_proj.weight', 'encoder.layers.21.self_attn.q_proj.bias', 'encoder.layers.21.self_attn.q_proj.weight', 'encoder.layers.21.self_attn.v_proj.bias', 'encoder.layers.21.self_attn.v_proj.weight', 'encoder.layers.21.self_attn_layer_norm.bias', 'encoder.layers.21.self_attn_layer_norm.weight', 'encoder.layers.22.fc1.bias', 'encoder.layers.22.fc1.weight', 'encoder.layers.22.fc2.bias', 'encoder.layers.22.fc2.weight', 'encoder.layers.22.final_layer_norm.bias', 'encoder.layers.22.final_layer_norm.weight', 'encoder.layers.22.self_attn.k_proj.weight', 'encoder.layers.22.self_attn.out_proj.bias', 
'encoder.layers.22.self_attn.out_proj.weight', 'encoder.layers.22.self_attn.q_proj.bias', 'encoder.layers.22.self_attn.q_proj.weight', 'encoder.layers.22.self_attn.v_proj.bias', 'encoder.layers.22.self_attn.v_proj.weight', 'encoder.layers.22.self_attn_layer_norm.bias', 'encoder.layers.22.self_attn_layer_norm.weight', 'encoder.layers.23.fc1.bias', 'encoder.layers.23.fc1.weight', 'encoder.layers.23.fc2.bias', 'encoder.layers.23.fc2.weight', 'encoder.layers.23.final_layer_norm.bias', 'encoder.layers.23.final_layer_norm.weight', 'encoder.layers.23.self_attn.k_proj.weight', 'encoder.layers.23.self_attn.out_proj.bias', 'encoder.layers.23.self_attn.out_proj.weight', 'encoder.layers.23.self_attn.q_proj.bias', 'encoder.layers.23.self_attn.q_proj.weight', 'encoder.layers.23.self_attn.v_proj.bias', 'encoder.layers.23.self_attn.v_proj.weight', 'encoder.layers.23.self_attn_layer_norm.bias', 'encoder.layers.23.self_attn_layer_norm.weight', 'encoder.layers.24.fc1.bias', 'encoder.layers.24.fc1.weight', 'encoder.layers.24.fc2.bias', 'encoder.layers.24.fc2.weight', 'encoder.layers.24.final_layer_norm.bias', 'encoder.layers.24.final_layer_norm.weight', 'encoder.layers.24.self_attn.k_proj.weight', 'encoder.layers.24.self_attn.out_proj.bias', 'encoder.layers.24.self_attn.out_proj.weight', 'encoder.layers.24.self_attn.q_proj.bias', 'encoder.layers.24.self_attn.q_proj.weight', 'encoder.layers.24.self_attn.v_proj.bias', 'encoder.layers.24.self_attn.v_proj.weight', 'encoder.layers.24.self_attn_layer_norm.bias', 'encoder.layers.24.self_attn_layer_norm.weight', 'encoder.layers.25.fc1.bias', 'encoder.layers.25.fc1.weight', 'encoder.layers.25.fc2.bias', 'encoder.layers.25.fc2.weight', 'encoder.layers.25.final_layer_norm.bias', 'encoder.layers.25.final_layer_norm.weight', 'encoder.layers.25.self_attn.k_proj.weight', 'encoder.layers.25.self_attn.out_proj.bias', 'encoder.layers.25.self_attn.out_proj.weight', 'encoder.layers.25.self_attn.q_proj.bias', 'encoder.layers.25.self_attn.q_proj.weight', 
'encoder.layers.25.self_attn.v_proj.bias', 'encoder.layers.25.self_attn.v_proj.weight', 'encoder.layers.25.self_attn_layer_norm.bias', 'encoder.layers.25.self_attn_layer_norm.weight', 'encoder.layers.26.fc1.bias', 'encoder.layers.26.fc1.weight', 'encoder.layers.26.fc2.bias', 'encoder.layers.26.fc2.weight', 'encoder.layers.26.final_layer_norm.bias', 'encoder.layers.26.final_layer_norm.weight', 'encoder.layers.26.self_attn.k_proj.weight', 'encoder.layers.26.self_attn.out_proj.bias', 'encoder.layers.26.self_attn.out_proj.weight', 'encoder.layers.26.self_attn.q_proj.bias', 'encoder.layers.26.self_attn.q_proj.weight', 'encoder.layers.26.self_attn.v_proj.bias', 'encoder.layers.26.self_attn.v_proj.weight', 'encoder.layers.26.self_attn_layer_norm.bias', 'encoder.layers.26.self_attn_layer_norm.weight', 'encoder.layers.27.fc1.bias', 'encoder.layers.27.fc1.weight', 'encoder.layers.27.fc2.bias', 'encoder.layers.27.fc2.weight', 'encoder.layers.27.final_layer_norm.bias', 'encoder.layers.27.final_layer_norm.weight', 'encoder.layers.27.self_attn.k_proj.weight', 'encoder.layers.27.self_attn.out_proj.bias', 'encoder.layers.27.self_attn.out_proj.weight', 'encoder.layers.27.self_attn.q_proj.bias', 'encoder.layers.27.self_attn.q_proj.weight', 'encoder.layers.27.self_attn.v_proj.bias', 'encoder.layers.27.self_attn.v_proj.weight', 'encoder.layers.27.self_attn_layer_norm.bias', 'encoder.layers.27.self_attn_layer_norm.weight', 'encoder.layers.28.fc1.bias', 'encoder.layers.28.fc1.weight', 'encoder.layers.28.fc2.bias', 'encoder.layers.28.fc2.weight', 'encoder.layers.28.final_layer_norm.bias', 'encoder.layers.28.final_layer_norm.weight', 'encoder.layers.28.self_attn.k_proj.weight', 'encoder.layers.28.self_attn.out_proj.bias', 'encoder.layers.28.self_attn.out_proj.weight', 'encoder.layers.28.self_attn.q_proj.bias', 'encoder.layers.28.self_attn.q_proj.weight', 'encoder.layers.28.self_attn.v_proj.bias', 'encoder.layers.28.self_attn.v_proj.weight', 'encoder.layers.28.self_attn_layer_norm.bias', 
'encoder.layers.28.self_attn_layer_norm.weight', 'encoder.layers.29.fc1.bias', 'encoder.layers.29.fc1.weight', 'encoder.layers.29.fc2.bias', 'encoder.layers.29.fc2.weight', 'encoder.layers.29.final_layer_norm.bias', 'encoder.layers.29.final_layer_norm.weight', 'encoder.layers.29.self_attn.k_proj.weight', 'encoder.layers.29.self_attn.out_proj.bias', 'encoder.layers.29.self_attn.out_proj.weight', 'encoder.layers.29.self_attn.q_proj.bias', 'encoder.layers.29.self_attn.q_proj.weight', 'encoder.layers.29.self_attn.v_proj.bias', 'encoder.layers.29.self_attn.v_proj.weight', 'encoder.layers.29.self_attn_layer_norm.bias', 'encoder.layers.29.self_attn_layer_norm.weight', 'encoder.layers.3.fc1.bias', 'encoder.layers.3.fc1.weight', 'encoder.layers.3.fc2.bias', 'encoder.layers.3.fc2.weight', 'encoder.layers.3.final_layer_norm.bias', 'encoder.layers.3.final_layer_norm.weight', 'encoder.layers.3.self_attn.k_proj.weight', 'encoder.layers.3.self_attn.out_proj.bias', 'encoder.layers.3.self_attn.out_proj.weight', 'encoder.layers.3.self_attn.q_proj.bias', 'encoder.layers.3.self_attn.q_proj.weight', 'encoder.layers.3.self_attn.v_proj.bias', 'encoder.layers.3.self_attn.v_proj.weight', 'encoder.layers.3.self_attn_layer_norm.bias', 'encoder.layers.3.self_attn_layer_norm.weight', 'encoder.layers.30.fc1.bias', 'encoder.layers.30.fc1.weight', 'encoder.layers.30.fc2.bias', 'encoder.layers.30.fc2.weight', 'encoder.layers.30.final_layer_norm.bias', 'encoder.layers.30.final_layer_norm.weight', 'encoder.layers.30.self_attn.k_proj.weight', 'encoder.layers.30.self_attn.out_proj.bias', 'encoder.layers.30.self_attn.out_proj.weight', 'encoder.layers.30.self_attn.q_proj.bias', 'encoder.layers.30.self_attn.q_proj.weight', 'encoder.layers.30.self_attn.v_proj.bias', 'encoder.layers.30.self_attn.v_proj.weight', 'encoder.layers.30.self_attn_layer_norm.bias', 'encoder.layers.30.self_attn_layer_norm.weight', 'encoder.layers.31.fc1.bias', 'encoder.layers.31.fc1.weight', 'encoder.layers.31.fc2.bias', 
'encoder.layers.31.fc2.weight', 'encoder.layers.31.final_layer_norm.bias', 'encoder.layers.31.final_layer_norm.weight', 'encoder.layers.31.self_attn.k_proj.weight', 'encoder.layers.31.self_attn.out_proj.bias', 'encoder.layers.31.self_attn.out_proj.weight', 'encoder.layers.31.self_attn.q_proj.bias', 'encoder.layers.31.self_attn.q_proj.weight', 'encoder.layers.31.self_attn.v_proj.bias', 'encoder.layers.31.self_attn.v_proj.weight', 'encoder.layers.31.self_attn_layer_norm.bias', 'encoder.layers.31.self_attn_layer_norm.weight', 'encoder.layers.4.fc1.bias', 'encoder.layers.4.fc1.weight', 'encoder.layers.4.fc2.bias', 'encoder.layers.4.fc2.weight', 'encoder.layers.4.final_layer_norm.bias', 'encoder.layers.4.final_layer_norm.weight', 'encoder.layers.4.self_attn.k_proj.weight', 'encoder.layers.4.self_attn.out_proj.bias', 'encoder.layers.4.self_attn.out_proj.weight', 'encoder.layers.4.self_attn.q_proj.bias', 'encoder.layers.4.self_attn.q_proj.weight', 'encoder.layers.4.self_attn.v_proj.bias', 'encoder.layers.4.self_attn.v_proj.weight', 'encoder.layers.4.self_attn_layer_norm.bias', 'encoder.layers.4.self_attn_layer_norm.weight', 'encoder.layers.5.fc1.bias', 'encoder.layers.5.fc1.weight', 'encoder.layers.5.fc2.bias', 'encoder.layers.5.fc2.weight', 'encoder.layers.5.final_layer_norm.bias', 'encoder.layers.5.final_layer_norm.weight', 'encoder.layers.5.self_attn.k_proj.weight', 'encoder.layers.5.self_attn.out_proj.bias', 'encoder.layers.5.self_attn.out_proj.weight', 'encoder.layers.5.self_attn.q_proj.bias', 'encoder.layers.5.self_attn.q_proj.weight', 'encoder.layers.5.self_attn.v_proj.bias', 'encoder.layers.5.self_attn.v_proj.weight', 'encoder.layers.5.self_attn_layer_norm.bias', 'encoder.layers.5.self_attn_layer_norm.weight', 'encoder.layers.6.fc1.bias', 'encoder.layers.6.fc1.weight', 'encoder.layers.6.fc2.bias', 'encoder.layers.6.fc2.weight', 'encoder.layers.6.final_layer_norm.bias', 'encoder.layers.6.final_layer_norm.weight', 'encoder.layers.6.self_attn.k_proj.weight', 
'encoder.layers.6.self_attn.out_proj.bias', 'encoder.layers.6.self_attn.out_proj.weight', 'encoder.layers.6.self_attn.q_proj.bias', 'encoder.layers.6.self_attn.q_proj.weight', 'encoder.layers.6.self_attn.v_proj.bias', 'encoder.layers.6.self_attn.v_proj.weight', 'encoder.layers.6.self_attn_layer_norm.bias', 'encoder.layers.6.self_attn_layer_norm.weight', 'encoder.layers.7.fc1.bias', 'encoder.layers.7.fc1.weight', 'encoder.layers.7.fc2.bias', 'encoder.layers.7.fc2.weight', 'encoder.layers.7.final_layer_norm.bias', 'encoder.layers.7.final_layer_norm.weight', 'encoder.layers.7.self_attn.k_proj.weight', 'encoder.layers.7.self_attn.out_proj.bias', 'encoder.layers.7.self_attn.out_proj.weight', 'encoder.layers.7.self_attn.q_proj.bias', 'encoder.layers.7.self_attn.q_proj.weight', 'encoder.layers.7.self_attn.v_proj.bias', 'encoder.layers.7.self_attn.v_proj.weight', 'encoder.layers.7.self_attn_layer_norm.bias', 'encoder.layers.7.self_attn_layer_norm.weight', 'encoder.layers.8.fc1.bias', 'encoder.layers.8.fc1.weight', 'encoder.layers.8.fc2.bias', 'encoder.layers.8.fc2.weight', 'encoder.layers.8.final_layer_norm.bias', 'encoder.layers.8.final_layer_norm.weight', 'encoder.layers.8.self_attn.k_proj.weight', 'encoder.layers.8.self_attn.out_proj.bias', 'encoder.layers.8.self_attn.out_proj.weight', 'encoder.layers.8.self_attn.q_proj.bias', 'encoder.layers.8.self_attn.q_proj.weight', 'encoder.layers.8.self_attn.v_proj.bias', 'encoder.layers.8.self_attn.v_proj.weight', 'encoder.layers.8.self_attn_layer_norm.bias', 'encoder.layers.8.self_attn_layer_norm.weight', 'encoder.layers.9.fc1.bias', 'encoder.layers.9.fc1.weight', 'encoder.layers.9.fc2.bias', 'encoder.layers.9.fc2.weight', 'encoder.layers.9.final_layer_norm.bias', 'encoder.layers.9.final_layer_norm.weight', 'encoder.layers.9.self_attn.k_proj.weight', 'encoder.layers.9.self_attn.out_proj.bias', 'encoder.layers.9.self_attn.out_proj.weight', 'encoder.layers.9.self_attn.q_proj.bias', 'encoder.layers.9.self_attn.q_proj.weight', 
'encoder.layers.9.self_attn.v_proj.bias', 'encoder.layers.9.self_attn.v_proj.weight', 'encoder.layers.9.self_attn_layer_norm.bias', 'encoder.layers.9.self_attn_layer_norm.weight', 'projection.bias', 'projection.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "model = Model.from_pretrained('Qwen/Qwen2.5-7B-Instruct', config = config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "996c1bd3",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.encoder = model.encoder.from_pretrained('huseinzol05/whisper-large-v3-encoder')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "30977b57",
   "metadata": {},
   "outputs": [],
   "source": [
    "# _ = model.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a4255080",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_token = \"<|file_sep|>\"\n",
    "audio_bos_token = \"<|audio_bos|>\"\n",
    "audio_eos_token = \"<|audio_eos|>\"\n",
    "audio_token_id = tokenizer._convert_token_to_id_with_added_voc(audio_token)\n",
    "pad_token_id = tokenizer.pad_token_id\n",
    "new_tokens = [AddedToken(audio_bos_token), AddedToken(audio_eos_token)]\n",
    "tokenizer.add_tokens(new_tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "98cf2ed1",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.config.audio_token_index = audio_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "cab47d73",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n<|im_start|>user\\nAudio 1: <|audio_bos|><|file_sep|><|audio_eos|>\\nWhat does the person say?<|im_end|>\\n<|im_start|>assistant\\nYes, the speaker is female and in her twenties.<|im_end|>\\n<|im_start|>assistant\\n'"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation = [\n",
    "    {\"role\": \"user\", \"content\": [\n",
    "        {\"type\": \"audio\", \"audio_url\": \"audio.wav\"},\n",
    "        {\"type\": \"text\", \"text\": \"What does the person say?\"},\n",
    "    ]},\n",
    "    {\"role\": \"assistant\", \"content\": \"Yes, the speaker is female and in her twenties.\"},\n",
    "]\n",
    "text = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)\n",
    "text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "2d473abb",
   "metadata": {},
   "outputs": [],
   "source": [
    "audio_class = Audio(sampling_rate=16000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "279e7443",
   "metadata": {},
   "outputs": [],
   "source": [
    "f = 'line-4.mp3'\n",
    "audio_ = audio_class.decode_example(audio_class.encode_example(f))['array']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "6383f584",
   "metadata": {},
   "outputs": [],
   "source": [
    "audio_lengths = [min(3000, math.ceil(len(audio_) / processor.feature_extractor.hop_length))]\n",
    "audio_length = audio_lengths.pop(0)\n",
    "input_length = (audio_length - 1) // 2 + 1\n",
    "\n",
    "expanded_audio_token = audio_token * input_length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "d3000010",
   "metadata": {},
   "outputs": [],
   "source": [
    "text = text.replace(audio_token, expanded_audio_token)\n",
    "inputs = tokenizer(text, return_tensors = 'pt')\n",
    "input_ids = inputs['input_ids']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "51c0ee78",
   "metadata": {},
   "outputs": [],
   "source": [
    "inputs_audio = processor.feature_extractor(\n",
    "    [audio_], \n",
    "    return_attention_mask=True, \n",
    "    padding=\"max_length\", \n",
    "    sampling_rate=16000,\n",
    "    return_tensors = 'pt'\n",
    ")\n",
    "\n",
    "input_features = inputs_audio['input_features']\n",
    "feature_attention_mask = inputs_audio['attention_mask']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "271bed52",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BaseModelOutputWithPast(last_hidden_state=tensor([[[-0.1157,  0.1346,  1.1895,  ..., -0.8239,  0.3654,  1.1142],\n",
       "         [ 0.3169,  0.7667, -0.2151,  ..., -4.4913,  3.2864,  4.9101],\n",
       "         [-0.2525,  0.3457, -3.7474,  ...,  0.9931, -0.0381,  3.4437],\n",
       "         ...,\n",
       "         [ 3.7294,  3.0233, -0.6418,  ..., -7.0455,  3.4381,  5.4175],\n",
       "         [ 1.6567,  1.7979, -6.4976,  ..., -5.2488,  2.7147, -0.0828],\n",
       "         [ 3.4675,  7.1966,  3.6888,  ..., -0.3968, -0.7041, -0.8152]]],\n",
       "       grad_fn=<MulBackward0>), past_key_values=<transformers.cache_utils.DynamicCache object at 0x7f1e081f01c0>, hidden_states=(tensor([[[ 1.2398e-04, -9.0599e-05,  1.2064e-04,  ..., -2.3842e-04,\n",
       "          -2.8491e-05, -1.6785e-04],\n",
       "         [-6.7520e-04, -8.3618e-03, -1.0376e-03,  ..., -3.0273e-02,\n",
       "          -1.8433e-02, -7.6904e-03],\n",
       "         [ 1.4572e-03, -4.5776e-03, -8.4229e-03,  ..., -2.0447e-03,\n",
       "          -8.9264e-04, -6.8970e-03],\n",
       "         ...,\n",
       "         [ 1.2398e-04, -9.0599e-05,  1.2064e-04,  ..., -2.3842e-04,\n",
       "          -2.8491e-05, -1.6785e-04],\n",
       "         [ 1.0620e-02, -1.2024e-02,  3.2654e-03,  ..., -2.0386e-02,\n",
       "          -1.2817e-02,  1.6968e-02],\n",
       "         [ 1.4572e-03, -4.5776e-03, -8.4229e-03,  ..., -2.0447e-03,\n",
       "          -8.9264e-04, -6.8970e-03]]], grad_fn=<IndexPutBackward0>), tensor([[[-0.1067,  0.0537, -0.3166,  ..., -0.0558, -0.0466,  0.0450],\n",
       "         [-0.1590,  0.0013, -0.2705,  ..., -0.0817, -0.1567, -0.0295],\n",
       "         [-0.0215,  0.1094,  0.0654,  ...,  0.0983, -0.1311, -0.0687],\n",
       "         ...,\n",
       "         [-0.0902,  0.0442, -0.1101,  ...,  0.0374, -0.0313,  0.0036],\n",
       "         [-0.1404, -0.0648, -0.2419,  ...,  0.0037, -0.0600,  0.0327],\n",
       "         [-0.0908, -0.1602, -0.0626,  ..., -0.0110, -0.0873, -0.0081]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.9126, -2.0371, -0.4291,  ...,  0.2650,  0.5287,  1.6870],\n",
       "         [-0.2220, -0.1096, -0.0891,  ..., -0.3784, -0.1734,  0.1019],\n",
       "         [ 0.0311,  0.0943, -0.0171,  ...,  0.0496, -0.3000, -0.1138],\n",
       "         ...,\n",
       "         [ 0.0762, -0.0741, -0.1250,  ..., -0.0573,  0.0322,  0.0357],\n",
       "         [-0.1594, -0.1129, -0.2922,  ...,  0.0406, -0.0689,  0.0284],\n",
       "         [ 0.0351, -0.1994, -0.1320,  ..., -0.0414, -0.1216, -0.0585]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-2.4389e+00,  2.5178e-01, -2.6725e+00,  ...,  1.5676e-01,\n",
       "           1.5317e+00,  1.6511e-01],\n",
       "         [-3.8063e-01, -1.3753e-01,  3.3109e-02,  ..., -3.5656e-01,\n",
       "           1.5909e-01, -2.5623e-01],\n",
       "         [ 4.2816e-02,  2.9474e-01,  2.4056e-01,  ..., -9.2246e-03,\n",
       "          -9.5078e-02,  4.6867e-02],\n",
       "         ...,\n",
       "         [ 1.2722e-01, -1.0162e-01,  1.1654e-01,  ..., -2.0871e-01,\n",
       "           1.3590e-01,  1.8052e-01],\n",
       "         [-1.3892e-01, -9.2127e-01,  5.1093e-01,  ...,  6.1111e-02,\n",
       "          -5.9748e-01,  2.2932e-01],\n",
       "         [-6.0662e-03, -1.5377e-01, -6.8642e-02,  ..., -2.2276e-02,\n",
       "          -8.8068e-02,  9.0251e-04]]], grad_fn=<AddBackward0>), tensor([[[-2.7412e+00, -1.1701e+00, -1.6512e+00,  ..., -1.6554e+00,\n",
       "           1.8960e+00,  1.4372e+00],\n",
       "         [-8.1770e-01, -3.3222e-01,  2.9385e-01,  ..., -7.3495e-01,\n",
       "          -1.9548e-02, -2.4286e-02],\n",
       "         [-4.0033e+00,  1.4612e+01, -6.1245e+00,  ..., -6.1897e-02,\n",
       "           3.3843e+00,  8.4826e+00],\n",
       "         ...,\n",
       "         [ 9.5558e-02, -2.7110e-02,  6.8178e-02,  ..., -1.7887e-01,\n",
       "           7.4066e-02,  1.2907e-01],\n",
       "         [-9.1411e-02, -7.5669e-01, -1.2254e-02,  ...,  3.0096e-01,\n",
       "          -5.1657e-01,  4.7846e-01],\n",
       "         [-7.4778e-02, -1.2016e-01, -3.7070e-02,  ...,  4.0023e-02,\n",
       "          -1.0588e-01, -9.6773e-02]]], grad_fn=<AddBackward0>), tensor([[[-1.7827,  1.0300, -2.8472,  ..., -7.4898,  1.5024,  2.1981],\n",
       "         [-1.0939, -0.7639,  0.2635,  ..., -1.0434, -0.7912,  2.0169],\n",
       "         [-3.7422, 10.0744, -5.9083,  ..., -2.7406,  7.6519,  3.8912],\n",
       "         ...,\n",
       "         [-0.1197, -0.0857,  0.0500,  ..., -0.2541,  0.1710, -0.1151],\n",
       "         [-0.4313, -1.2135, -0.0435,  ...,  0.0131, -0.4396,  0.1126],\n",
       "         [-0.4195, -0.1020,  0.0409,  ..., -0.0194, -0.1840,  0.2160]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-2.1222e+00,  1.8553e-03,  1.9158e+00,  ..., -8.7819e+00,\n",
       "           2.8110e+00,  4.0614e-01],\n",
       "         [-1.2462e+00, -1.8619e-01, -3.2511e-01,  ..., -1.3508e+00,\n",
       "          -9.7032e-01,  9.4287e-01],\n",
       "         [-4.9244e+00,  8.2282e+00, -6.8887e+00,  ..., -1.2015e+00,\n",
       "           6.6229e+00,  4.0711e+00],\n",
       "         ...,\n",
       "         [-2.4059e-02,  6.3549e-02, -6.1579e-02,  ..., -3.6595e-01,\n",
       "           6.5826e-02, -1.2088e-01],\n",
       "         [-1.5450e-01, -7.6378e-01, -1.6203e-01,  ..., -5.4621e-02,\n",
       "          -2.4981e-01,  7.2097e-02],\n",
       "         [-3.6738e-01, -1.8903e-01, -2.7839e-01,  ...,  9.8997e-02,\n",
       "          -8.8994e-03,  3.4346e-01]]], grad_fn=<AddBackward0>), tensor([[[-4.5433, -1.0857,  3.5172,  ..., -2.9770,  0.3077,  0.0313],\n",
       "         [-1.2844, -0.2783, -0.3028,  ..., -0.8184, -0.7949,  1.6671],\n",
       "         [-4.4853,  6.8724, -5.4026,  ..., -0.3964,  6.8706,  3.5441],\n",
       "         ...,\n",
       "         [-0.0389, -0.0129,  0.3222,  ..., -0.3052, -0.3157,  0.1086],\n",
       "         [ 0.1286, -0.8038,  0.0207,  ..., -0.1818,  0.0649,  0.6965],\n",
       "         [-0.2118, -0.6047,  0.1911,  ...,  0.0082,  0.0435,  0.3847]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-5.0145, -1.5916,  2.6195,  ..., -2.4319,  0.0540, -0.7746],\n",
       "         [-1.5218,  0.0878, -0.3661,  ..., -2.0684, -0.6913,  1.4693],\n",
       "         [-4.0788,  6.4314, -5.4378,  ..., -0.3411,  6.4225,  3.0334],\n",
       "         ...,\n",
       "         [ 0.2435, -0.0726, -0.0809,  ...,  0.1432, -0.1530,  0.0139],\n",
       "         [ 0.3053, -0.8863,  0.1888,  ..., -0.6166, -0.5318,  0.1200],\n",
       "         [-0.1140, -0.5162, -0.0493,  ..., -0.5512, -0.3473,  0.0259]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-4.8721, -1.4896,  1.9149,  ..., -1.4224,  0.1440, -1.1549],\n",
       "         [-0.8519,  0.4214,  0.2355,  ..., -1.9351, -0.4868,  1.0329],\n",
       "         [-4.0796,  6.8767, -4.8930,  ..., -0.1680,  6.5476,  2.2160],\n",
       "         ...,\n",
       "         [ 0.4783,  0.3934,  0.1669,  ..., -0.1893,  0.1381,  0.0430],\n",
       "         [ 0.4605, -1.0919,  0.1272,  ..., -0.5122, -0.2428,  0.0699],\n",
       "         [ 0.0643, -0.6871, -0.0960,  ..., -0.7397, -0.2389,  0.0974]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-5.5504e+00, -2.1014e-01,  3.2636e+00,  ..., -2.2030e+00,\n",
       "          -2.9640e-01, -2.4567e+00],\n",
       "         [-6.7325e-01,  7.1185e-01,  5.0613e-02,  ..., -1.9057e+00,\n",
       "          -4.3873e-01,  8.9838e-01],\n",
       "         [-4.1609e+00,  4.8412e+00, -4.5479e+00,  ..., -5.7644e-01,\n",
       "           7.5779e+00,  4.3390e-01],\n",
       "         ...,\n",
       "         [ 3.0453e-01,  3.4136e-01,  6.3021e-01,  ..., -6.6005e-01,\n",
       "          -8.4587e-02, -3.5250e-01],\n",
       "         [-1.7535e-01, -2.4224e-01,  1.2125e-01,  ..., -8.7620e-01,\n",
       "           1.3120e-02, -1.8001e-02],\n",
       "         [ 3.3189e-01, -3.7644e-03,  1.9680e-01,  ..., -8.9149e-01,\n",
       "          -3.3809e-01, -2.1033e-01]]], grad_fn=<AddBackward0>), tensor([[[-4.4327e+00,  3.0861e-01,  3.8509e+00,  ..., -2.2189e+00,\n",
       "           4.9922e-01, -2.4895e+00],\n",
       "         [-5.1381e-01,  4.2533e-01, -1.9750e-01,  ..., -2.0302e+00,\n",
       "          -1.8547e-03,  1.1532e+00],\n",
       "         [-4.0645e+00,  4.6991e+00, -4.8221e+00,  ..., -2.6669e-01,\n",
       "           7.4836e+00,  6.5169e-01],\n",
       "         ...,\n",
       "         [-2.3940e-01,  8.6733e-02,  2.0436e-01,  ..., -4.5038e-01,\n",
       "          -1.7293e-01, -1.4188e-01],\n",
       "         [-1.0114e-01,  1.9321e-01, -1.9518e-03,  ..., -2.9175e-01,\n",
       "           1.7526e-01, -3.4632e-01],\n",
       "         [-2.8272e-02,  1.8680e-01, -1.6870e-02,  ..., -3.6050e-01,\n",
       "          -3.0024e-01,  1.6983e-01]]], grad_fn=<AddBackward0>), tensor([[[-3.5510e+00,  1.3128e+00,  3.3261e+00,  ..., -2.2604e+00,\n",
       "           1.2274e-01, -1.5677e+00],\n",
       "         [ 6.7198e-02,  7.6317e-01,  2.4396e-01,  ..., -1.4420e+00,\n",
       "          -1.1399e-01,  8.3403e-01],\n",
       "         [-3.9735e+00,  5.0770e+00, -3.7119e+00,  ...,  6.0402e-01,\n",
       "           7.3396e+00,  4.1533e-01],\n",
       "         ...,\n",
       "         [-1.2902e-03,  3.8482e-03,  1.9098e-01,  ..., -4.6879e-01,\n",
       "          -2.9666e-02,  3.7915e-01],\n",
       "         [ 2.3918e-01, -4.9532e-02, -3.7867e-01,  ..., -1.0022e+00,\n",
       "          -2.1507e-03, -1.3285e-01],\n",
       "         [ 7.7126e-02,  2.2161e-01,  2.6998e-02,  ..., -2.6373e-01,\n",
       "           6.1962e-02, -5.1866e-02]]], grad_fn=<AddBackward0>), tensor([[[-2.7250,  2.2881,  2.7703,  ..., -1.9865,  0.2456, -1.3039],\n",
       "         [ 0.0608,  0.2520,  0.7787,  ..., -1.4915, -0.0264,  0.7188],\n",
       "         [-4.2546,  4.2427, -3.3456,  ...,  1.3564,  6.8101,  0.2191],\n",
       "         ...,\n",
       "         [-0.1767, -0.0525, -0.2374,  ..., -0.5361, -0.1201,  0.8271],\n",
       "         [ 0.2245, -0.3702,  0.3040,  ..., -0.7676,  0.1866, -0.1471],\n",
       "         [ 0.3251, -0.1302,  0.8676,  ..., -0.2448,  0.2791,  0.2284]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.8428,  2.3767,  2.6511,  ..., -1.7522,  0.2849, -0.6835],\n",
       "         [ 0.0528,  0.7363,  0.7713,  ..., -1.4527,  0.1838,  0.1984],\n",
       "         [-3.9934,  3.8042, -3.9233,  ...,  1.6977,  6.6690,  0.5538],\n",
       "         ...,\n",
       "         [-0.1524, -0.0447,  0.4920,  ..., -0.4084, -0.3965,  0.1906],\n",
       "         [-0.0996, -0.4665, -0.3912,  ..., -0.6044, -0.3057,  0.6286],\n",
       "         [-0.0632, -0.5432,  0.0539,  ..., -0.4470, -0.1468,  0.2175]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-0.9264,  2.0383,  3.7135,  ..., -1.9024,  0.1813, -0.3463],\n",
       "         [ 0.0196,  0.2140,  1.1613,  ..., -2.1905, -0.2088,  0.4022],\n",
       "         [-3.6356,  3.8459, -3.8655,  ...,  2.0656,  6.7318,  0.0803],\n",
       "         ...,\n",
       "         [-0.0631, -0.7108,  0.4054,  ..., -0.6003, -0.4901,  0.0096],\n",
       "         [-0.1218, -0.0621,  0.2823,  ..., -0.4447,  0.4361,  0.1478],\n",
       "         [ 0.1920, -0.8238,  0.8681,  ..., -0.3835,  0.2985, -0.3632]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.2443,  2.3923,  2.8559,  ..., -2.0778, -0.9203, -0.5350],\n",
       "         [ 0.0758,  0.8277,  0.2997,  ..., -2.5110,  0.1425,  0.0799],\n",
       "         [-3.7272,  3.6014, -3.5012,  ...,  2.0998,  6.8708, -0.1825],\n",
       "         ...,\n",
       "         [-0.3400, -0.3742, -0.0313,  ..., -0.6331,  0.1313, -0.3857],\n",
       "         [-0.3654,  0.0429, -0.3161,  ...,  0.1594,  0.3018,  0.4187],\n",
       "         [-0.3712, -0.7438, -0.0345,  ...,  0.2086,  0.6364, -0.0396]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.4753,  1.2687,  2.8218,  ..., -1.3977, -1.3504, -1.8422],\n",
       "         [ 0.3146,  0.7359,  0.4159,  ..., -3.0869,  0.0451,  0.5479],\n",
       "         [-3.4380,  3.3615, -3.0646,  ...,  2.1365,  7.0694, -0.2739],\n",
       "         ...,\n",
       "         [-0.5729, -0.5285,  0.0318,  ..., -1.0966,  0.4205, -0.1787],\n",
       "         [-0.5350, -1.3001,  0.6815,  ..., -0.1217, -0.6951,  0.2137],\n",
       "         [-0.3645, -1.3411, -0.3153,  ...,  0.3581, -0.1521, -0.7691]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.7807,  1.3173,  2.6520,  ..., -0.7844, -0.7451, -1.5583],\n",
       "         [ 0.7394,  0.3521,  0.8517,  ..., -2.8872, -0.3483,  0.5071],\n",
       "         [-3.2840,  3.1107, -2.6194,  ...,  2.1184,  7.3232, -0.2060],\n",
       "         ...,\n",
       "         [ 0.3332, -0.7619,  0.3513,  ..., -0.6332, -0.2015,  0.3645],\n",
       "         [-0.2484, -0.8135,  0.8652,  ...,  0.3436, -0.1698,  0.2089],\n",
       "         [-0.3812, -0.8283, -0.1682,  ...,  0.3194, -0.6326, -0.7057]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.7949,  0.9500,  2.3516,  ..., -1.2482,  0.2858, -1.2778],\n",
       "         [ 0.6793,  0.7814, -0.0397,  ..., -3.7646, -0.2264, -0.0113],\n",
       "         [-2.9533,  2.8996, -1.7860,  ...,  2.6279,  7.2518,  0.0580],\n",
       "         ...,\n",
       "         [-0.3398,  0.1259, -0.3750,  ..., -0.7723,  0.1083, -0.0592],\n",
       "         [-0.2201, -0.8787,  0.4165,  ..., -0.4718, -1.1514,  0.4976],\n",
       "         [-0.5426, -1.8084,  0.3486,  ...,  0.1386, -0.6168, -0.7029]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-0.8157,  0.6136,  1.8924,  ..., -1.3004,  0.7550, -2.0288],\n",
       "         [ 0.1432,  0.3346,  0.0310,  ..., -4.0350, -0.0624, -0.6473],\n",
       "         [-2.7488,  2.0258, -0.9180,  ...,  2.8812,  7.2740,  0.1212],\n",
       "         ...,\n",
       "         [-0.2181, -0.5271, -0.1802,  ..., -0.4915,  0.1353, -0.4049],\n",
       "         [ 0.5678,  0.0608,  1.6018,  ...,  0.6666, -0.2898, -0.0344],\n",
       "         [-0.0715, -0.7285,  0.1250,  ...,  0.1551,  0.4057, -0.4464]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-0.7765,  0.6072,  0.8434,  ..., -0.2011,  0.4474, -1.8204],\n",
       "         [ 0.3332, -0.0081, -1.4562,  ..., -3.9410, -0.1715, -0.8315],\n",
       "         [-2.9135,  1.2343, -1.2733,  ...,  3.0486,  7.2761,  0.1469],\n",
       "         ...,\n",
       "         [-0.0760, -0.3539, -1.6603,  ...,  0.5169, -0.7126,  0.3652],\n",
       "         [ 0.0413,  0.2001,  0.2426,  ...,  0.9459, -0.4298,  0.8256],\n",
       "         [-0.9337,  1.3396,  1.0938,  ..., -0.3079, -0.1621, -1.1566]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-1.3771,  0.2050, -0.4873,  ...,  0.7841,  2.1738, -0.2352],\n",
       "         [-0.4214,  0.0514, -0.2241,  ..., -3.5520,  0.1092,  0.1376],\n",
       "         [-3.3739,  0.1161, -0.5390,  ...,  3.2785,  7.1877, -0.1447],\n",
       "         ...,\n",
       "         [ 0.4665, -1.7080, -1.7664,  ...,  0.3773, -0.1662,  0.2267],\n",
       "         [ 0.9096,  0.5407, -1.0749,  ...,  0.6912,  0.8100,  0.1684],\n",
       "         [-0.1164,  1.4535,  0.3211,  ...,  0.1919,  0.5522, -2.1373]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-0.7257, -0.2858,  1.3161,  ..., -0.5259,  2.5745, -0.0941],\n",
       "         [-1.3814, -2.4360,  0.8474,  ..., -5.3921, -0.9837,  1.2255],\n",
       "         [-3.8674, -0.9671, -0.0603,  ...,  3.6740,  6.2318, -1.2506],\n",
       "         ...,\n",
       "         [ 0.2618, -1.1597, -1.9035,  ...,  0.4232, -1.8835,  0.7528],\n",
       "         [ 0.3802,  0.1893, -1.4139,  ..., -0.4878,  0.7769,  1.1597],\n",
       "         [-0.2259, -0.2241, -1.0921,  ...,  0.7281,  0.1090, -1.9551]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[ 1.3619,  0.3577,  2.5460,  ...,  1.1157,  2.2209,  0.2191],\n",
       "         [ 0.0478, -2.0803,  2.2198,  ..., -4.9389, -0.5748,  1.8924],\n",
       "         [-4.2912, -2.1126,  1.4338,  ...,  4.3719,  5.8966, -1.1847],\n",
       "         ...,\n",
       "         [ 0.5480, -1.4027, -2.6469,  ..., -1.3805, -1.5543,  1.1914],\n",
       "         [ 1.0784,  0.2787, -0.5365,  ..., -2.8025,  1.8237, -0.3923],\n",
       "         [-0.4720,  1.4218, -0.3279,  ..., -0.0730,  0.0683, -1.9989]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[ 1.2758,  0.0207,  3.0143,  ...,  0.1491,  2.6136,  1.0742],\n",
       "         [-0.8938, -1.3446,  1.7682,  ..., -4.8869,  0.4493,  0.9261],\n",
       "         [-4.2746, -3.0720,  2.6086,  ...,  6.6693,  4.7221, -3.1316],\n",
       "         ...,\n",
       "         [-0.8937, -1.4860, -2.8505,  ..., -2.5891, -1.6861,  0.9981],\n",
       "         [ 2.1775, -0.7932, -1.7009,  ..., -2.7283,  0.4026, -2.4922],\n",
       "         [-0.8318,  1.7514,  0.9320,  ...,  0.1439,  0.6304, -1.7237]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-0.5139,  1.5141,  2.1277,  ..., -0.5918,  2.9975,  0.8977],\n",
       "         [ 0.3593,  0.9814, -1.3118,  ..., -6.0539,  1.9776,  0.9424],\n",
       "         [-1.8723,  0.5928,  2.8207,  ...,  7.7350,  3.5879, -4.4128],\n",
       "         ...,\n",
       "         [-2.6237,  0.2868, -1.6649,  ..., -2.4639, -0.2793,  1.1208],\n",
       "         [ 1.8595,  0.5771, -3.6070,  ..., -1.2144, -0.6090, -3.3325],\n",
       "         [ 0.5556,  3.9138,  0.3709,  ..., -0.5958,  0.3585, -0.5677]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[ -0.3482,  -0.9900,   6.4564,  ...,  -1.6846,   1.8866,  -1.5798],\n",
       "         [ -2.2973,   0.4131,  -0.3242,  ...,  -7.1029,   4.5977,   8.9723],\n",
       "         [ -8.8783, -12.5585,  14.2286,  ..., -15.2574,  -9.8568,  -0.2785],\n",
       "         ...,\n",
       "         [ -2.0165,  -2.0732,  -3.6572,  ...,  -0.4582,  -0.3736,   1.9686],\n",
       "         [  0.6965,  -0.3842,  -4.1112,  ...,  -0.8008,  -0.2339,  -1.4484],\n",
       "         [ -0.1248,   5.4479,   1.4470,  ...,  -0.4120,  -2.0790,  -3.7087]]],\n",
       "       grad_fn=<AddBackward0>), tensor([[[-0.1157,  0.1346,  1.1895,  ..., -0.8239,  0.3654,  1.1142],\n",
       "         [ 0.3169,  0.7667, -0.2151,  ..., -4.4913,  3.2864,  4.9101],\n",
       "         [-0.2525,  0.3457, -3.7474,  ...,  0.9931, -0.0381,  3.4437],\n",
       "         ...,\n",
       "         [ 3.7294,  3.0233, -0.6418,  ..., -7.0455,  3.4381,  5.4175],\n",
       "         [ 1.6567,  1.7979, -6.4976,  ..., -5.2488,  2.7147, -0.0828],\n",
       "         [ 3.4675,  7.1966,  3.6888,  ..., -0.3968, -0.7041, -0.8152]]],\n",
       "       grad_fn=<MulBackward0>)), attentions=None)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model(\n",
    "    input_ids = input_ids, \n",
    "    attention_mask = inputs['attention_mask'],\n",
    "    input_features = input_features,\n",
    "    feature_attention_mask = feature_attention_mask,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ec0c1a6",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
