{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/user/anaconda3/envs/wyf/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "from transformers import PreTrainedModel, PretrainedConfig, AutoTokenizer, AutoModelForCausalLM\n",
    "from PIL import Image\n",
    "import requests\n",
    "from transformers import AutoProcessor, AutoModel\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers.modeling_outputs import CausalLMOutputWithPast\n",
    "from typing import List, Dict, Any"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the Qwen2.5-0.5B-Instruct tokenizer from a local checkpoint\n",
    "tokenizer = AutoTokenizer.from_pretrained('/home/user/Downloads/Qwen2.5-0.5B-Instruct')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Qwen2TokenizerFast(name_or_path='/home/user/Downloads/Qwen2.5-0.5B-Instruct', vocab_size=151643, model_max_length=131072, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'eos_token': '<|im_end|>', 'pad_token': '<|endoftext|>', 'additional_special_tokens': ['<|im_start|>', '<|im_end|>', '<|object_ref_start|>', '<|object_ref_end|>', '<|box_start|>', '<|box_end|>', '<|quad_start|>', '<|quad_end|>', '<|vision_start|>', '<|vision_end|>', '<|vision_pad|>', '<|image_pad|>', '<|video_pad|>']}, clean_up_tokenization_spaces=False),  added_tokens_decoder={\n",
       "\t151643: AddedToken(\"<|endoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151644: AddedToken(\"<|im_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151645: AddedToken(\"<|im_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151646: AddedToken(\"<|object_ref_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151647: AddedToken(\"<|object_ref_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151648: AddedToken(\"<|box_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151649: AddedToken(\"<|box_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151650: AddedToken(\"<|quad_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151651: AddedToken(\"<|quad_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151652: AddedToken(\"<|vision_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151653: AddedToken(\"<|vision_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151654: AddedToken(\"<|vision_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151655: AddedToken(\"<|image_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151656: AddedToken(\"<|video_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151657: AddedToken(\"<tool_call>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151658: AddedToken(\"</tool_call>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151659: AddedToken(\"<|fim_prefix|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151660: AddedToken(\"<|fim_middle|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151661: AddedToken(\"<|fim_suffix|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151662: AddedToken(\"<|fim_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151663: AddedToken(\"<|repo_name|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151664: AddedToken(\"<|file_sep|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the tokenizer: note the special ids used below —\n",
    "# <|image_pad|> = 151655, pad <|endoftext|> = 151643, eos <|im_end|> = 151645\n",
    "tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class VLMConfig(PretrainedConfig):\n",
    "    \"\"\"Configuration for the toy vision-language model.\n",
    "\n",
    "    Holds the local checkpoint paths of the LLM backbone and vision tower,\n",
    "    whether the vision tower should be frozen, and how many <|image_pad|>\n",
    "    placeholder tokens a single image expands to in the prompt.\n",
    "    \"\"\"\n",
    "    model_type = \"vlm_model\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 llm_model_path='/home/user/Downloads/Qwen2.5-0.5B-Instruct',\n",
    "                 vision_model_path='/home/user/Downloads/siglip-so400m-patch14-384',\n",
    "                 freeze_vision_model=True,\n",
    "                 image_pad_num=49,\n",
    "                 **kwargs):\n",
    "        # Record our own fields, then hand the remainder to PretrainedConfig.\n",
    "        self.llm_model_path = llm_model_path\n",
    "        self.vision_model_path = vision_model_path\n",
    "        self.freeze_vision_model = freeze_vision_model\n",
    "        self.image_pad_num = image_pad_num\n",
    "        super().__init__(**kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class VLM(PreTrainedModel):\n",
    "    \"\"\"Minimal LLaVA-style VLM: SigLIP vision tower + 2-layer projector + Qwen2.5 LLM.\n",
    "\n",
    "    Projected image-patch features are written into the embedding positions of\n",
    "    the <|image_pad|> placeholder tokens before running the language model.\n",
    "    \"\"\"\n",
    "    config_class = VLMConfig\n",
    "\n",
    "    def __init__(self, config):\n",
    "        super().__init__(config)\n",
    "        self.config = config\n",
    "        self.vision_model = AutoModel.from_pretrained(self.config.vision_model_path)\n",
    "        self.processor = AutoProcessor.from_pretrained(self.config.vision_model_path)\n",
    "        self.llm_model = AutoModelForCausalLM.from_pretrained(self.config.llm_model_path)\n",
    "        self.tokenizer = AutoTokenizer.from_pretrained(self.config.llm_model_path)\n",
    "        # Projector: vision hidden size -> LLM hidden size (SiLU in between).\n",
    "        self.linear1 = nn.Linear(self.vision_model.config.vision_config.hidden_size, self.llm_model.config.hidden_size)\n",
    "        self.linear2 = nn.Linear(self.llm_model.config.hidden_size, self.llm_model.config.hidden_size)\n",
    "        # Resolve the placeholder id once instead of re-tokenizing every forward.\n",
    "        self.image_pad_token_id = self.tokenizer.convert_tokens_to_ids('<|image_pad|>')\n",
    "        # Honor the config flag: previously it was stored but never applied.\n",
    "        if self.config.freeze_vision_model:\n",
    "            for param in self.vision_model.parameters():\n",
    "                param.requires_grad = False\n",
    "\n",
    "    def forward(self, pixel_values, input_ids, attention_mask, labels=None):\n",
    "        text_embeds = self.llm_model.get_input_embeddings()(input_ids)\n",
    "        image_embeds = self.vision_model.vision_model(pixel_values).last_hidden_state\n",
    "        image_features = self.linear2(F.silu(self.linear1(image_embeds)))\n",
    "\n",
    "        inputs_embeds = self.merge_input_ids_with_image_features(image_features, text_embeds, input_ids)\n",
    "        outputs = self.llm_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask)\n",
    "        logits = outputs[0]\n",
    "        loss = None\n",
    "        if labels is not None:\n",
    "            # The datasets mask the prompt part of `labels` with pad_token_id,\n",
    "            # so exclude those positions from the loss instead of training on them.\n",
    "            loss_fct = nn.CrossEntropyLoss(ignore_index=self.tokenizer.pad_token_id)\n",
    "            loss = loss_fct(\n",
    "                logits.view(-1, logits.size(-1)), labels.view(-1).to(logits.device)\n",
    "            )\n",
    "        return CausalLMOutputWithPast(loss=loss, logits=logits)\n",
    "\n",
    "    def merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids):\n",
    "        \"\"\"Scatter projected image features into the <|image_pad|> slots (in place).\n",
    "\n",
    "        NOTE(review): assumes num_image_patches == config.image_pad_num per\n",
    "        image so the flattened features exactly fill the placeholders — verify.\n",
    "        \"\"\"\n",
    "        num_images, num_image_patches, embed_dim = image_features.shape\n",
    "        batch_indices, image_indices = torch.where(input_ids == self.image_pad_token_id)\n",
    "        inputs_embeds[batch_indices, image_indices] = image_features.view(-1, embed_dim)\n",
    "        return inputs_embeds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import io\n",
    "import json\n",
    "import os\n",
    "import zipfile\n",
    "from PIL import Image\n",
    "from torch.utils.data import Dataset\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained('/home/user/Downloads/Qwen2.5-0.5B-Instruct')\n",
    "processor = AutoProcessor.from_pretrained(\"/home/user/wyf/siglip-base-patch16-224\")\n",
    "\n",
    "\n",
    "class MyDataset(Dataset):\n",
    "    \"\"\"Image/conversation dataset producing token ids and pixel values.\n",
    "\n",
    "    input_ids and labels are shifted by one position for next-token\n",
    "    prediction, with the prompt portion of labels masked by pad_token_id.\n",
    "    Any sample that fails to load falls back to a blank image + canned Q/A.\n",
    "    \"\"\"\n",
    "    def __init__(self, images_path, data_path, tokenizer, processor, config):\n",
    "        super().__init__()\n",
    "        self.data_path = data_path\n",
    "        self.images_path = images_path\n",
    "        self.tokenizer = tokenizer\n",
    "        self.processor = processor\n",
    "        self.config = config\n",
    "        with open(self.data_path, 'r', encoding='utf-8') as f:\n",
    "            self.datas = json.load(f)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.datas)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        sample = self.datas[index]\n",
    "        try:\n",
    "            image_name = sample['image']\n",
    "            conversations = sample['conversations']\n",
    "            q_text = self.tokenizer.apply_chat_template([{\"role\":\"system\", \"content\":'You are a helpful assistant.'}, {\"role\":\"user\", \"content\":conversations[0]['value']}], \\\n",
    "                tokenize=False, \\\n",
    "                add_generation_prompt=True).replace('<image>', '<|image_pad|>'*self.config.image_pad_num)\n",
    "            a_text = conversations[1]['value'] + self.tokenizer.eos_token\n",
    "            q_input_ids = self.tokenizer(q_text)['input_ids']\n",
    "            a_input_ids = self.tokenizer(a_text)['input_ids']\n",
    "            input_ids = q_input_ids + a_input_ids\n",
    "            # Mask the prompt part. Uses the instance tokenizer (previously the\n",
    "            # module-level `tokenizer` global, a hidden notebook-state coupling).\n",
    "            labels = [self.tokenizer.pad_token_id] * len(q_input_ids) + a_input_ids\n",
    "            # Shift for next-token prediction.\n",
    "            input_ids = input_ids[:-1]\n",
    "            labels = labels[1:]\n",
    "\n",
    "            # `import os` was missing from this cell: on a fresh kernel this line\n",
    "            # raised NameError, which the old bare `except:` silently turned into\n",
    "            # the blank-image fallback for EVERY sample.\n",
    "            image = Image.open(os.path.join(self.images_path, image_name)).convert('RGB')\n",
    "            pixel_values = self.processor(text=None, images=image)['pixel_values']\n",
    "        except Exception:\n",
    "            # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate.\n",
    "            default_image = Image.new('RGB', (224, 224), color='white')\n",
    "            pixel_values = self.processor(text=None, images=default_image)['pixel_values']\n",
    "            q_text = self.tokenizer.apply_chat_template([{\"role\":\"system\", \"content\":'You are a helpful assistant.'}, {\"role\":\"user\", \"content\":\"图片内容是什么\\n<image>\"}], \\\n",
    "                tokenize=False, \\\n",
    "                add_generation_prompt=True).replace('<image>', '<|image_pad|>'*self.config.image_pad_num)\n",
    "            a_text = '图片内容为空' + self.tokenizer.eos_token\n",
    "            q_input_ids = self.tokenizer(q_text)['input_ids']\n",
    "            a_input_ids = self.tokenizer(a_text)['input_ids']\n",
    "            input_ids = q_input_ids + a_input_ids\n",
    "            labels = [self.tokenizer.pad_token_id] * len(q_input_ids) + a_input_ids\n",
    "            input_ids = input_ids[:-1]\n",
    "            labels = labels[1:]\n",
    "\n",
    "        return {\n",
    "            'input_ids': input_ids,\n",
    "            'labels': labels,\n",
    "            'pixel_values': pixel_values\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "class SFTDataset(Dataset):\n",
    "    \"\"\"SFT dataset over LLaVA-instruct JSON paired with COCO train2014 images.\n",
    "\n",
    "    input_ids and labels are shifted by one position for next-token\n",
    "    prediction, with the prompt portion of labels masked by pad_token_id.\n",
    "    Any sample that fails to load falls back to a blank image + canned Q/A.\n",
    "    \"\"\"\n",
    "    def __init__(self, images_path, data_path, tokenizer, processor, config):\n",
    "        super().__init__()\n",
    "        self.data_path = data_path\n",
    "        self.images_path = images_path\n",
    "        self.tokenizer = tokenizer\n",
    "        self.processor = processor\n",
    "        self.config = config\n",
    "        with open(self.data_path, 'r', encoding='utf-8') as f:\n",
    "            self.datas = json.load(f)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.datas)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        sample = self.datas[index]\n",
    "        try:\n",
    "            # COCO filenames carry this prefix; the JSON stores only the stem.\n",
    "            image_name = 'COCO_train2014_' + str(sample['image'])\n",
    "            conversations = sample['conversations']\n",
    "            q_text = self.tokenizer.apply_chat_template([{\"role\":\"system\", \"content\":'You are a helpful assistant.'}, {\"role\":\"user\", \"content\":conversations[0]['value']}], \\\n",
    "                tokenize=False, \\\n",
    "                add_generation_prompt=True).replace('<image>', '<|image_pad|>'*self.config.image_pad_num)\n",
    "            a_text = conversations[1]['value'] + self.tokenizer.eos_token\n",
    "            q_input_ids = self.tokenizer(q_text)['input_ids']\n",
    "            a_input_ids = self.tokenizer(a_text)['input_ids']\n",
    "            input_ids = q_input_ids + a_input_ids\n",
    "            # Mask the prompt part. Uses the instance tokenizer (previously the\n",
    "            # module-level `tokenizer` global, a hidden notebook-state coupling).\n",
    "            labels = [self.tokenizer.pad_token_id] * len(q_input_ids) + a_input_ids\n",
    "            # Shift for next-token prediction.\n",
    "            input_ids = input_ids[:-1]\n",
    "            labels = labels[1:]\n",
    "\n",
    "            image = Image.open(os.path.join(self.images_path, image_name)).convert('RGB')\n",
    "            pixel_values = self.processor(text=None, images=image)['pixel_values']\n",
    "        except Exception:\n",
    "            # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate.\n",
    "            default_image = Image.new('RGB', (224, 224), color='white')\n",
    "            pixel_values = self.processor(text=None, images=default_image)['pixel_values']\n",
    "            q_text = self.tokenizer.apply_chat_template([{\"role\":\"system\", \"content\":'You are a helpful assistant.'}, {\"role\":\"user\", \"content\":\"图片内容是什么\\n<image>\"}], \\\n",
    "                tokenize=False, \\\n",
    "                add_generation_prompt=True).replace('<image>', '<|image_pad|>'*self.config.image_pad_num)\n",
    "            a_text = '图片内容为空' + self.tokenizer.eos_token\n",
    "            q_input_ids = self.tokenizer(q_text)['input_ids']\n",
    "            a_input_ids = self.tokenizer(a_text)['input_ids']\n",
    "            input_ids = q_input_ids + a_input_ids\n",
    "            labels = [self.tokenizer.pad_token_id] * len(q_input_ids) + a_input_ids\n",
    "            input_ids = input_ids[:-1]\n",
    "            labels = labels[1:]\n",
    "\n",
    "        return {\n",
    "            'input_ids': input_ids,\n",
    "            'labels': labels,\n",
    "            'pixel_values': pixel_values\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the VLM configuration with its defaults (image_pad_num = 49)\n",
    "config = VLMConfig()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the SFT dataset over the LLaVA-instruct JSON and local images\n",
    "images_path = '/home/user/wyf/train_multimodal_from_scratch/sft_images'\n",
    "data_path = '/home/user/wyf/llava_instruct_230k.json'\n",
    "ds = SFTDataset(images_path, data_path, tokenizer, processor, config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [151644,\n",
       "  8948,\n",
       "  198,\n",
       "  2610,\n",
       "  525,\n",
       "  264,\n",
       "  10950,\n",
       "  17847,\n",
       "  13,\n",
       "  151645,\n",
       "  198,\n",
       "  151644,\n",
       "  872,\n",
       "  198,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  151655,\n",
       "  198,\n",
       "  54623,\n",
       "  15946,\n",
       "  99232,\n",
       "  100822,\n",
       "  104396,\n",
       "  102284,\n",
       "  102021,\n",
       "  30,\n",
       "  151645,\n",
       "  198,\n",
       "  151644,\n",
       "  77091,\n",
       "  198,\n",
       "  45930,\n",
       "  101047,\n",
       "  99232,\n",
       "  100822,\n",
       "  104396,\n",
       "  102284,\n",
       "  20412,\n",
       "  105681,\n",
       "  1773],\n",
       " 'labels': [151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  151643,\n",
       "  45930,\n",
       "  101047,\n",
       "  99232,\n",
       "  100822,\n",
       "  104396,\n",
       "  102284,\n",
       "  20412,\n",
       "  105681,\n",
       "  1773,\n",
       "  151645],\n",
       " 'pixel_values': tensor([[[[-0.9294, -0.9608, -1.0000,  ..., -0.9686, -0.9686, -0.9686],\n",
       "           [-0.9529, -0.9608, -0.9686,  ..., -0.9686, -0.9686, -0.9686],\n",
       "           [-0.9922, -0.9529, -0.9059,  ..., -0.9686, -0.9686, -0.9686],\n",
       "           ...,\n",
       "           [-0.1843, -0.1843, -0.1765,  ..., -0.1216, -0.1294, -0.1373],\n",
       "           [-0.1843, -0.1843, -0.1765,  ..., -0.1373, -0.1451, -0.1529],\n",
       "           [-0.1843, -0.1843, -0.1765,  ..., -0.1451, -0.1529, -0.1529]],\n",
       " \n",
       "          [[-0.9216, -0.9529, -1.0000,  ..., -0.9843, -0.9843, -0.9843],\n",
       "           [-0.9451, -0.9529, -0.9686,  ..., -0.9843, -0.9843, -0.9843],\n",
       "           [-0.9843, -0.9529, -0.9059,  ..., -0.9843, -0.9843, -0.9843],\n",
       "           ...,\n",
       "           [-0.1451, -0.1451, -0.1373,  ..., -0.0902, -0.0980, -0.1059],\n",
       "           [-0.1451, -0.1451, -0.1373,  ..., -0.1059, -0.1137, -0.1216],\n",
       "           [-0.1451, -0.1451, -0.1373,  ..., -0.1137, -0.1216, -0.1216]],\n",
       " \n",
       "          [[-0.7569, -0.7961, -0.8510,  ..., -0.8980, -0.8980, -0.8980],\n",
       "           [-0.7882, -0.7961, -0.8196,  ..., -0.8980, -0.8980, -0.8980],\n",
       "           [-0.8353, -0.8118, -0.7725,  ..., -0.8980, -0.8980, -0.8980],\n",
       "           ...,\n",
       "           [-0.3176, -0.3176, -0.3098,  ..., -0.2863, -0.2941, -0.3020],\n",
       "           [-0.3176, -0.3176, -0.3098,  ..., -0.3020, -0.3020, -0.3020],\n",
       "           [-0.3176, -0.3176, -0.3098,  ..., -0.3098, -0.3020, -0.3020]]]])}"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity-check one processed sample: 49 x <|image_pad|> (151655) in input_ids,\n",
    "# prompt labels masked with 151643, pixel_values tensor attached\n",
    "ds[10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyDataCollator:\n",
    "    \"\"\"Pads a batch of dataset items and stacks them into model-ready tensors.\n",
    "\n",
    "    input_ids and labels are right-padded with pad_token_id (the model's loss\n",
    "    ignores that id in labels); an attention_mask marks real tokens, and\n",
    "    per-sample pixel values are concatenated into one image batch.\n",
    "    \"\"\"\n",
    "    def __init__(self, tokenizer):\n",
    "        self.tokenizer = tokenizer\n",
    "\n",
    "    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:\n",
    "        max_len = max(len(feature['input_ids']) for feature in features)\n",
    "        input_ids = []\n",
    "        labels = []\n",
    "        attention_mask = []\n",
    "        pixel_values = []\n",
    "        for feature in features:\n",
    "            pad_len = max_len - len(feature['input_ids'])\n",
    "            input_ids.append(feature['input_ids'] + [self.tokenizer.pad_token_id] * pad_len)\n",
    "            labels.append(feature['labels'] + [self.tokenizer.pad_token_id] * (max_len - len(feature['labels'])))\n",
    "            # VLM.forward requires attention_mask, but it was never produced:\n",
    "            # 1 for real tokens, 0 for the padding added above.\n",
    "            attention_mask.append([1] * len(feature['input_ids']) + [0] * pad_len)\n",
    "            pixel_values.append(feature['pixel_values'])\n",
    "\n",
    "        return {'input_ids': torch.tensor(input_ids, dtype=torch.long),\n",
    "                'labels': torch.tensor(labels, dtype=torch.long),\n",
    "                'attention_mask': torch.tensor(attention_mask, dtype=torch.long),\n",
    "                # Each item is (1, 3, H, W) — see the ds[10] output above — so\n",
    "                # concatenate along dim 0 to get the (B, 3, H, W) batch the\n",
    "                # vision tower expects; torch.tensor() on a list of tensors fails.\n",
    "                'pixel_values': torch.cat([torch.as_tensor(pv) for pv in pixel_values], dim=0).float()}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the bare Qwen2.5 LLM (its parameter names are printed in the next cell)\n",
    "model = AutoModelForCausalLM.from_pretrained('/home/user/Downloads/Qwen2.5-0.5B-Instruct')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "model.embed_tokens.weight\n",
      "model.layers.0.self_attn.q_proj.weight\n",
      "model.layers.0.self_attn.q_proj.bias\n",
      "model.layers.0.self_attn.k_proj.weight\n",
      "model.layers.0.self_attn.k_proj.bias\n",
      "model.layers.0.self_attn.v_proj.weight\n",
      "model.layers.0.self_attn.v_proj.bias\n",
      "model.layers.0.self_attn.o_proj.weight\n",
      "model.layers.0.mlp.gate_proj.weight\n",
      "model.layers.0.mlp.up_proj.weight\n",
      "model.layers.0.mlp.down_proj.weight\n",
      "model.layers.0.input_layernorm.weight\n",
      "model.layers.0.post_attention_layernorm.weight\n",
      "model.layers.1.self_attn.q_proj.weight\n",
      "model.layers.1.self_attn.q_proj.bias\n",
      "model.layers.1.self_attn.k_proj.weight\n",
      "model.layers.1.self_attn.k_proj.bias\n",
      "model.layers.1.self_attn.v_proj.weight\n",
      "model.layers.1.self_attn.v_proj.bias\n",
      "model.layers.1.self_attn.o_proj.weight\n",
      "model.layers.1.mlp.gate_proj.weight\n",
      "model.layers.1.mlp.up_proj.weight\n",
      "model.layers.1.mlp.down_proj.weight\n",
      "model.layers.1.input_layernorm.weight\n",
      "model.layers.1.post_attention_layernorm.weight\n",
      "model.layers.2.self_attn.q_proj.weight\n",
      "model.layers.2.self_attn.q_proj.bias\n",
      "model.layers.2.self_attn.k_proj.weight\n",
      "model.layers.2.self_attn.k_proj.bias\n",
      "model.layers.2.self_attn.v_proj.weight\n",
      "model.layers.2.self_attn.v_proj.bias\n",
      "model.layers.2.self_attn.o_proj.weight\n",
      "model.layers.2.mlp.gate_proj.weight\n",
      "model.layers.2.mlp.up_proj.weight\n",
      "model.layers.2.mlp.down_proj.weight\n",
      "model.layers.2.input_layernorm.weight\n",
      "model.layers.2.post_attention_layernorm.weight\n",
      "model.layers.3.self_attn.q_proj.weight\n",
      "model.layers.3.self_attn.q_proj.bias\n",
      "model.layers.3.self_attn.k_proj.weight\n",
      "model.layers.3.self_attn.k_proj.bias\n",
      "model.layers.3.self_attn.v_proj.weight\n",
      "model.layers.3.self_attn.v_proj.bias\n",
      "model.layers.3.self_attn.o_proj.weight\n",
      "model.layers.3.mlp.gate_proj.weight\n",
      "model.layers.3.mlp.up_proj.weight\n",
      "model.layers.3.mlp.down_proj.weight\n",
      "model.layers.3.input_layernorm.weight\n",
      "model.layers.3.post_attention_layernorm.weight\n",
      "model.layers.4.self_attn.q_proj.weight\n",
      "model.layers.4.self_attn.q_proj.bias\n",
      "model.layers.4.self_attn.k_proj.weight\n",
      "model.layers.4.self_attn.k_proj.bias\n",
      "model.layers.4.self_attn.v_proj.weight\n",
      "model.layers.4.self_attn.v_proj.bias\n",
      "model.layers.4.self_attn.o_proj.weight\n",
      "model.layers.4.mlp.gate_proj.weight\n",
      "model.layers.4.mlp.up_proj.weight\n",
      "model.layers.4.mlp.down_proj.weight\n",
      "model.layers.4.input_layernorm.weight\n",
      "model.layers.4.post_attention_layernorm.weight\n",
      "model.layers.5.self_attn.q_proj.weight\n",
      "model.layers.5.self_attn.q_proj.bias\n",
      "model.layers.5.self_attn.k_proj.weight\n",
      "model.layers.5.self_attn.k_proj.bias\n",
      "model.layers.5.self_attn.v_proj.weight\n",
      "model.layers.5.self_attn.v_proj.bias\n",
      "model.layers.5.self_attn.o_proj.weight\n",
      "model.layers.5.mlp.gate_proj.weight\n",
      "model.layers.5.mlp.up_proj.weight\n",
      "model.layers.5.mlp.down_proj.weight\n",
      "model.layers.5.input_layernorm.weight\n",
      "model.layers.5.post_attention_layernorm.weight\n",
      "model.layers.6.self_attn.q_proj.weight\n",
      "model.layers.6.self_attn.q_proj.bias\n",
      "model.layers.6.self_attn.k_proj.weight\n",
      "model.layers.6.self_attn.k_proj.bias\n",
      "model.layers.6.self_attn.v_proj.weight\n",
      "model.layers.6.self_attn.v_proj.bias\n",
      "model.layers.6.self_attn.o_proj.weight\n",
      "model.layers.6.mlp.gate_proj.weight\n",
      "model.layers.6.mlp.up_proj.weight\n",
      "model.layers.6.mlp.down_proj.weight\n",
      "model.layers.6.input_layernorm.weight\n",
      "model.layers.6.post_attention_layernorm.weight\n",
      "model.layers.7.self_attn.q_proj.weight\n",
      "model.layers.7.self_attn.q_proj.bias\n",
      "model.layers.7.self_attn.k_proj.weight\n",
      "model.layers.7.self_attn.k_proj.bias\n",
      "model.layers.7.self_attn.v_proj.weight\n",
      "model.layers.7.self_attn.v_proj.bias\n",
      "model.layers.7.self_attn.o_proj.weight\n",
      "model.layers.7.mlp.gate_proj.weight\n",
      "model.layers.7.mlp.up_proj.weight\n",
      "model.layers.7.mlp.down_proj.weight\n",
      "model.layers.7.input_layernorm.weight\n",
      "model.layers.7.post_attention_layernorm.weight\n",
      "model.layers.8.self_attn.q_proj.weight\n",
      "model.layers.8.self_attn.q_proj.bias\n",
      "model.layers.8.self_attn.k_proj.weight\n",
      "model.layers.8.self_attn.k_proj.bias\n",
      "model.layers.8.self_attn.v_proj.weight\n",
      "model.layers.8.self_attn.v_proj.bias\n",
      "model.layers.8.self_attn.o_proj.weight\n",
      "model.layers.8.mlp.gate_proj.weight\n",
      "model.layers.8.mlp.up_proj.weight\n",
      "model.layers.8.mlp.down_proj.weight\n",
      "model.layers.8.input_layernorm.weight\n",
      "model.layers.8.post_attention_layernorm.weight\n",
      "model.layers.9.self_attn.q_proj.weight\n",
      "model.layers.9.self_attn.q_proj.bias\n",
      "model.layers.9.self_attn.k_proj.weight\n",
      "model.layers.9.self_attn.k_proj.bias\n",
      "model.layers.9.self_attn.v_proj.weight\n",
      "model.layers.9.self_attn.v_proj.bias\n",
      "model.layers.9.self_attn.o_proj.weight\n",
      "model.layers.9.mlp.gate_proj.weight\n",
      "model.layers.9.mlp.up_proj.weight\n",
      "model.layers.9.mlp.down_proj.weight\n",
      "model.layers.9.input_layernorm.weight\n",
      "model.layers.9.post_attention_layernorm.weight\n",
      "model.layers.10.self_attn.q_proj.weight\n",
      "model.layers.10.self_attn.q_proj.bias\n",
      "model.layers.10.self_attn.k_proj.weight\n",
      "model.layers.10.self_attn.k_proj.bias\n",
      "model.layers.10.self_attn.v_proj.weight\n",
      "model.layers.10.self_attn.v_proj.bias\n",
      "model.layers.10.self_attn.o_proj.weight\n",
      "model.layers.10.mlp.gate_proj.weight\n",
      "model.layers.10.mlp.up_proj.weight\n",
      "model.layers.10.mlp.down_proj.weight\n",
      "model.layers.10.input_layernorm.weight\n",
      "model.layers.10.post_attention_layernorm.weight\n",
      "model.layers.11.self_attn.q_proj.weight\n",
      "model.layers.11.self_attn.q_proj.bias\n",
      "model.layers.11.self_attn.k_proj.weight\n",
      "model.layers.11.self_attn.k_proj.bias\n",
      "model.layers.11.self_attn.v_proj.weight\n",
      "model.layers.11.self_attn.v_proj.bias\n",
      "model.layers.11.self_attn.o_proj.weight\n",
      "model.layers.11.mlp.gate_proj.weight\n",
      "model.layers.11.mlp.up_proj.weight\n",
      "model.layers.11.mlp.down_proj.weight\n",
      "model.layers.11.input_layernorm.weight\n",
      "model.layers.11.post_attention_layernorm.weight\n",
      "model.layers.12.self_attn.q_proj.weight\n",
      "model.layers.12.self_attn.q_proj.bias\n",
      "model.layers.12.self_attn.k_proj.weight\n",
      "model.layers.12.self_attn.k_proj.bias\n",
      "model.layers.12.self_attn.v_proj.weight\n",
      "model.layers.12.self_attn.v_proj.bias\n",
      "model.layers.12.self_attn.o_proj.weight\n",
      "model.layers.12.mlp.gate_proj.weight\n",
      "model.layers.12.mlp.up_proj.weight\n",
      "model.layers.12.mlp.down_proj.weight\n",
      "model.layers.12.input_layernorm.weight\n",
      "model.layers.12.post_attention_layernorm.weight\n",
      "model.layers.13.self_attn.q_proj.weight\n",
      "model.layers.13.self_attn.q_proj.bias\n",
      "model.layers.13.self_attn.k_proj.weight\n",
      "model.layers.13.self_attn.k_proj.bias\n",
      "model.layers.13.self_attn.v_proj.weight\n",
      "model.layers.13.self_attn.v_proj.bias\n",
      "model.layers.13.self_attn.o_proj.weight\n",
      "model.layers.13.mlp.gate_proj.weight\n",
      "model.layers.13.mlp.up_proj.weight\n",
      "model.layers.13.mlp.down_proj.weight\n",
      "model.layers.13.input_layernorm.weight\n",
      "model.layers.13.post_attention_layernorm.weight\n",
      "model.layers.14.self_attn.q_proj.weight\n",
      "model.layers.14.self_attn.q_proj.bias\n",
      "model.layers.14.self_attn.k_proj.weight\n",
      "model.layers.14.self_attn.k_proj.bias\n",
      "model.layers.14.self_attn.v_proj.weight\n",
      "model.layers.14.self_attn.v_proj.bias\n",
      "model.layers.14.self_attn.o_proj.weight\n",
      "model.layers.14.mlp.gate_proj.weight\n",
      "model.layers.14.mlp.up_proj.weight\n",
      "model.layers.14.mlp.down_proj.weight\n",
      "model.layers.14.input_layernorm.weight\n",
      "model.layers.14.post_attention_layernorm.weight\n",
      "model.layers.15.self_attn.q_proj.weight\n",
      "model.layers.15.self_attn.q_proj.bias\n",
      "model.layers.15.self_attn.k_proj.weight\n",
      "model.layers.15.self_attn.k_proj.bias\n",
      "model.layers.15.self_attn.v_proj.weight\n",
      "model.layers.15.self_attn.v_proj.bias\n",
      "model.layers.15.self_attn.o_proj.weight\n",
      "model.layers.15.mlp.gate_proj.weight\n",
      "model.layers.15.mlp.up_proj.weight\n",
      "model.layers.15.mlp.down_proj.weight\n",
      "model.layers.15.input_layernorm.weight\n",
      "model.layers.15.post_attention_layernorm.weight\n",
      "model.layers.16.self_attn.q_proj.weight\n",
      "model.layers.16.self_attn.q_proj.bias\n",
      "model.layers.16.self_attn.k_proj.weight\n",
      "model.layers.16.self_attn.k_proj.bias\n",
      "model.layers.16.self_attn.v_proj.weight\n",
      "model.layers.16.self_attn.v_proj.bias\n",
      "model.layers.16.self_attn.o_proj.weight\n",
      "model.layers.16.mlp.gate_proj.weight\n",
      "model.layers.16.mlp.up_proj.weight\n",
      "model.layers.16.mlp.down_proj.weight\n",
      "model.layers.16.input_layernorm.weight\n",
      "model.layers.16.post_attention_layernorm.weight\n",
      "model.layers.17.self_attn.q_proj.weight\n",
      "model.layers.17.self_attn.q_proj.bias\n",
      "model.layers.17.self_attn.k_proj.weight\n",
      "model.layers.17.self_attn.k_proj.bias\n",
      "model.layers.17.self_attn.v_proj.weight\n",
      "model.layers.17.self_attn.v_proj.bias\n",
      "model.layers.17.self_attn.o_proj.weight\n",
      "model.layers.17.mlp.gate_proj.weight\n",
      "model.layers.17.mlp.up_proj.weight\n",
      "model.layers.17.mlp.down_proj.weight\n",
      "model.layers.17.input_layernorm.weight\n",
      "model.layers.17.post_attention_layernorm.weight\n",
      "model.layers.18.self_attn.q_proj.weight\n",
      "model.layers.18.self_attn.q_proj.bias\n",
      "model.layers.18.self_attn.k_proj.weight\n",
      "model.layers.18.self_attn.k_proj.bias\n",
      "model.layers.18.self_attn.v_proj.weight\n",
      "model.layers.18.self_attn.v_proj.bias\n",
      "model.layers.18.self_attn.o_proj.weight\n",
      "model.layers.18.mlp.gate_proj.weight\n",
      "model.layers.18.mlp.up_proj.weight\n",
      "model.layers.18.mlp.down_proj.weight\n",
      "model.layers.18.input_layernorm.weight\n",
      "model.layers.18.post_attention_layernorm.weight\n",
      "model.layers.19.self_attn.q_proj.weight\n",
      "model.layers.19.self_attn.q_proj.bias\n",
      "model.layers.19.self_attn.k_proj.weight\n",
      "model.layers.19.self_attn.k_proj.bias\n",
      "model.layers.19.self_attn.v_proj.weight\n",
      "model.layers.19.self_attn.v_proj.bias\n",
      "model.layers.19.self_attn.o_proj.weight\n",
      "model.layers.19.mlp.gate_proj.weight\n",
      "model.layers.19.mlp.up_proj.weight\n",
      "model.layers.19.mlp.down_proj.weight\n",
      "model.layers.19.input_layernorm.weight\n",
      "model.layers.19.post_attention_layernorm.weight\n",
      "model.layers.20.self_attn.q_proj.weight\n",
      "model.layers.20.self_attn.q_proj.bias\n",
      "model.layers.20.self_attn.k_proj.weight\n",
      "model.layers.20.self_attn.k_proj.bias\n",
      "model.layers.20.self_attn.v_proj.weight\n",
      "model.layers.20.self_attn.v_proj.bias\n",
      "model.layers.20.self_attn.o_proj.weight\n",
      "model.layers.20.mlp.gate_proj.weight\n",
      "model.layers.20.mlp.up_proj.weight\n",
      "model.layers.20.mlp.down_proj.weight\n",
      "model.layers.20.input_layernorm.weight\n",
      "model.layers.20.post_attention_layernorm.weight\n",
      "model.layers.21.self_attn.q_proj.weight\n",
      "model.layers.21.self_attn.q_proj.bias\n",
      "model.layers.21.self_attn.k_proj.weight\n",
      "model.layers.21.self_attn.k_proj.bias\n",
      "model.layers.21.self_attn.v_proj.weight\n",
      "model.layers.21.self_attn.v_proj.bias\n",
      "model.layers.21.self_attn.o_proj.weight\n",
      "model.layers.21.mlp.gate_proj.weight\n",
      "model.layers.21.mlp.up_proj.weight\n",
      "model.layers.21.mlp.down_proj.weight\n",
      "model.layers.21.input_layernorm.weight\n",
      "model.layers.21.post_attention_layernorm.weight\n",
      "model.layers.22.self_attn.q_proj.weight\n",
      "model.layers.22.self_attn.q_proj.bias\n",
      "model.layers.22.self_attn.k_proj.weight\n",
      "model.layers.22.self_attn.k_proj.bias\n",
      "model.layers.22.self_attn.v_proj.weight\n",
      "model.layers.22.self_attn.v_proj.bias\n",
      "model.layers.22.self_attn.o_proj.weight\n",
      "model.layers.22.mlp.gate_proj.weight\n",
      "model.layers.22.mlp.up_proj.weight\n",
      "model.layers.22.mlp.down_proj.weight\n",
      "model.layers.22.input_layernorm.weight\n",
      "model.layers.22.post_attention_layernorm.weight\n",
      "model.layers.23.self_attn.q_proj.weight\n",
      "model.layers.23.self_attn.q_proj.bias\n",
      "model.layers.23.self_attn.k_proj.weight\n",
      "model.layers.23.self_attn.k_proj.bias\n",
      "model.layers.23.self_attn.v_proj.weight\n",
      "model.layers.23.self_attn.v_proj.bias\n",
      "model.layers.23.self_attn.o_proj.weight\n",
      "model.layers.23.mlp.gate_proj.weight\n",
      "model.layers.23.mlp.up_proj.weight\n",
      "model.layers.23.mlp.down_proj.weight\n",
      "model.layers.23.input_layernorm.weight\n",
      "model.layers.23.post_attention_layernorm.weight\n",
      "model.norm.weight\n"
     ]
    }
   ],
   "source": [
    "# List all trainable parameter names of the loaded model (values unused).\n",
    "for name, _ in model.named_parameters():\n",
    "    print(name)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "wyf",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
