{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "558ed851-0244-4f62-9de8-47c143926e7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "import os\n",
    "\n",
    "# Source the AutoDL proxy script in a bash subshell, capture the proxy-related\n",
    "# environment variables it exports, and mirror them into this kernel's environment.\n",
    "# Passing an argument list (instead of shell=True around a quoted 'bash -c' string)\n",
    "# drops the redundant /bin/sh layer and its nested-quoting pitfalls.\n",
    "result = subprocess.run(\n",
    "    ['bash', '-c', 'source /etc/network_turbo && env | grep proxy'],\n",
    "    capture_output=True, text=True,\n",
    ")\n",
    "if result.returncode != 0:\n",
    "    # Surface a failed sourcing instead of silently leaving proxies unset.\n",
    "    print('proxy setup failed:', result.stderr.strip())\n",
    "for line in result.stdout.splitlines():\n",
    "    if '=' in line:\n",
    "        # maxsplit=1 keeps any '=' characters inside the value intact.\n",
    "        var, value = line.split('=', 1)\n",
    "        os.environ[var] = value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f068ae87-be68-466c-abf0-8d6634d3ff4d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "\n",
    "# Download (or reuse the cached copy of) the 2000-row medical instruction dataset;\n",
    "# cache_dir points at the AutoDL data disk to avoid filling the system volume.\n",
    "ds = load_dataset(\"Shekswess/medical_llama3_instruct_dataset_short\",cache_dir='/root/autodl-tmp/dataset/medical-qa')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "877dc55d-465a-439f-a78d-0892f82d45e4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['output', 'input', 'instruction', 'prompt'],\n",
       "        num_rows: 2000\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Show the DatasetDict structure: splits, features, row counts.\n",
    "ds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "11afb222-3b2c-4860-96af-eddd49162dc8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'output': 'Squamous cell carcinoma of the lung may be classified according to the WHO histological classification system into 4 main types: papillary, clear cell, small cell, and basaloid.',\n",
       " 'input': \"Can you provide an overview of the lung's squamous cell carcinoma?\",\n",
       " 'instruction': 'Answer the question truthfully, you are a medical professional.',\n",
       " 'prompt': \"<|start_header_id|>system<|end_header_id|> Answer the question truthfully, you are a medical professional.<|eot_id|><|start_header_id|>user<|end_header_id|> This is the question: Can you provide an overview of the lung's squamous cell carcinoma?<|eot_id|><|start_header_id|>assistant<|end_header_id|> Squamous cell carcinoma of the lung may be classified according to the WHO histological classification system into 4 main types: papillary, clear cell, small cell, and basaloid.<|eot_id|>\"}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Peek at one record: output/input/instruction plus a pre-built Llama-3-style prompt field.\n",
    "ds['train'][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "21ef7d44-4b7b-48f9-abd9-1e791bd700a8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集大小: 500\n",
      "验证集大小: 50\n",
      "测试集大小: 10\n"
     ]
    }
   ],
   "source": [
    "import random\n",
    "import json\n",
    "\n",
    "# Split sizes; together they use 560 of the 2000 shuffled rows.\n",
    "TRAIN_SIZE, VAL_SIZE, TEST_SIZE = 500, 50, 10\n",
    "\n",
    "# ds['train'] is the only split of the source dataset.\n",
    "data = ds['train']\n",
    "\n",
    "# Keep only the three fields needed for fine-tuning.\n",
    "formatted_data = [\n",
    "    {\n",
    "        \"instruction\": item[\"instruction\"],\n",
    "        \"input\": item[\"input\"],\n",
    "        \"output\": item[\"output\"],\n",
    "    }\n",
    "    for item in data\n",
    "]\n",
    "\n",
    "# Seed before shuffling so the train/val/test assignment is reproducible across\n",
    "# kernel restarts (the original unseeded shuffle produced a different split —\n",
    "# and different saved JSON files — on every run).\n",
    "random.seed(42)\n",
    "random.shuffle(formatted_data)\n",
    "\n",
    "# Contiguous, non-overlapping slices (the original sliced [0:500], [1200:1250]\n",
    "# and [1250:1260], leaving an unused gap between indices 500 and 1200).\n",
    "train_set = formatted_data[:TRAIN_SIZE]\n",
    "val_set = formatted_data[TRAIN_SIZE:TRAIN_SIZE + VAL_SIZE]\n",
    "test_set = formatted_data[TRAIN_SIZE + VAL_SIZE:TRAIN_SIZE + VAL_SIZE + TEST_SIZE]\n",
    "\n",
    "print(f\"训练集大小: {len(train_set)}\")\n",
    "print(f\"验证集大小: {len(val_set)}\")\n",
    "print(f\"测试集大小: {len(test_set)}\")\n",
    "\n",
    "# Persist the splits so later cells (and re-runs) load a fixed dataset.\n",
    "for name, split in ((\"train_set\", train_set), (\"val_set\", val_set), (\"test_set\", test_set)):\n",
    "    with open(f\"{name}.json\", \"w\") as f:\n",
    "        json.dump(split, f, indent=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "10733c18-0707-4f7d-9498-8c9d6e77fafc",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import Dataset\n",
    "import torch\n",
    "from modelscope import snapshot_download, AutoTokenizer\n",
    "from swanlab.integration.transformers import SwanLabCallback\n",
    "from qwen_vl_utils import process_vision_info\n",
    "from peft import LoraConfig, TaskType, get_peft_model, PeftModel,get_peft_model_state_dict\n",
    "from transformers import (\n",
    "    TrainingArguments,\n",
    "    Trainer,\n",
    "    DataCollatorForSeq2Seq,\n",
    "    Qwen2_5_VLForConditionalGeneration,\n",
    "    AutoProcessor,\n",
    ")\n",
    "import swanlab\n",
    "import json\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "7aefc298-3507-4064-969b-f980929d946c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f2524e48946d423dbfc9b181fe4ff694",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Generating train split: 0 examples [00:00, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3e09aa58450340e3bcce055270fcfa05",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/500 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "def process_func_batch(examples):\n",
    "    \"\"\"Tokenize a batch of instruction/input/output samples for Qwen2.5-VL SFT.\n",
    "\n",
    "    For each sample: build a system+user chat prompt, tokenize it with the\n",
    "    module-level `processor`, tokenize the target `output` with `tokenizer`,\n",
    "    concatenate them (prompt positions label-masked with -100), and\n",
    "    pad/truncate to MAX_LENGTH. Returns per-batch input_ids, attention_mask,\n",
    "    labels, pixel_values and image_grid_thw.\n",
    "    \"\"\"\n",
    "    MAX_LENGTH = 2048\n",
    "    input_ids, attention_mask, labels = [], [], []\n",
    "    pixel_values, image_grid_thw = [], []\n",
    "    \n",
    "    # Iterate over each sample in the batch\n",
    "    for instruction, input_text, output_text in zip(examples[\"instruction\"], examples[\"input\"], examples[\"output\"]):\n",
    "        # Build the chat messages: system carries the instruction, user the question\n",
    "        messages = [\n",
    "                    {\n",
    "                        \"role\": \"system\",\n",
    "                        \"content\": [\n",
    "                            {\"type\": \"text\", \"text\": instruction},\n",
    "                        ],\n",
    "                    },\n",
    "                    {\n",
    "                        \"role\": \"user\",\n",
    "                        \"content\": [\n",
    "                            {\"type\": \"text\", \"text\": input_text}\n",
    "                        ]\n",
    "                    }\n",
    "                ]\n",
    "        \n",
    "        # Extract any image/video inputs referenced by the messages\n",
    "        # (the messages built above are text-only)\n",
    "        image_inputs, video_inputs = process_vision_info(messages)\n",
    "        \n",
    "        # Normalize empty vision inputs to None so the processor skips them\n",
    "        if not image_inputs:\n",
    "            image_inputs = None\n",
    "        if not video_inputs:\n",
    "            video_inputs = None\n",
    "        \n",
    "        # Render the chat template to a prompt string; add_generation_prompt=True\n",
    "        # appends the assistant header so the response tokens follow it directly\n",
    "        text = processor.apply_chat_template(\n",
    "            messages, tokenize=False, add_generation_prompt=True\n",
    "        )\n",
    "        \n",
    "        # Tokenize the prompt (plus vision inputs, when present)\n",
    "        inputs = processor(\n",
    "            text=[text],\n",
    "            images=image_inputs,  # image inputs (None for this text-only dataset)\n",
    "            videos=video_inputs,  # video inputs (None for this text-only dataset)\n",
    "            padding=False,  # pad later, once the response is appended\n",
    "            return_tensors=\"pt\",\n",
    "        )\n",
    "\n",
    "        # Convert tensors to plain Python lists for easy concatenation\n",
    "        inputs_dict = {key: value.tolist() for key, value in inputs.items()}\n",
    "        instruction_input_ids = inputs_dict['input_ids'][0]\n",
    "        instruction_attention_mask = inputs_dict['attention_mask'][0]\n",
    "\n",
    "        # Tokenize the target response without special tokens\n",
    "        response = tokenizer(f\"{output_text}\", add_special_tokens=False)\n",
    "        response_input_ids = response['input_ids']\n",
    "        response_attention_mask = response['attention_mask']\n",
    "\n",
    "        # Room left for the response\n",
    "        remaining_length = MAX_LENGTH - len(instruction_input_ids) - 1  # reserve one slot for the trailing pad token\n",
    "\n",
    "        if remaining_length < 0:\n",
    "            # The prompt alone already exceeds MAX_LENGTH: truncate the prompt itself\n",
    "            truncation_length = len(instruction_input_ids) + remaining_length\n",
    "            instruction_input_ids = instruction_input_ids[:truncation_length]\n",
    "            instruction_attention_mask = instruction_attention_mask[:truncation_length]\n",
    "            remaining_length = 0\n",
    "\n",
    "        # Truncate the response to the remaining space, then append one pad token.\n",
    "        # NOTE(review): the appended terminator is pad_token_id with attention 1 and\n",
    "        # an unmasked label; an EOS token is the more usual choice — confirm intended.\n",
    "        current_input_ids = (\n",
    "            instruction_input_ids + response_input_ids[:remaining_length] + [tokenizer.pad_token_id]\n",
    "        )\n",
    "\n",
    "        current_attention_mask = (\n",
    "            instruction_attention_mask + response_attention_mask[:remaining_length] + [1]\n",
    "        )\n",
    "        # Labels: prompt positions are -100 so the loss covers only the response\n",
    "        current_labels = (\n",
    "            [-100] * len(instruction_input_ids) +\n",
    "            response_input_ids[:remaining_length] +\n",
    "            [tokenizer.pad_token_id]\n",
    "        )\n",
    "        \n",
    "        # Right-pad to MAX_LENGTH; padded positions get attention 0 and label -100\n",
    "        if len(current_input_ids) < MAX_LENGTH:\n",
    "            current_input_ids += [tokenizer.pad_token_id] * (MAX_LENGTH - len(current_input_ids))\n",
    "            current_attention_mask += [0] * (MAX_LENGTH - len(current_attention_mask))\n",
    "            current_labels += [-100] * (MAX_LENGTH - len(current_labels))\n",
    "\n",
    "        # Accumulate this sample\n",
    "        input_ids.append(current_input_ids)\n",
    "        attention_mask.append(current_attention_mask)\n",
    "        labels.append(current_labels)\n",
    "        \n",
    "        # Vision features; None placeholders for text-only samples\n",
    "        if image_inputs is not None:\n",
    "            pixel_values.append(inputs_dict['pixel_values'][0])\n",
    "            image_grid_thw.append(torch.tensor(inputs_dict['image_grid_thw'][0]).squeeze(0))\n",
    "        else:\n",
    "            pixel_values.append(None)\n",
    "            image_grid_thw.append(None)\n",
    "\n",
    "    # Return the batch. NOTE(review): Dataset.map stores these back as lists,\n",
    "    # so the torch.tensor conversions here are transient.\n",
    "    return {\n",
    "        \"input_ids\": torch.tensor(input_ids),  # convert to torch.Tensor\n",
    "        \"attention_mask\": torch.tensor(attention_mask),  # convert to torch.Tensor\n",
    "        \"labels\": torch.tensor(labels),  # convert to torch.Tensor\n",
    "        \"pixel_values\": torch.tensor(pixel_values) if any(pixel_values) else pixel_values,  # tensor when any sample has images, else list of None\n",
    "        \"image_grid_thw\": torch.stack(image_grid_thw) if any(image_grid_thw) else image_grid_thw  # stacked tensor when present, else list of None\n",
    "    }\n",
    "\n",
    "# Load tokenizer/processor and build the tokenized training dataset\n",
    "from datasets import Dataset\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct/\", use_fast=True)\n",
    "# Pixel-count bounds forwarded to the processor (presumably in units of 28x28 patches — confirm)\n",
    "min_pixels = 256 * 28 * 28\n",
    "max_pixels = 1280 * 28 * 28\n",
    "processor = AutoProcessor.from_pretrained(\"/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct\", min_pixels=min_pixels, max_pixels=max_pixels, use_fast=True)\n",
    "train_ds = Dataset.from_json(\"train_set.json\")\n",
    "train_dataset = train_ds.map(process_func_batch, batched=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "f81a83d4-8315-4a75-9b09-414fceced981",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['instruction', 'input', 'output', 'input_ids', 'attention_mask', 'labels', 'pixel_values', 'image_grid_thw'],\n",
       "    num_rows: 500\n",
       "})"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Confirm the mapped dataset kept the original columns plus the tokenized ones (500 rows).\n",
    "train_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "71c6082b-027e-4a28-b0ec-498c169691bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect pixel_values of the first sample (no output here — None for text-only data).\n",
    "train_dataset[0]['pixel_values']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "af31d47f-f94e-4d64-95a1-fe554e125f7c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
