{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ff70471f062e52a7",
   "metadata": {},
   "source": [
    "## 模型蒸馏\n",
    "所谓模型蒸馏，就是让一个Student Model学习Teacher Model的输出，通常Student Model是一个小模型，Teacher Model是一个大型模型，Teacher Model的训练数据集通常比Student Model的数据集大很多。\n",
    "\n",
    "而所谓Teacher Model的输出分两种，根据输出可以分为两种蒸馏方式，一种是黑盒蒸馏，或者叫硬蒸馏，原理就是向Teacher Model提问，得到Teacher Model的答案，然后将问题和答案组织为训练数据，用来微调Student Model，这样Student Model就可以学习到Teacher Model的输出了。\n",
    "\n",
    "另外一种是白盒蒸馏，或者叫软蒸馏，还是向Teacher Model提问，在底层，Teacher Model会输出每个Token的概率分布，直接让Student Model来学习这个概率分布，达到向Teacher Model学习的效果。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c6407997eafc2e13",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T12:39:31.476222Z",
     "start_time": "2025-08-02T12:35:27.518433Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-1.7B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 21:36:45,056 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d64bb795b3fc42d0a50d0919525d3d90",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-1.7B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 21:36:47,512 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    }
   ],
   "source": [
    "# Load the Qwen3-1.7B model (the future teacher) and its tokenizer from ModelScope.\n",
    "# NOTE(review): no device/dtype is specified, so this loads on CPU in full precision.\n",
    "from modelscope import AutoModelForCausalLM\n",
    "from modelscope import AutoTokenizer\n",
    "\n",
    "model_name = \"Qwen/Qwen3-1.7B\"\n",
    "model = AutoModelForCausalLM.from_pretrained(model_name)\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ed7302679510d23b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:06:55.476706Z",
     "start_time": "2025-08-02T13:06:55.456247Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[   198, 151644,    872,    198, 105043, 100165, 151645,    198, 151644,\n",
      "          77091,    198, 151667,    271, 151668,    198, 104198,  26288,  71268,\n",
      "          99625,  40542, 103487,   9370,  15469, 110498,    198, 151645, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643, 151643,\n",
      "         151643, 151643]])\n"
     ]
    }
   ],
   "source": [
    "# Tiny supervised fine-tuning dataset: teach the model a custom identity.\n",
    "data = [\n",
    "    {\"Q\": \"你是谁\", \"A\": \"我是大都督周瑜的AI助手\"}\n",
    "]\n",
    "\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# Qwen chat template with an empty <think> block (thinking disabled).\n",
    "lora_prompt_template = \"\"\"\n",
    "<|im_start|>user\n",
    "{question}<|im_end|>\n",
    "<|im_start|>assistant\n",
    "<think>\n",
    "\n",
    "</think>\n",
    "{answer}\n",
    "<|im_end|>\"\"\"\n",
    "\n",
    "\n",
    "class ZhouyuDataset(Dataset):\n",
    "    \"\"\"Pre-tokenized Q/A pairs, padded/truncated to max_length.\n",
    "\n",
    "    Each item is a 1-D tensor of token ids. Because padding='max_length'\n",
    "    is used, samples contain trailing pad tokens; training code should\n",
    "    mask those positions out of the loss.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, data, max_length=128):\n",
    "        self.encodings = []\n",
    "        for qa in data:\n",
    "            text = lora_prompt_template.format(question=qa[\"Q\"], answer=qa[\"A\"])\n",
    "            encoded = tokenizer(\n",
    "                text,\n",
    "                max_length=max_length,\n",
    "                padding='max_length',\n",
    "                truncation=True,\n",
    "                return_tensors='pt'\n",
    "            )\n",
    "            # squeeze() drops the batch dimension added by return_tensors='pt'.\n",
    "            input_ids = encoded['input_ids'].squeeze()\n",
    "            self.encodings.append(input_ids)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.encodings)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.encodings[idx]\n",
    "\n",
    "\n",
    "dataset = ZhouyuDataset(data)\n",
    "data_loader = DataLoader(dataset, batch_size=1, shuffle=True)\n",
    "# Sanity check: show one batch of token ids.\n",
    "for batch in data_loader:\n",
    "    print(batch)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b950500a0b381984",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:06:59.795645Z",
     "start_time": "2025-08-02T13:06:59.668746Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "trainable params: 573,440 || all params: 1,721,148,416 || trainable%: 0.0333\n"
     ]
    }
   ],
   "source": [
    "from peft import LoraConfig, get_peft_model, TaskType\n",
    "\n",
    "# Low-rank (r=2) adapters on the attention projections only.\n",
    "lora_config = LoraConfig(\n",
    "    task_type=TaskType.CAUSAL_LM,\n",
    "    r=2,\n",
    "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\"]\n",
    ")\n",
    "\n",
    "# Apply LoRA: wraps the base model so only the adapter weights train.\n",
    "lora_model = get_peft_model(model, lora_config)\n",
    "lora_model.print_trainable_parameters()  # show trainable parameter count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "812c3da59958534a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:08:11.580027Z",
     "start_time": "2025-08-02T13:07:04.318140Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 13.7732\n",
      "Epoch 11, Loss: 0.6814\n",
      "Epoch 21, Loss: 0.2855\n",
      "Epoch 31, Loss: 0.1365\n",
      "Epoch 41, Loss: 0.1266\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "optimizer = torch.optim.Adam(lora_model.parameters(), lr=0.001)\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "lora_model.to(device)\n",
    "\n",
    "EPOCHS = 50\n",
    "\n",
    "for epoch in range(EPOCHS):\n",
    "\n",
    "    for input_ids in data_loader:\n",
    "        input_ids = input_ids.to(device)\n",
    "\n",
    "        # The dataset pads every sample to max_length, so mask the pad\n",
    "        # positions out of the loss (-100 is ignored by the CE loss) and\n",
    "        # tell the model not to attend to them. Using input_ids directly\n",
    "        # as labels would also train the model to predict pad tokens.\n",
    "        attention_mask = (input_ids != tokenizer.pad_token_id).long()\n",
    "        labels = input_ids.clone()\n",
    "        labels[input_ids == tokenizer.pad_token_id] = -100\n",
    "\n",
    "        outputs = lora_model(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            labels=labels\n",
    "        )\n",
    "\n",
    "        loss = outputs.loss\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        if epoch % 10 == 0:\n",
    "            print(f'Epoch {epoch + 1}, Loss: {loss:.4f}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "31caf20c603455f9",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:10:26.834917Z",
     "start_time": "2025-08-02T13:09:28.565485Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-1.7B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 21:37:49,946 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d23b6592782446209881debb482c6383",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<|im_start|>user\n",
      "你是谁<|im_end|>\n",
      "<|im_start|>assistant\n",
      "<think>\n",
      "\n",
      "</think>\n",
      "\n",
      "我是大都督周瑜的AI助手\n",
      "<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "# Sanity-check the fine-tuned model: it should answer with the custom identity.\n",
    "# NOTE(review): removed an unused second load of the 1.7B base model\n",
    "# (`base_model`) that was never referenced and only cost time and memory.\n",
    "prompt = \"你是谁\"\n",
    "messages = [\n",
    "    {\"role\": \"user\", \"content\": prompt}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize=False,\n",
    "    add_generation_prompt=True,\n",
    "    enable_thinking=False\n",
    ")\n",
    "model_inputs = tokenizer([text], return_tensors=\"pt\")\n",
    "model_inputs = model_inputs.to(device)\n",
    "generated_ids = lora_model.generate(**model_inputs, max_new_tokens=256)\n",
    "content = tokenizer.decode(generated_ids[0])\n",
    "print(content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3cfeba2245e24319",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:10:33.514550Z",
     "start_time": "2025-08-02T13:10:33.455636Z"
    }
   },
   "outputs": [],
   "source": [
    "# Save the LoRA adapter parameters learned during fine-tuning.\n",
    "lora_model.save_pretrained(\"./Zhouyu-Qwen3-1.7B\")"
   ]
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 模型蒸馏-黑盒蒸馏",
   "id": "6b61d271e60ad83e"
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "82e9431d0c343a30",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:11:01.002933Z",
     "start_time": "2025-08-02T13:10:45.513939Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-1.7B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 22:06:30,891 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "642f0b76fb3e41c8b73c44feb319f07d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-1.7B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 22:06:32,993 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    }
   ],
   "source": [
    "# Teacher: the Qwen3-1.7B base model with the LoRA adapter trained above.\n",
    "from peft import PeftModel\n",
    "\n",
    "from modelscope import AutoModelForCausalLM\n",
    "from modelscope import AutoTokenizer\n",
    "\n",
    "teacher_model_name = \"Qwen/Qwen3-1.7B\"\n",
    "teacher_base_model = AutoModelForCausalLM.from_pretrained(teacher_model_name)\n",
    "teacher_tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n",
    "# Attach the saved LoRA adapter to the freshly loaded base model.\n",
    "teacher_model = PeftModel.from_pretrained(teacher_base_model, \"./Zhouyu-Qwen3-1.7B\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e4fda820-fe36-4458-8eaa-558be2a65594",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<|im_start|>user\n",
      "你是谁<|im_end|>\n",
      "<|im_start|>assistant\n",
      "<think>\n",
      "\n",
      "</think>\n",
      "\n",
      "我是大都督周瑜的AI助手\n",
      "<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "# Verify the reconstructed teacher (base + adapter) still answers with\n",
    "# the custom identity before distilling from it.\n",
    "question = \"你是谁\"\n",
    "chat = [{\"role\": \"user\", \"content\": question}]\n",
    "rendered = teacher_tokenizer.apply_chat_template(\n",
    "    chat,\n",
    "    tokenize=False,\n",
    "    add_generation_prompt=True,\n",
    "    enable_thinking=False\n",
    ")\n",
    "inputs = teacher_tokenizer([rendered], return_tensors=\"pt\")\n",
    "output_ids = teacher_model.generate(**inputs, max_new_tokens=256)\n",
    "print(teacher_tokenizer.decode(output_ids[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8115e80e018fbe33",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:11:19.422865Z",
     "start_time": "2025-08-02T13:11:11.424933Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-0.6B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 22:06:47,125 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /root/.cache/modelscope/hub/models/Qwen/Qwen3-0.6B\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-08-02 22:06:48,677 - modelscope - INFO - Target directory already exists, skipping creation.\n"
     ]
    }
   ],
   "source": [
    "# Student: a smaller Qwen3-0.6B model that will learn from the teacher.\n",
    "from modelscope import AutoModelForCausalLM\n",
    "from modelscope import AutoTokenizer\n",
    "\n",
    "student_model_name = \"Qwen/Qwen3-0.6B\"\n",
    "student_model = AutoModelForCausalLM.from_pretrained(student_model_name)\n",
    "student_tokenizer = AutoTokenizer.from_pretrained(student_model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9a248feea6ee272b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-02T13:31:58.909563Z",
     "start_time": "2025-08-02T13:18:46.793691Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "<|im_start|>user\n",
      "你是谁<|im_end|>\n",
      "<|im_start|>assistant\n",
      "<think>\n",
      "\n",
      "</think>\n",
      "\n",
      "tensor([[   198, 151644,    872,    198, 105043, 100165, 151645,    198, 151644,\n",
      "          77091,    198, 151667,    271, 151668,    198]])\n"
     ]
    }
   ],
   "source": [
    "# Distillation prompts: the questions we will ask the teacher.\n",
    "distillation_data = [\"你是谁\"]\n",
    "\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# Chat template up to (and including) the empty <think> block;\n",
    "# the teacher generates the answer from this point.\n",
    "chat_prompt_template = \"\"\"\n",
    "<|im_start|>user\n",
    "{question}<|im_end|>\n",
    "<|im_start|>assistant\n",
    "<think>\n",
    "\n",
    "</think>\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "class ZhouyuDistillationDataset(Dataset):\n",
    "    \"\"\"Tokenized distillation prompts (questions only, no padding).\n",
    "\n",
    "    Tokenizes with the teacher's tokenizer; teacher and student are both\n",
    "    Qwen3 models and share the tokenizer vocabulary (verify this if\n",
    "    swapping in models from a different family).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, data, max_length=128):\n",
    "        self.encodings = []\n",
    "        for question in data:\n",
    "            text = chat_prompt_template.format(question=question)\n",
    "            encoded = teacher_tokenizer(\n",
    "                [text],\n",
    "                return_tensors='pt'\n",
    "            )\n",
    "            input_ids = encoded['input_ids'].squeeze()\n",
    "            self.encodings.append(input_ids)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.encodings)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.encodings[idx]\n",
    "\n",
    "\n",
    "distillation_dataset = ZhouyuDistillationDataset(distillation_data)\n",
    "distillation_dataloader = DataLoader(distillation_dataset, batch_size=1, shuffle=True)\n",
    "# Sanity check: show one batch of prompt token ids.\n",
    "for batch in distillation_dataloader:\n",
    "    print(batch)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "99a1b365e61f938f",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2025-08-02T13:22:54.117843Z"
    },
    "jupyter": {
     "is_executing": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 2.4878\n",
      "Epoch 11, Loss: 11.4409\n",
      "Epoch 21, Loss: 7.7254\n",
      "Epoch 31, Loss: 3.1194\n",
      "Epoch 41, Loss: 2.4446\n",
      "Epoch 51, Loss: 1.9051\n",
      "Epoch 61, Loss: 1.4154\n",
      "Epoch 71, Loss: 0.7498\n",
      "Epoch 81, Loss: 0.8846\n",
      "Epoch 91, Loss: 0.2167\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "optimizer = torch.optim.Adam(student_model.parameters(), lr=5e-5)\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "teacher_model.to(device)\n",
    "student_model.to(device)\n",
    "\n",
    "EPOCHS = 100\n",
    "\n",
    "# Black-box distillation: query the teacher ONCE up front, then fine-tune\n",
    "# the student on the teacher's full (prompt + answer) token sequences.\n",
    "# The original code called teacher_model.generate() inside every epoch,\n",
    "# which re-ran the expensive generation 100 times and (if sampling is\n",
    "# enabled in the generation config) gave the student a different target\n",
    "# each epoch.\n",
    "teacher_targets = []\n",
    "with torch.no_grad():\n",
    "    for input_ids in distillation_dataloader:\n",
    "        input_ids = input_ids.to(device)\n",
    "        teacher_targets.append(teacher_model.generate(input_ids, max_new_tokens=256))\n",
    "        # content = teacher_tokenizer.decode(teacher_targets[-1][0])\n",
    "        # print(content)\n",
    "\n",
    "for epoch in range(EPOCHS):\n",
    "    for teacher_outputs in teacher_targets:\n",
    "        # Standard causal-LM loss with the teacher's tokens as labels.\n",
    "        outputs = student_model(\n",
    "            teacher_outputs, labels=teacher_outputs\n",
    "        )\n",
    "\n",
    "        loss = outputs.loss\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        if epoch % 10 == 0:\n",
    "            print(f'Epoch {epoch + 1}, Loss: {loss:.4f}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "cbe0a8dd-5473-4407-afed-e6daec5ec9fb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<|im_start|>user\n",
      "你是谁<|im_end|>\n",
      "<|im_start|>assistant\n",
      "<think>\n",
      "\n",
      "</think>\n",
      "\n",
      "我是大都督周瑜的AI助手\n",
      "<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "# After distillation the student should reproduce the teacher's\n",
    "# custom identity answer.\n",
    "question = \"你是谁\"\n",
    "chat = [{\"role\": \"user\", \"content\": question}]\n",
    "rendered = student_tokenizer.apply_chat_template(\n",
    "    chat,\n",
    "    tokenize=False,\n",
    "    add_generation_prompt=True,\n",
    "    enable_thinking=False\n",
    ")\n",
    "inputs = student_tokenizer([rendered], return_tensors=\"pt\").to(device)\n",
    "output_ids = student_model.generate(**inputs, max_new_tokens=256)\n",
    "print(student_tokenizer.decode(output_ids[0]))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
