{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "473ea9dc",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\VirtualProject\\Python37Env\\torch_py10\\lib\\site-packages\\torch\\cuda\\__init__.py:63: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.\n",
      "  import pynvml  # type: ignore[import]\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoModelForCausalLM, AutoTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "id": "4ad2a469",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Qwen3ForCausalLM(\n",
      "  (model): Qwen3Model(\n",
      "    (embed_tokens): Embedding(151936, 1024)\n",
      "    (layers): ModuleList(\n",
      "      (0-27): 28 x Qwen3DecoderLayer(\n",
      "        (self_attn): Qwen3Attention(\n",
      "          (q_proj): Linear(in_features=1024, out_features=2048, bias=False)\n",
      "          (k_proj): Linear(in_features=1024, out_features=1024, bias=False)\n",
      "          (v_proj): Linear(in_features=1024, out_features=1024, bias=False)\n",
      "          (o_proj): Linear(in_features=2048, out_features=1024, bias=False)\n",
      "          (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
      "          (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
      "        )\n",
      "        (mlp): Qwen3MLP(\n",
      "          (gate_proj): Linear(in_features=1024, out_features=3072, bias=False)\n",
      "          (up_proj): Linear(in_features=1024, out_features=3072, bias=False)\n",
      "          (down_proj): Linear(in_features=3072, out_features=1024, bias=False)\n",
      "          (act_fn): SiLU()\n",
      "        )\n",
      "        (input_layernorm): Qwen3RMSNorm((1024,), eps=1e-06)\n",
      "        (post_attention_layernorm): Qwen3RMSNorm((1024,), eps=1e-06)\n",
      "      )\n",
      "    )\n",
      "    (norm): Qwen3RMSNorm((1024,), eps=1e-06)\n",
      "    (rotary_emb): Qwen3RotaryEmbedding()\n",
      "  )\n",
      "  (lm_head): Linear(in_features=1024, out_features=151936, bias=False)\n",
      ")\n",
      "<|im_start|>user\n",
      "Give me a short introduction to large language model.<|im_end|>\n",
      "<|im_start|>assistant\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "\n",
    "\n",
    "'''\n",
    "huggingface-cli download --resume-download Qwen/Qwen3-0.6B --local-dir qwen3_0.6B\n",
    "\n",
    "'''\n",
    "model_folder = r'D:\\Models\\qwen3_0.6b'\n",
    "\n",
    "model_name = \"Qwen/Qwen3-0.6B\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_folder)\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_folder,\n",
    "    torch_dtype=\"auto\",\n",
    "    device_map=\"auto\"\n",
    ")\n",
    "print(model)\n",
    "\n",
    "prompt = \"Give me a short introduction to large language model.\"\n",
    "messages = [\n",
    "    {\"role\": \"user\", \"content\": prompt}\n",
    "]\n",
    "\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize=False,\n",
    "    add_generation_prompt=True,\n",
    "    enable_thinking=True # Switches between thinking and non-thinking modes. Default is True.\n",
    ")\n",
    "print(text)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "id": "6178b835",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "151643"
      ]
     },
     "execution_count": 88,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer.pad_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "a0f39f86",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'<|im_start|>user\\nGive me a short introduction to large language model.<|im_end|>\\n<|im_start|>assistant\\n'"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "04d91700",
   "metadata": {},
   "outputs": [],
   "source": [
    "text2='<|im_start|>user\\nGive me a short introduction to large language model.<|im_end|>\\n<|im_start|>'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "id": "05a64f4e",
   "metadata": {},
   "outputs": [],
   "source": [
    "text\n",
    "model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n",
    "generated_ids = model.generate(\n",
    "    **model_inputs,\n",
    "    max_new_tokens=32768\n",
    ")\n",
    "output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() \n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "03c0a82a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': tensor([[151644,    872,    198,  35127,    752,    264,   2805,  16800,    311,\n",
       "           3460,   4128,   1614,     13, 151645,    198, 151644, 151643, 151643],\n",
       "        [151644,    872,    198,  35127,    752,    264,   2805,  16800,    311,\n",
       "           3460,   4128,   1614,     13, 151645,    198, 151644,  77091,    198]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n",
       "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model_inputs = tokenizer([text2,text], return_tensors=\"pt\",padding=True,truncation=True).to(model.device)\n",
    "model_inputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3df0ab30",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5d1906a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "91eb1d86",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 18])"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model_inputs = tokenizer([text,text], return_tensors=\"pt\").to(model.device)\n",
    "model_inputs['input_ids'].size()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "649285b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "generated_ids = model.generate(\n",
    "    **model_inputs,\n",
    "    max_new_tokens=32768\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "682183e9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "334"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(generated_ids[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "6e5492c0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([151644,    872,    198,  35127,    752,    264,   2805,  16800,    311,\n",
       "          3460,   4128,   1614,     13, 151645,    198, 151644,  77091,    198])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model_inputs.input_ids[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "a3907581",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([151644,    872,    198,  35127,    752,    264,   2805,  16800,    311,\n",
       "          3460,   4128,   1614,     13, 151645,    198, 151644,  77091,    198,\n",
       "        151667,    198,  32313,     11,    279,   1196,    374,  10161,    369,\n",
       "           264,   2805,  16800,    311,    264,   3460,   4128,   1614,     13,\n",
       "          6771,    752,   1191,    553,  88646,   1128,    358,   1414,    911,\n",
       "           444,  10994,     82,     13,   2379,    525,   2409,   4128,   4119,\n",
       "            11,   1290,     30,   2055,    358,   1265,   6286,    429,    807,\n",
       "           525,  15235,   4119,   6188,    311,   3535,    323,   6923,   1467,\n",
       "           382,     40,   1184,    311,   2506,    432,  63594,     13,  10696,\n",
       "          1191,    448,    279,  31774,     25,    807,    646,   3535,    323,\n",
       "          6923,   1467,     11,    975,    389,   5257,   9079,   1075,  35764,\n",
       "          4755,     11,   4378,   9709,     11,   4992,     13,   5005,   6286,\n",
       "           862,  16928,     11,   1075,  11589,   5248,  15459,     11,   6351,\n",
       "          9079,     11,    323,   1246,    807,    525,   1483,    304,   2155,\n",
       "          5043,    382,  14190,     11,    279,   1196,   2578,    387,   3330,\n",
       "           369,    264,   3974,  23251,   2041,   3709,   2238,  10916,     13,\n",
       "           358,   1265,  11167,   1376,   3501,   2041,  42415,   1119,   3565,\n",
       "            13,   7281,     11,   1281,   2704,    311,    990,   4285,   4128,\n",
       "           323,   5648,    502,  70821,     13,   6771,    752,   1779,    421,\n",
       "          1052,    594,   4113,    770,    807,   2578,   1184,     13,   2379,\n",
       "          2578,    387,    264,   5458,    476,   4325,   5916,    311,   3960,\n",
       "           911,  15235,     11,    773,  31273,    374,   2989,    382,  80022,\n",
       "            11,   7196,   2924,   2494,    911,   1246,    807,    525,  16176,\n",
       "           389,   3460,  29425,    323,    525,   1661,    518,   8660,   2266,\n",
       "           323,  83789,     13,   2938,   4933,    862,  16928,   7797,   1101,\n",
       "          1467,   9471,     13,  97593,     11,    429,   1265,   3421,    279,\n",
       "         58786,     13,   6771,    752,   2182,    432,    678,   3786,    304,\n",
       "           264,  11657,    323,  38219,   1616,    624, 151668,    271,     32,\n",
       "          3460,   4128,   1614,    320,   4086,     44,      8,    374,    458,\n",
       "         15235,   1849,   6188,    311,   3535,    323,   6923,   1467,     11,\n",
       "          1741,    438,   4128,     11,   6540,     11,    323,   1008,   5810,\n",
       "          4128,   9079,     13,   4220,   4119,    646,  23643,    323,   5889,\n",
       "           311,   4755,     11,   3270,   9709,     11,    476,   7789,    448,\n",
       "         11521,   4378,     13,   2379,    525,  16176,    389,  12767,  29425,\n",
       "           311,   3960,  12624,    323,   7269,   5068,   3941,   5257,   8357,\n",
       "            11,   3259,   1105,   7988,   7375,    369,   9079,   1075,   2213,\n",
       "          9688,     11,   3491,  98146,     11,    323,   4128,   1824,     13,\n",
       "        151645])"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "generated_ids[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "d005185b",
   "metadata": {},
   "outputs": [],
   "source": [
    "output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "1af447bc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "316"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(output_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "9aaebd51",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[151667,\n",
       " 198,\n",
       " 32313,\n",
       " 11,\n",
       " 279,\n",
       " 1196,\n",
       " 374,\n",
       " 10161,\n",
       " 369,\n",
       " 264,\n",
       " 2805,\n",
       " 16800,\n",
       " 311,\n",
       " 264,\n",
       " 3460,\n",
       " 4128,\n",
       " 1614,\n",
       " 13,\n",
       " 6771,\n",
       " 752,\n",
       " 1191,\n",
       " 553,\n",
       " 88646,\n",
       " 1128,\n",
       " 358,\n",
       " 1414,\n",
       " 911,\n",
       " 444,\n",
       " 10994,\n",
       " 82,\n",
       " 13,\n",
       " 2379,\n",
       " 525,\n",
       " 2409,\n",
       " 4128,\n",
       " 4119,\n",
       " 11,\n",
       " 1290,\n",
       " 30,\n",
       " 2055,\n",
       " 358,\n",
       " 1265,\n",
       " 6286,\n",
       " 429,\n",
       " 807,\n",
       " 525,\n",
       " 15235,\n",
       " 4119,\n",
       " 6188,\n",
       " 311,\n",
       " 3535,\n",
       " 323,\n",
       " 6923,\n",
       " 1467,\n",
       " 382,\n",
       " 40,\n",
       " 1184,\n",
       " 311,\n",
       " 2506,\n",
       " 432,\n",
       " 63594,\n",
       " 13,\n",
       " 10696,\n",
       " 1191,\n",
       " 448,\n",
       " 279,\n",
       " 31774,\n",
       " 25,\n",
       " 807,\n",
       " 646,\n",
       " 3535,\n",
       " 323,\n",
       " 6923,\n",
       " 1467,\n",
       " 11,\n",
       " 975,\n",
       " 389,\n",
       " 5257,\n",
       " 9079,\n",
       " 1075,\n",
       " 35764,\n",
       " 4755,\n",
       " 11,\n",
       " 4378,\n",
       " 9709,\n",
       " 11,\n",
       " 4992,\n",
       " 13,\n",
       " 5005,\n",
       " 6286,\n",
       " 862,\n",
       " 16928,\n",
       " 11,\n",
       " 1075,\n",
       " 11589,\n",
       " 5248,\n",
       " 15459,\n",
       " 11,\n",
       " 6351,\n",
       " 9079,\n",
       " 11,\n",
       " 323,\n",
       " 1246,\n",
       " 807,\n",
       " 525,\n",
       " 1483,\n",
       " 304,\n",
       " 2155,\n",
       " 5043,\n",
       " 382,\n",
       " 14190,\n",
       " 11,\n",
       " 279,\n",
       " 1196,\n",
       " 2578,\n",
       " 387,\n",
       " 3330,\n",
       " 369,\n",
       " 264,\n",
       " 3974,\n",
       " 23251,\n",
       " 2041,\n",
       " 3709,\n",
       " 2238,\n",
       " 10916,\n",
       " 13,\n",
       " 358,\n",
       " 1265,\n",
       " 11167,\n",
       " 1376,\n",
       " 3501,\n",
       " 2041,\n",
       " 42415,\n",
       " 1119,\n",
       " 3565,\n",
       " 13,\n",
       " 7281,\n",
       " 11,\n",
       " 1281,\n",
       " 2704,\n",
       " 311,\n",
       " 990,\n",
       " 4285,\n",
       " 4128,\n",
       " 323,\n",
       " 5648,\n",
       " 502,\n",
       " 70821,\n",
       " 13,\n",
       " 6771,\n",
       " 752,\n",
       " 1779,\n",
       " 421,\n",
       " 1052,\n",
       " 594,\n",
       " 4113,\n",
       " 770,\n",
       " 807,\n",
       " 2578,\n",
       " 1184,\n",
       " 13,\n",
       " 2379,\n",
       " 2578,\n",
       " 387,\n",
       " 264,\n",
       " 5458,\n",
       " 476,\n",
       " 4325,\n",
       " 5916,\n",
       " 311,\n",
       " 3960,\n",
       " 911,\n",
       " 15235,\n",
       " 11,\n",
       " 773,\n",
       " 31273,\n",
       " 374,\n",
       " 2989,\n",
       " 382,\n",
       " 80022,\n",
       " 11,\n",
       " 7196,\n",
       " 2924,\n",
       " 2494,\n",
       " 911,\n",
       " 1246,\n",
       " 807,\n",
       " 525,\n",
       " 16176,\n",
       " 389,\n",
       " 3460,\n",
       " 29425,\n",
       " 323,\n",
       " 525,\n",
       " 1661,\n",
       " 518,\n",
       " 8660,\n",
       " 2266,\n",
       " 323,\n",
       " 83789,\n",
       " 13,\n",
       " 2938,\n",
       " 4933,\n",
       " 862,\n",
       " 16928,\n",
       " 7797,\n",
       " 1101,\n",
       " 1467,\n",
       " 9471,\n",
       " 13,\n",
       " 97593,\n",
       " 11,\n",
       " 429,\n",
       " 1265,\n",
       " 3421,\n",
       " 279,\n",
       " 58786,\n",
       " 13,\n",
       " 6771,\n",
       " 752,\n",
       " 2182,\n",
       " 432,\n",
       " 678,\n",
       " 3786,\n",
       " 304,\n",
       " 264,\n",
       " 11657,\n",
       " 323,\n",
       " 38219,\n",
       " 1616,\n",
       " 624,\n",
       " 151668,\n",
       " 271,\n",
       " 32,\n",
       " 3460,\n",
       " 4128,\n",
       " 1614,\n",
       " 320,\n",
       " 4086,\n",
       " 44,\n",
       " 8,\n",
       " 374,\n",
       " 458,\n",
       " 15235,\n",
       " 1849,\n",
       " 6188,\n",
       " 311,\n",
       " 3535,\n",
       " 323,\n",
       " 6923,\n",
       " 1467,\n",
       " 11,\n",
       " 1741,\n",
       " 438,\n",
       " 4128,\n",
       " 11,\n",
       " 6540,\n",
       " 11,\n",
       " 323,\n",
       " 1008,\n",
       " 5810,\n",
       " 4128,\n",
       " 9079,\n",
       " 13,\n",
       " 4220,\n",
       " 4119,\n",
       " 646,\n",
       " 23643,\n",
       " 323,\n",
       " 5889,\n",
       " 311,\n",
       " 4755,\n",
       " 11,\n",
       " 3270,\n",
       " 9709,\n",
       " 11,\n",
       " 476,\n",
       " 7789,\n",
       " 448,\n",
       " 11521,\n",
       " 4378,\n",
       " 13,\n",
       " 2379,\n",
       " 525,\n",
       " 16176,\n",
       " 389,\n",
       " 12767,\n",
       " 29425,\n",
       " 311,\n",
       " 3960,\n",
       " 12624,\n",
       " 323,\n",
       " 7269,\n",
       " 5068,\n",
       " 3941,\n",
       " 5257,\n",
       " 8357,\n",
       " 11,\n",
       " 3259,\n",
       " 1105,\n",
       " 7988,\n",
       " 7375,\n",
       " 369,\n",
       " 9079,\n",
       " 1075,\n",
       " 2213,\n",
       " 9688,\n",
       " 11,\n",
       " 3491,\n",
       " 98146,\n",
       " 11,\n",
       " 323,\n",
       " 4128,\n",
       " 1824,\n",
       " 13,\n",
       " 151645]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "output_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "3cf686c2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "84"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "output_ids[::-1].index(151668)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "0f1c9beb",
   "metadata": {},
   "outputs": [],
   "source": [
    "try:\n",
    "    # rindex finding 151668 (</think>)\n",
    "    index = len(output_ids) - output_ids[::-1].index(151668)\n",
    "except ValueError:\n",
    "    index = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "e7a6467f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "232"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "a4a1793d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "user\n",
      "Give me a short introduction to large language model.\n",
      "assistant\n",
      "<think>\n",
      "Okay, the user is asking for a short introduction to a large language model. Let me start by recalling what I know about LLMs. They are big language models, right? So I should mention that they are AI models designed to understand and generate text.\n",
      "\n",
      "I need to keep it concise. Maybe start with the basics: they can understand and generate text, work on various tasks like answering questions, writing articles, etc. Then mention their capabilities, like handling multiple languages, complex tasks, and how they are used in different fields.\n",
      "\n",
      "Wait, the user might be looking for a quick overview without getting too technical. I should highlight key points without diving into details. Also, make sure to use simple language and avoid jargon. Let me check if there's anything else they might need. They might be a student or someone starting to learn about AI, so clarity is important.\n",
      "\n",
      "Hmm, maybe include something about how they are trained on large datasets and are good at understanding context and nuances. That shows their capabilities beyond just text generation. Alright, that should cover the essentials. Let me put it all together in a friendly and informative way.\n",
      "</think>\n",
      "\n",
      "A large language model (LLM) is an AI system designed to understand and generate text, such as language, knowledge, and other natural language tasks. These models can analyze and respond to questions, write articles, or assist with creative writing. They are trained on vast datasets to learn patterns and improve performance across various applications, making them powerful tools for tasks like content creation, problem-solving, and language support.\n"
     ]
    }
   ],
   "source": [
    "print(tokenizer.decode(generated_ids[0],skip_special_tokens=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "b9af872d",
   "metadata": {},
   "outputs": [],
   "source": [
    "thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip(\"\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "cd92616c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<think>\n",
      "Okay, the user is asking for a short introduction to a large language model. Let me start by recalling what I know about LLMs. They are big language models, right? So I should mention that they are AI models designed to understand and generate text.\n",
      "\n",
      "I need to keep it concise. Maybe start with the basics: they can understand and generate text, work on various tasks like answering questions, writing articles, etc. Then mention their capabilities, like handling multiple languages, complex tasks, and how they are used in different fields.\n",
      "\n",
      "Wait, the user might be looking for a quick overview without getting too technical. I should highlight key points without diving into details. Also, make sure to use simple language and avoid jargon. Let me check if there's anything else they might need. They might be a student or someone starting to learn about AI, so clarity is important.\n",
      "\n",
      "Hmm, maybe include something about how they are trained on large datasets and are good at understanding context and nuances. That shows their capabilities beyond just text generation. Alright, that should cover the essentials. Let me put it all together in a friendly and informative way.\n",
      "</think>\n"
     ]
    }
   ],
   "source": [
    "print(thinking_content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "bbd886c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip(\"\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "c602a5a4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "A large language model (LLM) is an AI system designed to understand and generate text, such as language, knowledge, and other natural language tasks. These models can analyze and respond to questions, write articles, or assist with creative writing. They are trained on vast datasets to learn patterns and improve performance across various applications, making them powerful tools for tasks like content creation, problem-solving, and language support.\n"
     ]
    }
   ],
   "source": [
    "print(content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95a0b492",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'123'"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 下载数据"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9fcfe790",
   "metadata": {},
   "source": [
    "# 读取数据"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ae4364d0",
   "metadata": {},
   "source": [
    "from modelscope.msdatasets import MsDataset\n",
    "import json\n",
    "import random\n",
    "\n",
    "random.seed(42)\n",
    "\n",
    "'''\n",
    "国产下载模型\n",
    "\n",
    "'''\n",
    "\n",
    "ds = MsDataset.load('krisfu/delicate_medical_r1_data', subset_name='default', split='train')\n",
    "print(ds)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "7bd1a394",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset({\n",
      "    features: ['instruction', 'question', 'think', 'answer', 'metrics'],\n",
      "    num_rows: 2407\n",
      "})\n"
     ]
    }
   ],
   "source": [
    "from modelscope.msdatasets import MsDataset\n",
    "import json\n",
    "import random\n",
    "\n",
    "random.seed(42)\n",
    "\n",
    "'''\n",
    "国产下载模型\n",
    "\n",
    "'''\n",
    "\n",
    "ds = MsDataset.load('krisfu/delicate_medical_r1_data', subset_name='default', split='train')\n",
    "print(ds)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "4f9e4f41",
   "metadata": {},
   "outputs": [],
   "source": [
    "data_list = list(ds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "dac8011c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2407"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(data_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "cd063daf",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2166"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "random.shuffle(data_list)\n",
    "split_idx = int(len(data_list) * 0.9)\n",
    "split_idx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "id": "db8f7c8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data = data_list[:split_idx]\n",
    "val_data = data_list[split_idx:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "99273501",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'instruction': '阿巴卡韦与哪些药物合用时需要特别注意？',\n",
       " 'question': '医生，我正在服用阿巴卡韦，但我也需要治疗其他疾病，哪些药物我应该避免与阿巴卡韦一起使用呢？',\n",
       " 'think': '嗯，用户问的是阿巴卡韦与其他药物合用时需要注意哪些药物，以及应该避免哪些药物。首先，我需要回忆一下阿巴卡韦的药理作用和代谢途径。阿巴卡韦是一种抗病毒药物，主要用于治疗HIV感染。它的代谢途径可能涉及肝脏酶系统，特别是细胞色素P450酶，或者其他的代谢途径？\\n\\n接下来，用户提到的药物相互作用可能包括影响阿巴卡韦代谢的药物，或者阿巴卡韦影响其他药物代谢的情况。根据答案中的例子，比如丙磺舒、氟康唑、萘普生、吲哚美辛，这些药物似乎抑制了葡萄糖醛酸化作用，导致阿巴卡韦的代谢减慢，从而增加毒性风险。葡萄糖醛酸化是肝脏代谢的一种常见方式，所以这些药物可能通过抑制UGT酶（葡萄糖醛酸转移酶）来影响代谢。\\n\\n然后，肝微粒体酶诱导剂如利福平会加速阿巴卡韦的代谢，导致血药浓度降低，疗效下降。利福平是已知的CYP450酶诱导剂，可能促进阿巴卡韦的代谢，所以合用时可能需要调整剂量。\\n\\n克拉霉素可能减少阿巴卡韦的吸收，这可能是因为克拉霉素是P-glycoprotein的抑制剂，或者影响胃肠道的吸收机制？或者克拉霉素本身是CYP3A4的抑制剂，而阿巴卡韦是否通过CYP3A4代谢呢？需要确认这一点。或者可能克拉霉素与其他药物相互作用导致吸收减少？\\n\\n另外，用户可能有其他疾病需要治疗，所以需要考虑同时使用的药物是否属于上述类别。比如，如果患者需要使用非甾体抗炎药（如萘普生、吲哚美辛），或者抗真菌药氟康唑，或者利福平（如结核治疗），或者大环内酯类抗生素如克拉霉素，都需要特别注意。\\n\\n可能还需要考虑其他药物相互作用，比如与中枢神经系统抑制剂合用是否有叠加效应，或者与其他抗病毒药物的相互作用。但根据提供的答案，重点在代谢和吸收方面。\\n\\n另外，阿巴卡韦已知的严重过敏反应（HLA-B*5701等位基因相关），但用户的问题是关于药物相互作用，所以可能不需要涉及过敏问题，但可能需要提醒医生进行全面评估。\\n\\n总结思考步骤：首先确定阿巴卡韦的代谢途径，然后寻找哪些药物会影响这些途径（如诱导剂或抑制剂），接着列出具体的药物例子，并解释其影响（如增加毒性或降低疗效），最后建议咨询医生调整剂量或选择替代药物。需要确保提到的药物如丙磺舒（抑制肾排泄？或者葡萄糖醛酸化？），氟康唑作为CYP3A4抑制剂可能影响代谢，但答案中提到的是葡萄糖醛酸化抑制，可能需要确认这些药物的作用机制是否正确。\\n\\n可能存在的疑问：丙磺舒是否抑制葡萄糖醛酸化？丙磺舒通常抑制肾小管分泌，比如抑制尿酸排泄，可能对其他药物的排泄也有影响，但葡萄糖醛酸化是肝脏代谢，可能涉及不同的酶。可能需要确认这些药物是否确实抑制葡萄糖醛酸化，或者是否有其他机制导致骨髓毒性增加。例如，阿巴卡韦的代谢产物是否有毒性，当代谢被抑制时积累？\\n\\n例如，如果阿巴卡韦本身或其代谢产物有骨髓抑制作用，那么抑制代谢会增加毒性。因此，合用抑制葡萄糖醛酸化的药物（如丙磺舒、氟康唑等）会减少代谢，导致药物或其活性代谢物在体内蓄积，从而增加骨髓毒性风险。\\n\\n另外，利福平作为诱导剂，加速代谢，导致血药浓度降低，可能需要增加阿巴卡韦的剂量以维持疗效。而克拉霉素可能通过抑制代谢酶或影响肠道吸收（如与阿巴卡韦竞争转运体），导致吸收减少，因此需要调整剂量或给药时间。\\n\\n可能还需要考虑其他药物，比如与阿巴卡韦有相似副作用的药物，如其他骨髓抑制药物，但答案中主要提到的相互作用是药代动力学方面的。\\n\\n最后，确保回答覆盖了用户的问题，即需要避免的药物和注意事项，并建议咨询医生，以确保安全用药。\\n',\n",
       " 'answer': '您好，阿巴卡韦与一些药物合用时需要特别注意。例如，与丙磺舒、氟康唑、萘普生、吲哚美辛等抑制葡糖苷酸化作用的药物合用，可能会增加骨髓毒性。此外，肝微粒体酶诱导剂如利福平会降低阿巴卡韦的血药浓度，而克拉霉素则可能减少其吸收。为了您的安全，建议在使用这些药物时咨询医生，以确定是否需要调整剂量或选择其他治疗方案。请务必告知医生您正在服用的所有药物，以便他们为您提供最佳的医疗建议。',\n",
       " 'metrics': {'quality_f1': 1.0}}"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_data[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "75396637",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'c:\\\\Users\\\\caofei\\\\Desktop\\\\desktop link\\\\torch1\\\\hgface\\\\test'"
      ]
     },
     "execution_count": 72,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "os.getcwd()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "39f43586",
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('train.jsonl', 'w', encoding='utf-8') as f:\n",
    "    for item in train_data:\n",
    "        json.dump(item, f, ensure_ascii=False)\n",
    "        f.write('\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "90843ebf",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "with open('val.jsonl', 'w', encoding='utf-8') as f:\n",
    "    for item in val_data:\n",
    "        json.dump(item, f, ensure_ascii=False)\n",
    "        f.write('\\n')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "id": "66fe9f43",
   "metadata": {},
   "outputs": [],
   "source": [
    "PROMPT = \"你是一个医学专家，你需要根据用户的问题，给出带有思考的回答。\"\n",
    "MAX_LENGTH = 2048"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "aef8a191",
   "metadata": {},
   "outputs": [],
   "source": [
    "def dataset_jsonl_transfer(origin_path, new_path):\n",
    "    \"\"\"\n",
    "    将原始数据集转换为大模型微调所需数据格式的新数据集\n",
    "    \"\"\"\n",
    "    messages = []\n",
    "    # 读取旧的JSONL文件\n",
    "    with open(origin_path, \"r\",encoding='utf-8') as file:\n",
    "        for line in file:\n",
    "            # 解析每一行的json数据\n",
    "            data = json.loads(line)\n",
    "            input = data[\"question\"]\n",
    "            output = f'<think>{data[\"think\"]}</think> \\n {data[\"answer\"]}'\n",
    "            message = {\n",
    "                \"instruction\": PROMPT,\n",
    "                \"input\": f\"{input}\",\n",
    "                \"output\": output,\n",
    "            }\n",
    "            messages.append(message)\n",
    "    # 保存重构后的JSONL文件\n",
    "    with open(new_path, \"w\", encoding=\"utf-8\") as file:\n",
    "        for message in messages:\n",
    "            file.write(json.dumps(message, ensure_ascii=False) + \"\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "bbaf09f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "train_dataset_path = \"train.jsonl\"\n",
    "test_dataset_path = \"val.jsonl\"\n",
    "train_jsonl_new_path = \"train_format.jsonl\"\n",
    "test_jsonl_new_path = \"val_format.jsonl\"\n",
    "if not os.path.exists(train_jsonl_new_path):\n",
    "    dataset_jsonl_transfer(train_dataset_path, train_jsonl_new_path)\n",
    "if not os.path.exists(test_jsonl_new_path):\n",
    "    dataset_jsonl_transfer(test_dataset_path, test_jsonl_new_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "id": "2661a5ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from datasets import Dataset\n",
    "\n",
    "# Load the formatted training split and wrap it as a Hugging Face Dataset.\n",
    "train_df = pd.read_json(train_jsonl_new_path, lines=True)\n",
    "train_ds = Dataset.from_pandas(train_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "id": "e1930fc9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'instruction': '你是一个医学专家，你需要根据用户的问题，给出带有思考的回答。',\n",
       " 'input': '医生，我看到我的动脉血pH值在7.35到7.45之间，但PaCO₂和HCO₃⁻的值似乎有些异常。这正常吗？它能说明我的身体没有酸碱平衡问题吗？还是可能存在代偿性或混合性酸碱平衡紊乱？',\n",
       " 'output': '<think>嗯，用户的问题是关于动脉血pH值在正常范围内，但PaCO₂和HCO3⁻异常的情况，想知道是否可能存在代偿或混合性酸碱紊乱。首先，我需要回忆一下酸碱平衡的基本知识。pH正常但其他指标异常，这可能意味着代偿机制在起作用，或者有混合性问题。\\n\\n首先，pH值7.35-7.45是正常范围，但用户的情况是pH正常，但PaCO₂和HCO3⁻异常。这时候需要考虑代偿的情况。比如，代谢性酸中毒时，肾脏会代偿增加HCO3⁻，而呼吸性碱中毒时，PaCO₂降低，可能通过肾脏保留碳酸氢盐来代偿。但代偿通常不会完全回到正常，除非是部分代偿。\\n\\n接下来，如果两者同时异常，比如PaCO₂和HCO3⁻都升高或都降低，这可能提示混合性紊乱，比如代谢性碱中毒合并呼吸性酸中毒。或者相反方向的变化，比如代谢性酸中毒合并呼吸性碱中毒，这时候可能pH正常，但需要看具体数值。\\n\\n用户的问题可能需要分步骤分析：首先确定原发和继发的变化。比如，如果PaCO₂升高（原发性呼吸性酸中毒），HCO3⁻也会升高作为代偿，这时候pH可能在正常范围，但需要看数值是否符合代偿公式。比如，呼吸性酸中毒的代偿公式是HCO3⁻大约每增加1mmol/L，PaCO₂每增加10mmHg，pH可能接近正常。如果超过这个范围，可能有混合性问题。\\n\\n另外，混合性的情况需要看两者变化的方向是否一致或相反。比如，代谢性酸中毒和代谢性碱中毒同时存在，或者呼吸性和代谢性同时存在。这时候可能需要更详细的分析，比如使用三角法或罗马数字法来判断。\\n\\n用户可能担心自己虽然pH正常，但其他指标异常是否意味着问题。需要解释即使pH正常，代偿或混合的情况仍然存在，需要进一步检查。比如，如果PaCO₂和HCO3⁻同时升高，可能提示慢性呼吸性酸中毒合并代谢性碱中毒？或者需要具体数值来判断。\\n\\n可能需要提醒用户，单独的pH值无法完全判断，必须结合PaCO₂和HCO3⁻的变化方向和程度。同时，临床症状也很重要，比如是否有呼吸困难、代谢紊乱的表现等。\\n\\n总结来说，回答的结构应该是：先确认pH正常但其他指标异常的可能情况，解释代偿和混合性紊乱的定义，然后说明需要具体数值来分析，比如原发和代偿的关系，以及混合的情况如何判断，最后建议进一步检查和结合临床表现。\\n</think> \\n 您的动脉血pH值在7.35到7.45之间，确实处于正常范围，平均值是7.40。然而，pH值正常并不意味着没有酸碱平衡紊乱，尤其是当PaCO₂和HCO₃⁻的值出现异常时。在这种情况下，可能存在代偿性酸碱平衡紊乱或混合性酸碱平衡紊乱。代偿性酸碱平衡紊乱是指机体通过调节PaCO₂或HCO₃⁻来尝试恢复pH值至正常范围，而混合性酸碱平衡紊乱则是指同时存在两种或以上不同类型的酸碱平衡紊乱。为了准确判断，我们需要进一步分析您的PaCO₂和HCO₃⁻的具体数值，以及考虑其他可能的临床症状和体征。如果您的PaCO₂和HCO₃⁻值偏离正常范围，建议进行更详细的检查以确定具体的酸碱平衡状态。'}"
      ]
     },
     "execution_count": 97,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_ds[100]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "id": "4fac2cbe",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_func(example):\n",
    "    \"\"\"Tokenize one example into input_ids / attention_mask / labels for causal-LM fine-tuning.\n",
    "\n",
    "    The prompt part (system + user turns plus the assistant header) is masked\n",
    "    out of the loss with -100; only the assistant response and the final\n",
    "    end-of-turn token contribute to the loss.\n",
    "    \"\"\"\n",
    "    instruction = tokenizer(\n",
    "        f\"<|im_start|>system\\n{PROMPT}<|im_end|>\\n<|im_start|>user\\n{example['input']}<|im_end|>\\n<|im_start|>assistant\\n\",\n",
    "        add_special_tokens=False,\n",
    "    )\n",
    "    response = tokenizer(f\"{example['output']}\", add_special_tokens=False)\n",
    "    # End the sequence with the EOS token (<|im_end|> for this tokenizer),\n",
    "    # not pad_token_id (<|endoftext|>): supervising on the pad token would\n",
    "    # teach the model to close turns with a token that generation never\n",
    "    # stops on, so inference would not terminate at <|im_end|>.\n",
    "    input_ids = instruction[\"input_ids\"] + response[\"input_ids\"] + [tokenizer.eos_token_id]\n",
    "    attention_mask = instruction[\"attention_mask\"] + response[\"attention_mask\"] + [1]\n",
    "    # -100 masks the prompt tokens so the loss covers the response only.\n",
    "    labels = [-100] * len(instruction[\"input_ids\"]) + response[\"input_ids\"] + [tokenizer.eos_token_id]\n",
    "    if len(input_ids) > MAX_LENGTH:  # truncate overly long examples\n",
    "        input_ids = input_ids[:MAX_LENGTH]\n",
    "        attention_mask = attention_mask[:MAX_LENGTH]\n",
    "        labels = labels[:MAX_LENGTH]\n",
    "    return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"labels\": labels}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "83e016d4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Qwen2TokenizerFast(name_or_path='D:\\Models\\qwen3_0.6b', vocab_size=151643, model_max_length=131072, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'eos_token': '<|im_end|>', 'pad_token': '<|endoftext|>', 'additional_special_tokens': ['<|im_start|>', '<|im_end|>', '<|object_ref_start|>', '<|object_ref_end|>', '<|box_start|>', '<|box_end|>', '<|quad_start|>', '<|quad_end|>', '<|vision_start|>', '<|vision_end|>', '<|vision_pad|>', '<|image_pad|>', '<|video_pad|>']}, clean_up_tokenization_spaces=False, added_tokens_decoder={\n",
       "\t151643: AddedToken(\"<|endoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151644: AddedToken(\"<|im_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151645: AddedToken(\"<|im_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151646: AddedToken(\"<|object_ref_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151647: AddedToken(\"<|object_ref_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151648: AddedToken(\"<|box_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151649: AddedToken(\"<|box_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151650: AddedToken(\"<|quad_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151651: AddedToken(\"<|quad_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151652: AddedToken(\"<|vision_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151653: AddedToken(\"<|vision_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151654: AddedToken(\"<|vision_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151655: AddedToken(\"<|image_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151656: AddedToken(\"<|video_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t151657: AddedToken(\"<tool_call>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151658: AddedToken(\"</tool_call>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151659: AddedToken(\"<|fim_prefix|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151660: AddedToken(\"<|fim_middle|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151661: AddedToken(\"<|fim_suffix|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151662: AddedToken(\"<|fim_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151663: AddedToken(\"<|repo_name|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151664: AddedToken(\"<|file_sep|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151665: AddedToken(\"<tool_response>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151666: AddedToken(\"</tool_response>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151667: AddedToken(\"<think>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "\t151668: AddedToken(\"</think>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
       "}\n",
       ")"
      ]
     },
     "execution_count": 84,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "id": "af639ec1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['instruction', 'input', 'output']"
      ]
     },
     "execution_count": 95,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_ds.column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "id": "a9ac6de6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "77a8af5f059d4dd4b5bffb7a8d2fda0a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/2166 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "train_dataset = train_ds.map(process_func, remove_columns=train_ds.column_names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "id": "d413f4f6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "datasets.arrow_dataset.Dataset"
      ]
     },
     "execution_count": 98,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "type(train_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "id": "15a41d92",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import Trainer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb2320f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `dataset[\"train\"]` / `dataset[\"test\"]` are not defined in the\n",
    "# visible cells — this notebook builds `train_dataset` via train_ds.map(process_func).\n",
    "# The validation split (val_format.jsonl) still needs to be loaded and mapped with\n",
    "# process_func before it can be passed as eval_dataset.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,  # TODO confirm: training_args / data_collator / compute_metrics defined elsewhere\n",
    "    train_dataset=train_dataset,\n",
    "    eval_dataset=eval_dataset,  # TODO: build from val_format.jsonl via process_func\n",
    "    processing_class=tokenizer,\n",
    "    data_collator=data_collator,\n",
    "    compute_metrics=compute_metrics,\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db8c3f6d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c06026b",
   "metadata": {},
   "outputs": [],
   "source": [
    "i like apple     i1 like1 apple1\n",
    "\n",
    "\n",
    "i like apple\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d9a47d41",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cbf2b0e8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "31ae44b2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "id": "b13b54c2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ True, False, False, False, False],\n",
       "        [ True,  True, False, False, False],\n",
       "        [ True,  True,  True, False, False],\n",
       "        [ True,  True,  True,  True, False],\n",
       "        [ True,  True,  True,  True,  True]])"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "mask = (torch.triu(torch.ones(5, 5)) == 1).transpose(0, 1)\n",
    "mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "id": "7dc6bb47",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., -inf, -inf, -inf, -inf],\n",
       "        [0., 0., -inf, -inf, -inf],\n",
       "        [0., 0., 0., -inf, -inf],\n",
       "        [0., 0., 0., 0., -inf],\n",
       "        [0., 0., 0., 0., 0.]])"
      ]
     },
     "execution_count": 102,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "id": "33347f58",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "id": "afdf83e6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., -inf, -inf, -inf, -inf],\n",
       "        [0., 0., -inf, -inf, -inf],\n",
       "        [0., 0., 0., -inf, -inf],\n",
       "        [0., 0., 0., 0., -inf],\n",
       "        [0., 0., 0., 0., 0.]])"
      ]
     },
     "execution_count": 104,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "nn.Transformer.generate_square_subsequent_mask(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fcb832b2",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch_py10",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
