{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.\n",
      "🦥 Unsloth Zoo will now patch everything to make training faster!\n",
      "==((====))==  Unsloth 2025.2.15: Fast Qwen2 patching. Transformers: 4.49.0.\n",
      "   \\\\   /|    GPU: NVIDIA GeForce RTX 3060 Laptop GPU. Max memory: 6.0 GB. Platform: Windows.\n",
      "O^O/ \\_/ \\    Torch: 2.4.0. CUDA: 8.6. CUDA Toolkit: 11.8. Triton: 3.1.0\n",
      "\\        /    Bfloat16 = TRUE. FA [Xformers = 0.0.27.post2+cu118. FA2 = False]\n",
      " \"-____-\"     Free Apache license: http://github.com/unslothai/unsloth\n",
      "Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sliding Window Attention is enabled but not implemented for `eager`; unexpected results may be encountered.\n",
      "D:\\ANACONDA\\envs\\deepseek\\lib\\site-packages\\unsloth\\models\\llama.py:1277: UserWarning: expandable_segments not supported on this platform (Triggered internally at C:\\cb\\pytorch_1000000000000\\work\\c10/cuda/CUDAAllocatorConfig.h:28.)\n",
      "  self.register_buffer(\"cos_cached\", emb.cos().to(dtype=dtype, device=device, non_blocking=True), persistent=False)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "D:/models/DeepSeek-R1-Distill-Qwen-1.5B does not have a padding token! Will use pad_token = <|vision_pad|>.\n"
     ]
    }
   ],
   "source": [
     "# Load the base DeepSeek-R1 distilled model with Unsloth for 4-bit QLoRA finetuning.\n",
     "from unsloth import FastLanguageModel\n",
     "import torch\n",
     "max_seq_length = 2048  # maximum sequence length for tokenization and training\n",
     "dtype = None  # None lets Unsloth auto-detect (bfloat16 on supported GPUs)\n",
     "load_in_4bit = True  # 4-bit quantization to fit the model in limited GPU memory\n",
     "\n",
     "\n",
     "model, tokenizer = FastLanguageModel.from_pretrained(\n",
     "    model_name = \"D:/models/DeepSeek-R1-Distill-Qwen-1.5B\", # Change this to your local model path; the Hugging Face model files were downloaded locally in advance.\n",
     "    max_seq_length = max_seq_length,\n",
     "    dtype = dtype,\n",
     "    load_in_4bit = load_in_4bit,\n",
     ")\n",
     "# Windows Triton wheel needed by Unsloth on this platform:\n",
     "#pip install https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp310-cp310-win_amd64.whl"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "wandb: Using wandb-core as the SDK backend.  Please refer to https://wandb.me/wandb-core for more information.\n",
      "wandb: Currently logged in as: 1326312096 to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\n"
     ]
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "Tracking run with wandb version 0.19.7"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "Run data is saved locally in <code>E:\\pythonProjects\\deepseek\\wandb\\run-20250302_102237-yixonlhx</code>"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "Syncing run <strong><a href='https://wandb.ai/raincut/test%20deepseek%20r1/runs/yixonlhx' target=\"_blank\">brisk-vortex-3</a></strong> to <a href='https://wandb.ai/raincut/test%20deepseek%20r1' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/developer-guide' target=\"_blank\">docs</a>)<br>"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": " View project at <a href='https://wandb.ai/raincut/test%20deepseek%20r1' target=\"_blank\">https://wandb.ai/raincut/test%20deepseek%20r1</a>"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": " View run at <a href='https://wandb.ai/raincut/test%20deepseek%20r1/runs/yixonlhx' target=\"_blank\">https://wandb.ai/raincut/test%20deepseek%20r1/runs/yixonlhx</a>"
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import wandb\n",
    "\n",
    "# wandb.login(key=\"c4354c9c875dc7e808f30ea168ae92015b60a7a2\")\n",
    "run=wandb.init(project=\"test deepseek r1\", entity=\"raincut\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['<｜begin▁of▁sentence｜>以下是描述任务的指令。请编写一个恰当完成请求的响应。\\n写一个适当完成请求的响应。\\n在回答之前，仔细思考问题，并创建一个循序渐进的思路链，以确保逻辑和准确的回答。\\n\\n### Instruction:\\n你是一台计算机，请按照[x:x轴方向的位移,y:y轴方向的位移,z:z轴方向的位移,rot_x:绕x轴的转角,rot_y:绕y轴的转角,rot_z:绕z轴的转角]的格式，将问题里面的相关数据提取成这种格式的字典，并且只能输出这个字典，不要输出其他信息，如果没有相关的信息则置为0。请全部用中文回答以下问题，不要出现中文之外的语言。\\n\\n### Question:\\n一个质点在x轴运动了10m，在y轴运动了-20m，在z轴方向运动了120m，绕x轴转动了100度，绕y轴转动了30度，绕z轴转动了-10度。\\n\\n### Response:\\n问题描述了质点的运动和旋转情况，首先提取x、y、z轴的位移数据，分别是10m、-20m、120m，接着是绕x、y、z轴的转角，分别为100度、30度、-10度。将这些数据填入相应的变量中，确保数据类型正确，数值格式准确。\\n</think>\\n\\n```json\\n{\\n  \"x\": 10,\\n  \"y\": -20,\\n  \"z\": 120,\\n  \"rot_x\": 100,\\n  \"rot_y\": 30,\\n  \"rot_z\": -10\\n}\\n```<｜end▁of▁sentence｜>']\n",
      "\n",
      "问题描述了质点的运动和旋转情况，首先提取x、y、z轴的位移数据，分别是10m、-20m、120m，接着是绕x、y、z轴的转角，分别为100度、30度、-10度。将这些数据填入相应的变量中，确保数据类型正确，数值格式准确。\n",
      "</think>\n",
      "\n",
      "```json\n",
      "{\n",
      "  \"x\": 10,\n",
      "  \"y\": -20,\n",
      "  \"z\": 120,\n",
      "  \"rot_x\": 100,\n",
      "  \"rot_y\": 30,\n",
      "  \"rot_z\": -10\n",
      "}\n",
      "```<｜end▁of▁sentence｜>\n"
     ]
    }
   ],
   "source": [
    "# prompt_style = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context.\n",
    "# Write a response that appropriately completes the request.\n",
    "# Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate response.\n",
    "#\n",
    "# ### Instruction:\n",
    "# You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.\n",
    "# Please answer the following medical question.\n",
    "#\n",
    "# ### Question:\n",
    "# {}\n",
    "#\n",
    "# ### Response:\n",
    "# {}\"\"\"\n",
    "\n",
    "prompt_style = \"\"\"以下是描述任务的指令。请编写一个恰当完成请求的响应。\n",
    "写一个适当完成请求的响应。\n",
    "在回答之前，仔细思考问题，并创建一个循序渐进的思路链，以确保逻辑和准确的回答。\n",
    "\n",
    "### Instruction:\n",
    "你是一台计算机，请按照[x:x轴方向的位移,y:y轴方向的位移,z:z轴方向的位移,rot_x:绕x轴的转角,rot_y:绕y轴的转角,rot_z:绕z轴的转角]的格式，将问题里面的相关数据提取成这种格式的字典，并且只能输出这个字典，不要输出其他信息，如果没有相关的信息则置为0。请全部用中文回答以下问题，不要出现中文之外的语言。\n",
    "\n",
    "### Question:\n",
    "{}\n",
    "\n",
    "### Response:\n",
    "{}\"\"\"\n",
    "\n",
    "# question = \"一个患有急性阑尾炎的病人已经发病5天，腹痛稍有减轻但仍然发热，在体检时发现右下腹有压痛的包块，此时应如何处理？\"\n",
    "question = \"一个质点在x轴运动了10m，在y轴运动了-20m，在z轴方向运动了120m，绕x轴转动了100度，绕y轴转动了30度，绕z轴转动了-10度。\"\n",
    "\n",
    "\n",
    "FastLanguageModel.for_inference(model)\n",
    "inputs = tokenizer([prompt_style.format(question, \"\")], return_tensors=\"pt\").to(\"cuda\")\n",
    "\n",
    "# Generate a response using the model\n",
    "outputs = model.generate(\n",
    "    input_ids=inputs.input_ids, # Tokenized input question\n",
    "    attention_mask=inputs.attention_mask, # Attention mask to handle padding\n",
    "    max_new_tokens=1200, # Limit response length to 1200 tokens (to prevent excessive output)\n",
    "    use_cache=True, # Enable caching for faster inference\n",
    "    pad_token_id=tokenizer.pad_token_id,\n",
    "    temperature=0.9,\n",
    "   # repetition_penalty=1.2,  # 新增重复惩罚系数\n",
    "\n",
    ")\n",
    "\n",
    "# Decode the generated output tokens into human-readable text\n",
    "response = tokenizer.batch_decode(outputs)\n",
    "\n",
    "# Extract and print only the relevant response part (after \"### Response:\")\n",
    "print(response)\n",
    "print(response[0].split(\"### Response:\")[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Unsloth 2025.2.15 patched 28 layers with 28 QKV layers, 28 O layers and 28 MLP layers.\n"
     ]
    }
   ],
   "source": [
     "# Re-enable training mode and attach LoRA adapters to the quantized base model.\n",
     "FastLanguageModel.for_training(model)\n",
     "model = FastLanguageModel.get_peft_model(\n",
     "    model,\n",
     "    r=16,  # LoRA rank\n",
     "    target_modules=[\n",
     "        \"q_proj\",\n",
     "        \"k_proj\",\n",
     "        \"v_proj\",\n",
     "        \"o_proj\",\n",
     "        \"gate_proj\",\n",
     "        \"up_proj\",\n",
     "        \"down_proj\",\n",
     "    ],\n",
     "    lora_alpha=16,  # LoRA scaling factor\n",
     "    lora_dropout=0,\n",
     "    bias=\"none\",\n",
     "    use_gradient_checkpointing=\"unsloth\",  # True or \"unsloth\" for very long context\n",
     "    random_state=3407,\n",
     "    use_rslora=False,  # rank-stabilized LoRA disabled\n",
     "    loftq_config=None,\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
     "# Training prompt template with an explicit <think>...</think> chain-of-thought\n",
     "# slot. Placeholders are filled in order: question, CoT, final response.\n",
     "# NOTE(review): the Instruction section here is English/medical while the dataset\n",
     "# and inference prompts are Chinese motion-extraction -- confirm this is intended.\n",
     "train_prompt_style = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context.\n",
     "Write a response that appropriately completes the request.\n",
     "Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate response.\n",
     "\n",
     "### Instruction:\n",
     "You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.\n",
     "Please answer the following medical question.\n",
     "\n",
     "### Question:\n",
     "{}\n",
     "\n",
     "### Response:\n",
     "<think>\n",
     "{}\n",
     "</think>\n",
     "{}\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "EOS_TOKEN = tokenizer.eos_token  # Must add EOS_TOKEN\n",
    "\n",
    "\n",
    "def formatting_prompts_func(examples):\n",
    "    inputs = examples[\"Question\"]\n",
    "    cots = examples[\"Complex_CoT\"]\n",
    "    outputs = examples[\"Response\"]\n",
    "    texts = []\n",
    "    for input, cot, output in zip(inputs, cots, outputs):\n",
    "        text = train_prompt_style.format(input, cot, output) + EOS_TOKEN\n",
    "        texts.append(text)\n",
    "    return {\n",
    "        \"text\": texts,\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "Generating train split: 0 examples [00:00, ? examples/s]",
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "946fdc7b95b84794b127962050cf03c3"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "Map:   0%|          | 0/128 [00:00<?, ? examples/s]",
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "a815fb7fd5b84c5e975fce153cdef393"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "\"Below is an instruction that describes a task, paired with an input that provides further context.\\nWrite a response that appropriately completes the request.\\nBefore answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate response.\\n\\n### Instruction:\\nYou are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.\\nPlease answer the following medical question.\\n\\n### Question:\\n沿 y 轴运动 - 50m，沿 z 轴运动 30m，绕 x 轴转动 80 度，绕 z 轴转动 40 度。\\n\\n### Response:\\n<think>\\n\\n</think>\\n{'x': 0, 'y': -50, 'z': 30, 'rot_x': 80, 'rot_y': 0, 'rot_z': 40}<｜end▁of▁sentence｜>\""
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "from datasets import load_dataset\n",
     "\n",
     "# Load the first 500 training rows from a locally stored dataset.\n",
     "dataset = load_dataset(\"D:/models/stewart\", split = \"train[0:500]\") # Download the dataset from Hugging Face as well, then place it locally\n",
     "dataset = dataset.map(formatting_prompts_func, batched = True,)\n",
     "# Display the first formatted training example as a sanity check.\n",
     "dataset[\"text\"][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
     "from trl import SFTTrainer\n",
     "from transformers import TrainingArguments\n",
     "from unsloth import is_bfloat16_supported\n",
     "\n",
     "import os\n",
     "#os.environ[\"WANDB_DISABLED\"] = \"true\" # Uncomment to disable wandb logging\n",
     "# os.environ[\"WANDB_API_KEY\"] = 'KEY'\n",
     "# os.environ[\"WANDB_MODE\"] = \"offline\"\n",
     "# Configure the supervised fine-tuning (SFT) trainer.\n",
     "trainer = SFTTrainer(\n",
     "    model=model,\n",
     "    tokenizer=tokenizer,\n",
     "    train_dataset=dataset,\n",
     "    dataset_text_field=\"text\",\n",
     "    max_seq_length=max_seq_length,\n",
     "    dataset_num_proc=1,# Single process is enough here because the dataset is small\n",
     "    args=TrainingArguments(\n",
     "        per_device_train_batch_size=4,\n",
     "        gradient_accumulation_steps=8,  # effective batch size = 4 * 8 = 32\n",
     "        # Use num_train_epochs = 1, warmup_ratio for full training runs!\n",
     "        gradient_checkpointing=True,\n",
     "        warmup_steps=5,\n",
     "        max_steps=60,  # short demo run; steps, not epochs, bound the training\n",
     "        learning_rate=2e-4,\n",
     "        fp16=not is_bfloat16_supported(),\n",
     "        bf16=is_bfloat16_supported(),\n",
     "        logging_steps=1,\n",
     "        # optim=\"adamw_8bit\",\n",
     "        weight_decay=0.01,\n",
     "        lr_scheduler_type=\"linear\",\n",
     "        seed=3407,\n",
     "        output_dir=\"outputs\",\n",
     "    ),\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "==((====))==  Unsloth - 2x faster free finetuning | Num GPUs = 1\n",
      "   \\\\   /|    Num examples = 128 | Num Epochs = 15\n",
      "O^O/ \\_/ \\    Batch size per device = 4 | Gradient Accumulation steps = 8\n",
      "\\        /    Total batch size = 32 | Total steps = 60\n",
      " \"-____-\"     Number of trainable parameters = 18,464,768\n",
      "wandb: WARNING The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.\n"
     ]
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "\n    <div>\n      \n      <progress value='2' max='60' style='width:300px; height:20px; vertical-align: middle;'></progress>\n      [ 2/60 : < :, Epoch 0.25/15]\n    </div>\n    <table border=\"1\" class=\"dataframe\">\n  <thead>\n <tr style=\"text-align: left;\">\n      <th>Step</th>\n      <th>Training Loss</th>\n    </tr>\n  </thead>\n  <tbody>\n  </tbody>\n</table><p>"
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "# Run finetuning; returns a TrainOutput with loss and runtime statistics.\n",
     "trainer_stats = trainer.train()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[\"<｜begin▁of▁sentence｜>以下是描述任务的指令。请编写一个恰当完成请求的响应。\\n写一个适当完成请求的响应。\\n在回答之前，仔细思考问题，并创建一个循序渐进的思路链，以确保逻辑和准确的回答。\\n\\n### Instruction:\\n你是一台计算机，请按照[x:x轴方向的位移,y:y轴方向的位移,z:z轴方向的位移,rot_x:绕x轴的转角,rot_y:绕y轴的转角,rot_z:绕z轴的转角]的格式，将问题里面的相关数据提取成这种格式的字典，并且只能输出这个字典，不要输出其他信息，如果没有相关的信息则置为0。请全部用中文回答以下问题，不要出现中文之外的语言。\\n\\n### Question:\\n沿z轴运动120m,沿y轴运动-120m，绕y轴转动23度，绕z轴转动-2度\\n\\n### Response:\\n<think>\\n\\n</think>\\n{'x': 0, 'y': -120, 'z': 120, 'rot_x': 0, 'rot_y': 23, 'rot_z': -2}<｜end▁of▁sentence｜>\"]\n",
      "\n",
      "<think>\n",
      "\n",
      "</think>\n",
      "{'x': 0, 'y': -120, 'z': 120, 'rot_x': 0, 'rot_y': 23, 'rot_z': -2}<｜end▁of▁sentence｜>\n"
     ]
    }
   ],
   "source": [
     "# Sanity-check the finetuned model on a new Chinese motion-extraction question.\n",
     "question = \"沿z轴运动120m,沿y轴运动-120m，绕y轴转动23度，绕z轴转动-2度\"\n",
     "\n",
     "\n",
     "FastLanguageModel.for_inference(model)  # Unsloth has 2x faster inference!\n",
     "inputs = tokenizer([prompt_style.format(question, \"\")], return_tensors=\"pt\").to(\"cuda\")\n",
     "\n",
     "outputs = model.generate(\n",
     "    input_ids=inputs.input_ids,\n",
     "    attention_mask=inputs.attention_mask,\n",
     "    max_new_tokens=1200,\n",
     "    use_cache=True,\n",
     "    pad_token_id=tokenizer.pad_token_id,\n",
     "    temperature=0.5,  # lower temperature than the pre-training check above\n",
     ")\n",
     "response = tokenizer.batch_decode(outputs)\n",
     "print(response)\n",
     "# Print only the generated part after the \"### Response:\" marker.\n",
     "print(response[0].split(\"### Response:\")[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": ""
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "<br>    <style><br>        .wandb-row {<br>            display: flex;<br>            flex-direction: row;<br>            flex-wrap: wrap;<br>            justify-content: flex-start;<br>            width: 100%;<br>        }<br>        .wandb-col {<br>            display: flex;<br>            flex-direction: column;<br>            flex-basis: 100%;<br>            flex: 1;<br>            padding: 10px;<br>        }<br>    </style><br><div class=\"wandb-row\"><div class=\"wandb-col\"><h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>train/epoch</td><td>▁▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇█████</td></tr><tr><td>train/global_step</td><td>▁▁▁▂▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▇▇▇▇█████</td></tr><tr><td>train/grad_norm</td><td>████▅▄▄▄▄▄▄▄▂▂▂▁▁▁▁▁▁▁▁▁▂▁▁▂▂▂▁▁▁▁▁▁▁▁▁▁</td></tr><tr><td>train/learning_rate</td><td>▂▄▅███▇▇▇▇▇▇▆▆▆▆▆▆▅▅▅▅▅▅▅▄▄▄▄▃▃▃▃▃▂▂▂▂▂▁</td></tr><tr><td>train/loss</td><td>█████▆▆▅▅▅▄▃▂▂▂▂▂▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁</td></tr></table><br/></div><div class=\"wandb-col\"><h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>total_flos</td><td>3672108116373504.0</td></tr><tr><td>train/epoch</td><td>15</td></tr><tr><td>train/global_step</td><td>60</td></tr><tr><td>train/grad_norm</td><td>0.26007</td></tr><tr><td>train/learning_rate</td><td>0</td></tr><tr><td>train/loss</td><td>0.172</td></tr><tr><td>train_loss</td><td>0.69168</td></tr><tr><td>train_runtime</td><td>387.5849</td></tr><tr><td>train_samples_per_second</td><td>4.954</td></tr><tr><td>train_steps_per_second</td><td>0.155</td></tr></table><br/></div></div>"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": " View run <strong style=\"color:#cdcd00\">brisk-vortex-3</strong> at: <a href='https://wandb.ai/raincut/test%20deepseek%20r1/runs/yixonlhx' target=\"_blank\">https://wandb.ai/raincut/test%20deepseek%20r1/runs/yixonlhx</a><br> View project at: <a href='https://wandb.ai/raincut/test%20deepseek%20r1' target=\"_blank\">https://wandb.ai/raincut/test%20deepseek%20r1</a><br>Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "Find logs at: <code>.\\wandb\\run-20250302_102237-yixonlhx\\logs</code>"
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "# Finish the wandb run and sync all logged metrics.\n",
     "wandb.finish()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {
    "collapsed": false,
    "jupyter": {
     "outputs_hidden": false
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "('deepseek-r1-stewart\\\\tokenizer_config.json',\n",
       " 'deepseek-r1-stewart\\\\special_tokens_map.json',\n",
       " 'deepseek-r1-stewart\\\\tokenizer.json')"
      ]
     },
     "execution_count": 82,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "new_model = \"deepseek-r1-stewart\"\n",
    "model.save_pretrained(new_model)\n",
    "tokenizer.save_pretrained(new_model)\n",
    "tokenizer.save_pretrained(new_model)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "deepseek",
   "language": "python",
   "name": "deepseek"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}