{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "51168cc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install --upgrade --force-reinstall --no-cache-dir unsloth unsloth_zoo\n",
    "!pip install bitsandbytes>=0.45.3\n",
    "!pip install --upgrade vllm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d4551d8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install modelscope>=1.18.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "03680d6e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from unsloth import FastLanguageModel\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6122bda4",
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_length = 2048\n",
    "dtype = None\n",
    "load_in_4bit = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be5edfb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "model, tokenizer = FastLanguageModel.from_pretrained(\n",
    "    model_name = \"/mnt/codes/qwen3/models/Qwen3-4B-unsloth-bnb-4bit\",\n",
    "    max_seq_length = max_seq_length,\n",
    "    dtype = dtype,\n",
    "    load_in_4bit = load_in_4bit,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f0e979dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35ad04fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13b32be2",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : \"Solve (x + 2)^2 = 0.\"}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db55398e",
   "metadata": {},
   "outputs": [],
   "source": [
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = False, # Disable thinking\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "640cb348",
   "metadata": {},
   "outputs": [],
   "source": [
    "text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5e3bc27",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import TextStreamer\n",
    "\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 256, # Increase for longer outputs!\n",
    "    temperature = 0.7, top_p = 0.8, top_k = 20, # For non thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "029ae640",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : \"Solve (x + 2)^2 = 0.\"}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = True, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "outputs = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 2048, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")\n",
    "\n",
    "response = tokenizer.batch_decode(outputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5e5a3e9d",
   "metadata": {},
   "outputs": [],
   "source": [
    "response[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f0f0a88d",
   "metadata": {},
   "source": [
    "显存占用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8fb23aaf",
   "metadata": {},
   "outputs": [],
   "source": [
    "gpu_stats = torch.cuda.get_device_properties(0)\n",
    "start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
    "max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n",
    "print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n",
    "print(f\"{start_gpu_memory} GB of memory reserved.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3970fb29",
   "metadata": {},
   "source": [
    "- 数据处理流程\n",
    "Qwen3 具备推理模式和非推理模式。因此，我们应当使用两个数据集：\n",
    "1. 我们使用 Open Math Reasoning 数据集，该数据集曾被用于赢得 AIMO（AI 数学奥林匹克 - 第二届进步奖）挑战！我们从中抽取了 10% 可验证的推理轨迹，这些轨迹是基于 DeepSeek R1 模型生成的，并且准确率超过 95%。\n",
    "2. 我们还利用了 Maxime Labonne 的 FineTome-100k 数据集，该数据集风格类似 ShareGPT。但我们需要将其转换为 HuggingFace 通用的多轮对话格式。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "035c107b",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install --upgrade datasets huggingface_hub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "754e2e87",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "reasoning_dataset = load_dataset(\"/mnt/codes/qwen3/fine-tuning/datasets/data/unsloth/OpenMathReasoning-mini\", split = \"cot\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ccea90d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "non_reasoning_dataset = load_dataset(\"/mnt/codes/qwen3/fine-tuning/datasets/data/mlabonne/FineTome-100k\", split = \"train\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9329284c",
   "metadata": {},
   "outputs": [],
   "source": [
    "reasoning_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b3466385",
   "metadata": {},
   "outputs": [],
   "source": [
    "non_reasoning_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "edc5555f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将数学问题数据集转换为对话格式，用于训练聊天模型。\n",
    "def generate_conversation(examples):\n",
    "    problems  = examples[\"problem\"]\n",
    "    solutions = examples[\"generated_solution\"]\n",
    "    conversations = []\n",
    "    for problem, solution in zip(problems, solutions):\n",
    "        conversations.append([\n",
    "            {\"role\" : \"user\",      \"content\" : problem},\n",
    "            {\"role\" : \"assistant\", \"content\" : solution},\n",
    "        ])\n",
    "    return { \"conversations\": conversations, }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ac54e05",
   "metadata": {},
   "outputs": [],
   "source": [
    "reasoning_dataset[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9746333",
   "metadata": {},
   "outputs": [],
   "source": [
    "reasoning_data = reasoning_dataset.map(generate_conversation, batched = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "22aec7e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "reasoning_data[\"conversations\"][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3045b30",
   "metadata": {},
   "outputs": [],
   "source": [
    "reasoning_conversations = [\n",
    "    tokenizer.apply_chat_template(conversation, tokenize=False)\n",
    "    for conversation in reasoning_data[\"conversations\"]\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dbf7d845",
   "metadata": {},
   "outputs": [],
   "source": [
    "reasoning_conversations[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dbccf03b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 处理非推理类型的对话数据集，并将其转换为适合训练的格式。\n",
    "# 将 non_reasoning_dataset（非推理数据集）转换为标准的 ShareGPT 格式\n",
    "# standardize_sharegpt 函数会：\n",
    "# 统一对话格式\n",
    "# 规范化角色标签（user/assistant）\n",
    "# 清理和验证数据结构\n",
    "# 确保数据符合训练要求\n",
    "from unsloth.chat_templates import standardize_sharegpt\n",
    "dataset = standardize_sharegpt(non_reasoning_dataset)\n",
    "\n",
    "non_reasoning_conversations = [\n",
    "    tokenizer.apply_chat_template(conversation, tokenize=False)\n",
    "    for conversation in dataset[\"conversations\"]\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1533c143",
   "metadata": {},
   "outputs": [],
   "source": [
    "non_reasoning_conversations[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f8c133a",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(len(reasoning_conversations))\n",
    "print(len(non_reasoning_conversations))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "edf533b2",
   "metadata": {},
   "source": [
    "非推理类数据集的长度更长。我们假设希望模型保留一定的推理能力，但又特别希望它作为一个聊天模型来使用。因此，我们需要定义一个 仅聊天数据的比例。目标是从两个数据集中构建一个混合训练集。这里我们可以设定一个 25% 推理数据、75% 聊天数据的比例：\n",
    "也就是说，从推理数据集中抽取 25%（或者说，抽取占比为 100% - 聊天数据占比 的部分），最后将这两个数据集合并起来即可。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ffac450",
   "metadata": {},
   "outputs": [],
   "source": [
    "chat_percentage = 0.75"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fddf19d4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "non_reasoning_subset = pd.Series(non_reasoning_conversations)\n",
    "non_reasoning_subset = non_reasoning_subset.sample(\n",
    "    int(len(reasoning_conversations) * (1.0 - chat_percentage)),\n",
    "    random_state = 2407,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6b88c6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "data = pd.concat([\n",
    "    pd.Series(reasoning_conversations),\n",
    "    pd.Series(non_reasoning_subset)\n",
    "])\n",
    "data.name = \"text\"\n",
    "\n",
    "from datasets import Dataset\n",
    "combined_dataset = Dataset.from_pandas(pd.DataFrame(data))\n",
    "# 随机打乱数据集，设置随机种子，确保打乱结果可重现\n",
    "combined_dataset = combined_dataset.shuffle(seed = 3407)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cebbd098",
   "metadata": {},
   "outputs": [],
   "source": [
    "len(combined_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6bc79b98",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 对话类数据集\n",
    "combined_dataset[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "386b8c57",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 推理类数据集\n",
    "combined_dataset[2]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "174c1240",
   "metadata": {},
   "source": [
    "微调流程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b840470c",
   "metadata": {},
   "source": [
    "#### STEP1. 添加LoRA参数注入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6dcdf1e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = FastLanguageModel.get_peft_model(\n",
    "    model,\n",
    "    r = 32,           # Choose any number > 0! Suggested 8, 16, 32, 64, 128\n",
    "    target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
    "                      \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
    "    lora_alpha = 32,  # Best to choose alpha = rank or rank*2\n",
    "    lora_dropout = 0, # Supports any, but = 0 is optimized\n",
    "    bias = \"none\",    # Supports any, but = \"none\" is optimized\n",
    "    # [NEW] \"unsloth\" uses 30% less VRAM, fits 2x larger batch sizes!\n",
    "    use_gradient_checkpointing = \"unsloth\", # True or \"unsloth\" for very long context\n",
    "    random_state = 3407,\n",
    "    use_rslora = False,   # We support rank stabilized LoRA\n",
    "    loftq_config = None,  # And LoftQ\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1f4b88cf",
   "metadata": {},
   "source": [
    "#### STEP2. 设置微调参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95dce06c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from trl import SFTTrainer, SFTConfig\n",
    "trainer = SFTTrainer(\n",
    "    model = model,\n",
    "    tokenizer = tokenizer,\n",
    "    train_dataset = combined_dataset,\n",
    "    eval_dataset = None, # Can set up evaluation!\n",
    "    args = SFTConfig(\n",
    "        dataset_text_field = \"text\",\n",
    "        per_device_train_batch_size = 2,\n",
    "        gradient_accumulation_steps = 4, # Use GA to mimic batch size!\n",
    "        warmup_steps = 5,\n",
    "        # num_train_epochs = 1, # Set this for 1 full training run.\n",
    "        max_steps = 30,\n",
    "        learning_rate = 2e-4, # Reduce to 2e-5 for long training runs\n",
    "        logging_steps = 1,\n",
    "        optim = \"adamw_8bit\",\n",
    "        weight_decay = 0.01,\n",
    "        lr_scheduler_type = \"linear\",\n",
    "        seed = 3407,\n",
    "        # report_to = \"wandb\", # Use this for WandB etc\n",
    "    ),\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "609e6051",
   "metadata": {},
   "source": [
    "其中`SFTTrainer`：一个专门为指令微调设计的训练器，封装了 Hugging Face 的 `Trainer`，而`SFTConfig`：配置训练参数的专用类，功能类似 `TrainingArguments`。而SFTConfig核心参数解释如下："
   ]
  },
  {
   "cell_type": "markdown",
   "id": "87c580cc",
   "metadata": {},
   "source": [
    "| 参数名                             | 含义                                              |\n",
    "| ------------------------------- | ----------------------------------------------- |\n",
    "| `dataset_text_field=\"text\"`     | 数据集中用于训练的字段名称，如 `text` 或 `prompt`               |\n",
    "| `per_device_train_batch_size=2` | 每张 GPU 上的 batch size 是 2                        |\n",
    "| `gradient_accumulation_steps=4` | 梯度累计 4 次后才进行一次反向传播（等效于总 batch size = 2 × 4 = 8） |\n",
    "| `warmup_steps=5`                | 前 5 步进行 warmup（缓慢提升学习率）                         |\n",
    "| `max_steps=30`                  | 最多训练 30 步（适合调试或快速实验）                            |\n",
    "| `learning_rate=2e-4`            | 初始学习率（短训练可用较高值）                                 |\n",
    "| `logging_steps=1`               | 每训练 1 步就打印一次日志                                  |\n",
    "| `optim=\"adamw_8bit\"`            | 使用 8-bit AdamW 优化器（节省内存，Unsloth 支持）             |\n",
    "| `weight_decay=0.01`             | 权重衰减，用于防止过拟合                                    |\n",
    "| `lr_scheduler_type=\"linear\"`    | 线性学习率调度器（从高到低线性下降）                              |\n",
    "| `seed=3407`                     | 固定随机种子，确保结果可复现                                  |\n",
    "| `report_to=\"none\"`              | 不使用 WandB 或 TensorBoard 等日志平台（可改为 `\"wandb\"`）    |"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8f94e943",
   "metadata": {},
   "source": [
    "此时基本训练过程为：\n",
    "1. 从 `combined_dataset` 中取出一批样本（2 条）\n",
    "2. 重复上面过程 4 次（`gradient_accumulation_steps=4`）\n",
    "3. 将累计的梯度用于更新模型一次参数（等效于一次大 batch 更新）\n",
    "4. 重复上述过程，直到 `max_steps=30` 停止"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b694371d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Show current memory stats\n",
    "gpu_stats = torch.cuda.get_device_properties(0)\n",
    "start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
    "max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n",
    "print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n",
    "print(f\"{start_gpu_memory} GB of memory reserved.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7566930b",
   "metadata": {},
   "source": [
    "#### Step 3. 【可选】设置wandb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fade459e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# import wandb\n",
    "# wandb.login(key=\"4b62572b8426ff59ce46fec93a00eb1feecc026a\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "75a3bb0a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# run = wandb.init(project='Fine-tune-Qwen-4B-4bit on Combined Dataset', )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1bba464c",
   "metadata": {},
   "source": [
    "#### Step 4. 微调执行流程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a7ebdc1e",
   "metadata": {},
   "source": [
    "&emsp;&emsp;一切准备就绪后，接下来即可开始进行微调。由于本次微调总共只运行30个step，整个过程并不会很长，实际执行过程如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5943f9a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer_stats = trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2718fbba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 微调期间显存占用检测\n",
    "# @title Show final memory and time stats\n",
    "used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
    "used_memory_for_lora = round(used_memory - start_gpu_memory, 3)\n",
    "used_percentage = round(used_memory / max_memory * 100, 3)\n",
    "lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)\n",
    "print(f\"{trainer_stats.metrics['train_runtime']} seconds used for training.\")\n",
    "print(\n",
    "    f\"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training.\"\n",
    ")\n",
    "print(f\"Peak reserved memory = {used_memory} GB.\")\n",
    "print(f\"Peak reserved memory for training = {used_memory_for_lora} GB.\")\n",
    "print(f\"Peak reserved memory % of max memory = {used_percentage} %.\")\n",
    "print(f\"Peak reserved memory for training % of max memory = {lora_percentage} %.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3dc64949",
   "metadata": {},
   "source": [
    "#### Step 5. 模型对话测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8bf15013",
   "metadata": {},
   "outputs": [],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d457f05f",
   "metadata": {},
   "source": [
    "而当我们完成训练后，即可尝试进行模型对话："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3aafcb93",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : \"Solve (x + 2)^2 = 0.\"}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = False, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 256, # Increase for longer outputs!\n",
    "    temperature = 0.7, top_p = 0.8, top_k = 20, # For non thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a0b744f",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : \"Solve (x + 2)^2 = 0.\"}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = True, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60a54103",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : \"Determine the surface area of the portion of the plane $2x + 3y + 6z = 9$ that lies in the first octant.\"}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = True, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "feaa3bdf",
   "metadata": {},
   "source": [
    "#### Step 6. 模型大规模微调"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9bbf2cd0",
   "metadata": {},
   "source": [
    "&emsp;&emsp;接下来继续深入进行训练，此处考虑训练完一整个epoch，总共约8小时左右，训练流程如下所示："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1357cbe",
   "metadata": {},
   "outputs": [],
   "source": [
    "from trl import SFTTrainer, SFTConfig\n",
    "trainer = SFTTrainer(\n",
    "    model = model,\n",
    "    tokenizer = tokenizer,\n",
    "    train_dataset = combined_dataset,\n",
    "    eval_dataset = None, # Can set up evaluation!\n",
    "    args = SFTConfig(\n",
    "        dataset_text_field = \"text\",\n",
    "        per_device_train_batch_size = 4,\n",
    "        gradient_accumulation_steps = 2, # Use GA to mimic batch size!\n",
    "        warmup_steps = 5,\n",
    "        num_train_epochs = 1, # Set this for 1 full training run.\n",
    "        learning_rate = 2e-4, # Reduce to 2e-5 for long training runs\n",
    "        logging_steps = 1,\n",
    "        optim = \"adamw_8bit\",\n",
    "        weight_decay = 0.01,\n",
    "        lr_scheduler_type = \"linear\",\n",
    "        seed = 3407,\n",
    "        report_to = \"none\", # Use this for WandB etc\n",
    "    ),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eec15c83",
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer_stats = trainer.train()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "185638f6",
   "metadata": {},
   "source": [
    "此时训练完成后再进行对话，能明显看出模型当前数学性能有所提升，具体问答效果如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "036b86ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : \"Determine the surface area of the portion of the plane $2x + 3y + 6z = 9$ that lies in the first octant.\"}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = True, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "137d3a74",
   "metadata": {},
   "source": [
    "#### Step 7.模型保存"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cac0c263",
   "metadata": {},
   "source": [
    "微调结束后即可进行模型保存，由于我们训练的LoRA本身是FP16精度，因此模型需要保存为fp16精度格式，才能完整保留模型当前性能："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0158093d",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.save_pretrained_merged(save_directory = \"Qwen3-4B-finetuned-4bit\", \n",
    "                             tokenizer = tokenizer, \n",
    "                             save_method = \"merged_4bit_forced\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a55a0ae0",
   "metadata": {},
   "source": [
    "> - 正在将原始 4bit 权重与微调产生的 LoRA adapter 合并，并转换为 16bit（FP16）精度，用于部署\n",
    "> - 会尽量使用不超过 703.3 MB（最大上限是你设备的 70～75% 内存）来进行保存操作，避免崩溃或卡死\n",
    "> - 开始保存模型，会进行合并+格式转换，时间取决于模型大小（比如 Qwen3-32B 预计要几分钟）\n",
    "> - 如果模型太大而内存不足，Unsloth 会自动启用“磁盘保存模式”，避免占用过多内存\n",
    "> - tokenizer 保存完毕"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c83501ea",
   "metadata": {},
   "source": [
    "#### Step 8.模型性能测试"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "54417793",
   "metadata": {},
   "source": [
    "接下来即可使用vllm对其进行调用，并借助EvalScope进行测试。需要借助vLLM调用导出后的模型，然后借助evalscope进行测试。\n",
    "```python\n",
    "from evalscope import TaskConfig, run_task\n",
    "task_cfg = TaskConfig(\n",
    "    model='./Qwen3-32B-finetuned-fp16',\n",
    "    api_url='http://127.0.0.1:8000/v1/chat/completions',\n",
    "    eval_type='service',\n",
    "    datasets=[\n",
    "        'data_collection',\n",
    "    ],\n",
    "    dataset_args={\n",
    "        'data_collection': {\n",
    "            'dataset_id': 'modelscope/EvalScope-Qwen3-Test',\n",
    "            'filters': {'remove_until': '</think>'}  # 过滤掉思考的内容\n",
    "        }\n",
    "    },\n",
    "    eval_batch_size=128,\n",
    "    generation_config={\n",
    "        'max_tokens': 30000,  # 最大生成token数，建议设置为较大值避免输出截断\n",
    "        'temperature': 0.6,  # 采样温度 (qwen 报告推荐值)\n",
    "        'top_p': 0.95,  # top-p采样 (qwen 报告推荐值)\n",
    "        'top_k': 20,  # top-k采样 (qwen 报告推荐值)\n",
    "        'n': 1,  # 每个请求产生的回复数量\n",
    "    },\n",
    "    timeout=60000,  # 超时时间\n",
    "    stream=True,  # 是否使用流式输出\n",
    "    limit=2000,  # 设置为2000条数据进行测试\n",
    ")\n",
    "\n",
    "run_task(task_cfg=task_cfg)\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f0c8460f",
   "metadata": {},
   "source": [
    "测试后即可在前端中观察测评报告：\n",
    "```bash\n",
    "evalscope app\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a876a98f",
   "metadata": {},
   "source": [
    "首先微调后模型性能如图所示："
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fdd08d18",
   "metadata": {},
   "source": [
    "## 七、Qwen3中文法律高效微调实战"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2be69edd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/miniconda3/envs/qwen3/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.\n",
      "INFO 09-25 17:00:11 [__init__.py:216] Automatically detected platform cuda.\n",
      "🦥 Unsloth Zoo will now patch everything to make training faster!\n"
     ]
    }
   ],
   "source": [
    "from unsloth import FastLanguageModel\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "85f41ec0",
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_length = 8192\n",
    "dtype = None\n",
    "load_in_4bit = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b89c1bd9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==((====))==  Unsloth 2025.9.4: Fast Qwen3 patching. Transformers: 4.55.4. vLLM: 0.10.2.\n",
      "   \\\\   /|    NVIDIA A10. Num GPUs = 1. Max memory: 22.191 GB. Platform: Linux.\n",
      "O^O/ \\_/ \\    Torch: 2.8.0+cu128. CUDA: 8.6. CUDA Toolkit: 12.8. Triton: 3.4.0\n",
      "\\        /    Bfloat16 = TRUE. FA [Xformers = 0.0.32.post1. FA2 = False]\n",
      " \"-____-\"     Free license: http://github.com/unslothai/unsloth\n",
      "Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!\n"
     ]
    }
   ],
   "source": [
    "model, tokenizer = FastLanguageModel.from_pretrained(\n",
    "    model_name = \"/mnt/codes/qwen3/models/Qwen3-4B-unsloth-bnb-4bit\",\n",
    "    max_seq_length = max_seq_length,\n",
    "    dtype = dtype,\n",
    "    load_in_4bit = load_in_4bit,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "302ee82e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/mnt/codes/qwen3/models/Qwen3-4B-unsloth-bnb-4bit'"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.config._name_or_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "cf7fc517",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "GPU = NVIDIA A10. Max memory = 22.191 GB.\n",
      "3.816 GB of memory reserved.\n"
     ]
    }
   ],
   "source": [
    "# @title Show current memory stats\n",
    "gpu_stats = torch.cuda.get_device_properties(0)\n",
    "start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
    "max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n",
    "print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n",
    "print(f\"{start_gpu_memory} GB of memory reserved.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "b4d5dce3",
   "metadata": {},
   "outputs": [],
   "source": [
    "question_1 = \"问题：某分裂组织在边境地区策划武装割据，并与境外势力秘密勾结，其首要分子甲在实施过程中被抓获。请问甲的行为构成何罪？应当如何量刑？\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "73fc4b7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "question_2 = \"2015年9月4日14时50分许，被告人曹某某在景德镇市珠山区XX涵洞路口醉酒（血液酒精含量未明确数值）、无证驾驶无牌二轮摩托车，搭载李某林左转时未让直行车辆，与超速行驶的朱某良驾驶的出租车相撞，致李某林受伤（具体伤情等级未说明）。事故后曹某某赔偿李某林并获得谅解。公诉机关以危险驾驶罪指控，提供现场勘查记录、酒精检测等9项证据。本案中：1. 曹某某同时存在醉酒、无证、无牌三项违法情节，但缺乏具体酒精浓度数据；2. 事故系双方过错（曹某某未让行与朱某良超速）共同导致；3. 被害人已谅解。在此情况下：（1）危险驾驶罪的构成要件是否全部满足？（2）量刑时如何平衡\\\"无证+无牌+醉驾\\\"的从重情节与\\\"赔偿谅解\\\"的从轻情节？（3）最终判处3个月有期徒刑并处罚金1000元是否适当？\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a966b86b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<think>\n",
      "嗯，用户问的是关于某个分裂组织在边境地区策划武装割据，并与境外势力勾结，其首要分子甲被抓获，问甲构成什么罪，如何量刑。首先，我需要确定这个案件涉及的法律条款。根据中国刑法，分裂国家罪和分裂国家罪的首要分子可能涉及其他罪名。\n",
      "\n",
      "首先，分裂国家罪是刑法第103条，规定组织、策划、实施分裂国家、破坏国家统一的行为，处五年以下有期徒刑、拘役或者管制；对首要分子和其他积极参加的人员，处五年以上有期徒刑、无期徒刑或者死刑。但这里的情况是分裂组织在边境地区策划武装割据，并且与境外势力勾结，可能涉及更严重的犯罪。\n",
      "\n",
      "另外，可能涉及恐怖活动罪，比如刑法第120条，如果该组织有恐怖活动的性质，可能构成组织、领导恐怖活动组织罪，或者参加恐怖活动组织罪，但需要判断是否符合恐怖活动的特征。或者，如果该组织有武装割据的行为，可能涉及武装叛乱、暴乱罪，即刑法第123条，但需要看具体行为是否符合。\n",
      "\n",
      "不过，用户提到的是“分裂组织”，所以可能更倾向于分裂国家罪。但需要区分分裂国家罪和分裂国家罪的首要分子。根据刑法第103条，组织、策划、实施分裂国家、破坏国家统一的行为，构成分裂国家罪。而首要分子可能被判处更重的刑罚，如无期徒刑或死刑。\n",
      "\n",
      "另外，如果该组织与境外势力勾结，可能构成分裂国家罪的加重情节，或者可能涉及其他罪名，如资助恐怖主义活动罪，但需要具体分析。\n",
      "\n",
      "可能需要考虑的是，如果该组织有武装割据的行为，可能同时构成武装叛乱、暴乱罪（刑法第123条），但通常分裂国家罪和武装叛乱、暴乱罪是两个不同的罪名。不过，如果分裂组织实施武装割据，可能同时触犯多个罪名。\n",
      "\n",
      "此外，根据最高人民法院、最高人民检察院的相关司法解释，分裂国家罪的首要分子可能被判处无期徒刑或死刑，而其他参加者可能根据情节严重程度量刑。\n",
      "\n",
      "需要确认的是，甲作为首要分子，是否构成分裂国家罪的首要分子，是否属于加重情节。如果该组织与境外势力勾结，可能构成“与境外势力勾结”的情节，可能加重处罚。\n",
      "\n",
      "另外，是否属于“武装割据”，可能涉及武装叛乱或暴乱，但需要看具体行为是否符合武装叛乱的构成要件。例如，是否使用暴力手段，是否针对国家政权等。\n",
      "\n",
      "可能的结论是，甲构成分裂国家罪的首要分子，可能被判处无期徒刑或死刑，并且可能同时构成其他罪名，如武装叛乱罪，但需要具体分析。不过通常，如果组织是分裂国家的，首要分子可能被认定为分裂国家罪的首要分子，而其他行为如武装割据可能属于该罪的加重情节。\n",
      "\n",
      "另外，根据刑法第103条，分裂国家罪的首要分子，可能被判处无期徒刑或死刑，而其他积极参加的人员可能被判处五年以上有期徒刑。因此，甲作为首要分子，可能被判处无期徒刑或死刑，并可能根据情节严重程度决定。\n",
      "\n",
      "需要确认的是，该组织是否属于“分裂国家”的组织，而甲作为首要分子是否构成该罪，以及是否与境外势力勾结是否属于加重情节。如果甲与境外势力勾结，可能属于“与境外势力勾结”情节，根据司法解释，可能加重处罚。\n",
      "\n",
      "总结：甲的行为构成分裂国家罪的首要分子，可能被判处无期徒刑或死刑，并根据情节严重程度决定。同时，如果该组织有武装割据的行为，可能涉及武装叛乱或暴乱罪，但通常分裂国家罪的首要分子可能同时构成其他罪名，但可能被数罪并罚，但需要具体分析。\n",
      "</think>\n",
      "\n",
      "根据中国刑法及相关司法解释，甲的行为构成**分裂国家罪的首要分子**，具体分析如下：\n",
      "\n",
      "---\n",
      "\n",
      "### **一、罪名认定**\n",
      "1. **分裂国家罪**（刑法第103条）  \n",
      "   甲作为分裂组织的**首要分子**，组织、策划、实施分裂国家、破坏国家统一的行为，且与境外势力勾结，符合《刑法》第103条的构成要件。  \n",
      "   - **核心行为**：策划武装割据、与境外势力勾结，直接危害国家统一和领土完整。\n",
      "   - **加重情节**：与境外势力勾结、组织武装割据等情节，可能被认定为“情节严重”，适用更重刑罚。\n",
      "\n",
      "2. **可能涉及的其他罪名**  \n",
      "   - 若武装割据行为具有**暴力性**（如使用武器、暴力手段），可能同时构成**武装叛乱、暴乱罪**（刑法第123条），但通常与分裂国家罪并列处罚，需数罪并罚。\n",
      "   - 若涉及恐怖活动，可能构成**组织、领导恐怖活动组织罪**（刑法第120条），但需明确是否符合恐怖活动的特征（如恐怖主义目的）。\n",
      "\n",
      "---\n",
      "\n",
      "### **二、量刑标准**\n",
      "1. **分裂国家罪的首要分子**  \n",
      "   - **基本刑罚**：根据《刑法》第103条，组织、策划、实施分裂国家、破坏国家统一的行为，**处五年以下有期徒刑、拘役或者管制**；**首要分子和其他积极参加的人员**，**处五年以上有期徒刑、无期徒刑或者死刑**。\n",
      "   - **加重情节**：若与境外势力勾结、组织武装割据，属于“情节严重”，可**判处无期徒刑或死刑**。\n",
      "\n",
      "2. **具体量刑考量**  \n",
      "   - **首要分子**：通常判处**无期徒刑或死刑**，具体需结合犯罪手段、后果、社会危害性等因素。\n",
      "   - **其他参加人员**：根据其作用和情节，可能判处**五年以上有期徒刑**。\n",
      "\n",
      "---\n",
      "\n",
      "### **三、司法实践中的处理**\n",
      "- **与境外势力勾结**：根据最高人民法院、最高人民检察院的司法解释，此类情节可认定为“情节严重”，加重处罚。\n",
      "- **武装割据**：若行为具有**暴力性**（如制造冲突、攻击国家机关等），可能同时构成**武装叛乱、暴乱罪**，需数罪并罚。\n",
      "\n",
      "---\n",
      "\n",
      "### **四、结论**\n",
      "甲作为分裂组织的**首要分子**，其行为构成**分裂国家罪**，且因与境外势力勾结、实施武装割据，属于“情节严重”，依法应判处**无期徒刑或死刑**。若同时涉及武装叛乱或恐怖活动，可能数罪并罚，但以分裂国家罪的量刑为主。\n",
      "\n",
      "**法律依据**：  \n",
      "- 《中华人民共和国刑法》第103条（分裂国家罪）  \n",
      "- 最高人民法院、最高人民检察院关于办理分裂国家、破坏国家统一犯罪案件具体应用法律若干问题的解释（2010年）  \n",
      "- 司法实践中对“与境外势力勾结”“武装割据”等情节的认定标准。<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : question_1}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = True, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7ec7e1d2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<think>\n",
      "嗯，这个问题看起来有点复杂，但让我慢慢理清楚。首先，我需要回忆一下中国刑法中关于危险驾驶罪的相关规定，然后结合案件的具体情况来分析。\n",
      "\n",
      "首先，题目中的曹某某在2015年9月4日14时50分许，醉酒驾驶无证、无牌的二轮摩托车，搭载李某林左转时未让直行车辆，与朱某良驾驶的出租车相撞，导致李某林受伤。之后曹某某赔偿了李某林并获得谅解。公诉机关以危险驾驶罪指控，提供了现场勘查记录、酒精检测等9项证据。现在需要解决三个问题：危险驾驶罪的构成要件是否满足，量刑时如何平衡从重和从轻情节，以及最终判决是否适当。\n",
      "\n",
      "首先，危险驾驶罪的构成要件。根据《刑法》第133条之一，危险驾驶罪包括醉酒驾驶机动车、驾驶机动车追逐竞驶、违反危险化学品安全管理规定等行为。这里曹某某的情况是醉酒驾驶无证、无牌的二轮摩托车，是否属于危险驾驶罪的范畴？\n",
      "\n",
      "首先，危险驾驶罪的主体是“机动车”，而二轮摩托车是否属于机动车呢？根据《机动车驾驶证申领和使用规定》，二轮摩托车属于机动车，所以曹某某驾驶的二轮摩托车属于机动车，因此醉酒驾驶属于危险驾驶罪的构成要件。不过，是否需要考虑无证驾驶的问题？根据《刑法》第133条之一，醉酒驾驶机动车的，处拘役并处罚金，所以即使无证驾驶，只要醉酒驾驶机动车，就构成危险驾驶罪。因此，曹某某的行为符合危险驾驶罪的构成要件，即使无证、无牌，但醉酒驾驶机动车本身已经构成危险驾驶罪。\n",
      "\n",
      "接下来，第二个问题，量刑时如何平衡从重和从轻情节。曹某某有无证、无牌、醉酒三项违法情节，这些都属于从重处罚的情节。而被害人已谅解，赔偿了损失，这属于从轻情节。根据《刑法》第67条，对于自首、立功、赔偿谅解等可以从轻或减轻处罚。但这里是否属于自首？题目中没有提到曹某某是否自首，所以可能不考虑自首。但赔偿谅解属于从轻情节，可以减轻处罚。\n",
      "\n",
      "第三个问题，最终判决是否适当。曹某某被判处3个月拘役并处罚金1000元。需要考虑从重和从轻因素。根据《刑法》第133条之一，醉酒驾驶机动车的，处拘役并处罚金，情节严重的可以处拘役并处罚金。但这里是否属于“情节严重”？比如，造成交通事故，但题目中说事故是双方过错，曹某某未让行，朱某良超速，导致李某林受伤。不过，是否构成事故责任？根据《刑法》第133条，交通肇事罪需要造成严重后果，但危险驾驶罪可能不以造成事故为前提，但如果是醉酒驾驶导致事故，可能涉及交通肇事罪，但题目中公诉机关是危险驾驶罪，所以可能属于危险驾驶罪导致事故的情况。不过，危险驾驶罪和交通肇事罪可能有交叉，但这里可能属于危险驾驶罪，同时可能构成交通肇事罪，但题目中公诉机关只指控危险驾驶罪，所以可能只按危险驾驶罪处理。\n",
      "\n",
      "不过，根据《最高人民法院、最高人民检察院关于办理危险驾驶刑事案件适用法律若干问题的解释》第2条，醉酒驾驶机动车，造成交通事故且负事故全部责任或者主要责任的，处拘役，并处罚金。但本案中，事故是双方过错，曹某某未让行，朱某良超速，所以可能曹某某负主要责任或同等责任？如果是主要责任，可能属于情节严重，但题目中没有明确责任划分，但根据描述，曹某某未让行，朱某良超速，可能曹某某负主要责任，或者双方都有责任。如果曹某某负主要责任，那么可能属于情节严重，但题目中说被害人已谅解，可能减轻处罚。\n",
      "\n",
      "不过，根据刑法第133条之一，醉酒驾驶机动车的，处拘役并处罚金。如果造成交通事故，可能属于情节严重，但需要看是否属于“情节严重”。根据司法解释，如果造成交通事故，且负事故全部责任或者主要责任的，处拘役并处罚金，可能属于情节严重，但题目中曹某某可能负主要责任，所以可能属于情节严重，但被害人已谅解，可能减轻处罚。\n",
      "\n",
      "但题目中曹某某被判处3个月拘役并处罚金1000元，是否适当？需要考虑从重和从轻因素。无证、无牌、醉驾三项违法情节，属于从重处罚，但赔偿谅解属于从轻。根据《刑法》第133条之一，如果造成交通事故，可能处拘役并处罚金，但如果是危险驾驶罪，可能基准刑期是拘役，但根据从重和从轻因素调整。\n",
      "\n",
      "另外，根据《刑法》第67条，对于赔偿谅解的，可以减轻处罚。例如，如果曹某某赔偿了李某林并获得谅解，可能可以减轻处罚，比如从拘役3个月到更轻的刑罚，但题目中判处3个月，可能属于从轻处罚的范围。\n",
      "\n",
      "但问题在于，曹某某有无证、无牌、醉驾三项违法情节，这属于从重情节，而赔偿谅解属于从轻情节。是否平衡后判处3个月，是否适当？\n",
      "\n",
      "另外，根据《刑法》第133条之一，醉酒驾驶机动车的，处拘役并处罚金，但根据司法解释，如果造成交通事故，可能属于情节严重，但如果是双方过错，可能责任比例较低，所以可能刑罚较轻。\n",
      "\n",
      "不过，可能需要参考具体案例或司法解释。例如，如果曹某某无证、无牌驾驶，属于严重违法，但赔偿谅解，可能减轻处罚。如果基准刑期是拘役，但考虑到赔偿谅解，可能判处3个月，但需要看具体情节。\n",
      "\n",
      "另外，根据《刑法》第133条之一，如果造成交通事故，可能属于“情节严重”，但如果是双方过错，可能责任比例较低，所以刑罚可能较轻。例如，如果曹某某负主要责任，可能属于情节严重，但赔偿谅解可能减轻处罚。\n",
      "\n",
      "综上，可能曹某某的刑罚是适当的，因为虽然有从重情节，但赔偿谅解和双方过错导致事故，所以判处3个月拘役并处罚金1000元是适当的。\n",
      "</think>\n",
      "\n",
      "### 问题分析与结论：\n",
      "\n",
      "#### **1. 危险驾驶罪的构成要件是否满足？**\n",
      "- **构成要件分析**：  \n",
      "  根据《刑法》第133条之一，危险驾驶罪的构成要件包括：  \n",
      "  - **醉酒驾驶机动车**：曹某某醉酒驾驶二轮摩托车，符合“醉酒驾驶机动车”的行为。  \n",
      "  - **机动车**：二轮摩托车属于机动车（依据《机动车驾驶证申领和使用规定》），因此醉酒驾驶机动车成立。  \n",
      "  - **无证、无牌**：虽属违法情节，但危险驾驶罪的构成不以“有证”或“有牌”为前提，仅需醉酒驾驶机动车即可成立。  \n",
      "  - **事故责任**：虽然事故由双方过错导致，但曹某某的醉酒驾驶行为本身已构成危险驾驶罪，且事故后果（李某林受伤）属于“造成交通事故”的情节（《刑法》第133条之一第2款规定）。  \n",
      "  **结论**：危险驾驶罪的构成要件已全部满足。\n",
      "\n",
      "---\n",
      "\n",
      "#### **2. 量刑时如何平衡从重与从轻情节？**\n",
      "- **从重情节**：  \n",
      "  - **醉酒驾驶**：属于危险驾驶罪的核心行为，直接构成犯罪。  \n",
      "  - **无证、无牌驾驶**：违反交通管理法规，属于从重处罚情节（《刑法》第133条之一规定）。  \n",
      "  - **事故责任**：虽为双方过错，但曹某某未让行（主观过错），可能负主要责任，属于“情节严重”（《最高人民法院、最高人民检察院关于办理危险驾驶刑事案件适用法律若干问题的解释》第2条）。  \n",
      "- **从轻情节**：  \n",
      "  - **赔偿谅解**：被害人李某林已谅解并获得赔偿，符合《刑法》第67条规定的“赔偿谅解”从轻情节。  \n",
      "  - **事故责任比例**：若曹某某负主要责任，可能减轻刑罚；若为同等责任，则可能适用较轻刑罚。  \n",
      "- **量刑平衡**：  \n",
      "  根据《刑法》第133条之一，醉酒驾驶机动车处拘役并处罚金。若造成交通事故且负主要责任，可能判处3-6个月拘役；若赔偿谅解，可减轻处罚。结合本案，曹某某虽有从重情节，但赔偿谅解和事故责任比例较低，刑罚适当。\n",
      "\n",
      "---\n",
      "\n",
      "#### **3. 最终判决是否适当？**\n",
      "- **判决依据**：  \n",
      "  - **危险驾驶罪**：曹某某醉酒驾驶二轮摩托车，符合危险驾驶罪构成要件。  \n",
      "  - **从重与从轻情节**：无证、无牌、醉驾属从重，赔偿谅解属从轻。  \n",
      "  - **事故责任**：双方过错导致事故，但曹某某未让行，可能负主要责任，但赔偿谅解可减轻刑罚。  \n",
      "- **判决合理性**：  \n",
      "  - **刑期**：3个月拘役属于“情节较轻”的范畴（《刑法》第133条之一规定）。  \n",
      "  - **罚金**：1000元罚金符合“数额较小”的标准（《刑法》第133条之一）。  \n",
      "  - **法律依据**：司法解释中，若赔偿谅解且事故责任比例较低，可判处较轻刑罚。  \n",
      "- **结论**：最终判决**适当**，符合从重与从轻情节的平衡，且刑罚在法定范围内。\n",
      "\n",
      "---\n",
      "\n",
      "### **最终答案：**\n",
      "1. **危险驾驶罪的构成要件已全部满足**，曹某某的行为符合危险驾驶罪的构成要件。  \n",
      "2. **量刑时应平衡从重与从轻情节**：无证、无牌、醉驾属从重，赔偿谅解属从轻，最终判决可减轻处罚。  \n",
      "3. **最终判处3个月有期徒刑并处罚金1000元是适当的**，符合法律规定和案件实际情况。<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : question_2}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = True, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "00bfe394",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import Dataset\n",
    "import json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "87a3487c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_jsonl_dataset(file_path):\n",
    "    \"\"\"从JSONL文件加载数据集\"\"\"\n",
    "    data = {\"input\": [], \"output\": []}\n",
    "    \n",
    "    print(f\"开始加载数据集: {file_path}\")\n",
    "    count = 0\n",
    "    error_count = 0\n",
    "    \n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for line in f:\n",
    "            try:\n",
    "                item = json.loads(line.strip())\n",
    "                # 根据数据集结构提取字段\n",
    "                instruct_text = item.get(\"instruction\", \"\")\n",
    "                input_text = item.get(\"input\", \"\")\n",
    "                \n",
    "                if instruct_text and input_text:\n",
    "                    input = f\"{instruct_text}\\n{input_text}\"\n",
    "                else:\n",
    "                    input = instruct_text or input_text\n",
    "                output = item.get(\"output\", \"\")\n",
    "                \n",
    "                data[\"input\"].append(input)\n",
    "                data[\"output\"].append(output)\n",
    "                count += 1\n",
    "            except Exception as e:\n",
    "                print(f\"解析行时出错: {e}\")\n",
    "                error_count += 1\n",
    "                continue\n",
    "    \n",
    "    print(f\"数据集加载完成: 成功加载{count}个样本, 跳过{error_count}个错误样本\")\n",
    "    return Dataset.from_dict(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "94ef70f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "data_path = \"/mnt/cache/modelscope/datasets/KuugoRen/chinese_law_ft_dataset/chinese_law_ft_dataset.jsonl\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "b3ac39cf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始加载数据集: /mnt/cache/modelscope/datasets/KuugoRen/chinese_law_ft_dataset/chinese_law_ft_dataset.jsonl\n",
      "数据集加载完成: 成功加载281142个样本, 跳过0个错误样本\n",
      "\n",
      "数据集统计:\n",
      "- 样本数量: 281142\n",
      "- 字段: ['input', 'output']\n",
      "随机采样后的样本数量: 2000\n",
      "{'input': '某个大型商场购买了一台电梯为顾客服务，但是没有设置特种设备安全管理机构或者配备专职的特种设备安全管理人员。在电梯使用过程中，某位顾客发生了事故。\\n根据《特种设备安全法》第三十六条规定，对于为公众提供服务的特种设备的运营使用单位，是否应该设置特种设备安全管理机构或者配备专职的特种设备安全管理人员？', 'output': '根据《特种设备安全法》第三十六条规定，电梯作为为公众提供服务的特种设备的运营使用单位，应当对特种设备的使用安全负责，因此应该设置特种设备安全管理机构或者配备专职的特种设备安全管理人员。\\n同时，《特种设备安全法》第三十五条规定特种设备使用单位应当建立特种设备安全技术档案，包括特种设备的定期检验和自行检查记录、日常使用状况记录、维护保养记录以及运行故障和事故记录。要依法建立这些技术档案，设置特种设备安全管理机构或配备特种设备安全管理人员是必要的。\\n因此，针对该大型商场购买的电梯为顾客服务，由于未设置特种设备安全管理机构或配备专职的特种设备安全管理人员，存在违反《特种设备安全法》的情形。'}\n"
     ]
    }
   ],
   "source": [
    "# 加载自定义数据集\n",
    "dataset = load_jsonl_dataset(data_path)\n",
    "\n",
    "# 显示数据集信息\n",
    "print(f\"\\n数据集统计:\")\n",
    "print(f\"- 样本数量: {len(dataset)}\")\n",
    "print(f\"- 字段: {dataset.column_names}\")\n",
    "\n",
    "# 随机采样2000个样本\n",
    "dataset = dataset.shuffle(seed=3407).select(range(2000))\n",
    "print(f\"随机采样后的样本数量: {len(dataset)}\")\n",
    "\n",
    "print(dataset[100])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ba9a3d6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def formatting_prompts_func(examples):\n",
    "    \"\"\"根据提示模板格式化数据\"\"\"\n",
    "    inputs = examples[\"input\"]\n",
    "    outputs = examples[\"output\"]\n",
    "    \n",
    "    texts = []\n",
    "    for  input_text, output in zip(inputs, outputs):\n",
    "        texts.append([\n",
    "            {\"role\" : \"user\", \"content\" : input_text},\n",
    "            {\"role\" : \"assistant\", \"content\" : output},\n",
    "        ])\n",
    "    \n",
    "    return {\"text\": texts}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "bec308e4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始格式化数据集...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Map: 100%|██████████| 2000/2000 [00:00<00:00, 45753.63 examples/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集格式化完成\n"
     ]
    }
   ],
   "source": [
    "# 应用格式化\n",
    "print(\"开始格式化数据集...\")\n",
    "no_reasonging_data = dataset.map(formatting_prompts_func,batched = True)\n",
    "no_reasoning_conversations = [tokenizer.apply_chat_template(\n",
    "    text,\n",
    "    tokenize = False,\n",
    ") for text in no_reasonging_data[\"text\"]\n",
    "]\n",
    "print(\"数据集格式化完成\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "eb663004",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'<|im_start|>user\\n合同撤销之后是否有违约金?<|im_end|>\\n<|im_start|>assistant\\n<think>\\n\\n</think>\\n\\n合同撤销后可否追究违约责任依据以下规定：1、撤销合同看合同条款是否有相应的规定，如出现不应该撤销合同的情形，则撤销行为是违约的，应该承担协议约定的违约责任，守约方可以主张违约金；\\n2、如果合同约定的撤销情形出现了胁迫、显失公平等行为，撤销行为则不构成违约，则撤销合同不需要付违约金。<|im_end|>\\n'"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "no_reasoning_conversations[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "b8b0aaae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import Dataset\n",
    "import pandas as pd\n",
    "data=pd.concat([pd.Series(no_reasoning_conversations)])\n",
    "data.name = \"text\"\n",
    "combined_dataset = Dataset.from_pandas(pd.DataFrame(data))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "d8ad35e3",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Unsloth 2025.9.4 patched 36 layers with 36 QKV layers, 36 O layers and 36 MLP layers.\n"
     ]
    }
   ],
   "source": [
    "model = FastLanguageModel.get_peft_model(\n",
    "    model,\n",
    "    r = 32,           # Choose any number > 0! Suggested 8, 16, 32, 64, 128\n",
    "    target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
    "                      \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
    "    lora_alpha = 32,  # Best to choose alpha = rank or rank*2\n",
    "    lora_dropout = 0, # Supports any, but = 0 is optimized\n",
    "    bias = \"none\",    # Supports any, but = \"none\" is optimized\n",
    "    # [NEW] \"unsloth\" uses 30% less VRAM, fits 2x larger batch sizes!\n",
    "    use_gradient_checkpointing = \"unsloth\", # True or \"unsloth\" for very long context\n",
    "    random_state = 3407,\n",
    "    use_rslora = False,   # We support rank stabilized LoRA\n",
    "    loftq_config = None,  # And LoftQ\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "435d653f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Unsloth: Tokenizing [\"text\"] (num_proc=20): 100%|██████████| 2000/2000 [00:02<00:00, 748.80 examples/s] \n"
     ]
    }
   ],
   "source": [
    "from trl import SFTTrainer, SFTConfig\n",
    "trainer = SFTTrainer(\n",
    "    model = model,\n",
    "    tokenizer = tokenizer,\n",
    "    train_dataset = combined_dataset,\n",
    "    eval_dataset = None, # Can set up evaluation!\n",
    "    args = SFTConfig(\n",
    "        dataset_text_field = \"text\",\n",
    "        per_device_train_batch_size = 2,\n",
    "        gradient_accumulation_steps = 4, # Use GA to mimic batch size!\n",
    "        warmup_steps = 5,\n",
    "        # num_train_epochs = 1, # Set this for 1 full training run.\n",
    "        max_steps = 15,\n",
    "        learning_rate = 2e-4, # Reduce to 2e-5 for long training runs\n",
    "        logging_steps = 1,\n",
    "        optim = \"adamw_8bit\",\n",
    "        weight_decay = 0.01,\n",
    "        lr_scheduler_type = \"linear\",\n",
    "        seed = 3407,\n",
    "        report_to = None, # Use this for WandB etc\n",
    "    ),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "bb63b56e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "GPU = NVIDIA A10. Max memory = 22.191 GB.\n",
      "4.006 GB of memory reserved.\n"
     ]
    }
   ],
   "source": [
    "# @title Show current memory stats\n",
    "gpu_stats = torch.cuda.get_device_properties(0)\n",
    "start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
    "max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n",
    "print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n",
    "print(f\"{start_gpu_memory} GB of memory reserved.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "d3045cc2",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "==((====))==  Unsloth - 2x faster free finetuning | Num GPUs used = 1\n",
      "   \\\\   /|    Num examples = 2,000 | Num Epochs = 1 | Total steps = 15\n",
      "O^O/ \\_/ \\    Batch size per device = 2 | Gradient accumulation steps = 4\n",
      "\\        /    Data Parallel GPUs = 1 | Total batch size (2 x 4 x 1) = 8\n",
      " \"-____-\"     Trainable parameters = 66,060,288 of 4,088,528,384 (1.62% trained)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Unsloth: Will smartly offload gradients to save VRAM!\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='15' max='15' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [15/15 00:31, Epoch 0/1]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Step</th>\n",
       "      <th>Training Loss</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>2.606800</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>2.546200</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>2.577700</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>2.303300</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>5</td>\n",
       "      <td>1.885600</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>6</td>\n",
       "      <td>2.174400</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7</td>\n",
       "      <td>2.039000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>8</td>\n",
       "      <td>1.885600</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9</td>\n",
       "      <td>1.831700</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>10</td>\n",
       "      <td>1.572200</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>11</td>\n",
       "      <td>1.341700</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>12</td>\n",
       "      <td>1.545800</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>13</td>\n",
       "      <td>1.907200</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>14</td>\n",
       "      <td>1.655700</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>15</td>\n",
       "      <td>1.550600</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "trainer_stats = trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "187c7fec",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10.5192 seconds used for training.\n",
      "0.18 minutes used for training.\n",
      "Peak reserved memory = 5.02 GB.\n",
      "Peak reserved memory for training = 1.014 GB.\n",
      "Peak reserved memory % of max memory = 22.622 %.\n",
      "Peak reserved memory for training % of max memory = 4.569 %.\n"
     ]
    }
   ],
   "source": [
    "# @title Show final memory and time stats\n",
    "used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
    "used_memory_for_lora = round(used_memory - start_gpu_memory, 3)\n",
    "used_percentage = round(used_memory / max_memory * 100, 3)\n",
    "lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)\n",
    "print(f\"{trainer_stats.metrics['train_runtime']} seconds used for training.\")\n",
    "print(\n",
    "    f\"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training.\"\n",
    ")\n",
    "print(f\"Peak reserved memory = {used_memory} GB.\")\n",
    "print(f\"Peak reserved memory for training = {used_memory_for_lora} GB.\")\n",
    "print(f\"Peak reserved memory % of max memory = {used_percentage} %.\")\n",
    "print(f\"Peak reserved memory for training % of max memory = {lora_percentage} %.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "7bf915a0",
   "metadata": {},
   "outputs": [],
   "source": [
    "question_1 = \"问题：某分裂组织在边境地区策划武装割据，并与境外势力秘密勾结，其首要分子甲在实施过程中被抓获。请问甲的行为构成何罪？应当如何量刑？\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "c3def0b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "question_2 = \"2015年9月4日14时50分许，被告人曹某某在景德镇市珠山区XX涵洞路口醉酒（血液酒精含量未明确数值）、无证驾驶无牌二轮摩托车，搭载李某林左转时未让直行车辆，与超速行驶的朱某良驾驶的出租车相撞，致李某林受伤（具体伤情等级未说明）。事故后曹某某赔偿李某林并获得谅解。公诉机关以危险驾驶罪指控，提供现场勘查记录、酒精检测等9项证据。本案中：1. 曹某某同时存在醉酒、无证、无牌三项违法情节，但缺乏具体酒精浓度数据；2. 事故系双方过错（曹某某未让行与朱某良超速）共同导致；3. 被害人已谅解。在此情况下：（1）危险驾驶罪的构成要件是否全部满足？（2）量刑时如何平衡\\\"无证+无牌+醉驾\\\"的从重情节与\\\"赔偿谅解\\\"的从轻情节？（3）最终判处3个月有期徒刑并处罚金1000元是否适当？\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "90494eb5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "根据中国刑法的规定，针对该问题，甲的行为可能构成以下几种犯罪：\n",
      "\n",
      "---\n",
      "\n",
      "## 一、甲的行为可能构成的罪名\n",
      "\n",
      "### 1. **组织、领导恐怖活动组织罪**\n",
      "- **构成要件**：甲作为分裂组织的首要分子，组织、领导了分裂组织，可能构成该罪。\n",
      "- **量刑**：根据《刑法》第120条，组织、领导恐怖活动组织的，处十年以上有期徒刑、无期徒刑或死刑；对恐怖活动组织的成员，处三年以上十年以下有期徒刑；对恐怖活动的实施者，处五年以上有期徒刑、无期徒刑或死刑。\n",
      "\n",
      "### 2. **分裂国家罪**\n",
      "- **构成要件**：甲策划武装割据，可能构成分裂国家罪。\n",
      "- **量刑**：根据《刑法》第103条，分裂国家罪的刑罚为五年以上十年以下有期徒刑；情节特别严重的，处十年以上有期徒刑、无期徒刑或死刑。\n",
      "\n",
      "### 3. **武装叛乱、暴乱罪**\n",
      "- **构成要件**：甲策划武装割据，可能构成武装叛乱、暴乱罪。\n",
      "- **量刑**：根据《刑法》第120条，武装叛乱、暴乱的，处十年以上有期徒刑、无期徒刑或死刑。\n",
      "\n",
      "### 4. **参加恐怖组织罪**\n",
      "- **构成要件**：甲作为首要分子，可能构成参加恐怖组织罪。\n",
      "- **量刑**：根据《刑法》第120条，参加恐怖组织的，处五年以上有期徒刑、无期徒刑或死刑。\n",
      "\n",
      "---\n",
      "\n",
      "## 二、量刑原则\n",
      "\n",
      "1. **首要分子**：甲作为分裂组织的首要分子，应依法从重处罚。\n",
      "2. **情节严重**：若其行为导致严重后果（如造成人员伤亡、破坏社会秩序等），可依法从重处罚。\n",
      "3. **从轻或减轻处罚**：若甲有自首、立功、赔偿损失等情节，可依法从轻或减轻处罚。\n",
      "\n",
      "---\n",
      "\n",
      "## 三、结论\n",
      "\n",
      "甲的行为可能构成以下罪名：\n",
      "\n",
      "- **组织、领导恐怖活动组织罪**\n",
      "- **分裂国家罪**\n",
      "- **武装叛乱、暴乱罪**\n",
      "- **参加恐怖组织罪**\n",
      "\n",
      "根据其行为的严重程度、社会危害性、主观故意等因素，甲可能被判处**十年以上有期徒刑、无期徒刑或死刑**，具体量刑需由司法机关根据案件具体情况认定。\n",
      "\n",
      "---\n",
      "\n",
      "## 四、建议\n",
      "\n",
      "1. **司法机关**应依法对甲进行立案侦查，并依法提起公诉。\n",
      "2. **审判机关**应严格依据《刑法》及相关司法解释，准确适用罪名和量刑。\n",
      "3. **法律援助**：甲若需辩护，应依法委托律师参与诉讼。\n",
      "\n",
      "---\n",
      "\n",
      "如需进一步分析，可提供具体情节细节。<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : question_1}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = False, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "d9c3f89c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "本案涉及危险驾驶罪的构成要件、量刑平衡以及最终判决是否适当。以下从三个问题逐一分析，并结合法律规定和司法实践进行解答。\n",
      "\n",
      "---\n",
      "\n",
      "## 一、危险驾驶罪的构成要件是否全部满足？\n",
      "\n",
      "### 1. **危险驾驶罪的构成要件**  \n",
      "根据《刑法》第133条之一规定，危险驾驶罪的构成要件包括：  \n",
      "- **主观方面**：故意，即明知自己的行为具有危险性仍实施。  \n",
      "- **客观方面**：在道路上驾驶机动车，具有醉酒、无证、无牌、追逐竞驶、严重超速等情形之一，且危害公共安全。  \n",
      "\n",
      "### 2. **本案分析**  \n",
      "- **醉酒驾驶**：曹某某在案发时处于醉酒状态（虽未明确酒精浓度，但醉酒驾驶是危险驾驶罪的核心构成要件之一）。  \n",
      "- **无证驾驶**：曹某某无证驾驶，属于危险驾驶罪的加重情节。  \n",
      "- **无牌驾驶**：无牌驾驶也是危险驾驶罪的加重情节。  \n",
      "- **驾驶二轮摩托车**：二轮摩托车属于机动车，符合“驾驶机动车”的要件。  \n",
      "- **在道路上**：案发地点为涵洞路口，属于“道路”范围。  \n",
      "- **危害公共安全**：虽然未造成人员死亡或重伤，但其行为具有高度危险性，且与事故直接相关，构成“危害公共安全”的危险驾驶罪。  \n",
      "\n",
      "### 3. **结论**  \n",
      "**危险驾驶罪的构成要件已全部满足**。  \n",
      "- **醉酒驾驶**是危险驾驶罪的核心要件，即使未明确酒精浓度，但“醉酒驾驶”是危险驾驶罪的构成要件之一。  \n",
      "- **无证、无牌驾驶**属于加重情节，但并不影响危险驾驶罪的构成。  \n",
      "- **事故虽由双方过错造成，但曹某某的醉酒驾驶行为是直接导致事故发生的重要原因**，符合“危害公共安全”的要件。\n",
      "\n",
      "---\n",
      "\n",
      "## 二、量刑时如何平衡“无证+无牌+醉驾”从重情节与“赔偿谅解”从轻情节？\n",
      "\n",
      "### 1. **从重情节**  \n",
      "- **无证驾驶**：属于危险驾驶罪的加重情节，根据《刑法》第133条之一第2款，无证驾驶属于“情节严重”的情形。  \n",
      "- **无牌驾驶**：同样属于加重情节，可能被认定为“情节严重”。  \n",
      "- **醉酒驾驶**：属于危险驾驶罪的核心构成要件，且具有严重危害性。  \n",
      "- **事故责任**：曹某某虽未让行，但其醉酒驾驶行为是事故的直接原因，责任比例应认定为主要原因，属于“主要责任”或“同等责任”。  \n",
      "\n",
      "### 2. **从轻情节**  \n",
      "- **赔偿谅解**：根据《刑法》第67条，被告人赔偿被害人损失并获得谅解，可从轻处罚。  \n",
      "- **事故责任认定**：虽为共同过错，但曹某某是醉酒驾驶，具有更高危险性，责任比例应认定为主要原因。  \n",
      "- **认罪认罚**：若曹某某认罪认罚，可从轻处罚。  \n",
      "\n",
      "### 3. **量刑平衡建议**  \n",
      "- **从重情节**：无证、无牌、醉酒驾驶，属于危险驾驶罪的加重情节，且事故具有严重危害性，应认定为“情节严重”。  \n",
      "- **从轻情节**：赔偿谅解、事故责任认定为共同过错，可减轻处罚。  \n",
      "- **量刑范围**：根据《刑法》第133条之一第1款，危险驾驶罪的量刑范围为：  \n",
      "  - **情节严重**：拘役，并处罚金。  \n",
      "  - **情节特别严重**：三年以上七年以下有期徒刑。  \n",
      "\n",
      "### 4. **结论**  \n",
      "**量刑应综合考虑从重与从轻情节**，最终判决应在“拘役并处罚金”的范围内，但具体刑期应根据事故责任、赔偿谅解、认罪认罚等因素综合判断。\n",
      "\n",
      "---\n",
      "\n",
      "## 三、最终判处3个月有期徒刑并处罚金1000元是否适当？\n",
      "\n",
      "### 1. **法律依据**  \n",
      "- **危险驾驶罪**：根据《刑法》第133条之一，醉酒驾驶机动车的，处拘役，并处罚金。  \n",
      "- **从轻情节**：赔偿谅解、认罪认罚可减轻处罚。  \n",
      "- **从重情节**：无证、无牌驾驶、醉酒驾驶，属于“情节严重”，应从重处罚。\n",
      "\n",
      "### 2. **判决分析**  \n",
      "- **3个月拘役**：属于法定刑范围内的最低刑期，但考虑到曹某某是醉酒驾驶，且事故责任为主要原因，刑期可能偏轻。  \n",
      "- **罚金1000元**：属于“罚金数额较低”，但根据《刑法》第133条之一，罚金应根据情节严重程度决定，可能偏低。  \n",
      "- **事故责任认定为共同过错**：若认定为共同过错，曹某某的责任比例应为主要原因，刑期应相应加重。  \n",
      "- **赔偿谅解**：可减轻刑罚，但不能完全免除刑罚。  \n",
      "\n",
      "### 3. **建议**  \n",
      "- **刑期**：3个月拘役偏轻，应考虑加重刑期，如**3-6个月拘役**。  \n",
      "- **罚金**：1000元偏低，应提高至**2000-5000元**。  \n",
      "- **判决适当性**：若判决为3个月拘役并处罚金1000元，**可能属于从轻处罚**，但应结合具体责任比例和赔偿谅解综合判断。\n",
      "\n",
      "### 4. **结论**  \n",
      "**判处3个月有期徒刑并处罚金1000元是否适当，需结合具体责任比例、赔偿谅解、认罪认罚等因素综合判断**。  \n",
      "- 若曹某某为主要原因，且赔偿谅解，刑期可适当减轻，但**不应低于拘役3个月**。  \n",
      "- 若判决为3个月拘役并处罚金1000元，**属于从轻处罚，但刑期偏轻**，建议刑期应为**3-6个月拘役**，罚金为**2000-5000元**。\n",
      "\n",
      "---\n",
      "\n",
      "## 总结：\n",
      "\n",
      "| 问题 | 结论 |\n",
      "|------|------|\n",
      "| 1. 危险驾驶罪构成要件是否满足？ | 是，构成危险驾驶罪。 |\n",
      "| 2. 量刑是否平衡从重与从轻？ | 应综合考虑，建议刑期3-6个月，罚金2000-5000元。 |\n",
      "| 3. 判决是否适当？ | 若判决为3个月拘役并罚金1000元，属于从轻处罚，但刑期偏轻，建议适当加重。 |\n",
      "\n",
      "---\n",
      "\n",
      "如需进一步结合具体判例或司法解释进行分析，可提供更详细信息。<|im_end|>\n"
     ]
    }
   ],
   "source": [
    "messages = [\n",
    "    {\"role\" : \"user\", \"content\" : question_2}\n",
    "]\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # Must add for generation\n",
    "    enable_thinking = False, # Disable thinking\n",
    ")\n",
    "\n",
    "from transformers import TextStreamer\n",
    "_ = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    max_new_tokens = 20488, # Increase for longer outputs!\n",
    "    temperature = 0.6, top_p = 0.95, top_k = 20, # For thinking\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "f04c48cd",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/mnt/codes/qwen3/models/Qwen3-4B-unsloth-bnb-4bit'"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.config._name_or_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "18f8d263",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "PeftModelForCausalLM(\n",
       "  (base_model): LoraModel(\n",
       "    (model): Qwen3ForCausalLM(\n",
       "      (model): Qwen3Model(\n",
       "        (embed_tokens): Embedding(151936, 2560, padding_idx=151654)\n",
       "        (layers): ModuleList(\n",
       "          (0-1): 2 x Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (2): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (3): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (4): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (5): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (6): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (7-32): 26 x Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (33): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (34): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear(\n",
       "                (base_layer): Linear(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "          (35): Qwen3DecoderLayer(\n",
       "            (self_attn): Qwen3Attention(\n",
       "              (q_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=4096, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=4096, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (k_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (v_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=1024, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=1024, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (o_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=4096, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=4096, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (q_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (k_norm): Qwen3RMSNorm((128,), eps=1e-06)\n",
       "              (rotary_emb): LlamaRotaryEmbedding()\n",
       "            )\n",
       "            (mlp): Qwen3MLP(\n",
       "              (gate_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (up_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=2560, out_features=9728, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=2560, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=9728, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (down_proj): lora.Linear4bit(\n",
       "                (base_layer): Linear4bit(in_features=9728, out_features=2560, bias=False)\n",
       "                (lora_dropout): ModuleDict(\n",
       "                  (default): Identity()\n",
       "                )\n",
       "                (lora_A): ModuleDict(\n",
       "                  (default): Linear(in_features=9728, out_features=32, bias=False)\n",
       "                )\n",
       "                (lora_B): ModuleDict(\n",
       "                  (default): Linear(in_features=32, out_features=2560, bias=False)\n",
       "                )\n",
       "                (lora_embedding_A): ParameterDict()\n",
       "                (lora_embedding_B): ParameterDict()\n",
       "                (lora_magnitude_vector): ModuleDict()\n",
       "              )\n",
       "              (act_fn): SiLU()\n",
       "            )\n",
       "            (input_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "            (post_attention_layernorm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "          )\n",
       "        )\n",
       "        (norm): Qwen3RMSNorm((2560,), eps=1e-06)\n",
       "        (rotary_emb): LlamaRotaryEmbedding()\n",
       "      )\n",
       "      (lm_head): Linear(in_features=2560, out_features=151936, bias=False)\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Display the PEFT-wrapped model architecture (shows the injected LoRA\n",
     "# adapter layers). NOTE(review): the full repr is thousands of lines —\n",
     "# consider displaying model.config or model.print_trainable_parameters()\n",
     "# instead to keep the notebook readable.\n",
     "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "691683b7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<generator object Module.named_modules at 0x7f6221ced250>"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.named_modules() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "27826fae",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Detected local model directory: /mnt/codes/qwen3/models/Qwen3-4B-unsloth-bnb-4bit\n",
      "Unsloth: Merging LoRA weights into 4bit model...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/miniconda3/envs/qwen3/lib/python3.11/site-packages/peft/tuners/lora/bnb.py:348: UserWarning: Merge lora module to 4-bit linear may get different generations due to rounding errors.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Unsloth: Merging finished.\n",
      "Unsloth: Found skipped modules: ['model.layers.0.self_attn.q_proj', 'model.layers.0.self_attn.k_proj', 'model.layers.0.self_attn.v_proj', 'model.layers.0.self_attn.o_proj', 'model.layers.0.mlp.gate_proj', 'model.layers.0.mlp.up_proj', 'model.layers.0.mlp.down_proj', 'model.layers.1.self_attn.q_proj', 'model.layers.1.self_attn.k_proj', 'model.layers.1.self_attn.v_proj', 'model.layers.1.self_attn.o_proj', 'model.layers.1.mlp.gate_proj', 'model.layers.1.mlp.up_proj', 'model.layers.1.mlp.down_proj', 'model.layers.3.mlp.gate_proj', 'model.layers.3.mlp.up_proj', 'model.layers.3.mlp.down_proj', 'model.layers.4.self_attn.q_proj', 'model.layers.4.self_attn.k_proj', 'model.layers.4.self_attn.v_proj', 'model.layers.4.self_attn.o_proj', 'model.layers.4.mlp.gate_proj', 'model.layers.4.mlp.up_proj', 'model.layers.4.mlp.down_proj', 'model.layers.6.self_attn.q_proj', 'model.layers.6.self_attn.k_proj', 'model.layers.6.self_attn.v_proj', 'model.layers.6.self_attn.o_proj', 'model.layers.6.mlp.gate_proj', 'model.layers.6.mlp.up_proj', 'model.layers.6.mlp.down_proj', 'model.layers.33.self_attn.q_proj', 'model.layers.33.self_attn.k_proj', 'model.layers.33.self_attn.v_proj', 'model.layers.33.self_attn.o_proj', 'model.layers.34.self_attn.q_proj', 'model.layers.34.self_attn.k_proj', 'model.layers.34.self_attn.v_proj', 'model.layers.34.self_attn.o_proj', 'model.layers.34.mlp.gate_proj', 'model.layers.34.mlp.up_proj', 'model.layers.34.mlp.down_proj', 'lm_head']. Updating config.\n",
      "Unsloth: Saving merged 4bit model to Qwen3-8B-finetuned-4bit...\n",
      "Unsloth: Merged 4bit model saved.\n",
      "Unsloth: Merged 4bit model process completed.\n"
     ]
    }
   ],
   "source": [
    "# https://blog.csdn.net/weixin_53707653/article/details/147928644\n",
    "# 保存微调后的模型\n",
    "model.save_pretrained_merged(\n",
    "    save_directory=\"Qwen3-8B-finetuned-4bit\",  # 保存路径\n",
    "    tokenizer=tokenizer,  # 同时保存分词器\n",
    "    save_method=\"merged_4bit_forced\"  \n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "qwen3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
