{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f07b7aa1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n",
      "12.4\n",
      "2.6.0+cu124\n"
     ]
    }
   ],
    "source": [
     "# Environment sanity check: confirm CUDA is visible and report CUDA/torch versions\n",
     "import torch\n",
     "print(torch.cuda.is_available()) \n",
     "print(torch.version.cuda)\n",
     "print(torch.__version__) \n"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b0d5d24a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
    "source": [
     "# Dependencies for QLoRA fine-tuning: 4-bit quantization (BitsAndBytesConfig),\n",
     "# LoRA adapters (peft), and device/precision handling (accelerate).\n",
     "import torch\n",
     "from transformers import (\n",
     "    AutoModelForCausalLM,\n",
     "    AutoTokenizer,\n",
     "    TrainingArguments,\n",
     "    BitsAndBytesConfig,\n",
     "    AdamW,  # NOTE(review): deprecated in transformers (emits FutureWarning); prefer torch.optim.AdamW\n",
     "    get_linear_schedule_with_warmup\n",
     ")\n",
     "from peft import LoraConfig, get_peft_model,prepare_model_for_kbit_training\n",
     "from datasets import load_dataset\n",
     "from accelerate import Accelerator\n",
     "from torch.utils.data import DataLoader\n",
     "from tqdm import tqdm\n",
     "import math"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "760d0af9",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:10<00:00,  5.16s/it]\n",
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:590: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`. This was detected when initializing the generation config instance, which means the corresponding file may hold incorrect parameterization and should be fixed.\n",
      "  warnings.warn(\n",
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:595: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.6` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`. This was detected when initializing the generation config instance, which means the corresponding file may hold incorrect parameterization and should be fixed.\n",
      "  warnings.warn(\n",
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:590: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.\n",
      "  warnings.warn(\n",
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:595: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.6` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.\n",
      "  warnings.warn(\n",
      "Map: 100%|██████████| 15011/15011 [00:03<00:00, 3753.69 examples/s]\n"
     ]
    }
   ],
   "source": [
    "accelerator = Accelerator(mixed_precision=\"fp16\")\n",
    "model_path = \"./lmsys/vicuna-7b-v1.5\"\n",
    "dataset_name = \"./datasets/databricks-dolly-15k\"\n",
    "output_dir = \"./outputs/vicuna-7b-lora\"\n",
    "batch_size = 16 \n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
    "tokenizer.pad_token = tokenizer.eos_token\n",
    "tokenizer.padding_side = \"right\"\n",
    "# 4-bit量化加载模型\n",
    "bnb_config = BitsAndBytesConfig(\n",
    "    load_in_4bit=True,\n",
    "    bnb_4bit_quant_type=\"nf4\",\n",
    "    bnb_4bit_compute_dtype=torch.float16,\n",
    ")\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_path,\n",
    "    quantization_config=bnb_config,\n",
    "    device_map=\"auto\",\n",
    "    torch_dtype=torch.float16,\n",
    ")\n",
    "model = prepare_model_for_kbit_training(model) \n",
    "if torch.cuda.get_device_properties(0).total_memory < 20e9:  # 显存 < 20GB\n",
    "    model.gradient_checkpointing_enable()\n",
    "    model.config.use_cache = False \n",
    "\n",
    "# 添加LoRA适配器\n",
    "peft_config = LoraConfig(\n",
    "    r=16,\n",
    "    lora_alpha=32,\n",
    "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n",
    "    lora_dropout=0.05,\n",
    "    bias=\"none\",\n",
    "    task_type=\"CAUSAL_LM\",\n",
    ")\n",
    "model = get_peft_model(model, peft_config)\n",
    "\n",
    "# 加载并格式化数据集\n",
    "dataset = load_dataset(dataset_name, split=\"train\")\n",
    "def preprocess(batch):\n",
    "    texts = [\n",
    "        f\"### Instruction:\\n{ins}\\n\\n### Context:\\n{ctx if ctx else ''}\\n\\n### Response:\\n{res}\"\n",
    "        for ins, ctx, res in zip(batch[\"instruction\"], batch[\"context\"], batch[\"response\"])\n",
    "    ]\n",
    "    tokenized = tokenizer(\n",
    "        texts,\n",
    "        padding=\"max_length\",\n",
    "        truncation=True,\n",
    "        max_length=1024,\n",
    "        return_tensors=\"pt\"\n",
    "    )\n",
    "    tokenized[\"labels\"] = tokenized[\"input_ids\"].clone()\n",
    "    tokenized[\"labels\"][tokenized[\"labels\"] == tokenizer.pad_token_id] = -100\n",
    "    return tokenized\n",
    "processed_dataset = dataset.map(preprocess, batched=True, remove_columns=dataset.column_names)\n",
    "processed_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])\n",
    "train_dataloader = DataLoader(processed_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49ec5500",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\transformers\\optimization.py:591: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# 优化器\n",
    "num_epochs = 3\n",
    "steps_per_epoch = len(train_dataloader)\n",
    "num_training_steps = num_epochs * steps_per_epoch\n",
    "optimizer = AdamW(model.parameters(), lr=2e-5)\n",
    "lr_scheduler = get_linear_schedule_with_warmup(\n",
    "    optimizer,\n",
    "    num_warmup_steps = min(70, int(0.1 * num_training_steps)),  # 不超过 100 步\n",
    "    num_training_steps = num_training_steps\n",
    ")\n",
    "# 使用accelerate准备组件\n",
    "model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n",
    "    model, optimizer, train_dataloader, lr_scheduler\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "432dccb7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:   0%|          | 0/1877 [00:00<?, ?it/s]`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.\n",
      "c:\\Users\\Administrator\\.conda\\envs\\llm\\Lib\\site-packages\\torch\\_dynamo\\eval_frame.py:745: UserWarning: torch.utils.checkpoint: the use_reentrant parameter should be passed explicitly. In version 2.5 we will raise an exception if use_reentrant is not passed. use_reentrant=False is recommended, but if you need to preserve the current default behavior, you can pass use_reentrant=True. Refer to docs for more details on the differences between the two variants.\n",
      "  return fn(*args, **kwargs)\n",
      "Epoch 0:   0%|          | 1/1877 [00:06<3:23:52,  6.52s/it, loss=1.64, lr=2.86e-7]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 0, Loss: 1.6383, LR: 2.86e-07\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:   3%|▎         | 51/1877 [04:41<2:48:07,  5.52s/it, loss=1.38, lr=1.46e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 50, Loss: 1.3810, LR: 1.46e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:   5%|▌         | 101/1877 [09:16<2:42:42,  5.50s/it, loss=1.75, lr=1.99e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 100, Loss: 1.7515, LR: 1.99e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:   8%|▊         | 151/1877 [13:51<2:38:20,  5.50s/it, loss=1.27, lr=1.97e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 150, Loss: 1.2660, LR: 1.97e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  11%|█         | 201/1877 [18:25<2:32:29,  5.46s/it, loss=1.12, lr=1.95e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 200, Loss: 1.1226, LR: 1.95e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  13%|█▎        | 251/1877 [22:59<2:27:29,  5.44s/it, loss=1.65, lr=1.93e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 250, Loss: 1.6468, LR: 1.93e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  16%|█▌        | 301/1877 [27:34<2:24:31,  5.50s/it, loss=1.49, lr=1.92e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 300, Loss: 1.4924, LR: 1.92e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  19%|█▊        | 351/1877 [32:09<2:19:52,  5.50s/it, loss=1.34, lr=1.9e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 350, Loss: 1.3417, LR: 1.90e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  21%|██▏       | 401/1877 [36:43<2:15:10,  5.50s/it, loss=1.33, lr=1.88e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 400, Loss: 1.3258, LR: 1.88e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  24%|██▍       | 451/1877 [41:18<2:10:45,  5.50s/it, loss=1.25, lr=1.86e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 450, Loss: 1.2473, LR: 1.86e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  27%|██▋       | 501/1877 [45:52<2:06:02,  5.50s/it, loss=1.76, lr=1.84e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 500, Loss: 1.7613, LR: 1.84e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  29%|██▉       | 551/1877 [50:26<2:01:36,  5.50s/it, loss=1.6, lr=1.83e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 550, Loss: 1.5973, LR: 1.83e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  32%|███▏      | 601/1877 [55:01<1:56:46,  5.49s/it, loss=1.58, lr=1.81e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 600, Loss: 1.5759, LR: 1.81e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  35%|███▍      | 651/1877 [59:34<1:51:59,  5.48s/it, loss=1.54, lr=1.79e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 650, Loss: 1.5382, LR: 1.79e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  37%|███▋      | 701/1877 [1:04:08<1:47:51,  5.50s/it, loss=1.46, lr=1.77e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 700, Loss: 1.4554, LR: 1.77e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  40%|████      | 751/1877 [1:08:42<1:42:30,  5.46s/it, loss=1.52, lr=1.76e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 750, Loss: 1.5186, LR: 1.76e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  43%|████▎     | 801/1877 [1:13:17<1:38:24,  5.49s/it, loss=1.32, lr=1.74e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 800, Loss: 1.3210, LR: 1.74e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  45%|████▌     | 851/1877 [1:17:52<1:34:01,  5.50s/it, loss=1.58, lr=1.72e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 850, Loss: 1.5801, LR: 1.72e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  48%|████▊     | 901/1877 [1:22:26<1:29:16,  5.49s/it, loss=1.53, lr=1.7e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 900, Loss: 1.5276, LR: 1.70e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  51%|█████     | 951/1877 [1:27:01<1:24:37,  5.48s/it, loss=1.64, lr=1.68e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 950, Loss: 1.6449, LR: 1.68e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  53%|█████▎    | 1001/1877 [1:31:36<1:20:19,  5.50s/it, loss=1.69, lr=1.67e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1000, Loss: 1.6860, LR: 1.67e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  56%|█████▌    | 1051/1877 [1:36:10<1:15:34,  5.49s/it, loss=1.53, lr=1.65e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1050, Loss: 1.5251, LR: 1.65e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  59%|█████▊    | 1101/1877 [1:40:45<1:11:08,  5.50s/it, loss=1.7, lr=1.63e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1100, Loss: 1.6965, LR: 1.63e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  61%|██████▏   | 1151/1877 [1:45:19<1:06:19,  5.48s/it, loss=1.54, lr=1.61e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1150, Loss: 1.5364, LR: 1.61e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  64%|██████▍   | 1201/1877 [1:49:53<1:01:51,  5.49s/it, loss=1.2, lr=1.59e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1200, Loss: 1.2015, LR: 1.59e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  67%|██████▋   | 1251/1877 [1:54:28<57:26,  5.51s/it, loss=1.51, lr=1.58e-5]  "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1250, Loss: 1.5071, LR: 1.58e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  69%|██████▉   | 1301/1877 [1:59:03<52:48,  5.50s/it, loss=1.21, lr=1.56e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1300, Loss: 1.2052, LR: 1.56e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  72%|███████▏  | 1351/1877 [2:03:37<48:14,  5.50s/it, loss=1.5, lr=1.54e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1350, Loss: 1.4985, LR: 1.54e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  75%|███████▍  | 1401/1877 [2:08:12<43:27,  5.48s/it, loss=1.52, lr=1.52e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1400, Loss: 1.5177, LR: 1.52e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  77%|███████▋  | 1451/1877 [2:12:51<39:31,  5.57s/it, loss=1.31, lr=1.5e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1450, Loss: 1.3057, LR: 1.50e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  80%|███████▉  | 1501/1877 [2:17:21<33:37,  5.37s/it, loss=1.34, lr=1.49e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1500, Loss: 1.3389, LR: 1.49e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  83%|████████▎ | 1551/1877 [2:21:51<29:20,  5.40s/it, loss=1.22, lr=1.47e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1550, Loss: 1.2159, LR: 1.47e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  85%|████████▌ | 1601/1877 [2:26:21<24:50,  5.40s/it, loss=1.45, lr=1.45e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1600, Loss: 1.4471, LR: 1.45e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  88%|████████▊ | 1651/1877 [2:30:50<20:20,  5.40s/it, loss=1.23, lr=1.43e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1650, Loss: 1.2336, LR: 1.43e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  91%|█████████ | 1701/1877 [2:35:20<15:50,  5.40s/it, loss=0.657, lr=1.41e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1700, Loss: 0.6570, LR: 1.41e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  93%|█████████▎| 1751/1877 [2:39:50<11:20,  5.40s/it, loss=1.54, lr=1.4e-5]  "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1750, Loss: 1.5409, LR: 1.40e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  96%|█████████▌| 1801/1877 [2:44:20<06:50,  5.40s/it, loss=1.41, lr=1.38e-5]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1800, Loss: 1.4096, LR: 1.38e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0:  99%|█████████▊| 1851/1877 [2:48:50<02:20,  5.40s/it, loss=1.5, lr=1.36e-5] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0, Step 1850, Loss: 1.5050, LR: 1.36e-05\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 0: 100%|██████████| 1877/1877 [2:51:07<00:00,  5.47s/it, loss=1.29, lr=1.35e-5]\n"
     ]
    }
   ],
   "source": [
    "model.train()\n",
    "for epoch in range(1):\n",
    "    progress_bar = tqdm(train_dataloader, disable=not accelerator.is_local_main_process,desc=f\"Epoch {epoch}\")\n",
    "    for step, batch in enumerate(progress_bar):\n",
    "        outputs = model(**batch)\n",
    "        loss = outputs.loss\n",
    "        accelerator.backward(loss)\n",
    "        optimizer.step()\n",
    "        lr_scheduler.step()\n",
    "        optimizer.zero_grad() \n",
    "\n",
    "        if accelerator.is_local_main_process and step % 10 == 0:\n",
    "            progress_bar.set_postfix(loss=loss.item(), lr=lr_scheduler.get_last_lr()[0]) # 实时显示训练指标\n",
    "        if step % 50 == 0:\n",
    "            print(f\"Epoch {epoch}, Step {step}, Loss: {loss.item():.4f}, LR: {lr_scheduler.get_last_lr()[0]:.2e}\")\n",
    "\n",
    "accelerator.wait_for_everyone()\n",
    "if accelerator.is_main_process:\n",
    "    accelerator.save_model(model, \"./outputs/vicuna-7b-lora\")\n",
    "    tokenizer.save_pretrained(\"./outputs/vicuna-7b-lora\")\n",
    "    peft_config.save_pretrained(output_dir)\n",
    "accelerator.end_training()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
