{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "398a42d5-c92f-4df8-8c6a-91cc35e584f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"使用 Hugging Face Transformers 和 DeepSpeed 在单机多卡上微调 BERT 模型（适用于分类任务） \n",
    "deepspeed配置文件的内容如下：\n",
    "{\n",
    "  \"train_micro_batch_size_per_gpu\": 8,\n",
    "  \"gradient_accumulation_steps\": 2,\n",
    "  \"optimizer\": {\n",
    "    \"type\": \"AdamW\",\n",
    "    \"params\": {\n",
    "      \"lr\": 5e-5,\n",
    "      \"weight_decay\": 0.01\n",
    "    }\n",
    "  },\n",
    "  \"fp16\": {\n",
    "    \"enabled\": true,\n",
    "    \"auto_cast\": false\n",
    "  },\n",
    "  \"zero_optimization\": {\n",
    "    \"stage\": 2,\n",
    "    \"allgather_partitions\": false\n",
    "  },\n",
    "  \"steps_per_print\": 50\n",
    "}\n",
    "配置中各个属性解释如下：\n",
    "train_micro_batch_size_per_gpu: 8 - 这个参数定义了每个GPU上的微批次大小。在你的情况下，这意味着每次前向传播和反向传播会处理8个样本\n",
    "gradient_accumulation_steps: 2  - 梯度累积步数。当设置为2时，DeepSpeed会累积2个微批次的梯度后才更新模型权重。这相当于有效批次大小为16（8×2）\n",
    "optimizer: 优化器配置\n",
    "  type: \"AdamW\" - 指定使用AdamW优化器，这是Adam优化器的一个变体，加入了权重衰减的正则化项\n",
    "  params: 优化器参数\n",
    "    lr: 5e-5 - 学习率设置为0.00005，这是一个相对较小的学习率，适合微调预训练模型\n",
    "    weight_decay: 0.01 - 权重衰减系数，用于防止过拟合\n",
    "fp16: 混合精度训练配置\n",
    "  enabled: true - 启用FP16（半精度）训练，可以显著减少内存使用并加速训练\n",
    "  auto_cast: false - 禁用自动类型转换，意味着你需要手动管理精度转换\n",
    "zero_optimization: ZeRO（零冗余优化器）配置\n",
    "  stage: 2 - 启用ZeRO阶段2优化，会优化梯度状态和优化器状态的分区\n",
    "  allgather_partitions: false - 禁用allgather分区，可以减少通信开销但可能增加内存使用\n",
    "steps_per_print: 50 - 每50个步骤打印一次训练信息，包括损失值等指标。这有助于监控训练进度，而不会过于频繁地输出信息    \n",
    "\"\"\"\n",
    "import os\n",
    "from transformers import (\n",
    "    AutoTokenizer, \n",
    "    AutoModelForSequenceClassification,\n",
    "    TrainingArguments, \n",
    "    Trainer\n",
    ")\n",
    "from datasets import load_dataset\n",
    "# INFO-level logging surfaces DeepSpeed initialization messages and warnings\n",
    "import logging\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "\n",
    "# 1. Distributed-environment variables (key step: DeepSpeed needs these to\n",
    "#    initialize inside a notebook, where no launcher sets them).\n",
    "# NOTE(review): WORLD_SIZE=1 / RANK=0 pins this to a single process (one GPU).\n",
    "# For actual multi-GPU training, launch via `deepspeed`/`torchrun`, which set\n",
    "# these variables per process -- confirm before relying on the multi-GPU claim.\n",
    "os.environ['MASTER_ADDR'] = 'localhost'\n",
    "os.environ['MASTER_PORT'] = '29500'\n",
    "os.environ['RANK'] = '0'\n",
    "os.environ['LOCAL_RANK'] = '0'\n",
    "os.environ['WORLD_SIZE'] = '1'\n",
    "\n",
    "# 2. Load tokenizer and model (BERT with a 2-label classification head)\n",
    "model_name = \"bert-base-uncased\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)\n",
    "\n",
    "# 3. Data preprocessing (pad every example to one fixed length)\n",
    "def tokenize_fn(examples):\n",
    "    \"\"\"Tokenize a batch of IMDB texts to a fixed length of 128 tokens.\n",
    "\n",
    "    Note: no return_tensors=\"pt\" here. datasets.map() stores its results\n",
    "    as Arrow lists, so tensors would be converted straight back (and some\n",
    "    datasets versions raise an error on batched map with tensor outputs);\n",
    "    the Trainer's collator builds the tensors later.\n",
    "    \"\"\"\n",
    "    return tokenizer(\n",
    "        examples[\"text\"],\n",
    "        truncation=True,\n",
    "        max_length=128,\n",
    "        padding=\"max_length\",  # fixed length so the default collator can stack\n",
    "    )\n",
    "\n",
    "# 10% of train + 10% of test, then re-split 80/20 into train/eval\n",
    "dataset = load_dataset(\"imdb\", split=\"train[:10%]+test[:10%]\")\n",
    "tokenized_dataset = dataset.map(tokenize_fn, batched=True).train_test_split(test_size=0.2)\n",
    "\n",
    "# 4. Training arguments\n",
    "# NOTE: batch size, grad accumulation, lr, weight decay and fp16 are\n",
    "# duplicated in ds_config.json -- keep them identical (or set them to\n",
    "# \"auto\" in the DeepSpeed config), otherwise the HF Trainer reports a\n",
    "# DeepSpeed config mismatch.\n",
    "training_args = TrainingArguments(\n",
    "    output_dir=\"./results\",\n",
    "    per_device_train_batch_size=8,  # matches train_micro_batch_size_per_gpu\n",
    "    num_train_epochs=1,\n",
    "    fp16=True,                    # matches fp16.enabled in ds_config.json\n",
    "    gradient_accumulation_steps=2,\n",
    "    learning_rate=5e-5,\n",
    "    weight_decay=0.01,  # keep in sync with ds_config.json\n",
    "    deepspeed=\"./ds_config.json\",\n",
    "    report_to=[],                 # disable wandb/tensorboard reporting\n",
    "    save_strategy=\"steps\",        # save checkpoints by step count\n",
    "    save_steps=100,               # checkpoint every 100 steps\n",
    "    save_total_limit=2,           # keep at most 2 checkpoints under ./results\n",
    "    # NOTE(review): renamed to `eval_strategy` in transformers >= 4.41 --\n",
    "    # adjust if the notebook is run with a newer transformers version.\n",
    "    evaluation_strategy=\"steps\",  # evaluate every `eval_steps`\n",
    "    eval_steps=100,               # evaluate every 100 steps\n",
    "    save_on_each_node=True,       # save on every node (no-op on a single node)\n",
    "    disable_tqdm=False\n",
    ")\n",
    "\n",
    "# 5. Build the Trainer\n",
    "# No data_collator is needed: tokenize_fn pads every example to max_length,\n",
    "# so the default collator can stack the fixed-size features directly.\n",
    "# No compute_metrics is passed, so evaluation reports loss only.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenized_dataset[\"train\"],\n",
    "    eval_dataset=tokenized_dataset[\"test\"],\n",
    ")\n",
    "\n",
    "# 6. Launch training (notebook-friendly wrapper)\n",
    "from IPython.display import clear_output\n",
    "\n",
    "def notebook_train():\n",
    "    \"\"\"Run trainer.train(), clear the verbose cell output, print a summary.\"\"\"\n",
    "    train_result = trainer.train()\n",
    "    # trainer.save_model(\"./results/manual_save\")  # force an extra save if needed\n",
    "    # Clear BEFORE printing the summary: the original printed train_result\n",
    "    # first and then called clear_output(), which immediately erased it.\n",
    "    clear_output()\n",
    "    print(\"train_result: \", train_result)\n",
    "    print(\"训练完成！\")\n",
    "    print(\"训练结果已保存到:\", training_args.output_dir)\n",
    "\n",
    "notebook_train()  # run the training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b271962d-e9fb-4f6b-a258-5bc01b02265f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "hlf_old_env",
   "language": "python",
   "name": "hlf_old_env"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
