{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 微调BERT实现帖子分类\n",
    "\n",
    "## 1. 载入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "77\n"
     ]
    }
   ],
   "source": [
    "#! 1. Data preparation\n",
    "import os\n",
    "\n",
    "def read_titles(path):\n",
    "    \"\"\"Read one post title per line from path, stripping surrounding whitespace.\"\"\"\n",
    "    with open(path, encoding='utf8') as f:\n",
    "        return [line.strip() for line in f]\n",
    "\n",
    "# Titles from the two forum boards.\n",
    "academy_titles = read_titles('../Chapter03-05/academy_titles.txt')\n",
    "job_titles     = read_titles('../Chapter03-05/job_titles.txt')\n",
    "\n",
    "#! Merge the two lists, attaching a class label: 0 = academy post, 1 = job post.\n",
    "data_list  = [[title, 0] for title in academy_titles]\n",
    "data_list += [[title, 1] for title in job_titles]\n",
    "\n",
    "#! Longest title plus 2, leaving room for the [CLS] and [SEP] tokens.\n",
    "max_length = max((len(case[0]) + 2 for case in data_list), default=0)\n",
    "print(max_length)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. 数据划分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Hold out 30% of the labelled titles as a dev set; the fixed seed keeps the split reproducible.\n",
    "train_list, dev_list = train_test_split(\n",
    "    data_list, test_size=0.3, random_state=15, shuffle=True\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. 参数设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time\n",
    "import random\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from   torch import nn\n",
    "from   tqdm import tqdm\n",
    "\n",
    "from transformers import get_linear_schedule_with_warmup, AdamW\n",
    "from transformers import BertTokenizer, BertForSequenceClassification     # model with a sequence-classification head\n",
    "\n",
    "# Prefer the GPU when one is available.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# Training hyper-parameters.\n",
    "max_train_epochs            = 6\n",
    "warmup_proportion           = 0.05   # fraction of optimizer steps spent warming up the LR\n",
    "gradient_accumulation_steps = 2\n",
    "train_batch_size            = 8\n",
    "valid_batch_size            = train_batch_size\n",
    "test_batch_size             = train_batch_size\n",
    "data_workers                = 2\n",
    "\n",
    "learning_rate = 1e-6\n",
    "weight_decay  = 0.01\n",
    "max_grad_norm = 1.0\n",
    "# cur_time      = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n",
    "\n",
    "\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "class DataSet(torch.utils.data.Dataset):\n",
    "    def __init__(self, examples):\n",
    "        self.examples = examples\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.examples)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        example = self.examples[index]\n",
    "        title   = example[0]\n",
    "        label   = example[1]\n",
    "        # r       = tokenizer.encode_plus(title, max_length=max_length, padding=\"max_length\")\n",
    "        return title, label, index   #, r['token_type_ids'], label, index\n",
    "    \n",
    "\n",
    "def collate_fn(batch):\n",
    "    \"\"\"Tokenise a batch of (title, label, index) tuples into padded LongTensors.\"\"\"\n",
    "    titles = [item[0] for item in batch]\n",
    "    r = tokenizer(titles, padding=True)   # pad to the longest title in this batch\n",
    "    input_ids      = torch.LongTensor(r['input_ids'])\n",
    "    attention_mask = torch.LongTensor(r['attention_mask'])\n",
    "    label          = torch.LongTensor([item[1] for item in batch])\n",
    "    index          = [item[2] for item in batch]\n",
    "    return input_ids, attention_mask, label, index\n",
    "\n",
    "# The training loader reshuffles every epoch; the dev loader keeps a fixed order.\n",
    "train_dataset = DataSet(train_list)\n",
    "train_loader  = torch.utils.data.DataLoader(train_dataset,\n",
    "                    batch_size   = train_batch_size,\n",
    "                    shuffle      = True,\n",
    "                    num_workers  = data_workers,\n",
    "                    collate_fn   = collate_fn,\n",
    "                )\n",
    "\n",
    "dev_dataset   = DataSet(dev_list)\n",
    "dev_loader    = torch.utils.data.DataLoader(dev_dataset,\n",
    "                    batch_size   = valid_batch_size,   # was train_batch_size; use the dedicated validation constant\n",
    "                    shuffle      = False,\n",
    "                    num_workers  = data_workers,\n",
    "                    collate_fn   = collate_fn,\n",
    "                )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-chinese and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
      "/home/yangxianpku/.local/lib/python3.10/site-packages/transformers/optimization.py:429: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "warmup steps : 12\n"
     ]
    }
   ],
   "source": [
    "# Chinese BERT with a freshly initialised 2-class classification head.\n",
    "model = BertForSequenceClassification.from_pretrained('bert-base-chinese',  num_labels=2)\n",
    "model.to(device)\n",
    "\n",
    "# Total optimizer updates: (batches per epoch // accumulation steps) * epochs, plus one.\n",
    "t_total          = len(train_loader) // gradient_accumulation_steps * max_train_epochs + 1\n",
    "num_warmup_steps = int(warmup_proportion * t_total)\n",
    "print('warmup steps : %d' % num_warmup_steps)\n",
    "\n",
    "# Per the original BERT recipe, biases and LayerNorm weights are exempt from weight decay.\n",
    "no_decay        = ['bias', 'LayerNorm.weight']\n",
    "param_optimizer = list(model.named_parameters())\n",
    "\n",
    "def _decayed(name):\n",
    "    \"\"\"True when the parameter should receive weight decay.\"\"\"\n",
    "    return not any(nd in name for nd in no_decay)\n",
    "\n",
    "optimizer_grouped_parameters = [\n",
    "    {'params': [p for n, p in param_optimizer if _decayed(n)],     'weight_decay': weight_decay},\n",
    "    {'params': [p for n, p in param_optimizer if not _decayed(n)], 'weight_decay': 0.0},\n",
    "]\n",
    "# NOTE(review): transformers' AdamW is deprecated, but torch.optim.AdamW has no\n",
    "# correct_bias switch, so the transformers version is kept to preserve behaviour.\n",
    "optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, correct_bias=False)\n",
    "# Linear schedule: LR warms up over num_warmup_steps, then decays linearly to 0.\n",
    "scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_score():\n",
    "    \"\"\"Return accuracy of the global model on dev_loader (leaves model in eval mode).\"\"\"\n",
    "    y_true, y_pred = [], []\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for batch in tqdm(dev_loader):\n",
    "            input_ids, attention_mask = (b.to(device) for b in batch[:2])\n",
    "            y_true.extend(batch[2].numpy().tolist())\n",
    "            logits = model(input_ids, attention_mask)[0]   #! without labels, the first output is the logits\n",
    "            y_pred.extend(torch.argmax(logits, 1).cpu().numpy().tolist())\n",
    "    correct = sum(1 for t, p in zip(y_true, y_pred) if t == p)\n",
    "    return correct / len(y_pred)\n",
    "\n",
    "def print_test(title):\n",
    "    \"\"\"Classify a single title and print it next to the predicted board name.\"\"\"\n",
    "    r              = tokenizer([title])\n",
    "    input_ids      = torch.LongTensor(r['input_ids']).to(device)\n",
    "    attention_mask = torch.LongTensor(r['attention_mask']).to(device)\n",
    "    logits         = model(input_ids, attention_mask)[0]\n",
    "    pred           = torch.argmax(logits, 1).cpu().numpy().tolist()[0]\n",
    "    print(title, ['考研考博', '招聘信息'][pred])\n",
    "def print_cases():\n",
    "    \"\"\"Spot-check the model on a few hand-picked titles from both boards.\"\"\"\n",
    "    for title in ('考研心得', '北大实验室博士', '考外校博士', '北大实验室招博士',\n",
    "                  '工作or考研?', '急求自然语言处理工程师', '校招offer比较'):\n",
    "        print_test(title)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5. 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/84 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 84/84 [01:24<00:00,  1.00s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 1 Epoch Mean Loss 0.2757 Time 1.40 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 36/36 [00:07<00:00,  4.56it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8989547038327527\n",
      "考研心得 考研考博\n",
      "北大实验室博士 招聘信息\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 考研考博\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 84/84 [01:19<00:00,  1.06it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 2 Epoch Mean Loss 0.3479 Time 1.32 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 36/36 [00:07<00:00,  4.59it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9512195121951219\n",
      "考研心得 考研考博\n",
      "北大实验室博士 招聘信息\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 考研考博\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 84/84 [01:15<00:00,  1.11it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 3 Epoch Mean Loss 0.0597 Time 1.26 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 36/36 [00:08<00:00,  4.08it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9686411149825784\n",
      "考研心得 考研考博\n",
      "北大实验室博士 招聘信息\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 考研考博\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 84/84 [01:11<00:00,  1.17it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 4 Epoch Mean Loss 0.0704 Time 1.20 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 36/36 [00:09<00:00,  3.78it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9790940766550522\n",
      "考研心得 考研考博\n",
      "北大实验室博士 招聘信息\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 考研考博\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 84/84 [01:13<00:00,  1.15it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 5 Epoch Mean Loss 0.2806 Time 1.22 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 36/36 [00:09<00:00,  3.71it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9825783972125436\n",
      "考研心得 考研考博\n",
      "北大实验室博士 考研考博\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 考研考博\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 84/84 [01:12<00:00,  1.16it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 6 Epoch Mean Loss 0.0465 Time 1.21 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 36/36 [00:07<00:00,  4.71it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9790940766550522\n",
      "考研心得 考研考博\n",
      "北大实验室博士 考研考博\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 考研考博\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    }
   ],
   "source": [
    "# Gradient accumulation: gradients from gradient_accumulation_steps mini-batches\n",
    "# are summed before each optimizer update, simulating a larger effective batch\n",
    "# size without increasing GPU memory usage.\n",
    "for epoch in range(max_train_epochs):\n",
    "    b_time = time.time()\n",
    "    model.train()\n",
    "    epoch_loss  = 0.0   # running sum of per-batch losses, for the true epoch mean\n",
    "    num_batches = 0\n",
    "    optimizer.zero_grad()   # make sure no stale gradients leak into this epoch\n",
    "    for step, batch in enumerate(tqdm(train_loader)):\n",
    "        input_ids, attention_mask, label = (b.to(device) for b in batch[:-1])\n",
    "        loss = model(input_ids, attention_mask, labels=label)[0]   #! with labels, output[0] is the loss\n",
    "        epoch_loss  += loss.item()\n",
    "        num_batches += 1\n",
    "        # Scale so the summed gradients match a single large-batch step.\n",
    "        (loss / gradient_accumulation_steps).backward()\n",
    "        if (step + 1) % gradient_accumulation_steps == 0:\n",
    "            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)   # max_grad_norm was configured but never used\n",
    "            optimizer.step()         # apply the accumulated gradients\n",
    "            scheduler.step()         # advance the warmup/decay schedule\n",
    "            optimizer.zero_grad()    # clear the accumulated gradients\n",
    "    # Flush leftover gradients when the batch count is not a multiple of the accumulation steps.\n",
    "    if num_batches % gradient_accumulation_steps != 0:\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n",
    "        optimizer.step()\n",
    "        scheduler.step()\n",
    "        optimizer.zero_grad()\n",
    "    # The original printed only the final batch's loss while labelling it the epoch mean.\n",
    "    print('Epoch = %d Epoch Mean Loss %.4f Time %.2f min' % (epoch+1, epoch_loss / num_batches, (time.time() - b_time)/60))\n",
    "    print(get_score())\n",
    "    print_cases()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
