{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "dd8179a2-fd59-4cc8-aaa9-e85b2b0c4446",
   "metadata": {},
   "source": [
    "### 1.数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1315150b-236a-491d-8bb8-e79909c818c5",
   "metadata": {},
   "outputs": [],
   "source": [
    "#读取数据\n",
    "academy_titles = []\n",
    "job_titles = []\n",
    "with open(\"academy_titles.txt\", encoding=\"utf-8\", mode=\"r\") as f:\n",
    "    for line in f:\n",
    "        academy_titles.append(line.strip())\n",
    "\n",
    "with open(\"job_titles.txt\", encoding=\"utf-8\", mode=\"r\") as f:\n",
    "    for line in f:\n",
    "        job_titles.append(line.strip())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5cc8b8aa-0359-4485-8bd7-47a0d0a0f3bd",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['【字节跳动内推】校招岗位全面开放，帮查进度！', '招聘兼职/ 笔试考务 /200-300 每人', '国企出版社招聘坐班兼职生']"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "job_titles[:3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d30c7206-c583-4862-a143-1c0810f22f92",
   "metadata": {},
   "outputs": [],
   "source": [
    "#合并列表,并添加label\n",
    "data_list = []\n",
    "\n",
    "for title in academy_titles:\n",
    "    data_list.append([title, 0])\n",
    "\n",
    "for title in job_titles:\n",
    "    data_list.append([title, 1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8bd3a6d2-2076-433a-a5d1-3f50b5d42f12",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[['北师教育学，你我一起努力，让胜利酣畅淋漓。', 0],\n",
       " ['考博英语词汇', 0],\n",
       " ['出售人大新闻学院2015年考研权威资料', 0],\n",
       " ['【脑科院 郭桃梅课题组】科研助理招聘', 0],\n",
       " ['管理学院的同学帮帮忙呐～', 0],\n",
       " ['关于师大物理系在职博士', 0],\n",
       " ['北大金融专硕需要考政治么', 0],\n",
       " ['大家认识这个心理学院的教授么？', 0],\n",
       " ['出876化学教育资料及333教育综合资料', 0],\n",
       " ['有没有考历史学的，北师大或者别的都可以', 0]]"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data_list[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "61919435-c0e4-447a-b668-66aa226d1846",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "77\n"
     ]
    }
   ],
   "source": [
    "#计算最长的标题\n",
    "max_len = 0\n",
    "for case in data_list:\n",
    "    max_len = max(max_len, len(case[0]) + 2)\n",
    "print(max_len)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d27c93ad-1aa5-40da-ab6b-733f214b0bbf",
   "metadata": {},
   "outputs": [],
   "source": [
    "#划分数据集\n",
    "from sklearn.model_selection import train_test_split\n",
    "train_list, dev_list = train_test_split(data_list, test_size=0.3, random_state=15, shuffle=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ab3cda01-d9af-4246-80bd-771333bb5f5e",
   "metadata": {},
   "source": [
    "### 2.参数设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "5af42c1b-3e9a-4e9c-9ade-3d81a7f1ee5a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time\n",
    "import random\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch import nn\n",
    "from tqdm import tqdm\n",
    "\n",
    "from transformers import get_linear_schedule_with_warmup, AdamW\n",
    "from transformers import BertTokenizer, BertForSequenceClassification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "cd26e83c-cb2c-4995-bbc6-7f580f44ef23",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = \"cuda\" if torch .cuda.is_available() else \"cpu\"\n",
    "max_train_epochs = 5\n",
    "warmup_proportion = 0.05\n",
    "gradient_accumulation_steps = 4\n",
    "train_bacth_size = 8\n",
    "valid_batch_size = 8\n",
    "test_batch_size = 8\n",
    "\n",
    "learning_rate = 2e-5\n",
    "weight_decay = 0.01\n",
    "max_grad_norm = 1.0\n",
    "cur_time = time.strftime(\"%Y-%m-%d_%H:%M:%S\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2eec440d-97f7-4e76-8860-2d0960e641df",
   "metadata": {},
   "source": [
    "### 3.定义Datase与DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "2467b686-ed46-47ab-9d52-e12dfd1bb045",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "478b2bb0-b493-46ec-a912-ff2f7e01357f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyDataset(torch.utils.data.Dataset):\n",
    "    def __init__(self, examples):\n",
    "        self.examples = examples\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.examples)\n",
    "    \n",
    "    def __getitem__(self, index):\n",
    "        example = self.examples[index]\n",
    "        title = example[0]\n",
    "        label = example[1]\n",
    "        # encode_plus返回所有的编码信息，具体如下：\n",
    "        # input_ids:是单词在词典中的编码\n",
    "        # token_type_ids’:区分两个句子的编码（上句全为0，下句全为1）\n",
    "        # attention_mask’:指定对哪些词进行self-Attention操作\n",
    "        r = tokenizer.encode_plus(title, max_length=max_len, padding=\"max_length\")\n",
    "        return title, label, index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "15cd8dcf-9573-4b75-8e61-cf4b89a7e68c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def the_collate_fn(batch):\n",
    "    r = tokenizer([b[0] for b in batch], padding=True)\n",
    "    input_ids = torch.LongTensor(r[\"input_ids\"])\n",
    "    attention_mask = torch.LongTensor(r[\"attention_mask\"])\n",
    "    #token_type_ids = torch.LongTensor(r[\"token_type_ids\"])\n",
    "    label = torch.LongTensor([b[1] for b in batch])\n",
    "    indexs = [b[2] for b in batch]\n",
    "    return input_ids, attention_mask, label, indexs  #, token_type_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a2ab2a89-c94b-46c1-9333-be56b9128d2c",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataet = MyDataset(train_list)\n",
    "train_data_loader = torch.utils.data.DataLoader(\n",
    "    train_dataet,\n",
    "    batch_size=train_bacth_size,\n",
    "    shuffle=True,\n",
    "    collate_fn=the_collate_fn,\n",
    ")\n",
    "dev_dataset = MyDataset(dev_list)\n",
    "dev_data_loader = torch.utils.data.DataLoader(\n",
    "    dev_dataset,\n",
    "    batch_size = valid_batch_size,\n",
    "    shuffle = False,\n",
    "    collate_fn = the_collate_fn\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2d5e4fbd-9e76-4ffb-a1d4-d141e0f0cb59",
   "metadata": {},
   "source": [
    "### 4.定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "73bdc672-117e-43ba-a67e-4297d18c458b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertForSequenceClassification: ['cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.predictions.transform.dense.bias', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight']\n",
      "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-chinese and are newly initialized: ['classifier.weight', 'classifier.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "warmup steps : 31\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\Miniconda3-latest-Windows-x86_64\\lib\\site-packages\\transformers\\optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "#直接使用预训练的BertForSequenceClassification模型\n",
    "model = BertForSequenceClassification.from_pretrained(\"bert-base-chinese\")\n",
    "model.to(device)\n",
    "\n",
    "t_total = len(train_data_loader)  #训练集大小\n",
    "num_warmup_steps = int(warmup_proportion * t_total)\n",
    "print(\"warmup steps : %d\" % num_warmup_steps)\n",
    "no_decay = [\"bias\", \"LayerNorm.weight\"]  #  no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n",
    "param_optimizer = list(model.named_parameters())\n",
    "optimizer_grouped_parameters = [\n",
    "    {'params':[p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],'weight_decay': weight_decay},\n",
    "    {'params':[p for n, p in param_optimizer if any(nd in n for nd in no_decay)],'weight_decay': 0.0}\n",
    "]\n",
    "optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, correct_bias=False)\n",
    "scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "861c6f7a-adce-47f8-8bcf-6c4d94ec0fb2",
   "metadata": {},
   "source": [
    "### 5.评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "b0518050-c4d8-489a-8b83-0a271cb27300",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_score():\n",
    "    y_true = []\n",
    "    y_pred = []\n",
    "    for step, batch in enumerate(tqdm(dev_data_loader)):\n",
    "        model.eval()\n",
    "        with torch.no_grad():\n",
    "            input_ids, attention_mask = (b.to(device) for b in batch[:2])\n",
    "        y_true += batch[2].numpy().tolist()\n",
    "        logist = model(input_ids, attention_mask)[0]\n",
    "        result = torch.argmax(logist, 1).cpu().numpy().tolist()\n",
    "        y_pred += result\n",
    "    correct = 0\n",
    "    for i in range(len(y_true)):\n",
    "        if y_true[i] == y_pred[i]:\n",
    "            correct += 1\n",
    "    accuracy = correct / len(y_pred)\n",
    "    \n",
    "    return accuracy"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0584e301-3c6d-46e3-bb02-a69f36b4f1ea",
   "metadata": {},
   "source": [
    "### 6.训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "5e656968-0f57-42d9-a3aa-985e8e8c8eeb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 622/622 [00:25<00:00, 24.82it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 1 Epoch Mean Loss 0.0005 Time 0.42 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 267/267 [00:03<00:00, 84.73it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9948429442100328\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 622/622 [00:24<00:00, 25.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 2 Epoch Mean Loss 0.0003 Time 0.40 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 267/267 [00:03<00:00, 86.75it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 622/622 [00:24<00:00, 25.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 3 Epoch Mean Loss 0.0002 Time 0.41 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 267/267 [00:03<00:00, 85.82it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 622/622 [00:25<00:00, 24.52it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 4 Epoch Mean Loss 0.0002 Time 0.42 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 267/267 [00:03<00:00, 87.11it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 622/622 [00:25<00:00, 24.82it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch = 5 Epoch Mean Loss 0.0001 Time 0.42 min\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████████████| 267/267 [00:03<00:00, 82.22it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "for epoch in range(max_train_epochs):\n",
    "    b_time = time.time()  # 开始时间\n",
    "    \n",
    "    model.train()\n",
    "    for step, batch in enumerate(tqdm(train_data_loader)):\n",
    "        input_ids, attention_mask, label = (b.to(device) for b in batch[:-1])\n",
    "        loss = model(input_ids, attention_mask, labels=label)\n",
    "        loss = loss[0]\n",
    "        loss.backward()\n",
    "        \n",
    "        if(step + 1) % gradient_accumulation_steps == 0:\n",
    "            optimizer.step()\n",
    "            scheduler.step()\n",
    "            optimizer.zero_grad()\n",
    "    print('Epoch = %d Epoch Mean Loss %.4f Time %.2f min' % (epoch+1, loss.item(), (time.time() - b_time)/60))\n",
    "    print(get_score())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "fad9778a-fb19-4903-a8c3-8ff93c8a672c",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model, 'bert_model.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "177bf21f-23dd-4456-804a-b37ba4ff54e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "bert_model = torch.load(\"bert_model.pkl\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f3c18749-8fdc-4458-b4fd-87411fce0fb7",
   "metadata": {},
   "source": [
    "### 7.测试模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "df1a77b4-3bd6-42bb-ad04-eabf70cdae06",
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_test(title):\n",
    "    r = tokenizer([title])\n",
    "    input_ids = torch.LongTensor(r[\"input_ids\"]).to(device)\n",
    "    attention_mask = torch.LongTensor(r[\"attention_mask\"]).to(device)\n",
    "    logist = bert_model(input_ids, attention_mask)[0]\n",
    "    result = torch.argmax(logist, 1).cpu().numpy().tolist()[0]\n",
    "    result = ['考研考博', '招聘信息'][result]\n",
    "    print(title, result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "8a402590-77ca-4316-a4a1-c801076b7d03",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "考研心得 招聘信息\n",
      "考北大实验室博士 考研考博\n",
      "考外校博士 考研考博\n",
      "北大实验室招博士 招聘信息\n",
      "工作or考研? 招聘信息\n",
      "急求自然语言处理工程师 招聘信息\n",
      "校招offer比较 招聘信息\n"
     ]
    }
   ],
   "source": [
    "# Spot-check the classifier on a few hand-written titles.\n",
    "for sample_title in [\n",
    "    '考研心得',\n",
    "    '考北大实验室博士',\n",
    "    '考外校博士',\n",
    "    '北大实验室招博士',\n",
    "    '工作or考研?',\n",
    "    '急求自然语言处理工程师',\n",
    "    '校招offer比较',\n",
    "]:\n",
    "    print_test(sample_title)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6d3965bf-95d0-4f13-8f6d-5ee48044f062",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "state": {},
    "version_major": 2,
    "version_minor": 0
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
