{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c21cb7b9",
   "metadata": {},
   "source": [
    "# 预训练模型\n",
    "参考：https://blog.csdn.net/weixin_44750512/article/details/123236934"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "4fbdece7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.optim.lr_scheduler import ReduceLROnPlateau\n",
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "\n",
    "import transformers\n",
    "from transformers import BertTokenizer ,BertConfig, AdamW, BertModel\n",
    "\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cbd53b86",
   "metadata": {},
   "source": [
    "## 加载数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "32bd302e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# df=pd.read_csv(train_path,encoding='gbk')\n",
    "# text_list=df.iloc[:,0].tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "c3b5f718",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 101, 1091, 1762,  ...,    0,    0,    0],\n",
       "        [ 101, 2458, 2399,  ...,    0,    0,    0],\n",
       "        [ 101, 6937, 3247,  ...,    0,    0,    0],\n",
       "        ...,\n",
       "        [ 101, 3766, 3300,  ...,    0,    0,    0],\n",
       "        [ 101, 3173, 1094,  ...,    0,    0,    0],\n",
       "        [ 101,  794, 6073,  ...,    0,    0,    0]])"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# # 是句子中每个字对应的id\n",
    "# input_ids=tok['input_ids']\n",
    "# input_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "05f9f228",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([98387, 175])"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# input_ids.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "a3cfefb5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([], size=(0, 2), dtype=torch.int64)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# # 是句子标识符id，由于情感分类只涉及到一个句子，所以该标识符都是0。\n",
    "# token_type_ids=tok['token_type_ids']\n",
    "# token_type_ids.nonzero()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "b6d58d1e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1, 1, 1,  ..., 0, 0, 0],\n",
       "        [1, 1, 1,  ..., 0, 0, 0],\n",
       "        [1, 1, 1,  ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [1, 1, 1,  ..., 0, 0, 0],\n",
       "        [1, 1, 1,  ..., 0, 0, 0],\n",
       "        [1, 1, 1,  ..., 0, 0, 0]])"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# # 少于max_len的部分补0\n",
    "# attention_mask=tok['attention_mask']\n",
    "# attention_mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "ec3b0649",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([98387, 175])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# attention_mask.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "f9fa1e3f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 0., -1.,  1.,  ...,  0.,  1.,  0.])"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# label=pd.read_csv('./数据集/train.csv',encoding='gbk').iloc[:,1].tolist()\n",
    "# label=torch.Tensor(label)\n",
    "# label"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "71945e78",
   "metadata": {},
   "source": [
    "### 数据编码成Bert需要的输入格式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a7da70e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same as the exploratory cells above, wrapped into a reusable function\n",
    "def encode(max_len, vocab_path, text_list):\n",
    "    \"\"\"Tokenize a list of texts into BERT input tensors.\n",
    "\n",
    "    Args:\n",
    "        max_len: maximum sequence length; longer texts are truncated.\n",
    "        vocab_path: path to the BERT vocab file loaded by BertTokenizer.\n",
    "        text_list: list of raw text strings to encode.\n",
    "\n",
    "    Returns:\n",
    "        (input_ids, token_type_ids, attention_mask) PyTorch tensors,\n",
    "        each of shape (len(text_list), padded_len).\n",
    "    \"\"\"\n",
    "    # Load the tokenizer that encodes text into BERT's input format\n",
    "    tok=BertTokenizer.from_pretrained(vocab_path)\n",
    "    # Run the tokenizer over the whole batch of texts\n",
    "    tok=tok(\n",
    "        text_list,\n",
    "        padding=True,\n",
    "        truncation=True,\n",
    "        max_length=max_len,\n",
    "        return_tensors='pt'  # return PyTorch tensors\n",
    "    )\n",
    "    \n",
    "    # Unpack the pieces BERT expects as input\n",
    "    # Token id of each character in the sentence\n",
    "    input_ids=tok['input_ids']\n",
    "    # Segment ids; single-sentence classification, so these are all 0\n",
    "    token_type_ids=tok['token_type_ids']\n",
    "    # Attention mask is 0 on padding positions beyond the text length\n",
    "    attention_mask=tok['attention_mask']\n",
    "    return input_ids, token_type_ids, attention_mask"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1ed6d743",
   "metadata": {},
   "source": [
    "### 将数据加载为Tensor格式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1574c36d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convert a DataFrame into a TensorDataset of BERT inputs (and labels)\n",
    "def load_data(df,test=False):\n",
    "    \"\"\"Build a TensorDataset from a DataFrame.\n",
    "\n",
    "    Args:\n",
    "        df: DataFrame whose first column is text and (for train/dev)\n",
    "            second column is the sentiment label in {-1, 0, 1}.\n",
    "        test: when True, no label column is read and the dataset\n",
    "            contains only the three BERT input tensors.\n",
    "\n",
    "    Returns:\n",
    "        TensorDataset(input_ids, token_type_ids, attention_mask[, label])\n",
    "    \"\"\"\n",
    "    # Read the raw text (and labels for train/dev data)\n",
    "    text_list=df.iloc[:,0].tolist()\n",
    "    label=[]\n",
    "    if test==False:\n",
    "        label=df.iloc[:,1].tolist()\n",
    "    \n",
    "    # Tokenize/encode the raw text into BERT input tensors\n",
    "    input_ids, token_type_ids, attention_mask= encode(max_len=512,\n",
    "                            vocab_path='./bert_chinese/vocab.txt',\n",
    "                            text_list=text_list)\n",
    "    \n",
    "    # Package the dataset (inputs + label)\n",
    "    if test==False:\n",
    "        # BUG FIX: CrossEntropyLoss needs integer class indices in [0, C).\n",
    "        # Raw labels are {-1, 0, 1}; shift by +1 to classes {0, 1, 2}.\n",
    "        # The original float tensor containing -1 would make the loss fail.\n",
    "        label=torch.tensor(label, dtype=torch.long)+1\n",
    "        data=TensorDataset(input_ids,token_type_ids,attention_mask,label)\n",
    "    else:\n",
    "        data=TensorDataset(input_ids,token_type_ids,attention_mask)\n",
    "    \n",
    "    return data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "601c8f82",
   "metadata": {},
   "source": [
    "### 划分训练集、验证集\n",
    "训练集：验证集=3:1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "093247fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Paths to the raw competition data (GBK-encoded CSVs)\n",
    "train_path='./数据集/train.csv'\n",
    "test_path='./数据集/test.csv'\n",
    "\n",
    "# NOTE(review): first column is text; load_data reads column 1 as the label\n",
    "df_train=pd.read_csv(train_path,encoding='gbk')\n",
    "df_test=pd.read_csv(test_path,encoding='gbk')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "962fda58",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stratified 75/25 train/dev split on the sentiment column.\n",
    "# random_state pins the split so the notebook is reproducible run-to-run\n",
    "# (the original split changed on every execution).\n",
    "X_train, X_dev, y_train, y_dev = train_test_split(df_train,\n",
    "                                                  df_train['情感倾向'],\n",
    "                                                  test_size=0.25, \n",
    "                                                  stratify=df_train['情感倾向'],\n",
    "                                                  random_state=42)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f9bb072a",
   "metadata": {},
   "source": [
    "### 实例化DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "f742ee9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mini-batch size shared by all three DataLoaders\n",
    "batch_size=16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "07dccefe",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/minzijun/opt/anaconda3/envs/secondenv/lib/python3.8/site-packages/transformers/tokenization_utils_base.py:1628: FutureWarning: Calling BertTokenizer.from_pretrained() with the path to a single file or url is deprecated and won't be possible anymore in v5. Use a model identifier or the path to a directory instead.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# Build TensorDatasets via load_data (test=True skips the label column)\n",
    "train_data=load_data(X_train)\n",
    "dev_data=load_data(X_dev)\n",
    "test_data=load_data(df_test,True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d975578f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the datasets in DataLoaders that yield one mini-batch at a time;\n",
    "# train/dev are shuffled, the test loader keeps order so predictions\n",
    "# stay aligned with the test rows\n",
    "train_loader=DataLoader(dataset=train_data,\n",
    "                        batch_size=batch_size,\n",
    "                        shuffle=True)\n",
    "dev_loader=DataLoader(dataset=dev_data,\n",
    "                      batch_size=batch_size,\n",
    "                      shuffle=True)\n",
    "test_loader=DataLoader(dataset=test_data,\n",
    "                        batch_size=batch_size,\n",
    "                        shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7b510a07",
   "metadata": {},
   "source": [
    "## 加载模型"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c65006ba",
   "metadata": {},
   "source": [
    "警告：https://blog.csdn.net/PolarisRisingWar/article/details/123974645"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "46a82107",
   "metadata": {},
   "source": [
    "### 定义神经网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "86a06039",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BertClassification(nn.Module):\n",
    "    \"\"\"Pretrained Chinese BERT encoder plus a single linear classification head.\"\"\"\n",
    "\n",
    "    def __init__(self, hidden_size, output_size):\n",
    "        # BUG FIX: the original called super(BertModel, self).__init__(),\n",
    "        # which raises TypeError because BertModel is not a base class here.\n",
    "        super(BertClassification, self).__init__()\n",
    "        \n",
    "        # Load the pretrained weights; all BERT layers are fine-tuned\n",
    "        pre_weights='./bert_chinese/'\n",
    "        self.bert=BertModel.from_pretrained(pre_weights)\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad=True\n",
    "            \n",
    "        # Linear head mapping the pooled output to class logits\n",
    "        self.dense=nn.Linear(hidden_size, output_size)\n",
    "        \n",
    "    def forward(self, input_ids, token_type_ids, attention_mask):\n",
    "        \"\"\"Return class logits of shape (batch, output_size).\"\"\"\n",
    "        bert_output=self.bert(input_ids=input_ids,\n",
    "                              token_type_ids=token_type_ids,\n",
    "                              attention_mask=attention_mask)\n",
    "        \n",
    "        # bert_output[1] is the pooled [CLS] representation\n",
    "        bert_cls_hidden_state=bert_output[1]\n",
    "        # Map the pooled vector to class logits\n",
    "        linear_output=self.dense(bert_cls_hidden_state)\n",
    "        \n",
    "        return linear_output"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "160e25ce",
   "metadata": {},
   "source": [
    "## 训练过程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0124b248",
   "metadata": {},
   "source": [
    "### 定义验证函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "97729db9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def dev(model, dev_loader):\n",
    "    \"\"\"Evaluate accuracy on the dev set.\n",
    "\n",
    "    Args:\n",
    "        model: the classifier; it is switched to eval mode and NOT\n",
    "            switched back (NOTE(review): callers that keep training\n",
    "            afterwards must call model.train() again).\n",
    "        dev_loader: loader yielding (input_ids, token_type_ids,\n",
    "            attention_mask, labels) batches.\n",
    "\n",
    "    Returns:\n",
    "        Accuracy as a float in [0, 1].\n",
    "    \"\"\"\n",
    "    # Move the model to the device; relies on the notebook-global `device`\n",
    "    model.to(device)\n",
    "    model.eval()\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        correct, total = 0, 0\n",
    "        for step, (input_ids, token_type_ids, attention_mask, labels) in enumerate(dev_loader):\n",
    "            input_ids, token_type_ids, attention_mask, labels= input_ids.to(device),token_type_ids.to(device),attention_mask.to(device), labels.to(device)\n",
    "            output=model(input_ids, token_type_ids, attention_mask)\n",
    "            # Predicted class = argmax over the logits\n",
    "            _, predict=torch.max(output.data,1)\n",
    "            correct+=(predict==labels).sum().item()\n",
    "            total+=labels.size(0)\n",
    "            \n",
    "        res=correct/total\n",
    "        return res"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "aa140cc7",
   "metadata": {},
   "source": [
    "### 定义训练函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "1da92d30",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, train_loader, dev_loader, device, epochs=5):\n",
    "    \"\"\"Fine-tune the classifier, validating every 50 steps.\n",
    "\n",
    "    The best model (by dev accuracy) is saved as a whole-model pickle\n",
    "    to ./保存模型/model_best.pkl.\n",
    "\n",
    "    Args:\n",
    "        model: the BertClassification network.\n",
    "        train_loader / dev_loader: loaders yielding\n",
    "            (input_ids, token_type_ids, attention_mask, labels) batches.\n",
    "        device: torch.device to run on.\n",
    "        epochs: number of passes over train_loader.\n",
    "\n",
    "    Returns:\n",
    "        List of running training accuracies, one entry per step.\n",
    "    \"\"\"\n",
    "    # Move the model to the device and enable training mode\n",
    "    model.to(device)\n",
    "    model.train()\n",
    "    \n",
    "    # Classification loss\n",
    "    criterion=nn.CrossEntropyLoss()\n",
    "    param_optimizer=list(model.named_parameters())\n",
    "    # Bias/LayerNorm parameters are excluded from weight decay\n",
    "    no_decay=['bias','LayerNorm.bias','LayerNorm.weight']\n",
    "    \n",
    "    # Per-group weight decay configuration\n",
    "    optimizer_grouped_parameters=[\n",
    "        {\n",
    "            'params':[p for n,p in param_optimizer \n",
    "                      if not any(nd in n for nd in no_decay)],\n",
    "             'weight_decay':0.01\n",
    "        },\n",
    "        {\n",
    "            'params':[p for n,p in param_optimizer\n",
    "                     if any(nd in n for nd in no_decay)],\n",
    "            'weight_decay':0.0\n",
    "        }\n",
    "    ]\n",
    "    \n",
    "    # Learning-rate settings\n",
    "    optimizer_params={'lr':1e-5, 'eps':1e-6, 'correct_bias':False}\n",
    "    # AdamW optimizer (the transformers implementation)\n",
    "    optimizer=AdamW(optimizer_grouped_parameters,**optimizer_params)\n",
    "    # Halve the learning rate when dev accuracy plateaus\n",
    "    scheduler=ReduceLROnPlateau(optimizer, mode='max',\n",
    "                                factor=0.5, min_lr=1e-7,\n",
    "                                patience=5, verbose= True, \n",
    "                                threshold=0.0001, eps=1e-08)\n",
    "    \n",
    "    t_total=len(train_loader)\n",
    "    best_acc, correct, total = 0, 0, 0\n",
    "    accs=[]\n",
    "    \n",
    "    print('--------------Begin training--------------')\n",
    "    for epoch in range(epochs):\n",
    "        for step, (input_ids, token_type_ids, attention_mask, labels) in enumerate(train_loader):\n",
    "            input_ids, token_type_ids, attention_mask, labels= input_ids.to(device),token_type_ids.to(device),attention_mask.to(device), labels.to(device)\n",
    "\n",
    "            # Clear gradients from the previous step\n",
    "            optimizer.zero_grad()\n",
    "            # Forward pass\n",
    "            output=model(input_ids, token_type_ids, attention_mask)\n",
    "            # Compute the loss\n",
    "            loss=criterion(output,labels)\n",
    "            \n",
    "            # Track running accuracy\n",
    "            _, predict=torch.max(output.data,1)\n",
    "            correct+=(predict==labels).sum().item()\n",
    "            total+=labels.size(0)\n",
    "            train_acc=correct/total\n",
    "            accs.append(train_acc)\n",
    "            \n",
    "            # Backpropagate\n",
    "            loss.backward()\n",
    "            # Update parameters from the gradients\n",
    "            optimizer.step()\n",
    "            \n",
    "            # Log every 2 steps\n",
    "            if (step+1)%2==0:\n",
    "                print('Train: Epoch [{}/{}], step [{}/{}], train_acc:{:.6f}, loss:{:.6f}'\n",
    "                      .format(epoch+1, epochs, step+1, \n",
    "                              t_total, train_acc, loss.item()))\n",
    "               \n",
    "            # Validate every 50 steps\n",
    "            if (step+1)%50==0:\n",
    "                acc=dev(model, dev_loader)\n",
    "                if best_acc<acc:\n",
    "                    best_acc=acc\n",
    "                    # BUG FIX: torch.save needs the object to serialize;\n",
    "                    # the original passed only the file path.\n",
    "                    torch.save(model, './保存模型/model_best.pkl')\n",
    "                # BUG FIX: dev() leaves the model in eval mode; switch back\n",
    "                # so dropout is active again for subsequent training steps.\n",
    "                model.train()\n",
    "                print('Dev: Epoch [{}/{}], step [{}/{}], train_acc:{:.6f}, dev_acc:{:.6f}, best_acc:{:.6f}, loss:{:.6f}'\n",
    "                      .format(epoch+1, epochs, step+1, t_total, \n",
    "                              train_acc, acc, best_acc, loss.item()))\n",
    "        \n",
    "        scheduler.step(best_acc)\n",
    "        \n",
    "    return accs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e1569e56",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train on GPU when available, otherwise fall back to CPU\n",
    "device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "epochs=5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0fa77f59",
   "metadata": {},
   "outputs": [],
   "source": [
    "# BERT's last hidden layer / pooled output is 768-dim; 3 sentiment classes\n",
    "model=BertClassification(768,3)\n",
    "accs=train(model,train_loader,dev_loader,device,epochs)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dcfa239b",
   "metadata": {},
   "source": [
    "### 定义预测函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "45cb0f14",
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(model, test_loader):\n",
    "    \"\"\"Run inference over test_loader.\n",
    "\n",
    "    Args:\n",
    "        model: trained classifier (switched to eval mode here).\n",
    "        test_loader: loader over the unlabeled test set, yielding\n",
    "            (input_ids, token_type_ids, attention_mask) batches.\n",
    "\n",
    "    Returns:\n",
    "        (predicts, predict_probs): predicted class indices and the\n",
    "        per-class softmax probabilities, as plain Python lists.\n",
    "    \"\"\"\n",
    "    # Relies on the notebook-global `device` defined earlier\n",
    "    model.to(device)\n",
    "    model.eval()\n",
    "    \n",
    "    predicts=[]\n",
    "    predict_probs=[]\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        # BUG FIX: the original iterated the global dev_loader (4 tensors,\n",
    "        # with labels) instead of the test_loader argument, whose dataset\n",
    "        # has no label tensor (3 tensors per batch).\n",
    "        for step, (input_ids, token_type_ids, attention_mask) in enumerate(test_loader):\n",
    "            input_ids, token_type_ids, attention_mask= input_ids.to(device),token_type_ids.to(device),attention_mask.to(device)\n",
    "            output=model(input_ids, token_type_ids, attention_mask)\n",
    "            _, predict=torch.max(output.data,1)\n",
    "            \n",
    "            pre_numpy = predict.cpu().numpy().tolist()\n",
    "            predicts.extend(pre_numpy)\n",
    "            # dim=1: softmax over the class dimension (an explicit dim also\n",
    "            # avoids the implicit-dim deprecation warning)\n",
    "            probs=F.softmax(output, dim=1).cpu().numpy().tolist()\n",
    "            predict_probs.extend(probs)\n",
    "            \n",
    "        return predicts, predict_probs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "936858fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# torch.load restores the whole model object saved during training\n",
    "model_best=torch.load('./保存模型/model_best.pkl')\n",
    "predicts, predict_probs=predict(model_best, test_loader)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:secondenv] *",
   "language": "python",
   "name": "conda-env-secondenv-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.6"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
