{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BERT-CNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertModel, BertTokenizer\n",
    "\n",
    "\n",
    "class Config(object):\n",
    "    \"\"\"Configuration for the BERT-CNN text classifier.\"\"\"\n",
    "    def __init__(self, dataset):\n",
    "        self.model_name = 'bert'\n",
    "        self.train_path = dataset + '/data/train.txt'  # training set\n",
    "        self.dev_path = dataset + '/data/dev.txt'  # validation set\n",
    "        self.test_path = dataset + '/data/test.txt'  # test set\n",
    "\n",
    "        # Class names, one per line. A context manager closes the file\n",
    "        # (the original leaked the handle) and explicit UTF-8 keeps the\n",
    "        # read independent of the platform locale.\n",
    "        with open(dataset + '/data/class.txt', encoding='utf-8') as f:\n",
    "            self.class_list = [x.strip() for x in f.readlines()]\n",
    "\n",
    "        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # checkpoint path\n",
    "\n",
    "        self.device = torch.device(\n",
    "            'cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "        self.require_improvement = 1000  # early-stop if no dev improvement for 1000 batches\n",
    "        self.num_classes = len(self.class_list)  # number of target classes\n",
    "        self.num_epochs = 3\n",
    "        self.batch_size = 128  # mini-batch size\n",
    "        self.pad_size = 32  # sequence length: shorter is padded, longer truncated\n",
    "        self.learning_rate = 5e-5\n",
    "\n",
    "        self.bert_path = './bert_pretrain'\n",
    "\n",
    "        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)\n",
    "        self.hidden_size = 768  # BERT-base hidden size\n",
    "        self.filter_sizes = (2, 3, 4)  # convolution kernel heights\n",
    "        self.num_filters = 256  # kernels per size (output channels)\n",
    "        self.dropout = 0.1\n",
    "\n",
    "\n",
    "class Model(nn.Module):\n",
    "    \"\"\"BERT encoder followed by a TextCNN classification head.\"\"\"\n",
    "    def __init__(self, config):\n",
    "        super(Model, self).__init__()\n",
    "        self.bert = BertModel.from_pretrained(config.bert_path)\n",
    "\n",
    "        # Fine-tune every BERT parameter together with the CNN head.\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad = True\n",
    "\n",
    "        self.convs = nn.ModuleList([\n",
    "            nn.Conv2d(1, config.num_filters, (k, config.hidden_size))\n",
    "            for k in config.filter_sizes\n",
    "        ])\n",
    "        self.dropout = nn.Dropout(config.dropout)\n",
    "\n",
    "        self.fc_cnn = nn.Linear(config.num_filters * len(config.filter_sizes),\n",
    "                                config.num_classes)\n",
    "\n",
    "    def conv_and_pool(self, x, conv):\n",
    "        # x: [batch, 1, seq_len, hidden] -> conv+relu -> [batch, filters, L]\n",
    "        x = F.relu(conv(x)).squeeze(3)\n",
    "        # Global max-pool over the remaining time axis -> [batch, filters].\n",
    "        x = F.max_pool1d(x, x.size(2)).squeeze(2)\n",
    "        return x\n",
    "\n",
    "    def forward(self, x):\n",
    "        context = x[0]  # token ids, [batch, pad_size]\n",
    "        mask = x[2]  # attention mask: 1 for real tokens, 0 for padding\n",
    "        # transformers' BertModel has no `output_all_encoded_layers` kwarg\n",
    "        # (that belonged to pytorch_pretrained_bert); index the output for\n",
    "        # the last-layer hidden states instead.\n",
    "        outputs = self.bert(context, attention_mask=mask)\n",
    "        encoder_out = outputs[0]  # [batch, seq_len, hidden]\n",
    "        out = encoder_out.unsqueeze(1)  # add channel dim for Conv2d\n",
    "        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs],\n",
    "                        1)\n",
    "        out = self.dropout(out)\n",
    "        out = self.fc_cnn(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-15T15:09:53.744130Z",
     "start_time": "2020-05-15T15:09:53.738846Z"
    }
   },
   "source": [
    "# BERT-DPCNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertModel, BertTokenizer\n",
    "\n",
    "\n",
    "class Config(object):\n",
    "    \"\"\"Configuration for the BERT-DPCNN text classifier.\"\"\"\n",
    "    def __init__(self, dataset):\n",
    "        self.model_name = 'bert'\n",
    "        self.train_path = dataset + '/data/train.txt'  # training set\n",
    "        self.dev_path = dataset + '/data/dev.txt'  # validation set\n",
    "        self.test_path = dataset + '/data/test.txt'  # test set\n",
    "        # Class names, one per line; context manager closes the file and\n",
    "        # explicit UTF-8 keeps the read locale-independent.\n",
    "        with open(dataset + '/data/class.txt', encoding='utf-8') as f:\n",
    "            self.class_list = [x.strip() for x in f.readlines()]\n",
    "        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # checkpoint path\n",
    "        self.device = torch.device(\n",
    "            'cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "        self.require_improvement = 1000  # early-stop if no dev improvement for 1000 batches\n",
    "        self.num_classes = len(self.class_list)  # number of target classes\n",
    "        self.num_epochs = 3\n",
    "        self.batch_size = 128  # mini-batch size\n",
    "        self.pad_size = 32  # sequence length: shorter is padded, longer truncated\n",
    "        self.learning_rate = 5e-5\n",
    "        self.bert_path = './bert_pretrain'\n",
    "        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)\n",
    "        self.hidden_size = 768  # BERT-base hidden size\n",
    "        self.num_filters = 250  # DPCNN channel width\n",
    "\n",
    "\n",
    "class Model(nn.Module):\n",
    "    \"\"\"BERT encoder followed by a DPCNN classification head.\"\"\"\n",
    "    def __init__(self, config):\n",
    "        super(Model, self).__init__()\n",
    "        self.bert = BertModel.from_pretrained(config.bert_path)\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad = True\n",
    "\n",
    "        # Region embedding: collapses the hidden dimension in one conv.\n",
    "        self.conv_region = nn.Conv2d(1,\n",
    "                                     config.num_filters,\n",
    "                                     (3, config.hidden_size),\n",
    "                                     stride=1)\n",
    "        self.conv = nn.Conv2d(config.num_filters,\n",
    "                              config.num_filters, (3, 1),\n",
    "                              stride=1)\n",
    "        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)\n",
    "        self.padding1 = nn.ZeroPad2d((0, 0, 1, 1))  # pad top and bottom\n",
    "        self.padding2 = nn.ZeroPad2d((0, 0, 0, 1))  # pad bottom only\n",
    "        self.relu = nn.ReLU()\n",
    "        self.fc = nn.Linear(config.num_filters, config.num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        context = x[0]  # token ids, [batch, pad_size]\n",
    "        mask = x[2]  # attention mask: 1 for real tokens, 0 for padding\n",
    "        # transformers' BertModel has no `output_all_encoded_layers` kwarg\n",
    "        # (that belonged to pytorch_pretrained_bert); index the output for\n",
    "        # the last-layer hidden states instead.\n",
    "        outputs = self.bert(context, attention_mask=mask)\n",
    "        encoder_out = outputs[0]  # [batch, seq_len, hidden]\n",
    "        x = encoder_out.unsqueeze(1)  # [batch, 1, seq_len, embed]\n",
    "        x = self.conv_region(x)  # [batch, 250, seq_len-2, 1]\n",
    "\n",
    "        x = self.padding1(x)  # [batch, 250, seq_len, 1]\n",
    "        x = self.relu(x)\n",
    "        x = self.conv(x)  # [batch, 250, seq_len-2, 1]\n",
    "        x = self.padding1(x)  # [batch, 250, seq_len, 1]\n",
    "        x = self.relu(x)\n",
    "        x = self.conv(x)  # [batch, 250, seq_len-2, 1]\n",
    "        # Repeated pool+conv residual blocks halve the sequence axis.\n",
    "        while x.size()[2] > 2:\n",
    "            x = self._block(x)\n",
    "        # Squeeze only the spatial axes: a bare .squeeze() would also drop\n",
    "        # the batch dimension when batch_size == 1.\n",
    "        x = x.squeeze(3).squeeze(2)  # [batch, num_filters]\n",
    "        x = self.fc(x)\n",
    "        return x\n",
    "\n",
    "    def _block(self, x):\n",
    "        # Stride-2 pooling, two padded convolutions, and a residual\n",
    "        # shortcut back to the pooled tensor.\n",
    "        x = self.padding2(x)\n",
    "        px = self.max_pool(x)\n",
    "        x = self.padding1(px)\n",
    "        x = F.relu(x)\n",
    "        x = self.conv(x)\n",
    "        x = self.padding1(x)\n",
    "        x = F.relu(x)\n",
    "        x = self.conv(x)\n",
    "        x = x + px  # shortcut connection\n",
    "        return x"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BERT-RCNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertModel, BertTokenizer\n",
    "\n",
    "\n",
    "class Config(object):\n",
    "    \"\"\"Configuration for the BERT-RCNN text classifier.\"\"\"\n",
    "    def __init__(self, dataset):\n",
    "        self.model_name = 'bert'\n",
    "        self.train_path = dataset + '/data/train.txt'  # training set\n",
    "        self.dev_path = dataset + '/data/dev.txt'  # validation set\n",
    "        self.test_path = dataset + '/data/test.txt'  # test set\n",
    "        # Class names, one per line; context manager closes the file and\n",
    "        # explicit UTF-8 keeps the read locale-independent.\n",
    "        with open(dataset + '/data/class.txt', encoding='utf-8') as f:\n",
    "            self.class_list = [x.strip() for x in f.readlines()]\n",
    "        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # checkpoint path\n",
    "        self.device = torch.device(\n",
    "            'cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "        self.require_improvement = 1000  # early-stop if no dev improvement for 1000 batches\n",
    "        self.num_classes = len(self.class_list)  # number of target classes\n",
    "        self.num_epochs = 3\n",
    "        self.batch_size = 128  # mini-batch size\n",
    "        self.pad_size = 32  # sequence length: shorter is padded, longer truncated\n",
    "        self.learning_rate = 5e-5\n",
    "        self.bert_path = './bert_pretrain'\n",
    "        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)\n",
    "        self.hidden_size = 768  # BERT-base hidden size\n",
    "        self.filter_sizes = (2, 3, 4)  # NOTE: unused by the RCNN model; kept for compatibility\n",
    "        self.num_filters = 256  # NOTE: unused by the RCNN model; kept for compatibility\n",
    "        self.dropout = 0.1\n",
    "        self.rnn_hidden = 256  # LSTM hidden size per direction\n",
    "        self.num_layers = 2  # stacked LSTM layers\n",
    "\n",
    "\n",
    "class Model(nn.Module):\n",
    "    \"\"\"BERT encoder, BiLSTM, then max-pooling over time (RCNN head).\"\"\"\n",
    "    def __init__(self, config):\n",
    "        super(Model, self).__init__()\n",
    "        self.bert = BertModel.from_pretrained(config.bert_path)\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad = True\n",
    "\n",
    "        self.lstm = nn.LSTM(config.hidden_size,\n",
    "                            config.rnn_hidden,\n",
    "                            config.num_layers,\n",
    "                            bidirectional=True,\n",
    "                            batch_first=True,\n",
    "                            dropout=config.dropout)\n",
    "        self.maxpool = nn.MaxPool1d(config.pad_size)\n",
    "\n",
    "        self.fc = nn.Linear(config.rnn_hidden * 2 + config.hidden_size,\n",
    "                            config.num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        context = x[0]  # token ids, [batch, pad_size]\n",
    "        mask = x[2]  # attention mask: 1 for real tokens, 0 for padding\n",
    "        # transformers' BertModel has no `output_all_encoded_layers` kwarg\n",
    "        # (that belonged to pytorch_pretrained_bert); index the output for\n",
    "        # the last-layer hidden states instead.\n",
    "        outputs = self.bert(context, attention_mask=mask)\n",
    "        encoder_out = outputs[0]  # [batch, seq_len, hidden]\n",
    "        out, _ = self.lstm(encoder_out)\n",
    "        # RCNN: concatenate BERT features with the BiLSTM context.\n",
    "        out = torch.cat((encoder_out, out), 2)\n",
    "        out = F.relu(out)\n",
    "        out = out.permute(0, 2, 1)  # [batch, features, seq_len] for pooling\n",
    "        # Squeeze only the pooled axis: a bare .squeeze() would also drop\n",
    "        # the batch dimension when batch_size == 1.\n",
    "        out = self.maxpool(out).squeeze(-1)\n",
    "        out = self.fc(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BERT-RNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertModel, BertTokenizer\n",
    "\n",
    "\n",
    "class Config(object):\n",
    "    \"\"\"Configuration for the BERT-RNN text classifier.\"\"\"\n",
    "    def __init__(self, dataset):\n",
    "        self.model_name = 'bert'\n",
    "        self.train_path = dataset + '/data/train.txt'  # training set\n",
    "        self.dev_path = dataset + '/data/dev.txt'  # validation set\n",
    "        self.test_path = dataset + '/data/test.txt'  # test set\n",
    "        # Class names, one per line; context manager closes the file and\n",
    "        # explicit UTF-8 keeps the read locale-independent.\n",
    "        with open(dataset + '/data/class.txt', encoding='utf-8') as f:\n",
    "            self.class_list = [x.strip() for x in f.readlines()]\n",
    "        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # checkpoint path\n",
    "        self.device = torch.device(\n",
    "            'cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "        self.require_improvement = 1000  # early-stop if no dev improvement for 1000 batches\n",
    "        self.num_classes = len(self.class_list)  # number of target classes\n",
    "        self.num_epochs = 3\n",
    "        self.batch_size = 128  # mini-batch size\n",
    "        self.pad_size = 32  # sequence length: shorter is padded, longer truncated\n",
    "        self.learning_rate = 5e-5\n",
    "        self.bert_path = './bert_pretrain'\n",
    "        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)\n",
    "        self.hidden_size = 768  # BERT-base hidden size\n",
    "        self.filter_sizes = (2, 3, 4)  # NOTE: unused by the RNN model; kept for compatibility\n",
    "        self.num_filters = 256  # NOTE: unused by the RNN model; kept for compatibility\n",
    "        self.dropout = 0.1\n",
    "        self.rnn_hidden = 768  # LSTM hidden size per direction\n",
    "        self.num_layers = 2  # stacked LSTM layers\n",
    "\n",
    "\n",
    "class Model(nn.Module):\n",
    "    \"\"\"BERT encoder followed by a BiLSTM classification head.\"\"\"\n",
    "    def __init__(self, config):\n",
    "        super(Model, self).__init__()\n",
    "        self.bert = BertModel.from_pretrained(config.bert_path)\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad = True\n",
    "        self.lstm = nn.LSTM(config.hidden_size,\n",
    "                            config.rnn_hidden,\n",
    "                            config.num_layers,\n",
    "                            bidirectional=True,\n",
    "                            batch_first=True,\n",
    "                            dropout=config.dropout)\n",
    "        self.dropout = nn.Dropout(config.dropout)\n",
    "        self.fc_rnn = nn.Linear(config.rnn_hidden * 2, config.num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        context = x[0]  # token ids, [batch, pad_size]\n",
    "        mask = x[2]  # attention mask: 1 for real tokens, 0 for padding\n",
    "        # transformers' BertModel has no `output_all_encoded_layers` kwarg\n",
    "        # (that belonged to pytorch_pretrained_bert); index the output for\n",
    "        # the last-layer hidden states instead.\n",
    "        outputs = self.bert(context, attention_mask=mask)\n",
    "        encoder_out = outputs[0]  # [batch, seq_len, hidden]\n",
    "        out, _ = self.lstm(encoder_out)\n",
    "        out = self.dropout(out)\n",
    "        # Classify from the last time step's hidden state.\n",
    "        # NOTE(review): with right-padding this is a pad position for short\n",
    "        # sentences — consider pooling or gathering by true lengths.\n",
    "        out = self.fc_rnn(out[:, -1, :])\n",
    "        return out"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
