{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a32cff6a-082e-47ab-84e4-6c7cdba24112",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:48:54.858253Z",
     "start_time": "2022-10-22T15:48:44.209733Z"
    }
   },
   "source": [
    "import json\n",
    "import os\n",
    "import torch\n",
    "from torch import nn\n",
    "import dltools"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1e6fa6bd-91dd-4a06-b09d-78998d1cd94e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:48:54.873212Z",
     "start_time": "2022-10-22T15:48:54.860246Z"
    }
   },
   "source": [
    "def load_pretrained_model(pretrained_model, num_hiddens, ffn_num_hiddens,num_heads, num_layers, dropout, max_len, devices):\n",
    "    \"\"\"Load a pre-trained small BERT and its vocabulary from disk.\n",
    "\n",
    "    Args:\n",
    "        pretrained_model: name of the pre-trained model (currently unused;\n",
    "            the checkpoint directory is fixed to ./bert.small.torch/).\n",
    "        num_hiddens: encoder hidden size; also used for the key/query/value\n",
    "            projection sizes and the MLM/NSP/pooler input sizes.\n",
    "        ffn_num_hiddens: hidden size of the position-wise FFN.\n",
    "        num_heads: number of attention heads.\n",
    "        num_layers: number of transformer encoder layers.\n",
    "        dropout: dropout rate.\n",
    "        max_len: maximum sequence length supported by the model.\n",
    "        devices: target devices (unused here; the caller moves the model).\n",
    "\n",
    "    Returns:\n",
    "        (bert, vocab): the model with pre-trained weights and its Vocab.\n",
    "    \"\"\"\n",
    "    data_dir = \"./bert.small.torch/\"\n",
    "    # Build an empty vocabulary, then populate it from the saved token list.\n",
    "    vocab = dltools.Vocab()\n",
    "    with open(os.path.join(data_dir, 'vocab.json')) as f:\n",
    "        vocab.idx_to_token = json.load(f)\n",
    "    vocab.token_to_idx = {token: idx for idx, token in enumerate(vocab.idx_to_token)}\n",
    "    # Bug fix: these hyperparameters were hard-coded (256, 4 heads, 2 layers,\n",
    "    # dropout 0.2), silently ignoring the function's arguments; they now come\n",
    "    # from the parameters. The defaults passed by the caller match the\n",
    "    # checkpoint's architecture.\n",
    "    bert = dltools.BERTModel(len(vocab), num_hiddens, norm_shape=[num_hiddens],\n",
    "                         ffn_num_input=num_hiddens, ffn_num_hiddens=ffn_num_hiddens,\n",
    "                         num_heads=num_heads, num_layers=num_layers, dropout=dropout,\n",
    "                         max_len=max_len, key_size=num_hiddens, query_size=num_hiddens,\n",
    "                         value_size=num_hiddens, hid_in_features=num_hiddens,\n",
    "                         mlm_in_features=num_hiddens, nsp_in_features=num_hiddens)\n",
    "    # Load the pre-trained BERT parameters.\n",
    "    bert.load_state_dict(torch.load(os.path.join(data_dir,'pretrained.params')))\n",
    "    return bert, vocab"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "58cc9c20-2c9e-4a1d-a634-9e22c60c170d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:48:55.285110Z",
     "start_time": "2022-10-22T15:48:54.874209Z"
    }
   },
   "source": [
    "# Use every available GPU (falls back to CPU if none are found).\n",
    "devices = dltools.try_all_gpus()\n",
    "bert, vocab = load_pretrained_model(\n",
    "    'bert.small', num_hiddens=256, ffn_num_hiddens=512,\n",
    "    num_heads=4, num_layers=2, dropout=0.1,\n",
    "    max_len=512, devices=devices)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "029b151e-50d2-4303-b634-431ac7863317",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:48:55.301067Z",
     "start_time": "2022-10-22T15:48:55.287104Z"
    }
   },
   "source": [
    "# Stanford Natural Language Inference (SNLI) dataset, encoded for BERT.\n",
    "class SNLIBERTDataset(torch.utils.data.Dataset):\n",
    "    \"\"\"SNLI premise/hypothesis pairs encoded as BERT inputs.\n",
    "\n",
    "    Each item is ((token_ids, segments, valid_len), label).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, dataset, max_len, vocab=None):\n",
    "        # dataset holds (premises, hypotheses, labels); lower-case and\n",
    "        # tokenize the two text columns, then pair them example by example.\n",
    "        premise_tokens, hypothesis_tokens = [\n",
    "            dltools.tokenize([s.lower() for s in sentences])\n",
    "            for sentences in dataset[:2]\n",
    "        ]\n",
    "        all_pairs = [[p, h] for p, h in zip(premise_tokens, hypothesis_tokens)]\n",
    "\n",
    "        self.labels = torch.tensor(dataset[2])\n",
    "        self.vocab = vocab\n",
    "        self.max_len = max_len\n",
    "        (self.all_token_ids, self.all_segments,\n",
    "         self.valid_lens) = self._preprocess(all_pairs)\n",
    "        print(f'read {len(self.all_token_ids)} examples')\n",
    "\n",
    "    def _preprocess(self, all_pairs):\n",
    "        # Encode every pair, then split the (ids, segments, valid_len)\n",
    "        # triples into three parallel tensors.\n",
    "        encoded = [self._encode_pair(pair) for pair in all_pairs]\n",
    "        all_token_ids = [ids for ids, _, _ in encoded]\n",
    "        all_segments = [segs for _, segs, _ in encoded]\n",
    "        valid_lens = [n for _, _, n in encoded]\n",
    "        return (torch.tensor(all_token_ids, dtype=torch.long),\n",
    "                torch.tensor(all_segments, dtype=torch.long),\n",
    "                torch.tensor(valid_lens))\n",
    "\n",
    "    def _encode_pair(self, pair):\n",
    "        \"\"\"Turn one [premise_tokens, hypothesis_tokens] pair into padded ids.\"\"\"\n",
    "        p_tokens, h_tokens = pair\n",
    "        # Truncates both token lists in place so the pair fits in max_len.\n",
    "        self._truncate_pair_of_tokens(p_tokens, h_tokens)\n",
    "        tokens, segments = dltools.get_tokens_and_segments(p_tokens, h_tokens)\n",
    "        token_ids = self.vocab[tokens] + [self.vocab['<pad>']] * (self.max_len - len(tokens))\n",
    "        segments = segments + [0] * (self.max_len - len(segments))\n",
    "        valid_len = len(tokens)\n",
    "        return token_ids, segments, valid_len\n",
    "\n",
    "    def _truncate_pair_of_tokens(self, p_tokens, h_tokens):\n",
    "        # Reserve 3 slots for the '<CLS>', '<SEP>' and '<SEP>' tokens;\n",
    "        # repeatedly trim the longer of the two lists from the end.\n",
    "        budget = self.max_len - 3\n",
    "        while len(p_tokens) + len(h_tokens) > budget:\n",
    "            longer = p_tokens if len(p_tokens) > len(h_tokens) else h_tokens\n",
    "            longer.pop()\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        features = (self.all_token_ids[idx], self.all_segments[idx],\n",
    "                    self.valid_lens[idx])\n",
    "        return features, self.labels[idx]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.all_token_ids)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "689bc7aa-5a9d-4f2d-83bc-6b2b3af27dd6",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:49:24.622640Z",
     "start_time": "2022-10-22T15:48:55.302064Z"
    }
   },
   "source": [
    "# If you run out of GPU memory, reduce batch_size. The original BERT\n",
    "# model uses max_len=512; 128 keeps the fine-tuning footprint small.\n",
    "batch_size, max_len, num_workers = 256, 128, dltools.get_dataloader_workers()\n",
    "data_dir = \"./snli_1.0/\"\n",
    "# read_snli(data_dir, is_train) yields the (premises, hypotheses, labels)\n",
    "# columns that SNLIBERTDataset expects.\n",
    "train_set = SNLIBERTDataset(dltools.read_snli(data_dir, True), max_len, vocab)\n",
    "test_set = SNLIBERTDataset(dltools.read_snli(data_dir, False), max_len, vocab)\n",
    "train_iter = torch.utils.data.DataLoader(train_set, batch_size, shuffle=True,num_workers=num_workers)\n",
    "test_iter = torch.utils.data.DataLoader(test_set, batch_size,num_workers=num_workers)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "86695e81",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:49:35.687048Z",
     "start_time": "2022-10-22T15:49:24.624635Z"
    },
    "scrolled": true
   },
   "source": [
    "# Peek at the first three training examples to sanity-check the data.\n",
    "train_data = dltools.read_snli(data_dir, True)\n",
    "for premise, hypothesis, label in zip(*(column[:3] for column in train_data)):\n",
    "    print('premise', premise)\n",
    "    print('hypothesis:', hypothesis)\n",
    "    print('label:', label)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "764d03c5-cfee-4ab8-9bab-62ea55a6d109",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:49:35.703006Z",
     "start_time": "2022-10-22T15:49:35.688044Z"
    }
   },
   "source": [
    "class BERTClassifier(nn.Module):\n",
    "    \"\"\"Classification head on top of a pre-trained BERT model.\n",
    "\n",
    "    Reuses the encoder and pooling ('hidden') layers of the given BERT\n",
    "    model and adds a freshly initialized linear output layer.\n",
    "\n",
    "    Args:\n",
    "        bert: pre-trained BERT model exposing .encoder and .hidden.\n",
    "        num_inputs: feature size produced by bert.hidden\n",
    "            (256 for the small BERT used in this notebook).\n",
    "        num_outputs: number of target classes (3 for SNLI).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, bert, num_inputs=256, num_outputs=3):\n",
    "        # Generalized: 256 and 3 were hard-coded magic numbers; they are\n",
    "        # now parameters whose defaults preserve the original behavior.\n",
    "        super().__init__()\n",
    "        self.encoder = bert.encoder\n",
    "        self.hidden = bert.hidden\n",
    "        self.output = nn.Linear(num_inputs, num_outputs)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        tokens_X, segments_X, valid_lens_x = inputs\n",
    "        encoded_X = self.encoder(tokens_X, segments_X, valid_lens_x)\n",
    "        # encoded_X[:, 0, :] selects the representation of the leading\n",
    "        # '<cls>' token, which summarizes the sequence for classification.\n",
    "        return self.output(self.hidden(encoded_X[:, 0, :]))"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "b365018c-a97b-4b37-bb68-7b131a20b4d3",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:49:35.765836Z",
     "start_time": "2022-10-22T15:49:35.704001Z"
    }
   },
   "source": [
    "# Wrap the pre-trained BERT with a fresh classification head.\n",
    "net = BERTClassifier(bert)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "c70d3fc4-903f-4c6d-b29a-6a26a5a26fea",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-10-22T15:56:46.758561Z",
     "start_time": "2022-10-22T15:49:35.766833Z"
    }
   },
   "source": [
    "# Fine-tune with a small learning rate: most weights are pre-trained.\n",
    "lr, num_epochs = 1e-4, 2\n",
    "trainer = torch.optim.Adam(net.parameters(), lr=lr)\n",
    "# reduction='none' keeps per-example losses (presumably reduced inside\n",
    "# dltools.train_ch13 -- verify against its implementation).\n",
    "loss = nn.CrossEntropyLoss(reduction='none')\n",
    "dltools.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ee4f6a1-40dd-408f-80c0-aa813c908b55",
   "metadata": {},
   "source": [],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82db6db4-7585-4b1d-a398-620d37e9d1cf",
   "metadata": {},
   "source": [],
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
