{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:00:40.682629Z",
     "start_time": "2020-03-09T10:00:38.656505Z"
    }
   },
   "outputs": [],
   "source": [
    "import gc\n",
    "import random\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "from torch import nn\n",
    "import tensorflow as tf\n",
    "import transformers\n",
    "from sklearn import metrics\n",
    "from sklearn.model_selection import KFold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:00:40.704043Z",
     "start_time": "2020-03-09T10:00:40.701673Z"
    }
   },
   "outputs": [],
   "source": [
    "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载训练集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:00:52.721379Z",
     "start_time": "2020-03-09T10:00:52.683331Z"
    }
   },
   "outputs": [],
   "source": [
    "data_dir = \"../data/\"\n",
    "\n",
    "x_train = pd.read_csv(data_dir + \"train.csv\")['text']\n",
    "# 为了与划分后的 y_train 区分\n",
    "label_train = pd.read_csv(data_dir + \"train.csv\")['target']\n",
    "\n",
    "# x_test = pd.read_csv(data_dir + \"test.csv\")['text']\n",
    "\n",
    "print(len(x_train))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Tokenization, 输入数据格式化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:00:55.338694Z",
     "start_time": "2020-03-09T10:00:55.336215Z"
    }
   },
   "outputs": [],
   "source": [
    "# hugging face (transformers) 本地模型目录\n",
    "transformers_dir = \"/home/liulu/transformers/\"\n",
    "\n",
    "# bert-base-uncased 模型文件夹\n",
    "bert_dir = transformers_dir + \"bert-base-uncased/\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 序列转为 id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:00:57.423464Z",
     "start_time": "2020-03-09T10:00:57.394996Z"
    }
   },
   "outputs": [],
   "source": [
    "# 由本地加载 tokenizer\n",
    "tokenizer = transformers.BertTokenizer.from_pretrained(bert_dir, do_lower_case=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:00:57.978924Z",
     "start_time": "2020-03-09T10:00:57.975659Z"
    }
   },
   "outputs": [],
   "source": [
    "def encode_tweet(df: pd.DataFrame) -> list:\n",
    "    input_ids = []\n",
    "    for x in df:\n",
    "        encode_x = tokenizer.encode(x, add_special_tokens=True)\n",
    "        input_ids.append(encode_x)\n",
    "    return input_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:24:49.047593Z",
     "start_time": "2020-03-09T09:24:45.828214Z"
    }
   },
   "outputs": [],
   "source": [
    "train_input = encode_tweet(x_train)\n",
    "\n",
    "# test_input = encode_tweet(x_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 填充\n",
    "最大长度 84"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T06:33:51.106669Z",
     "start_time": "2020-03-09T06:33:51.104776Z"
    }
   },
   "outputs": [],
   "source": [
    "# 求训练集和测试集 text 的最大长度\n",
    "# train_max_len = max([ len(seq) for seq in train_input]) \n",
    "# test_max_len = max([ len(seq) for seq in test_input]) \n",
    "\n",
    "# print(f\"训练集最大text长度: {train_max_len}\\n测试集最大text长度: {test_max_len}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:01:01.279417Z",
     "start_time": "2020-03-09T10:01:01.250049Z"
    }
   },
   "outputs": [],
   "source": [
    "from keras.preprocessing.sequence import pad_sequences\n",
    "\n",
    "# MAX_LEN = max(train_max_len, test_max_len), padding 的最大长度\n",
    "MAX_LEN = 84\n",
    "\n",
    "def pad_tweets(df: pd.DataFrame):\n",
    "    df = pad_sequences(df, maxlen=MAX_LEN, dtype=\"long\",\n",
    "                       value=0, \n",
    "                       padding=\"post\",      # 若不够, 在后面填充\n",
    "                       truncating=\"post\"   # 若超长, 删除后面的\n",
    "                      )\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:24:54.374510Z",
     "start_time": "2020-03-09T09:24:54.327033Z"
    }
   },
   "outputs": [],
   "source": [
    "train_input = pad_sequences(train_input)\n",
    "\n",
    "# test_input = pad_sequences(test_input)\n",
    "\n",
    "print(train_input.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Attention Mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:01:04.142011Z",
     "start_time": "2020-03-09T10:01:04.139278Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_att_mask(df):\n",
    "    attention_masks = []\n",
    "    for text in df:\n",
    "        att_mask =  [1 if id_ > 0 else 0 for id_ in text]\n",
    "        attention_masks.append(att_mask)\n",
    "    return attention_masks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:00.188160Z",
     "start_time": "2020-03-09T09:24:59.280576Z"
    }
   },
   "outputs": [],
   "source": [
    "train_att_mask = get_att_mask(train_input)\n",
    "\n",
    "# test_att_mask = get_att_mask(test_input)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:01.574282Z",
     "start_time": "2020-03-09T09:25:01.497278Z"
    }
   },
   "outputs": [],
   "source": [
    "# 回收无用的变量, 预训练 bert 将占用 约900MB 显存\n",
    "del x_train, tokenizer\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 划分训练集、验证集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:05.067454Z",
     "start_time": "2020-03-09T09:25:05.053172Z"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# 划分训练集 验证集\n",
    "x_train_input, x_val_input, y_train, y_val = train_test_split(\n",
    "                                                train_input, \n",
    "                                                label_train, \n",
    "                                                test_size=0.15,\n",
    "                                                random_state=2333)\n",
    "\n",
    "# attention mask 也同样划分, y1, y2 并不需要\n",
    "train_mask, val_mask, y1, y2 = train_test_split(\n",
    "                                    train_att_mask,\n",
    "                                    label_train,\n",
    "                                    test_size=0.15,\n",
    "                                    random_state=2333\n",
    ")\n",
    "\n",
    "print(f\"train length: {x_train_input.shape[0]}\")\n",
    "print(f\"  val length: {x_val_input.shape[0]}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:06.355289Z",
     "start_time": "2020-03-09T09:25:06.283643Z"
    }
   },
   "outputs": [],
   "source": [
    "del y1, y2, train_input\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 转为 Tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:12.360266Z",
     "start_time": "2020-03-09T09:25:12.324719Z"
    }
   },
   "outputs": [],
   "source": [
    "x_train_input = torch.LongTensor(x_train_input)\n",
    "x_val_input = torch.LongTensor(x_val_input)\n",
    "\n",
    "y_train = torch.LongTensor(np.array(y_train))\n",
    "y_val = torch.LongTensor(np.array(y_val))\n",
    "\n",
    "mask_train = torch.LongTensor(train_mask)\n",
    "mask_val = torch.LongTensor(val_mask)\n",
    "\n",
    "# x_test_input = torch.Tensor(test_input)\n",
    "# mask_test = torch.Tensor(test_att_mask)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 以 batch 的方式构造数据集\n",
    "编写一个帮助类, 在训练时使用 batch. 它由迭代器实现, 在训练时会节省内存 <br>\n",
    "注意: 在测试集上也应如此操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:15.792557Z",
     "start_time": "2020-03-09T09:25:15.788688Z"
    }
   },
   "outputs": [],
   "source": [
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
    "\n",
    "# 视显存调整\n",
    "BATCH_SIZE = 16\n",
    "\n",
    "# 训练集, 训练集迭代器\n",
    "train_data = TensorDataset(x_train_input, mask_train, y_train)\n",
    "train_sampler = RandomSampler(train_data)\n",
    "train_loader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n",
    "\n",
    "# 验证集, 验证集迭代器\n",
    "val_data = TensorDataset(x_val_input, mask_val, y_val)\n",
    "val_sampler = RandomSampler(val_data)\n",
    "val_loader = DataLoader(val_data, sampler=val_sampler, batch_size=BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练模型\n",
    "使用 hugging face 提供的预训练 bert <br>\n",
    "[文档链接](https://huggingface.co/transformers/v2.2.0/main_classes/model.html#transformers.PreTrainedModel.from_pretrained)\n",
    "\n",
    "步骤\n",
    "1. 增大 BERT 最后一层 ```Dropout``` 的比例\n",
    "2. 冻结 bert 的所有层, 训练顶层的 Linear\n",
    "3. 解冻所有层, 一起训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 加载预训练模型\n",
    "```BertForSequenceClassfication``` 是 封装了```BERT```和一个线性分类器, 用于多分类任务"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:16:12.729957Z",
     "start_time": "2020-03-09T09:16:10.535563Z"
    }
   },
   "outputs": [],
   "source": [
    "from transformers import BertForSequenceClassification, AdamW, BertConfig\n",
    "# from transformers import BertModel\n",
    "\n",
    "# 在预训练 bert 顶部加了 1个Dropout, 1个 Liner, 输出分类结果\n",
    "model = BertForSequenceClassification.from_pretrained(bert_dir,\n",
    "                                                      # 类别数, 支持多分类\n",
    "                                                      num_labels = 2,\n",
    "                                                      output_attentions = False,\n",
    "                                                      output_hidden_states = False\n",
    "                                                     )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 冻结 BERT, 训练 分类器\n",
    "fine-tune 的基本步骤"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 冻结 BERT 的所有层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:16:12.917810Z",
     "start_time": "2020-03-09T09:16:12.912043Z"
    }
   },
   "outputs": [],
   "source": [
    "# 加大 Dropout 比例\n",
    "model.dropout = nn.Dropout(0.5, False)\n",
    "\n",
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:16:16.405958Z",
     "start_time": "2020-03-09T09:16:16.402432Z"
    }
   },
   "outputs": [],
   "source": [
    "# 先仅训练 分类器\n",
    "# 冻结 BERT 的所有层\n",
    "for name, param in model.named_parameters():\n",
    "    param.requires_grad = False\n",
    "\n",
    "model.classifier.weight.requires_grad = True\n",
    "model.classifier.bias.requires_grad = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:36.599165Z",
     "start_time": "2020-03-09T09:25:36.579474Z"
    }
   },
   "outputs": [],
   "source": [
    "# 查看可训练参数\n",
    "for name, param in model.named_parameters():\n",
    "    if param.requires_grad:\n",
    "        print(name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:40.187424Z",
     "start_time": "2020-03-09T09:25:40.182601Z"
    }
   },
   "outputs": [],
   "source": [
    "# GPU\n",
    "model.cuda()\n",
    "\n",
    "print(\"model transfered to GPU\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "###  使用 AdamW 优化器\n",
    "```Adam with weight decay```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:46.711372Z",
     "start_time": "2020-03-09T09:25:46.709405Z"
    }
   },
   "outputs": [],
   "source": [
    "EPOCHS = 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:47.726128Z",
     "start_time": "2020-03-09T09:25:47.721335Z"
    }
   },
   "outputs": [],
   "source": [
    "from transformers import get_linear_schedule_with_warmup\n",
    "from transformers import AdamW\n",
    "\n",
    "# 优化器 由 hugging face 提供\n",
    "optimizer = AdamW(model.parameters(),\n",
    "                  lr = 2e-5, # default is 5e-5\n",
    "                  eps = 1e-8 # default is 1e-8\n",
    "                 )\n",
    "\n",
    "# the number of batches times the number of epochs\n",
    "# 需要迭代多少个 batch\n",
    "total_steps = len(train_loader) * EPOCHS\n",
    "\n",
    "# the learning rate scheduler\n",
    "# 学习率衰减\n",
    "scheduler = get_linear_schedule_with_warmup(optimizer, \n",
    "                                            num_warmup_steps = 0,\n",
    "                                            num_training_steps = total_steps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:49.966971Z",
     "start_time": "2020-03-09T09:25:49.963806Z"
    }
   },
   "outputs": [],
   "source": [
    "# Accuracy helper function\n",
    "def flat_accuracy(preds, labels):\n",
    "    \"\"\" 计算精度\n",
    "    \"\"\"\n",
    "    pred_flat = np.argmax(preds, axis=1).flatten()\n",
    "    labels_flat = labels.flatten()\n",
    "    return np.sum(pred_flat == labels_flat) / len(labels_flat)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:02:55.314174Z",
     "start_time": "2020-03-09T10:02:55.311081Z"
    }
   },
   "outputs": [],
   "source": [
    "# 有 GPU 则使用 GPU\n",
    "if torch.cuda.is_available():    \n",
    "    device = torch.device(\"cuda\")\n",
    "else:\n",
    "    device = torch.device(\"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:51:45.495874Z",
     "start_time": "2020-03-09T09:26:02.273449Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 所有的随机数种子都为 50\n",
    "seed = 50\n",
    "\n",
    "random.seed(seed)\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)\n",
    "\n",
    "# 训练损失\n",
    "loss_arr = []\n",
    "\n",
    "for i in range(0, EPOCHS):\n",
    "    # >>>>>>>>> Training >>>>>>>>> \n",
    "    print('====== Epoch {:} of {:}'.format(i+1, EPOCHS))\n",
    "    print('Training...')\n",
    "    \n",
    "    total_loss = 0\n",
    "    # initialize training mode\n",
    "    model.train()\n",
    "    \n",
    "    for step, batch in enumerate(train_loader):\n",
    "        # Unpacking the training batch from dataloader and copying each tensor to the GPU\n",
    "        b_input_ids = batch[0].long().to(device)       # x_train of this batch\n",
    "        b_input_mask = batch[1].long().to(device)     # attention mask\n",
    "        b_labels = batch[2].long().to(device)        # y_train\n",
    "        \n",
    "        # pytorch doesn't clear previously calculated gradients\n",
    "        # before performing backward pass, so clearing here:\n",
    "        model.zero_grad()\n",
    "\n",
    "        # 前向传播\n",
    "        outputs = model(b_input_ids,\n",
    "                       token_type_ids=None, \n",
    "                       attention_mask=b_input_mask,\n",
    "                       labels=b_labels)\n",
    "        loss = outputs[0]\n",
    "        \n",
    "        total_loss += loss.item()\n",
    "        # 反向传播计算梯度\n",
    "        loss.backward()\n",
    "        # TODO 这是啥 ?\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "        # 优化器更新权重\n",
    "        optimizer.step()\n",
    "        # update the learning rate\n",
    "        # 更新学习率\n",
    "        scheduler.step()\n",
    "    # <<<<<<<<<< Training <<<<<<<<<< \n",
    "    \n",
    "    # 计算本轮的平均损失\n",
    "    avg_train_loss = total_loss / len(train_loader)\n",
    "    \n",
    "    loss_arr.append(avg_train_loss)\n",
    "    print(\"  Average training loss: {}\".format(avg_train_loss))\n",
    "    \n",
    "    # >>>>>>>>> Validation >>>>>>>>> \n",
    "    \n",
    "    print(\"Validation...\")\n",
    "#     t0 = time.time()\n",
    "    # evaluation mode\n",
    "    model.eval()\n",
    "    \n",
    "    eval_loss, eval_accuracy = 0, 0\n",
    "    nb_eval_steps, nb_eval_examples = 0, 0\n",
    "    \n",
    "    for batch in val_loader:\n",
    "        b_input_ids = batch[0].long().to(device)   # x_val of this batch\n",
    "        b_input_mask = batch[1].long().to(device)  # attention_mask\n",
    "        b_labels = batch[2].long().to(device)      # y_val\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            outputs = model(b_input_ids, \n",
    "                           token_type_ids = None, \n",
    "                           attention_mask = b_input_mask)\n",
    "            \n",
    "        logits = outputs[0]\n",
    "        # move logits to cpu\n",
    "        logits = logits.detach().cpu().numpy()\n",
    "        label_ids = b_labels.to('cpu').numpy()\n",
    "        # get accuracy\n",
    "        tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n",
    "        # 验证精度\n",
    "        eval_accuracy += tmp_eval_accuracy\n",
    "        nb_eval_steps += 1\n",
    "        \n",
    "    # <<<<<<<<<< Validation <<<<<<<<<<\n",
    "    # torch.save(model, f\"./models/checkpoints/end_model{i+1}_trainLoss{avg_train_loss}_evalAcc{eval_accuracy/nb_eval_steps}.pkl\")\n",
    "    print(\"  Accuracy: {}\".format(eval_accuracy/nb_eval_steps))\n",
    "    \n",
    "print(\"Training complete!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 解冻 BERT, 一起训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:22:26.638585Z",
     "start_time": "2020-03-09T09:22:26.633444Z"
    }
   },
   "outputs": [],
   "source": [
    "# 解冻所有层, 一起训练\n",
    "for name, param in model.named_parameters():\n",
    "    param.requires_grad = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:46.711372Z",
     "start_time": "2020-03-09T09:25:46.709405Z"
    }
   },
   "outputs": [],
   "source": [
    "EPOCHS = 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:25:47.726128Z",
     "start_time": "2020-03-09T09:25:47.721335Z"
    }
   },
   "outputs": [],
   "source": [
    "from transformers import get_linear_schedule_with_warmup\n",
    "from transformers import AdamW\n",
    "\n",
    "# 优化器 由 hugging face 提供\n",
    "optimizer = AdamW(model.parameters(),\n",
    "                  lr = 2e-5, # default is 5e-5\n",
    "                  eps = 1e-8 # default is 1e-8\n",
    "                 )\n",
    "\n",
    "# the number of batches times the number of epochs\n",
    "# 需要迭代多少个 batch\n",
    "total_steps = len(train_loader) * EPOCHS\n",
    "\n",
    "# the learning rate scheduler\n",
    "# 学习率衰减\n",
    "scheduler = get_linear_schedule_with_warmup(optimizer, \n",
    "                                            num_warmup_steps = 0,\n",
    "                                            num_training_steps = total_steps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T09:51:45.495874Z",
     "start_time": "2020-03-09T09:26:02.273449Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 所有的随机数种子都为 50\n",
    "seed = 50\n",
    "\n",
    "random.seed(seed)\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)\n",
    "\n",
    "# 训练损失\n",
    "loss_arr = []\n",
    "\n",
    "for i in range(0, EPOCHS):\n",
    "    # >>>>>>>>> Training >>>>>>>>> \n",
    "    print('====== Epoch {:} of {:}'.format(i+1, EPOCHS))\n",
    "    print('Training...')\n",
    "    \n",
    "    total_loss = 0\n",
    "    # initialize training mode\n",
    "    model.train()\n",
    "    \n",
    "    for step, batch in enumerate(train_loader):\n",
    "        # Unpacking the training batch from dataloader and copying each tensor to the GPU\n",
    "        b_input_ids = batch[0].long().to(device)       # x_train of this batch\n",
    "        b_input_mask = batch[1].long().to(device)     # attention mask\n",
    "        b_labels = batch[2].long().to(device)        # y_train\n",
    "        \n",
    "        # pytorch doesn't clear previously calculated gradients\n",
    "        # before performing backward pass, so clearing here:\n",
    "        model.zero_grad()\n",
    "\n",
    "        # 前向传播\n",
    "        outputs = model(b_input_ids,\n",
    "                       token_type_ids=None, \n",
    "                       attention_mask=b_input_mask,\n",
    "                       labels=b_labels)\n",
    "        loss = outputs[0]\n",
    "        \n",
    "        total_loss += loss.item()\n",
    "        # 反向传播计算梯度\n",
    "        loss.backward()\n",
    "        # TODO 这是啥 ?\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "        # 优化器更新权重\n",
    "        optimizer.step()\n",
    "        # update the learning rate\n",
    "        # 更新学习率\n",
    "        scheduler.step()\n",
    "    # <<<<<<<<<< Training <<<<<<<<<< \n",
    "    \n",
    "    # 计算本轮的平均损失\n",
    "    avg_train_loss = total_loss / len(train_loader)\n",
    "    \n",
    "    loss_arr.append(avg_train_loss)\n",
    "    print(\"  Average training loss: {}\".format(avg_train_loss))\n",
    "    \n",
    "    # >>>>>>>>> Validation >>>>>>>>> \n",
    "    \n",
    "    print(\"Validation...\")\n",
    "#     t0 = time.time()\n",
    "    # evaluation mode\n",
    "    model.eval()\n",
    "    \n",
    "    eval_loss, eval_accuracy = 0, 0\n",
    "    nb_eval_steps, nb_eval_examples = 0, 0\n",
    "    \n",
    "    for batch in val_loader:\n",
    "        b_input_ids = batch[0].long().to(device)   # x_val of this batch\n",
    "        b_input_mask = batch[1].long().to(device)  # attention_mask\n",
    "        b_labels = batch[2].long().to(device)      # y_val\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            outputs = model(b_input_ids, \n",
    "                           token_type_ids = None, \n",
    "                           attention_mask = b_input_mask)\n",
    "            \n",
    "        logits = outputs[0]\n",
    "        # move logits to cpu\n",
    "        logits = logits.detach().cpu().numpy()\n",
    "        label_ids = b_labels.to('cpu').numpy()\n",
    "        # get accuracy\n",
    "        tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n",
    "        # 验证精度\n",
    "        eval_accuracy += tmp_eval_accuracy\n",
    "        nb_eval_steps += 1\n",
    "        \n",
    "    # <<<<<<<<<< Validation <<<<<<<<<<\n",
    "    # torch.save(model, f\"./models/checkpoints/end_model{i+1}_trainLoss{avg_train_loss}_evalAcc{eval_accuracy/nb_eval_steps}.pkl\")\n",
    "    print(\"  Accuracy: {}\".format(eval_accuracy/nb_eval_steps))\n",
    "    \n",
    "print(\"Training complete!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 在测试集上测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T04:23:02.087402Z",
     "start_time": "2020-03-09T04:23:02.073402Z"
    }
   },
   "outputs": [],
   "source": [
    "# del x_train_input, x_val_input, train_data, train_loader, train_att_mask, train_mask, train_sampler\n",
    "# gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T04:31:24.069005Z",
     "start_time": "2020-03-09T04:31:23.869715Z"
    }
   },
   "outputs": [],
   "source": [
    "# del train_sampler, val_data, val_loader, val_mask, val_sampler\n",
    "# gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T04:35:51.475375Z",
     "start_time": "2020-03-09T04:35:51.290808Z"
    }
   },
   "outputs": [],
   "source": [
    "# del model, label_train\n",
    "# gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:02:33.205463Z",
     "start_time": "2020-03-09T10:02:31.402359Z"
    }
   },
   "outputs": [],
   "source": [
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
    "\n",
    "BATCH_SIZE = 16\n",
    "\n",
    "x_test = pd.read_csv(data_dir + \"test.csv\")['text']\n",
    "test_input = encode_tweet(x_test)\n",
    "test_input = pad_sequences(test_input)\n",
    "test_att_mask = get_att_mask(test_input)\n",
    "x_test_input = torch.Tensor(test_input)\n",
    "mask_test = torch.Tensor(test_att_mask)\n",
    "\n",
    "sample_submission = pd.read_csv(data_dir + \"sample_submission.csv\")\n",
    "pred_labels = np.array(sample_submission['target'])\n",
    "test_labels = torch.Tensor(pred_labels)\n",
    "\n",
    "# 要使用迭代器的方式加载数据, 需要凑出 y_test, 使用 example 中的 label 即可\n",
    "test_data = TensorDataset(x_test_input, mask_test, test_labels)\n",
    "test_sampler = SequentialSampler(test_data)\n",
    "test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:02:37.286908Z",
     "start_time": "2020-03-09T10:02:35.596699Z"
    }
   },
   "outputs": [],
   "source": [
    "# 加载完整模型\n",
    "# model = torch.load(\"./models/checkpoints/end_model2_trainLoss0.31985847927905897_evalAcc0.8269675925925926.pkl\")\n",
    "# model.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:03:36.616612Z",
     "start_time": "2020-03-09T10:02:59.753555Z"
    }
   },
   "outputs": [],
   "source": [
    "model.eval()\n",
    "\n",
    "# Tracking variables \n",
    "predictions , true_labels = [], []\n",
    "\n",
    "# Predict \n",
    "for batch in test_dataloader:\n",
    "    b_input_ids = batch[0].long().to(device)\n",
    "    b_input_mask = batch[1].long().to(device)\n",
    "    b_labels = batch[2].long().to(device)\n",
    "    \n",
    "    # Telling the model not to compute or store gradients, saving memory and \n",
    "    # speeding up prediction\n",
    "    with torch.no_grad():\n",
    "      # Forward pass, calculate logit predictions\n",
    "        outputs = model(b_input_ids, token_type_ids=None,\n",
    "                        attention_mask=b_input_mask)\n",
    "\n",
    "    logits = outputs[0]\n",
    "\n",
    "    # Move logits and labels to CPU\n",
    "    logits = logits.detach().cpu().numpy()\n",
    "    label_ids = b_labels.to('cpu').numpy()\n",
    "    # Store predictions and true labels\n",
    "    predictions.append(logits)\n",
    "    true_labels.append(label_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:04:03.145242Z",
     "start_time": "2020-03-09T10:04:03.140829Z"
    }
   },
   "outputs": [],
   "source": [
    "flat_predictions = [item for sublist in predictions for item in sublist]\n",
    "flat_predictions = np.argmax(flat_predictions, axis=1).flatten()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:04:04.629788Z",
     "start_time": "2020-03-09T10:04:04.626550Z"
    }
   },
   "outputs": [],
   "source": [
    "flat_predictions[0:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-03-09T10:04:10.754119Z",
     "start_time": "2020-03-09T10:04:10.745296Z"
    }
   },
   "outputs": [],
   "source": [
     "# Write predictions into the submission template and save to CSV\n",
     "sample_submission['target'] = flat_predictions\n",
     "sample_submission.to_csv('sub3.9_3.csv', index = False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试集 ```F1``` 0.83640"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "349.091px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "position": {
    "height": "657.699px",
    "left": "1355.43px",
    "right": "20px",
    "top": "42px",
    "width": "350px"
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
