{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from transformers import *\n",
    "from transformers import AdamW\n",
    "import torch.utils.data as Data\n",
    "import collections\n",
    "import os\n",
    "import random\n",
    "import tarfile\n",
    "import torch\n",
    "from torch import nn\n",
    "import torchtext.vocab as Vocab\n",
    "import pickle as pk\n",
    "import copy\n",
    "import time\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import random\n",
    "import torch.nn.functional as F\n",
    "from IPython.display import display,HTML\n",
    "import os\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "from torch.nn.utils.rnn import pack_padded_sequence\n",
    "from torch.nn.utils.rnn import pad_packed_sequence\n",
    "from torch.nn.utils.rnn import pack_sequence\n",
    "from torch.nn import CrossEntropyLoss, MSELoss\n",
    "import math\n",
    "device=torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n",
    "import argparse\n",
    "import glob\n",
    "import json\n",
    "import logging\n",
    "from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n",
    "from torch.utils.data.distributed import DistributedSampler\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "import torch.utils.data as Data\n",
    "logger = logging.getLogger(__name__)\n",
    "logging.basicConfig(\n",
    "    format=\"%(asctime)s - %(levelname)s - %(name)s -   %(message)s\",\n",
    "    datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
    "    level=logging.INFO \n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## modeltype 选择及模型加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "code_folding": [
     9,
     18,
     26,
     34
    ],
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/14/2020 01:08:02 - INFO - transformers.configuration_utils -   loading configuration file ./xlnet-pytorch/xlnet-base-cased-config.json\n",
      "04/14/2020 01:08:02 - INFO - transformers.configuration_utils -   Model config XLNetConfig {\n",
      "  \"_num_labels\": 5,\n",
      "  \"architectures\": [\n",
      "    \"XLNetLMHeadModel\"\n",
      "  ],\n",
      "  \"attn_type\": \"bi\",\n",
      "  \"bi_data\": false,\n",
      "  \"bos_token_id\": 1,\n",
      "  \"clamp_len\": -1,\n",
      "  \"d_head\": 64,\n",
      "  \"d_inner\": 3072,\n",
      "  \"d_model\": 768,\n",
      "  \"do_sample\": false,\n",
      "  \"dropout\": 0.1,\n",
      "  \"early_stopping\": false,\n",
      "  \"end_n_top\": 5,\n",
      "  \"eos_token_ids\": [\n",
      "    2\n",
      "  ],\n",
      "  \"ff_activation\": \"gelu\",\n",
      "  \"finetuning_task\": null,\n",
      "  \"id2label\": {\n",
      "    \"0\": \"LABEL_0\",\n",
      "    \"1\": \"LABEL_1\",\n",
      "    \"2\": \"LABEL_2\",\n",
      "    \"3\": \"LABEL_3\",\n",
      "    \"4\": \"LABEL_4\"\n",
      "  },\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"is_decoder\": false,\n",
      "  \"label2id\": {\n",
      "    \"LABEL_0\": 0,\n",
      "    \"LABEL_1\": 1,\n",
      "    \"LABEL_2\": 2,\n",
      "    \"LABEL_3\": 3,\n",
      "    \"LABEL_4\": 4\n",
      "  },\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"length_penalty\": 1.0,\n",
      "  \"max_length\": 20,\n",
      "  \"mem_len\": null,\n",
      "  \"model_type\": \"xlnet\",\n",
      "  \"n_head\": 12,\n",
      "  \"n_layer\": 12,\n",
      "  \"num_beams\": 1,\n",
      "  \"num_return_sequences\": 1,\n",
      "  \"output_attentions\": true,\n",
      "  \"output_hidden_states\": true,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": 5,\n",
      "  \"pruned_heads\": {},\n",
      "  \"repetition_penalty\": 1.0,\n",
      "  \"reuse_len\": null,\n",
      "  \"same_length\": false,\n",
      "  \"start_n_top\": 5,\n",
      "  \"summary_activation\": \"tanh\",\n",
      "  \"summary_last_dropout\": 0.1,\n",
      "  \"summary_type\": \"last\",\n",
      "  \"summary_use_proj\": true,\n",
      "  \"temperature\": 1.0,\n",
      "  \"top_k\": 50,\n",
      "  \"top_p\": 1.0,\n",
      "  \"torchscript\": false,\n",
      "  \"untie_r\": true,\n",
      "  \"use_bfloat16\": false,\n",
      "  \"vocab_size\": 32000\n",
      "}\n",
      "\n",
      "04/14/2020 01:08:02 - INFO - transformers.modeling_utils -   loading weights file ./xlnet-pytorch/pytorch_model.bin\n",
      "04/14/2020 01:08:08 - INFO - transformers.modeling_utils -   Weights of XLNetForSequenceClassification not initialized from pretrained model: ['sequence_summary.summary.weight', 'sequence_summary.summary.bias', 'logits_proj.weight', 'logits_proj.bias']\n",
      "04/14/2020 01:08:08 - INFO - transformers.modeling_utils -   Weights from pretrained model not used in XLNetForSequenceClassification: ['lm_loss.weight', 'lm_loss.bias']\n"
     ]
    }
   ],
   "source": [
    "hidden_dropout_prob = 0.3\n",
    "learning_rate = 2e-5\n",
    "weight_decay = 1e-2\n",
    "epochs = 6\n",
    "max_len=50\n",
    "batch_size = 16\n",
    "device=torch.device(\"cuda:3\" if torch.cuda.is_available() else \"cpu\")\n",
    "do_lower_case=True\n",
    "model_type='xlnet' #选择model type\n",
    "if model_type==\"roberta\":\n",
    "    bert_dir=\"./roberta-pytorch/\"\n",
    "    vocab=\"vocab.json\"\n",
    "    merge=\"merges.txt\"\n",
    "    config_file=\"roberta-base-config.json\"\n",
    "    config=RobertaConfig.from_pretrained(os.path.join(bert_dir,config_file),output_hidden_states=True,\n",
    "                                            output_attentions=True,hidden_dropout_prob=hidden_dropout_prob)\n",
    "    bert_model=RobertaModel.from_pretrained(bert_dir,config=config)\n",
    "    tokenizer=RobertaTokenizer(os.path.join(bert_dir,vocab),os.path.join(bert_dir,merge))\n",
    "elif model_type=='bert':\n",
    "    bert_dir=\"./bert-pytorch/\"\n",
    "    vocab=\"bert-base-uncased-vocab.txt\"\n",
    "    config_file='bert_config.json'\n",
    "    config=BertConfig.from_pretrained(os.path.join(bert_dir,config_file),output_hidden_states=True,\n",
    "                                            output_attentions=True,hidden_dropout_prob=hidden_dropout_prob)\n",
    "    bert_model=BertModel.from_pretrained(bert_dir,config=config)\n",
    "    tokenizer=BertTokenizer.from_pretrained(os.path.join(bert_dir,vocab),do_lower_case=do_lower_case)\n",
    "elif model_type=='albert':\n",
    "    bert_dir=\"./albert-base-pytorch/\"\n",
    "    vocab=\"albert-base-spiece.model\"\n",
    "    config_file=\"albert-base-config.json\"\n",
    "    config=AlbertConfig.from_pretrained(os.path.join(bert_dir,config_file),output_hidden_states=True,\n",
    "                                            output_attentions=True,hidden_dropout_prob=hidden_dropout_prob)\n",
    "    bert_model=AlbertModel.from_pretrained(bert_dir,config=config)\n",
    "    tokenizer=AlbertTokenizer(os.path.join(bert_dir,vocab),do_lower_case=do_lower_case)\n",
    "elif model_type=='xlnet':\n",
    "    bert_dir=\"./xlnet-pytorch/\"\n",
    "    vocab=\"xlnet-base-cased-spiece.model\"\n",
    "    config_file=\"xlnet-base-cased-config.json\"\n",
    "    config=XLNetConfig.from_pretrained(os.path.join(bert_dir,config_file),output_hidden_states=True,\n",
    "                                            output_attentions=True,hidden_dropout_prob=hidden_dropout_prob,num_labels=5)\n",
    "    bert_model=XLNetForSequenceClassification.from_pretrained(bert_dir,config=config)\n",
    "    tokenizer=XLNetTokenizer(os.path.join(bert_dir,vocab))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预处理数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "code_folding": [
     0,
     21,
     39
    ]
   },
   "outputs": [],
   "source": [
    "def convert_text_to_ids(tokenizer, text, max_len=100,model_type='bert'):\n",
    "    if isinstance(text,str):\n",
    "        output=tokenizer.encode_plus(text,max_length=max_len,pad_to_max_length=True,return_tensors=\"pt\")\n",
    "        input_ids=output[\"input_ids\"].squeeze(0)\n",
    "        if model_type=='bert' or model_type=='albert':\n",
    "            token_type_ids=output[\"token_type_ids\"].squeeze(0)\n",
    "        attention_mask=output[\"attention_mask\"].squeeze(0)\n",
    "    elif isinstance(text,list):\n",
    "        input_ids,token_type_ids,attention_mask=[],[],[]\n",
    "        for e in text:\n",
    "            output=tokenizer.encode_plus(e,max_length=max_len,pad_to_max_length=True,return_tensors=\"pt\")\n",
    "            input_ids.append(output[\"input_ids\"].squeeze(0))\n",
    "            if model_type=='bert' or model_type=='albert':\n",
    "                token_type_ids.append(output[\"token_type_ids\"].squeeze(0))\n",
    "            attention_mask.append(output[\"attention_mask\"].squeeze(0))\n",
    "    else:\n",
    "        raise Exception('type error')\n",
    "    if model_type=='bert' or model_type=='albert':\n",
    "        return torch.stack(input_ids).long(),torch.stack(token_type_ids).long(),torch.stack(attention_mask).long()\n",
    "    else:\n",
    "        return torch.stack(input_ids).long(),torch.stack(attention_mask).long() \n",
    "class ClsDataset(Data.Dataset):\n",
    "    def __init__(self,docs,doc_labels,tokenizer,max_len,model_type='bert',train_ids=None):\n",
    "        self.dataset=docs\n",
    "        self.tokenizer=tokenizer\n",
    "        self.label=torch.tensor(doc_labels).long()\n",
    "        self.model_type=model_type\n",
    "        self.train_ids=train_ids\n",
    "        if model_type=='bert' or model_type=='albert' or model_type=='xlnet':\n",
    "            self.input_ids,self.token_type_ids,self.attention_mask=convert_text_to_ids(tokenizer,self.dataset,max_len)\n",
    "        else:\n",
    "            self.input_ids,self.attention_mask=convert_text_to_ids(tokenizer,self.dataset,max_len,self.model_type)\n",
    "    def __len__(self):\n",
    "        return len(self.dataset)\n",
    "    def __getitem__(self,idx):\n",
    "        if model_type=='bert' or model_type=='albert' or model_type=='xlnet':\n",
    "            return self.input_ids[idx],self.attention_mask[idx],self.token_type_ids[idx],self.label[idx],self.train_ids[idx]\n",
    "        else:\n",
    "            return self.input_ids[idx],self.attention_mask[idx],self.label[idx],self.train_ids[idx]\n",
    "class BertClassify(nn.Module):\n",
    "    def __init__(self,model,num_labels,dropout_rate,model_type='bert'):\n",
    "        super(BertClassify,self).__init__()\n",
    "        self.bert=model\n",
    "        self.model_type=model_type\n",
    "        if self.model_type=='albert':\n",
    "            self.Dense=nn.Sequential(nn.Dropout(dropout_rate),nn.Linear(model.pooler.out_features,num_labels))\n",
    "        else:\n",
    "            self.Dense=nn.Sequential(nn.Dropout(dropout_rate),nn.Linear(model.pooler.dense.out_features,num_labels))\n",
    "    def forward(self,X,attention_mask,token_type_ids=None):\n",
    "        if model_type=='bert' or model_type=='albert':\n",
    "            pooled_output=self.bert(X,attention_mask=attention_mask,token_type_ids=token_type_ids)[1]\n",
    "        else:\n",
    "            pooled_output=self.bert(X,attention_mask=attention_mask)[1]           \n",
    "        return self.Dense(pooled_output),pooled_output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "text=pd.read_csv(\"./text_classify/data/text.csv\")\n",
    "ids=max(text['id'].values)\n",
    "id2text=[[] for i in range(ids+1)]\n",
    "for idx in range(text.shape[0]):\n",
    "    id2text[text.loc[idx,'id']]=text.loc[idx,'title']\n",
    "id2text=np.array(id2text)\n",
    "train_info=pd.read_csv(\"./text_classify/data/train.csv\")\n",
    "full_texts=list(id2text[train_info['id'].values])\n",
    "full_labels=list(train_info['label'].values)\n",
    "full_ids=train_info['id'].values\n",
    "full_info=np.concatenate([np.array(full_texts)[:,np.newaxis],np.array(full_labels)[:,np.newaxis],full_ids[:,np.newaxis]],axis=1)\n",
    "np.random.seed(20)\n",
    "full_info=full_info[np.random.permutation(np.arange(full_info.shape[0]))]\n",
    "full_texts=(full_info[:,0])\n",
    "full_labels=(full_info[:,1].astype(np.int8))\n",
    "full_ids=(full_info[:,2].astype(np.int))\n",
    "num_classes=len(set(full_labels))\n",
    "test_info=pd.read_csv(\"./text_classify/data/test.csv\")\n",
    "test_texts=id2text[test_info['id'].values]\n",
    "test_labels=np.zeros(test_texts.shape[0])\n",
    "test_ids=test_info['id'].values"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## XLnet finetune"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataset=ClsDataset(list(full_texts),full_labels,tokenizer,max_len,model_type,full_ids)  #使用全部训练集最后整合训练\n",
    "val_dataset=ClsDataset(list(full_texts[10000:]),full_labels[10000:],tokenizer,max_len,model_type,full_ids[10000:])\n",
    "train_loader =Data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n",
    "val_loader =Data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_dataset=ClsDataset(list(test_texts),test_labels,tokenizer,max_len,model_type,test_ids)\n",
    "test_loader =Data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "code_folding": [
     0,
     25,
     43
    ]
   },
   "outputs": [],
   "source": [
    "def train_xlnet(model, train_iter,test_iter, optimizer, loss, device,num_epochs,model_type='bert'):\n",
    "    model=model.to(device)\n",
    "    for epoch  in range(num_epochs):\n",
    "        model.train()\n",
    "        epoch_loss=0\n",
    "        epoch_acc=0.\n",
    "        for i,batch in tqdm(enumerate(train_iter)):\n",
    "            input_ids,attention_mask,token_type_ids=batch[:3]\n",
    "            label=batch[3]\n",
    "            label,input_ids,attention_mask,token_type_ids=label.to(device),input_ids.to(device),attention_mask.to(device),token_type_ids.to(device)\n",
    "            output=model(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids,labels=label)\n",
    "            l,logits=output[0],output[1]\n",
    "            label_pred_probs=logits\n",
    "            label_pred=label_pred_probs.argmax(dim=-1)\n",
    "            optimizer.zero_grad()\n",
    "            acc=(label_pred==label).sum().cpu().item()\n",
    "            l.backward()\n",
    "            optimizer.step()\n",
    "            epoch_acc+=acc\n",
    "            epoch_loss+=l.cpu().item()\n",
    "            if i % 200 == 0:\n",
    "                print(\"current loss:\", epoch_loss / (i+1), \"\\t\", \"current acc:\", epoch_acc / ((i+1)*len(label)))\n",
    "        test_loss,test_acc=evaluate_xlnet(model,test_iter,loss,device,model_type)\n",
    "        print(\"train epoch loss:%.3f epoch acc:%.3f\"%(epoch_loss / len(train_iter), epoch_acc / len(train_iter.dataset.dataset)))\n",
    "        print(\"test epoch loss:%.3f epoch acc:%.3f\"%(test_loss, test_acc))\n",
    "def evaluate_xlnet(model, iterator, loss, device,model_type):\n",
    "    model=model.to(device)\n",
    "    model.eval()\n",
    "    epoch_loss=0\n",
    "    epoch_acc=0.\n",
    "    for i,batch in tqdm(enumerate(iterator)):\n",
    "        input_ids,attention_mask,token_type_ids=batch[:3]\n",
    "        label=batch[3]\n",
    "        label,input_ids,attention_mask,token_type_ids=label.to(device),input_ids.to(device),attention_mask.to(device),token_type_ids.to(device)\n",
    "        output=model(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids,labels=label)\n",
    "        l,logits=output[0],output[1]\n",
    "        label_pred_probs=logits\n",
    "        label_pred=label_pred_probs.argmax(dim=-1)\n",
    "        acc=(label_pred==label).sum().cpu().item()\n",
    "        epoch_acc+=acc\n",
    "        epoch_loss+=l.cpu().item()\n",
    "    model.train()\n",
    "    return epoch_loss / len(iterator), epoch_acc / len(iterator.dataset.dataset)       \n",
    "def predict_xlnet(model, iterator,device,model_type):\n",
    "    model=model.to(device)\n",
    "    model.eval()\n",
    "    pred_answers=[]\n",
    "    pred_hiddens=[]\n",
    "    pred_ids=[]\n",
    "    for i,batch in tqdm(enumerate(iterator)):\n",
    "        input_ids,attention_mask,token_type_ids=batch[:3]\n",
    "        label=batch[3]\n",
    "        data_ids=batch[4]\n",
    "        label,input_ids,attention_mask,token_type_ids=label.to(device),input_ids.to(device),attention_mask.to(device),token_type_ids.to(device)\n",
    "        output=model(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids,labels=label)\n",
    "        l,logits=output[0],output[1]\n",
    "        label_pred_probs=logits\n",
    "        label_pred_probs=F.softmax(label_pred_probs,dim=-1)\n",
    "        label_pred=label_pred_probs.argmax(dim=-1)\n",
    "        pred_answers.append(label_pred_probs.detach().cpu().numpy())\n",
    "        pred_ids.append(data_ids.detach().cpu().numpy())\n",
    "        \n",
    "    model.train()\n",
    "    return np.concatenate(pred_answers),np.concatenate(pred_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:7: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  import sys\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f6aa2c0dad684efe8de2710b4e9d3862",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/torch/nn/functional.py:1374: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n",
      "  warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 1.5313221216201782 \t current acc: 0.375\n",
      "current loss: 1.3644680935352003 \t current acc: 0.4253731343283582\n",
      "current loss: 1.1953330982355703 \t current acc: 0.5151184538653366\n",
      "current loss: 1.1067595810640274 \t current acc: 0.5606281198003328\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:31: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "54ecc3f90bd54cabb19ff7133a9a3ba3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:1.051 epoch acc:0.587\n",
      "test epoch loss:0.706 epoch acc:0.759\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1cdc1baf4ab74b17884901e6e1c42ffa",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.5523432493209839 \t current acc: 0.625\n",
      "current loss: 0.7560927589141314 \t current acc: 0.716728855721393\n",
      "current loss: 0.7557130129334338 \t current acc: 0.7244389027431422\n",
      "current loss: 0.7574107005026892 \t current acc: 0.723481697171381\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c7f96da5a41a467fa0fdc6e267696236",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.758 epoch acc:0.723\n",
      "test epoch loss:0.524 epoch acc:0.817\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "fb9983fc074e4aab8442a1769bfb5ee5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.5655407905578613 \t current acc: 0.75\n",
      "current loss: 0.6212449041172047 \t current acc: 0.7730099502487562\n",
      "current loss: 0.6158514521514389 \t current acc: 0.7805486284289277\n",
      "current loss: 0.6155421936918614 \t current acc: 0.781198003327787\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5f79602f49e44ac498308544d405e672",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.618 epoch acc:0.781\n",
      "test epoch loss:0.400 epoch acc:0.871\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "18f5379643e042f38eaba345169c8256",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.4704439043998718 \t current acc: 0.8125\n",
      "current loss: 0.4843805691051246 \t current acc: 0.8345771144278606\n",
      "current loss: 0.4936723480572427 \t current acc: 0.8298004987531172\n",
      "current loss: 0.4993281578164727 \t current acc: 0.8253951747088186\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "49095ea9fd324ef486f14fa0b7bb966f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.501 epoch acc:0.825\n",
      "test epoch loss:0.314 epoch acc:0.902\n"
     ]
    }
   ],
   "source": [
    "model=bert_model\n",
    "no_decay = ['bias', 'LayerNorm.weight']\n",
    "optim_group_params=[{'params':[ p for n,p in model.named_parameters() if not any(np in n for np in no_decay)],\"weight_decay\":weight_decay},\n",
    "                   {'params':[ p for n,p in model.named_parameters() if  any(np in n for np in no_decay)],\"weight_decay\":0}]\n",
    "optimizer=AdamW(optim_group_params,lr=learning_rate)\n",
    "# scheduler = get_linear_schedule_with_warmup(\n",
    "#     optimizer, num_warmup_steps=0, num_training_steps=0\n",
    "# )\n",
    "loss=nn.CrossEntropyLoss()\n",
    "train_xlnet(model,train_loader,val_loader,optimizer,loss,device,4,model_type)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:50: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "acd644ab58dd4871835aa9a28b3ef3c5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:3: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  This is separate from the ipykernel package so we can avoid doing imports until\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f55eb90216f64512b2f4f796e077f644",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=12782), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "pred_probs,pred_ids=predict_xlnet(model,test_loader,device,model_type)\n",
    "test_submit=pd.read_csv(\"./text_classify/data/sample.csv\")\n",
    "for idx in tqdm(range(pred_ids.shape[0])):\n",
    "    index=test_submit.query(\"id==%d\"%pred_ids[idx])['label'].index[0]\n",
    "    test_submit.loc[index,'label']=pred_probs[idx].argmax()\n",
    "test_submit.to_csv(\"./text_classify/answers/submit_\"+model_type+\".csv\",index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 其他model finetune"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "code_folding": [
     0,
     32,
     57
    ]
   },
   "outputs": [],
   "source": [
    "def train(model, train_iter,test_iter, optimizer, loss, device,num_epochs,model_type='bert'):\n",
    "    model=model.to(device)\n",
    "    for epoch  in range(num_epochs):\n",
    "        model.train()\n",
    "        epoch_loss=0\n",
    "        epoch_acc=0.\n",
    "        for i,batch in tqdm(enumerate(train_iter)):\n",
    "            if model_type=='bert' or model_type=='albert':\n",
    "                input_ids,attention_mask,token_type_ids=batch[:3]\n",
    "                label=batch[3]\n",
    "                label,input_ids,attention_mask,token_type_ids=label.to(device),input_ids.to(device),attention_mask.to(device),token_type_ids.to(device)\n",
    "                outputs,pooled_putput=model(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)\n",
    "            else:\n",
    "                input_ids,attention_mask=batch[:2]\n",
    "                label=batch[2]\n",
    "                label,input_ids,attention_mask=label.to(device),input_ids.to(device),attention_mask.to(device)\n",
    "                outputs,pooled_putput=model(input_ids,attention_mask=attention_mask)\n",
    "            label_pred_probs=outputs\n",
    "            label_pred=label_pred_probs.argmax(dim=-1)\n",
    "            optimizer.zero_grad()\n",
    "            l=loss(label_pred_probs,label)\n",
    "            acc=(label_pred==label).sum().cpu().item()\n",
    "            l.backward()\n",
    "            optimizer.step()\n",
    "            epoch_acc+=acc\n",
    "            epoch_loss+=l.cpu().item()\n",
    "            if i % 200 == 0:\n",
    "                print(\"current loss:\", epoch_loss / (i+1), \"\\t\", \"current acc:\", epoch_acc / ((i+1)*len(label)))\n",
    "        test_loss,test_acc=evaluate(model,test_iter,loss,device,model_type)\n",
    "        print(\"train epoch loss:%.3f epoch acc:%.3f\"%(epoch_loss / len(train_iter), epoch_acc / len(train_iter.dataset.dataset)))\n",
    "        print(\"test epoch loss:%.3f epoch acc:%.3f\"%(test_loss, test_acc))\n",
    "\n",
    "def evaluate(model, iterator, loss, device,model_type):\n",
    "    model=model.to(device)\n",
    "    model.eval()\n",
    "    epoch_loss=0\n",
    "    epoch_acc=0.\n",
    "    for i,batch in tqdm(enumerate(iterator)):\n",
    "        if model_type=='bert' or model_type=='albert':\n",
    "            input_ids,attention_mask,token_type_ids=batch[:3]\n",
    "            label=batch[3]\n",
    "            label,input_ids,attention_mask,token_type_ids=label.to(device),input_ids.to(device),attention_mask.to(device),token_type_ids.to(device)\n",
    "            outputs,pooled_output=model(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)\n",
    "        else:\n",
    "            input_ids,attention_mask=batch[:2]\n",
    "            label=batch[2]\n",
    "            label,input_ids,attention_mask=label.to(device),input_ids.to(device),attention_mask.to(device)\n",
    "            outputs,pooled_output=model(input_ids,attention_mask=attention_mask)\n",
    "        label_pred_probs=outputs\n",
    "        label_pred=label_pred_probs.argmax(dim=-1)\n",
    "        l=loss(label_pred_probs,label)\n",
    "        acc=(label_pred==label).sum().cpu().item()\n",
    "        epoch_acc+=acc\n",
    "        epoch_loss+=l.cpu().item()\n",
    "    model.train()\n",
    "    return epoch_loss / len(iterator), epoch_acc / len(iterator.dataset.dataset)       \n",
    "\n",
    "def predict(model, iterator,device,model_type):\n",
    "    \"\"\"Run the model in eval mode over `iterator` and collect predictions.\n",
    "\n",
    "    Returns (probs, hiddens, ids): softmaxed class probabilities, pooled\n",
    "    hidden states and sample ids, each concatenated over all batches.\n",
    "    \"\"\"\n",
    "    model=model.to(device)\n",
    "    model.eval()\n",
    "    pred_answers=[]\n",
    "    pred_hiddens=[]\n",
    "    pred_ids=[]\n",
    "    # no_grad: inference needs no autograd graph, saving GPU memory\n",
    "    with torch.no_grad():\n",
    "        for i,batch in tqdm(enumerate(iterator)):\n",
    "            # bert/albert batches carry token_type_ids; other model types do not\n",
    "            if model_type=='bert' or model_type=='albert':\n",
    "                input_ids,attention_mask,token_type_ids=batch[:3]\n",
    "                data_ids=batch[4]\n",
    "                input_ids,attention_mask,token_type_ids=input_ids.to(device),attention_mask.to(device),token_type_ids.to(device)\n",
    "                outputs,pooled_output=model(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)\n",
    "            else:\n",
    "                input_ids,attention_mask=batch[:2]\n",
    "                data_ids=batch[3]\n",
    "                input_ids,attention_mask=input_ids.to(device),attention_mask.to(device)\n",
    "                outputs,pooled_output=model(input_ids,attention_mask=attention_mask)\n",
    "            label_pred_probs=F.softmax(outputs,dim=-1)\n",
    "            # move results off the GPU so numpy can concatenate them\n",
    "            pred_answers.append(label_pred_probs.cpu().numpy())\n",
    "            pred_hiddens.append(pooled_output.cpu().numpy())\n",
    "            pred_ids.append(data_ids.cpu().numpy())\n",
    "    model.train()  # restore training mode for subsequent cells\n",
    "    return np.concatenate(pred_answers),np.concatenate(pred_hiddens,axis=0),np.concatenate(pred_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the pretrained encoder in the classification head.\n",
    "# 0.3 is presumably the classifier dropout rate — confirm against BertClassify's signature.\n",
    "model=BertClassify(bert_model,num_classes,0.3,model_type)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# AdamW with weight decay on all parameters except biases and LayerNorm weights,\n",
    "# the standard grouping for transformer fine-tuning.\n",
    "no_decay = ['bias', 'LayerNorm.weight']\n",
    "# comprehension variable named `nd` (not `np`) so it does not shadow the numpy alias\n",
    "optim_group_params=[{'params':[ p for n,p in model.named_parameters() if not any(nd in n for nd in no_decay)],\"weight_decay\":weight_decay},\n",
    "                   {'params':[ p for n,p in model.named_parameters() if  any(nd in n for nd in no_decay)],\"weight_decay\":0}]\n",
    "optimizer=AdamW(optim_group_params,lr=learning_rate)\n",
    "# scheduler = get_linear_schedule_with_warmup(\n",
    "#     optimizer, num_warmup_steps=0, num_training_steps=0\n",
    "# )\n",
    "loss=nn.CrossEntropyLoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:7: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  import sys\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "393fad9dea554ceeb95010ba7c6e2e37",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 2.1686549186706543 \t current acc: 0.0\n",
      "current loss: 1.5148746510643272 \t current acc: 0.32338308457711445\n",
      "current loss: 1.3432993063902914 \t current acc: 0.430642144638404\n",
      "current loss: 1.2517695727443536 \t current acc: 0.4822171381031614\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:38: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "317bf9794cd1495b84d18b70c38005ce",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:1.242 epoch acc:0.487\n",
      "test epoch loss:0.994 epoch acc:0.630\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "33c16bd1da1d4f90a888d3727856a358",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.7308200001716614 \t current acc: 0.75\n",
      "current loss: 0.9962818681897215 \t current acc: 0.6147388059701493\n",
      "current loss: 0.9926096304991001 \t current acc: 0.6218827930174564\n",
      "current loss: 0.9811513224576357 \t current acc: 0.6269758735440932\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "65ca069ebc1e4240bf54d72fe6894870",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.983 epoch acc:0.626\n",
      "test epoch loss:0.868 epoch acc:0.682\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d85f06e0032c4c3e81fc280075282a43",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.825111985206604 \t current acc: 0.625\n",
      "current loss: 0.9137163991062203 \t current acc: 0.6520522388059702\n",
      "current loss: 0.8903067632209036 \t current acc: 0.6647443890274314\n",
      "current loss: 0.8884758433069841 \t current acc: 0.6661813643926788\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a152f70a82604726998150fa7a8a4c12",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.889 epoch acc:0.665\n",
      "test epoch loss:0.836 epoch acc:0.693\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1a6562a433db437782d5eced2e7b1b0d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.7357301712036133 \t current acc: 0.75\n",
      "current loss: 0.8340525002918433 \t current acc: 0.6884328358208955\n",
      "current loss: 0.8219924659057151 \t current acc: 0.6904613466334164\n",
      "current loss: 0.8219151362999901 \t current acc: 0.6910357737104825\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "108438a5d8c740739719c7267ec889aa",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.819 epoch acc:0.693\n",
      "test epoch loss:0.816 epoch acc:0.713\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "0d7e977a75af49e5b0f8f4092dc73253",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current loss: 0.6451501250267029 \t current acc: 0.8125\n",
      "current loss: 0.7504726878890944 \t current acc: 0.7161069651741293\n",
      "current loss: 0.7448895093137189 \t current acc: 0.7178927680798005\n",
      "current loss: 0.7492430424333214 \t current acc: 0.7175540765391015\n",
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3db39fdfaf904265ba4440ddcad93316",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train epoch loss:0.751 epoch acc:0.716\n",
      "test epoch loss:0.787 epoch acc:0.722\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): train_dataset uses ALL samples (final full-data training), so\n",
    "# val_dataset (samples 10000+) overlaps the training data — its metrics are optimistic.\n",
    "train_dataset=ClsDataset(list(full_texts),full_labels,tokenizer,max_len,model_type,full_ids)  # final training on the full data\n",
    "val_dataset=ClsDataset(list(full_texts[10000:]),full_labels[10000:],tokenizer,max_len,model_type,full_ids[10000:])\n",
    "train_loader =Data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n",
    "val_loader =Data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=2)\n",
    "train(model,train_loader,val_loader,optimizer,loss,device,5,model_type)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict on the test set and write the submission file.\n",
    "pred_probs,pred_hiddens,pred_ids=predict(model,test_loader,device,model_type)\n",
    "test_submit=pd.read_csv(\"./text_classify/data/sample.csv\")\n",
    "# Build the id -> row-index map once: O(1) lookup per prediction instead of the\n",
    "# previous per-row DataFrame.query (which made the loop quadratic overall).\n",
    "id2index=dict(zip(test_submit['id'],test_submit.index))\n",
    "for idx in tqdm(range(pred_ids.shape[0])):\n",
    "    test_submit.loc[id2index[pred_ids[idx]],'label']=pred_probs[idx].argmax()\n",
    "test_submit.to_csv(\"./text_classify/answers/submit_\"+model_type+\".csv\",index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 交叉验证"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "code_folding": [
     3
    ]
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:4: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  after removing the cwd from sys.path.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "18c8be50db0b488681308bd8b6a975b5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=5), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/13/2020 20:47:15 - INFO - transformers.modeling_utils -   loading weights file ./bert-pytorch/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 3000]\n",
      "3000 12778\n",
      "current loss: 1.861026644706726 \t current acc: 0.125\n",
      "current loss: 1.4867623658915656 \t current acc: 0.33302238805970147\n",
      "current loss: 1.2708367968437975 \t current acc: 0.470074812967581\n",
      "current loss: 1.1532464223276955 \t current acc: 0.5295341098169717\n",
      "train epoch loss:1.148 epoch acc:0.532\n",
      "test epoch loss:0.857 epoch acc:0.698\n",
      "current loss: 0.5442276000976562 \t current acc: 0.8125\n",
      "current loss: 0.8599958258185221 \t current acc: 0.681592039800995\n",
      "current loss: 0.8249456903286408 \t current acc: 0.6973192019950125\n",
      "current loss: 0.8142970268817591 \t current acc: 0.7008111480865225\n",
      "train epoch loss:0.811 epoch acc:0.701\n",
      "test epoch loss:0.801 epoch acc:0.720\n",
      "current loss: 0.9812647700309753 \t current acc: 0.625\n",
      "current loss: 0.7066632478094813 \t current acc: 0.7456467661691543\n",
      "current loss: 0.6862298589840792 \t current acc: 0.751714463840399\n",
      "current loss: 0.6928691804012324 \t current acc: 0.7479201331114809\n",
      "train epoch loss:0.693 epoch acc:0.748\n",
      "test epoch loss:0.784 epoch acc:0.728\n",
      "current loss: 0.5354488492012024 \t current acc: 0.875\n",
      "current loss: 0.5935182245216559 \t current acc: 0.7842039800995025\n",
      "current loss: 0.6040816859265515 \t current acc: 0.7810162094763092\n",
      "current loss: 0.6149203734915586 \t current acc: 0.7784941763727121\n",
      "train epoch loss:0.615 epoch acc:0.779\n",
      "test epoch loss:0.814 epoch acc:0.726\n",
      "current loss: 0.3028388023376465 \t current acc: 0.875\n",
      "current loss: 0.5473817400078276 \t current acc: 0.8131218905472637\n",
      "current loss: 0.5472474560847603 \t current acc: 0.8096945137157108\n",
      "current loss: 0.5518448814476984 \t current acc: 0.8068843594009983\n",
      "train epoch loss:0.551 epoch acc:0.808\n",
      "test epoch loss:0.798 epoch acc:0.735\n",
      "current loss: 0.3748001754283905 \t current acc: 0.9375\n",
      "current loss: 0.4733718919071985 \t current acc: 0.8411069651741293\n",
      "current loss: 0.481377749185907 \t current acc: 0.8352556109725686\n",
      "current loss: 0.48265885367072164 \t current acc: 0.8332986688851913\n",
      "train epoch loss:0.483 epoch acc:0.834\n",
      "test epoch loss:0.858 epoch acc:0.734\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:64: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f1631dc527bf479ba62eb963d3c4008f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:32: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "2bf1dcd3163e4edfbf9bd152c1b635fc",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=3000), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/13/2020 20:55:07 - INFO - transformers.modeling_utils -   loading weights file ./bert-pytorch/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[3000, 6000]\n",
      "0 12778\n",
      "current loss: 1.5271714925765991 \t current acc: 0.3125\n",
      "current loss: 1.3848793002503428 \t current acc: 0.417910447761194\n",
      "current loss: 1.1965943422549383 \t current acc: 0.5215087281795511\n",
      "current loss: 1.0997411611175378 \t current acc: 0.5715474209650583\n",
      "train epoch loss:1.096 epoch acc:0.573\n",
      "test epoch loss:0.864 epoch acc:0.694\n",
      "current loss: 1.2121658325195312 \t current acc: 0.5625\n",
      "current loss: 0.8213258375279346 \t current acc: 0.6958955223880597\n",
      "current loss: 0.8097698823026291 \t current acc: 0.7004364089775561\n",
      "current loss: 0.7987845692777396 \t current acc: 0.7070507487520798\n",
      "train epoch loss:0.795 epoch acc:0.708\n",
      "test epoch loss:0.803 epoch acc:0.721\n",
      "current loss: 0.3852403163909912 \t current acc: 0.875\n",
      "current loss: 0.698234546836929 \t current acc: 0.75\n",
      "current loss: 0.6946493866957928 \t current acc: 0.7520261845386533\n",
      "current loss: 0.6879647425724543 \t current acc: 0.7554076539101497\n",
      "train epoch loss:0.689 epoch acc:0.755\n",
      "test epoch loss:0.800 epoch acc:0.720\n",
      "current loss: 0.5432217121124268 \t current acc: 0.875\n",
      "current loss: 0.6108458764962296 \t current acc: 0.7866915422885572\n",
      "current loss: 0.6114972645876711 \t current acc: 0.7872506234413965\n",
      "current loss: 0.611059522470103 \t current acc: 0.7866056572379367\n",
      "train epoch loss:0.612 epoch acc:0.786\n",
      "test epoch loss:0.752 epoch acc:0.739\n",
      "current loss: 0.6059759855270386 \t current acc: 0.8125\n",
      "current loss: 0.5315185826809252 \t current acc: 0.8190298507462687\n",
      "current loss: 0.5475764093627954 \t current acc: 0.8093827930174564\n",
      "current loss: 0.541835468020693 \t current acc: 0.8095881863560732\n",
      "train epoch loss:0.542 epoch acc:0.809\n",
      "test epoch loss:0.761 epoch acc:0.741\n",
      "current loss: 0.9464406967163086 \t current acc: 0.625\n",
      "current loss: 0.4717182823971136 \t current acc: 0.8407960199004975\n",
      "current loss: 0.47355073515762414 \t current acc: 0.8366583541147132\n",
      "current loss: 0.4811474638998806 \t current acc: 0.8314267886855241\n",
      "train epoch loss:0.481 epoch acc:0.831\n",
      "test epoch loss:0.799 epoch acc:0.736\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "22922e27804d43a1be7416d55adb63df",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "4e3b42f2ccb14c77a14de0bf3f293d94",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=3000), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/13/2020 21:02:51 - INFO - transformers.modeling_utils -   loading weights file ./bert-pytorch/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[6000, 9000]\n",
      "0 12778\n",
      "current loss: 1.6780409812927246 \t current acc: 0.1875\n",
      "current loss: 1.3703752373581501 \t current acc: 0.4085820895522388\n",
      "current loss: 1.1933897415598729 \t current acc: 0.5155860349127181\n",
      "current loss: 1.1068497983469145 \t current acc: 0.5627079866888519\n",
      "train epoch loss:1.102 epoch acc:0.565\n",
      "test epoch loss:0.816 epoch acc:0.701\n",
      "current loss: 0.8696891069412231 \t current acc: 0.6875\n",
      "current loss: 0.8504443475559577 \t current acc: 0.6871890547263682\n",
      "current loss: 0.8298276420543319 \t current acc: 0.6984102244389028\n",
      "current loss: 0.8134561874307135 \t current acc: 0.7062188019966722\n",
      "train epoch loss:0.814 epoch acc:0.707\n",
      "test epoch loss:0.779 epoch acc:0.728\n",
      "current loss: 0.800324559211731 \t current acc: 0.5625\n",
      "current loss: 0.7204525977817934 \t current acc: 0.7344527363184079\n",
      "current loss: 0.705682471914779 \t current acc: 0.7442331670822943\n",
      "current loss: 0.705177397602013 \t current acc: 0.7459442595673876\n",
      "train epoch loss:0.705 epoch acc:0.746\n",
      "test epoch loss:0.720 epoch acc:0.743\n",
      "current loss: 0.22907525300979614 \t current acc: 1.0\n",
      "current loss: 0.5982659741568921 \t current acc: 0.7863805970149254\n",
      "current loss: 0.614208448148725 \t current acc: 0.7821072319201995\n",
      "current loss: 0.6214282356710878 \t current acc: 0.7779742096505824\n",
      "train epoch loss:0.625 epoch acc:0.777\n",
      "test epoch loss:0.734 epoch acc:0.745\n",
      "current loss: 1.163883090019226 \t current acc: 0.5625\n",
      "current loss: 0.5580982228564979 \t current acc: 0.8044154228855721\n",
      "current loss: 0.5551737899197604 \t current acc: 0.8040835411471322\n",
      "current loss: 0.5585693910147703 \t current acc: 0.802828618968386\n",
      "train epoch loss:0.560 epoch acc:0.803\n",
      "test epoch loss:0.759 epoch acc:0.739\n",
      "current loss: 0.6533363461494446 \t current acc: 0.6875\n",
      "current loss: 0.47512657792117463 \t current acc: 0.8296019900497512\n",
      "current loss: 0.48877479135990143 \t current acc: 0.8263715710723192\n",
      "current loss: 0.4965385090243598 \t current acc: 0.8248752079866889\n",
      "train epoch loss:0.496 epoch acc:0.825\n",
      "test epoch loss:0.773 epoch acc:0.737\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "30383cbd3d734af6bfd214a4ab4e6ee5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "4e162512bdb34adfacaf7a0341119649",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=3000), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/13/2020 21:10:42 - INFO - transformers.modeling_utils -   loading weights file ./bert-pytorch/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[9000, 12000]\n",
      "0 12778\n",
      "current loss: 1.6500942707061768 \t current acc: 0.1875\n",
      "current loss: 1.3536867582382848 \t current acc: 0.41759950248756217\n",
      "current loss: 1.1831379251884404 \t current acc: 0.5169887780548629\n",
      "current loss: 1.0982032469822445 \t current acc: 0.5645798668885191\n",
      "train epoch loss:1.093 epoch acc:0.567\n",
      "test epoch loss:0.790 epoch acc:0.714\n",
      "current loss: 1.0042145252227783 \t current acc: 0.625\n",
      "current loss: 0.8280239911814827 \t current acc: 0.7005597014925373\n",
      "current loss: 0.8241316621143027 \t current acc: 0.7058915211970075\n",
      "current loss: 0.8141738559164342 \t current acc: 0.7055948419301165\n",
      "train epoch loss:0.814 epoch acc:0.705\n",
      "test epoch loss:0.724 epoch acc:0.748\n",
      "current loss: 0.8383857011795044 \t current acc: 0.6875\n",
      "current loss: 0.7226389417452599 \t current acc: 0.7406716417910447\n",
      "current loss: 0.7107088074199577 \t current acc: 0.7422069825436409\n",
      "current loss: 0.7107308124890939 \t current acc: 0.7433444259567388\n",
      "train epoch loss:0.710 epoch acc:0.744\n",
      "test epoch loss:0.722 epoch acc:0.747\n",
      "current loss: 1.0313900709152222 \t current acc: 0.625\n",
      "current loss: 0.646117825561495 \t current acc: 0.7689676616915423\n",
      "current loss: 0.6350415341574652 \t current acc: 0.7718204488778054\n",
      "current loss: 0.6292977991843978 \t current acc: 0.7731905158069884\n",
      "train epoch loss:0.627 epoch acc:0.774\n",
      "test epoch loss:0.719 epoch acc:0.755\n",
      "current loss: 1.1321823596954346 \t current acc: 0.625\n",
      "current loss: 0.5346236429107722 \t current acc: 0.8149875621890548\n",
      "current loss: 0.5469358585347559 \t current acc: 0.8059538653366584\n",
      "current loss: 0.5563014858276792 \t current acc: 0.800436772046589\n",
      "train epoch loss:0.556 epoch acc:0.800\n",
      "test epoch loss:0.693 epoch acc:0.761\n",
      "current loss: 0.5321643948554993 \t current acc: 0.8125\n",
      "current loss: 0.4656107874577911 \t current acc: 0.8389303482587065\n",
      "current loss: 0.4874110456528212 \t current acc: 0.831359102244389\n",
      "current loss: 0.4986512655897466 \t current acc: 0.8248752079866889\n",
      "train epoch loss:0.499 epoch acc:0.824\n",
      "test epoch loss:0.729 epoch acc:0.753\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d33151a18b8f4f4faa22828bdba0a040",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "24e8a49be2b14602b9a3fe3a17d179b8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=3000), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/13/2020 21:18:27 - INFO - transformers.modeling_utils -   loading weights file ./bert-pytorch/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12000, 12779]\n",
      "0 11999\n",
      "current loss: 1.5687556266784668 \t current acc: 0.125\n",
      "current loss: 1.3763537445471654 \t current acc: 0.4060945273631841\n",
      "current loss: 1.1888867690973448 \t current acc: 0.515430174563591\n",
      "current loss: 1.0997120754393483 \t current acc: 0.5618760399334443\n",
      "train epoch loss:1.058 epoch acc:0.585\n",
      "test epoch loss:0.800 epoch acc:0.730\n",
      "current loss: 1.0027871131896973 \t current acc: 0.6875\n",
      "current loss: 0.797228983385646 \t current acc: 0.7086442786069652\n",
      "current loss: 0.7976326215594189 \t current acc: 0.7060473815461347\n",
      "current loss: 0.782864362472504 \t current acc: 0.7144342762063228\n",
      "train epoch loss:0.780 epoch acc:0.715\n",
      "test epoch loss:0.755 epoch acc:0.754\n",
      "current loss: 0.578153133392334 \t current acc: 0.875\n",
      "current loss: 0.679845740842582 \t current acc: 0.7540422885572139\n",
      "current loss: 0.6897802646469297 \t current acc: 0.7471945137157108\n",
      "current loss: 0.6827738069357372 \t current acc: 0.7535357737104825\n",
      "train epoch loss:0.681 epoch acc:0.754\n",
      "test epoch loss:0.715 epoch acc:0.764\n",
      "current loss: 0.38691630959510803 \t current acc: 0.875\n",
      "current loss: 0.579871741471006 \t current acc: 0.8000621890547264\n",
      "current loss: 0.5789304036377672 \t current acc: 0.7947319201995012\n",
      "current loss: 0.5917467999081445 \t current acc: 0.7901414309484193\n",
      "train epoch loss:0.603 epoch acc:0.784\n",
      "test epoch loss:0.691 epoch acc:0.770\n",
      "current loss: 0.5346271395683289 \t current acc: 0.8125\n",
      "current loss: 0.5142704019350792 \t current acc: 0.8230721393034826\n",
      "current loss: 0.534437732580594 \t current acc: 0.8137468827930174\n",
      "current loss: 0.5378970497112703 \t current acc: 0.8106281198003328\n",
      "train epoch loss:0.538 epoch acc:0.810\n",
      "test epoch loss:0.739 epoch acc:0.763\n",
      "current loss: 0.5313982963562012 \t current acc: 0.8125\n",
      "current loss: 0.46505301312279346 \t current acc: 0.8327114427860697\n",
      "current loss: 0.4860495572487018 \t current acc: 0.827930174563591\n",
      "current loss: 0.47465787925904684 \t current acc: 0.8327787021630616\n",
      "train epoch loss:0.478 epoch acc:0.831\n",
      "test epoch loss:0.751 epoch acc:0.754\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5e7d3452d12e4d759df361ba2d6a7ce8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "fd4f68825489455f8519258fd7bc0b72",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=779), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 5-fold cross-validation: train a fresh model per fold, record each fold's\n",
    "# out-of-fold probabilities/hidden states, and save the fold checkpoints.\n",
    "length=3000  # fold size; the last fold is clipped to the dataset length\n",
    "id2hidden_and_prob={}\n",
    "flag=False\n",
    "for i in tqdm(range(5)):\n",
    "    val_idx=[i*length,min(full_texts.shape[0],(i+1)*length)]\n",
    "    # comprehension variable named `j` so it does not shadow the fold index `i`\n",
    "    train_idx=[j for j in range(full_texts.shape[0]) if j<val_idx[0] or j>=val_idx[1]]\n",
    "    val_set=list(full_texts[val_idx[0]:val_idx[1]])\n",
    "    val_labels=list(full_labels[val_idx[0]:val_idx[1]])\n",
    "    val_ids=full_ids[val_idx[0]:val_idx[1]]\n",
    "    train_set=list(full_texts[train_idx])\n",
    "    train_labels=list(full_labels[train_idx])\n",
    "    train_ids=full_ids[train_idx]\n",
    "    train_dataset=ClsDataset(train_set,train_labels,tokenizer,max_len,model_type,train_ids)\n",
    "    val_dataset=ClsDataset(val_set,val_labels,tokenizer,max_len,model_type,val_ids)\n",
    "    train_loader =Data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n",
    "    val_loader =Data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=2)\n",
    "    print(val_idx)\n",
    "    print(train_idx[0],train_idx[-1])\n",
    "    # reload pretrained weights so every fold starts from the same initialization\n",
    "    if model_type=='roberta':\n",
    "        bert_model=RobertaModel.from_pretrained(bert_dir,config=config)\n",
    "    elif model_type=='bert':\n",
    "        bert_model=BertModel.from_pretrained(bert_dir,config=config)\n",
    "    # FIX: pass model_type instead of the hard-coded 'bert' (the single-split\n",
    "    # setup earlier in the notebook constructs BertClassify with model_type)\n",
    "    model=BertClassify(bert_model,num_classes,0.3,model_type)\n",
    "    no_decay = ['bias', 'LayerNorm.weight']\n",
    "    optim_group_params=[{'params':[ p for n,p in model.named_parameters() if not any(nd in n for nd in no_decay)],\"weight_decay\":weight_decay},\n",
    "                       {'params':[ p for n,p in model.named_parameters() if  any(nd in n for nd in no_decay)],\"weight_decay\":0}]\n",
    "    optimizer=AdamW(optim_group_params,lr=learning_rate)\n",
    "    loss=nn.CrossEntropyLoss()\n",
    "    train(model,train_loader,val_loader,optimizer,loss,device,5,model_type)\n",
    "    pred_probs,pred_hiddens,pred_ids=predict(model,val_loader,device,model_type)\n",
    "\n",
    "    # sanity check: every sample id must land in exactly one validation fold\n",
    "    for idx in tqdm(range(pred_ids.shape[0])):\n",
    "        if pred_ids[idx] in id2hidden_and_prob:\n",
    "            print(\"error!\")\n",
    "            flag=True\n",
    "            break\n",
    "        id2hidden_and_prob[pred_ids[idx]]=[pred_probs[idx],pred_hiddens[idx]]\n",
    "    if flag:\n",
    "        break\n",
    "    torch.save(model.state_dict(),\"./text_classify/models/\"+model_type+\"_\"+str(i))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "code_folding": []
   },
   "outputs": [],
   "source": [
    "# Ensemble inference on the test set: load each of the 5 fold-trained\n",
    "# checkpoints, average their predictions, merge the result into\n",
    "# id2hidden_and_prob, and persist the combined mapping to disk.\n",
    "test_dataset=ClsDataset(list(test_texts),test_labels,tokenizer,max_len,model_type,test_ids)\n",
    "test_loader =Data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)\n",
    "pred_probs_list,pred_hiddens_list=[],[]\n",
    "for i in tqdm(range(5)):\n",
    "    if model_type=='roberta':\n",
    "        bert_model=RobertaModel.from_pretrained(bert_dir,config=config)\n",
    "    elif model_type=='bert':\n",
    "        bert_model=BertModel.from_pretrained(bert_dir,config=config)\n",
    "    # NOTE(review): last argument hard-coded to 'bert' (consistent with the\n",
    "    # training cell) -- confirm this is intentional for model_type=='roberta'.\n",
    "    model=BertClassify(bert_model,num_classes,0.3,'bert')\n",
    "    # map_location lets the checkpoint (saved from cuda:1) load correctly even\n",
    "    # on a CPU-only session or a machine with different GPU numbering.\n",
    "    model.load_state_dict(torch.load(\"./text_classify/models/\"+model_type+\"_\"+str(i),map_location=device))\n",
    "    pred_probs,pred_hiddens,pred_ids=predict(model,test_loader,device,model_type)\n",
    "    pred_probs_list.append(pred_probs)\n",
    "    pred_hiddens_list.append(pred_hiddens)\n",
    "# Average the 5 folds' outputs. pred_ids from the last fold is reused for all:\n",
    "# test_loader is not shuffled, so ids are identical across folds.\n",
    "pred_probs=np.stack(pred_probs_list).mean(axis=0)\n",
    "pred_hiddens=np.stack(pred_hiddens_list).mean(axis=0)\n",
    "for idx in tqdm(range(pred_ids.shape[0])):\n",
    "    # A test id already present in id2hidden_and_prob means the test and\n",
    "    # training id spaces overlap; flag and stop before overwriting.\n",
    "    if id2hidden_and_prob.get(pred_ids[idx],0)!=0:\n",
    "        print(\"error!\")\n",
    "        flag=True\n",
    "        break\n",
    "    id2hidden_and_prob[pred_ids[idx]]=[pred_probs[idx],pred_hiddens[idx]]\n",
    "# Use a context manager so the file handle is closed even if pickling fails.\n",
    "with open(\"./text_classify/data/\"+model_type+\"_hidden_and_probs.pk\",\"wb\") as f:\n",
    "    pk.dump(id2hidden_and_prob,f)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fjw",
   "language": "python",
   "name": "fjw"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
