{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
    "# --- Imports and global configuration for the ensemble-training notebook ---\n",
    "import os\n",
    "from common.configs.tools import reversed_label, set_seed, predict, weights_init_uniform_rule, seed_num, save_json\n",
    "# from common.configs import Helper\n",
    "from torch.autograd import Variable\n",
    "from data import to_data_loader, load_data\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import random\n",
    "import torch.nn.init as init\n",
    "from tqdm import tqdm\n",
    "from train import EarlyStopping\n",
    "from train import evaluate\n",
    "from torch.optim import lr_scheduler\n",
    "from tensorboardX import SummaryWriter\n",
    "import time\n",
    "\n",
    "# TensorBoard writer used by later cells for loss curves and weight histograms.\n",
    "writer = SummaryWriter('./Resultlog')\n",
    "\n",
    "# Seed torch and random for reproducibility (seed_num comes from common.configs.tools).\n",
    "# NOTE(review): numpy is not seeded here; confirm whether set_seed (imported above,\n",
    "# unused) was meant to be called instead.\n",
    "torch.manual_seed(seed_num)\n",
    "random.seed(seed_num)\n",
    "\n",
    "if torch.cuda.is_available():\n",
    "    print('gpu is available: {}'.format(torch.cuda.get_device_name(0)))\n",
    "    print('device count: {}'.format(torch.cuda.device_count()))\n",
    "\n",
    "# All models and batches in this notebook are moved to this device.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print('Using device:', device)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Using device: cpu\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "source": [
    "def load_models(models_path):\n",
    "    \"\"\"Recursively load every TorchScript model found under models_path.\n",
    "\n",
    "    Returns a dict keyed by each loaded module's class name; files that map\n",
    "    to the same class name overwrite each other (last one loaded wins).\n",
    "    \"\"\"\n",
    "    models = {}\n",
    "    for root, dirs, files in os.walk(models_path):\n",
    "        for file in files:\n",
    "            # os.path.join is portable, unlike manual '/'-concatenation.\n",
    "            model = torch.jit.load(os.path.join(root, file))\n",
    "            models[model.__class__.__name__] = model\n",
    "    return models\n",
    "\n",
    "def get_data(batch_size):\n",
    "    \"\"\"Return (train_loader, val_loader, test_texts, word2idx) for training.\"\"\"\n",
    "    # Tokenised corpus: unigrams, sequences capped at 64 tokens.\n",
    "    _, input_ids, test_texts, labels, word2idx, _ = load_data(\n",
    "            gram=1, max_len=64)\n",
    "\n",
    "    # Fixed 90/10 split so runs are comparable across notebook sessions.\n",
    "    x_tr, x_val, y_tr, y_val = train_test_split(\n",
    "        input_ids, labels, test_size=0.1, random_state=42)\n",
    "\n",
    "    loader_tr, loader_val = to_data_loader(\n",
    "        x_tr.astype(float), x_val.astype(float), y_tr, y_val, batch_size=batch_size)\n",
    "    return loader_tr, loader_val, test_texts, word2idx\n",
    "\n",
    "def train(model, optimizer, loss_fn, train_dataloader, val_dataloader=None, device=torch.device('cpu'), epochs=10, patience=5):\n",
    "    \"\"\"Train ``model``, optionally validating each epoch with early stopping.\n",
    "\n",
    "    Returns (best_f1, best_accuracy) observed on the validation set; both\n",
    "    stay 0 when no ``val_dataloader`` is supplied.\n",
    "    \"\"\"\n",
    "    # Tracking best validation accuracy / F1\n",
    "    best_accuracy = 0\n",
    "    best_f1 = 0\n",
    "    early_stopping = EarlyStopping(\n",
    "        path=None, savecp=False, patience=patience, verbose=False)\n",
    "\n",
    "    # Start training loop\n",
    "    print(f\"{'Epoch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Val F1':^10} | {'Learning Rate':^10} | {'Elapsed':^9}\")\n",
    "    print(\"-\"*87)\n",
    "\n",
    "    valid_epochs_loss = []\n",
    "\n",
    "    # Halve the learning rate when the validation loss plateaus.\n",
    "    scheduler = lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, 'min', factor=0.5, patience=5, min_lr=0.0001)\n",
    "\n",
    "    for epoch_i in range(epochs):\n",
    "        # =======================================\n",
    "        #               Training\n",
    "        # =======================================\n",
    "\n",
    "        # Tracking time and loss\n",
    "        t0_epoch = time.time()\n",
    "        total_loss = 0\n",
    "\n",
    "        # Put the model into the training mode\n",
    "        model.train()\n",
    "\n",
    "        for step, batch in enumerate(train_dataloader):\n",
    "            # Load batch to the target device\n",
    "            b_input_ids, b_labels = tuple(t.to(device) for t in batch)\n",
    "\n",
    "            # Zero out any previously calculated gradients\n",
    "            model.zero_grad()\n",
    "\n",
    "            # Forward pass; embedding layers need integer token ids.\n",
    "            logits = model(b_input_ids.to(device).long())\n",
    "\n",
    "            # Compute loss and accumulate the loss values\n",
    "            loss = loss_fn(logits, b_labels)\n",
    "            total_loss += loss.item()\n",
    "\n",
    "            # BUGFIX: retain_graph=True was unnecessary for a single backward\n",
    "            # pass per batch and kept the whole graph alive, wasting memory.\n",
    "            loss.backward()\n",
    "\n",
    "            # Update parameters\n",
    "            optimizer.step()\n",
    "\n",
    "        learning_rate = optimizer.param_groups[-1]['lr']\n",
    "\n",
    "        # Calculate the average loss over the entire training data\n",
    "        avg_train_loss = total_loss / len(train_dataloader)\n",
    "\n",
    "        writer.add_scalar(\"train loss\", avg_train_loss, epoch_i)\n",
    "\n",
    "        for name, weight in model.named_parameters():\n",
    "            writer.add_histogram(name, weight, epoch_i)\n",
    "\n",
    "        # =======================================\n",
    "        #               Evaluation\n",
    "        # =======================================\n",
    "        if val_dataloader is not None:\n",
    "            # After the completion of each training epoch, measure the model's\n",
    "            # performance on our validation set.\n",
    "            val_loss, val_accuracy, val_f1 = evaluate(\n",
    "                model, val_dataloader, loss_fn, device)\n",
    "\n",
    "            # BUGFIX: this previously logged avg_train_loss under \"val loss\".\n",
    "            writer.add_scalar(\"val loss\", val_loss, epoch_i)\n",
    "\n",
    "            # Track the best accuracy / F1\n",
    "            if val_accuracy > best_accuracy:\n",
    "                best_accuracy = val_accuracy\n",
    "\n",
    "            if val_f1 > best_f1:\n",
    "                best_f1 = val_f1\n",
    "\n",
    "            valid_epochs_loss.append(val_loss)\n",
    "\n",
    "            # Print performance over the entire training data\n",
    "            time_elapsed = time.time() - t0_epoch\n",
    "            scheduler.step(val_loss)\n",
    "            print(f\"{epoch_i + 1:^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {val_f1:^9.4f} | {learning_rate:^9.4f} | {time_elapsed:^9.2f}\")\n",
    "\n",
    "            # BUGFIX: early stopping is now guarded by the validation branch;\n",
    "            # it used to raise IndexError (empty valid_epochs_loss) when\n",
    "            # val_dataloader was None.\n",
    "            early_stopping(\n",
    "                val_loss=valid_epochs_loss[-1], model=model)\n",
    "            if early_stopping.early_stop:\n",
    "                print(\"Early stopping\")\n",
    "                break\n",
    "\n",
    "    print(\"\\n\")\n",
    "    print(\n",
    "        f\"Training complete! Best results: [f1] {best_f1:.2f} [accuracy] {best_accuracy:.2f}%.\")\n",
    "    return best_f1, best_accuracy"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "source": [
    "# train_dataloader, val_dataloader, test_texts, word2idx = get_data(16)\n",
    "# Load all serialized TorchScript base models from disk.\n",
    "# NOTE(review): the saved output shows a PyTorch file-format version error\n",
    "# (file version 3 vs. reader max 1) - the installed torch may be too old\n",
    "# for these checkpoints.\n",
    "models = load_models(r'common/models')"
   ],
   "outputs": [
    {
     "output_type": "error",
     "ename": "RuntimeError",
     "evalue": "version_number <= kMaxSupportedFileFormatVersion INTERNAL ASSERT FAILED at /pytorch/caffe2/serialize/inline_container.cc:131, please report a bug to PyTorch. Attempted to read a PyTorch file with version 3, but the maximum supported version for reading is 1. Your PyTorch installation may be too old. (init at /pytorch/caffe2/serialize/inline_container.cc:131)\nframe #0: c10::Error::Error(c10::SourceLocation, std::string const&) + 0x33 (0x7fda4805c273 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libc10.so)\nframe #1: caffe2::serialize::PyTorchStreamReader::init() + 0x1e9a (0x7fd9f7c11a4a in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so)\nframe #2: caffe2::serialize::PyTorchStreamReader::PyTorchStreamReader(std::string const&) + 0x60 (0x7fd9f7c12cb0 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so)\nframe #3: torch::jit::import_ir_module(std::shared_ptr<torch::jit::script::CompilationUnit>, std::string const&, c10::optional<c10::Device>, std::unordered_map<std::string, std::string, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, std::string> > >&) + 0x38 (0x7fd9f8ce4368 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so)\nframe #4: <unknown function> + 0x4d80fc (0x7fda3acbd0fc in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch_python.so)\nframe #5: <unknown function> + 0x1d4804 (0x7fda3a9b9804 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch_python.so)\nframe #6: _PyMethodDef_RawFastCallKeywords + 0x264 (0x7fda6ebc8114 in /home/alan/anaconda3/bin/python)\nframe #7: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #8: _PyEval_EvalFrameDefault + 0x4e9d (0x7fda6ec2ca5d in /home/alan/anaconda3/bin/python)\nframe #9: _PyEval_EvalCodeWithName + 0x2f9 (0x7fda6eb816f9 in /home/alan/anaconda3/bin/python)\nframe #10: 
_PyFunction_FastCallKeywords + 0x325 (0x7fda6ebc78b5 in /home/alan/anaconda3/bin/python)\nframe #11: _PyEval_EvalFrameDefault + 0x4b09 (0x7fda6ec2c6c9 in /home/alan/anaconda3/bin/python)\nframe #12: _PyFunction_FastCallKeywords + 0xfb (0x7fda6ebc768b in /home/alan/anaconda3/bin/python)\nframe #13: _PyEval_EvalFrameDefault + 0x416 (0x7fda6ec27fd6 in /home/alan/anaconda3/bin/python)\nframe #14: _PyEval_EvalCodeWithName + 0x2f9 (0x7fda6eb816f9 in /home/alan/anaconda3/bin/python)\nframe #15: PyEval_EvalCodeEx + 0x44 (0x7fda6eb825f4 in /home/alan/anaconda3/bin/python)\nframe #16: PyEval_EvalCode + 0x1c (0x7fda6eb8261c in /home/alan/anaconda3/bin/python)\nframe #17: <unknown function> + 0x1d08bd (0x7fda6ec378bd in /home/alan/anaconda3/bin/python)\nframe #18: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #19: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #20: _PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #21: _PyGen_Send + 0x2a2 (0x7fda6ebdde42 in /home/alan/anaconda3/bin/python)\nframe #22: _PyEval_EvalFrameDefault + 0x1a87 (0x7fda6ec29647 in /home/alan/anaconda3/bin/python)\nframe #23: _PyGen_Send + 0x2a2 (0x7fda6ebdde42 in /home/alan/anaconda3/bin/python)\nframe #24: _PyEval_EvalFrameDefault + 0x1a87 (0x7fda6ec29647 in /home/alan/anaconda3/bin/python)\nframe #25: _PyGen_Send + 0x2a2 (0x7fda6ebdde42 in /home/alan/anaconda3/bin/python)\nframe #26: _PyMethodDef_RawFastCallKeywords + 0x8d (0x7fda6ebc7f3d in /home/alan/anaconda3/bin/python)\nframe #27: _PyMethodDescr_FastCallKeywords + 0x4f (0x7fda6ebdcc6f in /home/alan/anaconda3/bin/python)\nframe #28: _PyEval_EvalFrameDefault + 0x4c5c (0x7fda6ec2c81c in /home/alan/anaconda3/bin/python)\nframe #29: _PyFunction_FastCallKeywords + 0xfb (0x7fda6ebc768b in /home/alan/anaconda3/bin/python)\nframe #30: _PyEval_EvalFrameDefault + 0x416 (0x7fda6ec27fd6 in 
/home/alan/anaconda3/bin/python)\nframe #31: _PyFunction_FastCallKeywords + 0xfb (0x7fda6ebc768b in /home/alan/anaconda3/bin/python)\nframe #32: _PyEval_EvalFrameDefault + 0x6a0 (0x7fda6ec28260 in /home/alan/anaconda3/bin/python)\nframe #33: _PyEval_EvalCodeWithName + 0x2f9 (0x7fda6eb816f9 in /home/alan/anaconda3/bin/python)\nframe #34: _PyFunction_FastCallDict + 0x400 (0x7fda6eb82a30 in /home/alan/anaconda3/bin/python)\nframe #35: _PyObject_Call_Prepend + 0x63 (0x7fda6eb9d943 in /home/alan/anaconda3/bin/python)\nframe #36: PyObject_Call + 0x6e (0x7fda6eb90b9e in /home/alan/anaconda3/bin/python)\nframe #37: _PyEval_EvalFrameDefault + 0x1e35 (0x7fda6ec299f5 in /home/alan/anaconda3/bin/python)\nframe #38: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #39: _PyFunction_FastCallKeywords + 0x387 (0x7fda6ebc7917 in /home/alan/anaconda3/bin/python)\nframe #40: _PyEval_EvalFrameDefault + 0x14e6 (0x7fda6ec290a6 in /home/alan/anaconda3/bin/python)\nframe #41: <unknown function> + 0x176909 (0x7fda6ebdd909 in /home/alan/anaconda3/bin/python)\nframe #42: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #43: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #44: _PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #45: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #46: _PyFunction_FastCallKeywords + 0x387 (0x7fda6ebc7917 in /home/alan/anaconda3/bin/python)\nframe #47: _PyEval_EvalFrameDefault + 0x6a0 (0x7fda6ec28260 in /home/alan/anaconda3/bin/python)\nframe #48: <unknown function> + 0x176909 (0x7fda6ebdd909 in /home/alan/anaconda3/bin/python)\nframe #49: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #50: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #51: 
_PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #52: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #53: _PyFunction_FastCallKeywords + 0x387 (0x7fda6ebc7917 in /home/alan/anaconda3/bin/python)\nframe #54: _PyEval_EvalFrameDefault + 0x416 (0x7fda6ec27fd6 in /home/alan/anaconda3/bin/python)\nframe #55: <unknown function> + 0x176909 (0x7fda6ebdd909 in /home/alan/anaconda3/bin/python)\nframe #56: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #57: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #58: _PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #59: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #60: _PyFunction_FastCallDict + 0x1d5 (0x7fda6eb82805 in /home/alan/anaconda3/bin/python)\nframe #61: _PyObject_Call_Prepend + 0x63 (0x7fda6eb9d943 in /home/alan/anaconda3/bin/python)\nframe #62: PyObject_Call + 0x6e (0x7fda6eb90b9e in /home/alan/anaconda3/bin/python)\nframe #63: _PyEval_EvalFrameDefault + 0x1e35 (0x7fda6ec299f5 in /home/alan/anaconda3/bin/python)\n",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-22-1c4f60868c1b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;31m# train_dataloader, val_dataloader, test_texts, word2idx = get_data(16)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mmodels\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload_models\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mr'common/models_'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-21-8b7326135f23>\u001b[0m in \u001b[0;36mload_models\u001b[0;34m(models_path)\u001b[0m\n\u001b[1;32m      3\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mroot\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdirs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfiles\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwalk\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodels_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mfile\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mfiles\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m             \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mroot\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'/'\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      6\u001b[0m             \u001b[0mmodels\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__class__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      7\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mmodels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.7/site-packages/torch/jit/__init__.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(f, map_location, _extra_files)\u001b[0m\n\u001b[1;32m    159\u001b[0m             \u001b[0;34m(\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mversion_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0municode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    160\u001b[0m             \u001b[0;34m(\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mversion_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m3\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpathlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m         \u001b[0mcpp_module\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_C\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_ir_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcu\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmap_location\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_extra_files\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    162\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    163\u001b[0m         \u001b[0mcpp_module\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_C\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_ir_module_from_buffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcu\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmap_location\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_extra_files\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mRuntimeError\u001b[0m: version_number <= kMaxSupportedFileFormatVersion INTERNAL ASSERT FAILED at /pytorch/caffe2/serialize/inline_container.cc:131, please report a bug to PyTorch. Attempted to read a PyTorch file with version 3, but the maximum supported version for reading is 1. Your PyTorch installation may be too old. (init at /pytorch/caffe2/serialize/inline_container.cc:131)\nframe #0: c10::Error::Error(c10::SourceLocation, std::string const&) + 0x33 (0x7fda4805c273 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libc10.so)\nframe #1: caffe2::serialize::PyTorchStreamReader::init() + 0x1e9a (0x7fd9f7c11a4a in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so)\nframe #2: caffe2::serialize::PyTorchStreamReader::PyTorchStreamReader(std::string const&) + 0x60 (0x7fd9f7c12cb0 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so)\nframe #3: torch::jit::import_ir_module(std::shared_ptr<torch::jit::script::CompilationUnit>, std::string const&, c10::optional<c10::Device>, std::unordered_map<std::string, std::string, std::hash<std::string>, std::equal_to<std::string>, std::allocator<std::pair<std::string const, std::string> > >&) + 0x38 (0x7fd9f8ce4368 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so)\nframe #4: <unknown function> + 0x4d80fc (0x7fda3acbd0fc in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch_python.so)\nframe #5: <unknown function> + 0x1d4804 (0x7fda3a9b9804 in /home/alan/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch_python.so)\nframe #6: _PyMethodDef_RawFastCallKeywords + 0x264 (0x7fda6ebc8114 in /home/alan/anaconda3/bin/python)\nframe #7: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #8: _PyEval_EvalFrameDefault + 0x4e9d (0x7fda6ec2ca5d in /home/alan/anaconda3/bin/python)\nframe #9: _PyEval_EvalCodeWithName + 0x2f9 (0x7fda6eb816f9 in /home/alan/anaconda3/bin/python)\nframe 
#10: _PyFunction_FastCallKeywords + 0x325 (0x7fda6ebc78b5 in /home/alan/anaconda3/bin/python)\nframe #11: _PyEval_EvalFrameDefault + 0x4b09 (0x7fda6ec2c6c9 in /home/alan/anaconda3/bin/python)\nframe #12: _PyFunction_FastCallKeywords + 0xfb (0x7fda6ebc768b in /home/alan/anaconda3/bin/python)\nframe #13: _PyEval_EvalFrameDefault + 0x416 (0x7fda6ec27fd6 in /home/alan/anaconda3/bin/python)\nframe #14: _PyEval_EvalCodeWithName + 0x2f9 (0x7fda6eb816f9 in /home/alan/anaconda3/bin/python)\nframe #15: PyEval_EvalCodeEx + 0x44 (0x7fda6eb825f4 in /home/alan/anaconda3/bin/python)\nframe #16: PyEval_EvalCode + 0x1c (0x7fda6eb8261c in /home/alan/anaconda3/bin/python)\nframe #17: <unknown function> + 0x1d08bd (0x7fda6ec378bd in /home/alan/anaconda3/bin/python)\nframe #18: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #19: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #20: _PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #21: _PyGen_Send + 0x2a2 (0x7fda6ebdde42 in /home/alan/anaconda3/bin/python)\nframe #22: _PyEval_EvalFrameDefault + 0x1a87 (0x7fda6ec29647 in /home/alan/anaconda3/bin/python)\nframe #23: _PyGen_Send + 0x2a2 (0x7fda6ebdde42 in /home/alan/anaconda3/bin/python)\nframe #24: _PyEval_EvalFrameDefault + 0x1a87 (0x7fda6ec29647 in /home/alan/anaconda3/bin/python)\nframe #25: _PyGen_Send + 0x2a2 (0x7fda6ebdde42 in /home/alan/anaconda3/bin/python)\nframe #26: _PyMethodDef_RawFastCallKeywords + 0x8d (0x7fda6ebc7f3d in /home/alan/anaconda3/bin/python)\nframe #27: _PyMethodDescr_FastCallKeywords + 0x4f (0x7fda6ebdcc6f in /home/alan/anaconda3/bin/python)\nframe #28: _PyEval_EvalFrameDefault + 0x4c5c (0x7fda6ec2c81c in /home/alan/anaconda3/bin/python)\nframe #29: _PyFunction_FastCallKeywords + 0xfb (0x7fda6ebc768b in /home/alan/anaconda3/bin/python)\nframe #30: _PyEval_EvalFrameDefault + 0x416 (0x7fda6ec27fd6 in 
/home/alan/anaconda3/bin/python)\nframe #31: _PyFunction_FastCallKeywords + 0xfb (0x7fda6ebc768b in /home/alan/anaconda3/bin/python)\nframe #32: _PyEval_EvalFrameDefault + 0x6a0 (0x7fda6ec28260 in /home/alan/anaconda3/bin/python)\nframe #33: _PyEval_EvalCodeWithName + 0x2f9 (0x7fda6eb816f9 in /home/alan/anaconda3/bin/python)\nframe #34: _PyFunction_FastCallDict + 0x400 (0x7fda6eb82a30 in /home/alan/anaconda3/bin/python)\nframe #35: _PyObject_Call_Prepend + 0x63 (0x7fda6eb9d943 in /home/alan/anaconda3/bin/python)\nframe #36: PyObject_Call + 0x6e (0x7fda6eb90b9e in /home/alan/anaconda3/bin/python)\nframe #37: _PyEval_EvalFrameDefault + 0x1e35 (0x7fda6ec299f5 in /home/alan/anaconda3/bin/python)\nframe #38: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #39: _PyFunction_FastCallKeywords + 0x387 (0x7fda6ebc7917 in /home/alan/anaconda3/bin/python)\nframe #40: _PyEval_EvalFrameDefault + 0x14e6 (0x7fda6ec290a6 in /home/alan/anaconda3/bin/python)\nframe #41: <unknown function> + 0x176909 (0x7fda6ebdd909 in /home/alan/anaconda3/bin/python)\nframe #42: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #43: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #44: _PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #45: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #46: _PyFunction_FastCallKeywords + 0x387 (0x7fda6ebc7917 in /home/alan/anaconda3/bin/python)\nframe #47: _PyEval_EvalFrameDefault + 0x6a0 (0x7fda6ec28260 in /home/alan/anaconda3/bin/python)\nframe #48: <unknown function> + 0x176909 (0x7fda6ebdd909 in /home/alan/anaconda3/bin/python)\nframe #49: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #50: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #51: 
_PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #52: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #53: _PyFunction_FastCallKeywords + 0x387 (0x7fda6ebc7917 in /home/alan/anaconda3/bin/python)\nframe #54: _PyEval_EvalFrameDefault + 0x416 (0x7fda6ec27fd6 in /home/alan/anaconda3/bin/python)\nframe #55: <unknown function> + 0x176909 (0x7fda6ebdd909 in /home/alan/anaconda3/bin/python)\nframe #56: _PyMethodDef_RawFastCallKeywords + 0xe9 (0x7fda6ebc7f99 in /home/alan/anaconda3/bin/python)\nframe #57: _PyCFunction_FastCallKeywords + 0x21 (0x7fda6ebc8231 in /home/alan/anaconda3/bin/python)\nframe #58: _PyEval_EvalFrameDefault + 0x4764 (0x7fda6ec2c324 in /home/alan/anaconda3/bin/python)\nframe #59: _PyEval_EvalCodeWithName + 0x5da (0x7fda6eb819da in /home/alan/anaconda3/bin/python)\nframe #60: _PyFunction_FastCallDict + 0x1d5 (0x7fda6eb82805 in /home/alan/anaconda3/bin/python)\nframe #61: _PyObject_Call_Prepend + 0x63 (0x7fda6eb9d943 in /home/alan/anaconda3/bin/python)\nframe #62: PyObject_Call + 0x6e (0x7fda6eb90b9e in /home/alan/anaconda3/bin/python)\nframe #63: _PyEval_EvalFrameDefault + 0x1e35 (0x7fda6ec299f5 in /home/alan/anaconda3/bin/python)\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "source": [
    "# Class names of the loaded base models (last expression is displayed).\n",
    "models_names = models.keys()\n",
    "models_names"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "dict_keys(['AttentionCNN', 'BiLSTMAttn', 'BiLSTM', 'DeepCNN', 'TextCNN1d', 'TextCNN2d'])"
      ]
     },
     "metadata": {},
     "execution_count": 31
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "source": [
    "# Freeze each base model's parameters, then move it to the target device.\n",
    "# BUGFIX: the device-transfer comprehension previously iterated\n",
    "# models.values() again, silently discarding the frozen copies produced on\n",
    "# the line above.\n",
    "# NOTE(review): the Helper import is commented out in the first cell; this\n",
    "# cell relies on kernel state from an earlier session - confirm the import.\n",
    "model_list = [Helper.freeze_parameters(model) for model in models.values()]\n",
    "model_list = [model.to(device) for model in model_list]\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "source": [
    "class Ensemble(nn.Module):\n",
    "    \"\"\"Stacking ensemble over the base models in ``model_list``.\n",
    "\n",
    "    Concatenates the base models' output logits and maps them through a\n",
    "    fully-connected head back to 35 class logits.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model_list, dropout=0.5):\n",
    "        super(Ensemble, self).__init__()\n",
    "        self.model_list = model_list\n",
    "        num_class = 35\n",
    "\n",
    "        # NOTE(review): fc1, fc2 and dropout are only referenced by the\n",
    "        # commented-out alternative head in forward(); the active path is\n",
    "        # fc1_cat -> relu -> fc3.\n",
    "        self.fc1 = nn.Linear(num_class, num_class * 2)\n",
    "        self.fc1_cat = nn.Linear(num_class * len(model_list), num_class * 2)\n",
    "\n",
    "        self.fc2 = nn.Linear(num_class * 4, num_class * 2)\n",
    "        self.fc3 = nn.Linear(num_class * 2, num_class)\n",
    "\n",
    "        self.dropout = nn.Dropout(p=dropout)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # One logits tensor per base model; each is assumed to be\n",
    "        # (batch, num_class) so the concat below matches fc1_cat's input size.\n",
    "        out_list = [model(x) for model in self.model_list]\n",
    "\n",
    "        # Concatenate along the class dimension.\n",
    "        cat_x = torch.cat(tuple(out_list), 1)\n",
    "        cat_x_2 = F.relu(self.fc1_cat(cat_x))\n",
    "\n",
    "        # out = out_list[0]\n",
    "        # for out_ in out_list[1:]:\n",
    "        #     out += out_\n",
    "\n",
    "        # x = self.dropout(F.relu(self.fc1(out)))\n",
    "        # cat_x_2 = torch.cat((x, cat_x), 1)\n",
    "        # cat_x_2 = F.relu(self.fc2(cat_x_2))\n",
    "        logits = self.fc3(cat_x_2)\n",
    "        return logits\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "source": [
    "dropout = 0.1\n",
    "lr = 0.2\n",
    "epochs = 50\n",
    "\n",
    "# BUGFIX: this was `Ensemble(model_list, dropout==dropout)`, which passed the\n",
    "# boolean True (i.e. p=1.0) as the dropout rate instead of the value above.\n",
    "enb_model = Ensemble(model_list, dropout=dropout)\n",
    "enb_model.to(device)\n",
    "enb_model.apply(weights_init_uniform_rule)\n",
    "optimizer = optim.Adam(enb_model.parameters(), lr=lr)\n",
    "# NOTE(review): this StepLR is never stepped; train() builds its own\n",
    "# ReduceLROnPlateau scheduler, so this line is effectively dead code.\n",
    "scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)\n",
    "\n",
    "\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "train(enb_model, \n",
    "      optimizer, \n",
    "      loss_fn, \n",
    "      train_dataloader, \n",
    "      val_dataloader, device=device, epochs=epochs, patience=5)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      " Epoch  |  Train Loss  |  Val Loss  |  Val Acc  |   Val F1   | Learning Rate |  Elapsed \n",
      "---------------------------------------------------------------------------------------\n",
      "   1    |  12.688980   |  3.159897  |   9.99    |  0.0168   |  0.2000   |   17.69  \n",
      "   2    |   5.786403   |  3.134591  |   14.22   |  0.0231   |  0.2000   |   17.59  \n",
      "   3    |   3.170321   |  3.197419  |   6.18    |  0.0102   |  0.2000   |   17.49  \n",
      "   4    |   6.744385   |  3.158462  |   14.22   |  0.0231   |  0.2000   |   17.49  \n",
      "   5    |   3.154043   |  3.141069  |   14.22   |  0.0231   |  0.2000   |   17.60  \n",
      "   6    |   3.151886   |  3.163415  |   6.18    |  0.0106   |  0.2000   |   17.54  \n",
      "   7    |   3.158820   |  3.136465  |   14.22   |  0.0231   |  0.2000   |   17.51  \n",
      "Early stopping\n",
      "\n",
      "\n",
      "Training complete! Best results: [f1] 0.02 [accuracy] 14.22%.\n"
     ]
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "(0.023107845547124948, 14.224137931034482)"
      ]
     },
     "metadata": {},
     "execution_count": 81
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "source": [
    "# Load a single trained TextCNN1d checkpoint (full pickled module).\n",
    "# NOTE(review): torch.load unpickles arbitrary code - only load trusted files.\n",
    "models_ = torch.load(r'common/models/TextCNN1d_[f1]05611_[ep]025_[lr]50_[gram]1_[Emd]False.model')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Predict a label for every test text and write the submission CSV.\n",
    "# Building a record list and constructing the DataFrame once is much faster\n",
    "# than growing it row-by-row with output.loc[i] inside the loop.\n",
    "records = []\n",
    "for i, text in tqdm(enumerate(test_texts), total=len(test_texts)):\n",
    "    label = reversed_label[predict(text, model=models_, word2idx=word2idx).numpy()[0]]\n",
    "    records.append({'id': i, 'label': label})\n",
    "output = pd.DataFrame(records, columns=['id', 'label'])\n",
    "output.to_csv(r'out.csv', index=False)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "6004it [00:30, 197.08it/s]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "source": [
    "# Load the dataset so test_texts and word2idx exist for the prediction cell\n",
    "# (execution counts show this cell was run out of order).\n",
    "train_texts, input_ids, test_texts, labels, word2idx, embeddings = load_data(\n",
    "            gram=1, max_len=64)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "100%|██████████| 3456/3456 [00:00<00:00, 70748.83it/s]"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Loading pretrained vectors...\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "source": [
    "from sklearn.ensemble import VotingClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.naive_bayes import CategoricalNB\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "from sklearn.experimental import enable_hist_gradient_boosting\n",
    "from sklearn.ensemble import ExtraTreesClassifier\n",
    "from sklearn.svm import LinearSVC\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "\n",
    "import lightgbm as lgb\n",
    "\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn import metrics\n",
    "import matplotlib.pyplot as plt \n",
    "\n",
    "import pickle"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "source": [
    "\n",
    "weighted = {i: v for i, v in zip(range(0, 35), (input_ids.shape[0] / (35 * np.bincount(labels))))}\n",
    "mlp = MLPClassifier(random_state=31)\n",
    "linsvm = LinearSVC(random_state=31, multi_class=\"ovr\", class_weight=weighted)\n",
    "etc = ExtraTreesClassifier(n_estimators=100, min_samples_split=2, random_state=31, class_weight=weighted)\n",
    "ada = AdaBoostClassifier(n_estimators=100, random_state=31)\n",
    "gb = GradientBoostingClassifier(random_state=31, n_estimators=100)\n",
    "dt = DecisionTreeClassifier(random_state=31, class_weight=weighted)\n",
    "svc = SVC(class_weight=weighted)\n",
    "rf = RandomForestClassifier(max_depth=5, \n",
    "                            random_state=31, class_weight=weighted,\n",
    "                            n_jobs=-1)\n",
    "cnb = CategoricalNB()\n",
    "lr = LogisticRegression(random_state=31,\n",
    "                        multi_class='ovr', n_jobs=-1, class_weight=weighted)\n",
    "knn = KNeighborsClassifier(n_neighbors=10, n_jobs=-1)\n",
    "lgbm = lgb.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,\n",
    "                         importance_type='split', learning_rate=0.01, max_depth=-1,\n",
    "                         min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,\n",
    "                         n_estimators=100, n_jobs=-1, num_leaves=31, objective=None,\n",
    "                         random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,\n",
    "                         subsample=1.0, subsample_for_bin=2000, subsample_freq=0)\n",
    "\n",
    "def nn_predict(model, input_):\n",
    "    return torch.argmax(model.to(\"cpu\")(torch.tensor(input_)), dim=1).flatten().numpy()\n",
    "\n",
    "def predict(tokens, model, word2idx, flip=True, max_len=64):\n",
    "    \"\"\"Predict probability that a review is positive.\"\"\"\n",
    "    model = model.to(\"cpu\")\n",
    "\n",
    "    # Tokenize, pad and encode text\n",
    "    padded_tokens = tokens + ['<pad>'] * (max_len - len(tokens))\n",
    "\n",
    "    if len(padded_tokens) > max_len:\n",
    "        padded_tokens = padded_tokens[:max_len]\n",
    "\n",
    "    if flip:\n",
    "        padded_tokens += ['<pad>'] * 2 + padded_tokens[::-1]\n",
    "\n",
    "    input_id = [word2idx.get(token, word2idx['<unk>'])\n",
    "                for token in padded_tokens]\n",
    "\n",
    "    # Compute logits\n",
    "    logits = model(torch.tensor([input_id]))\n",
    "\n",
    "    return torch.argmax(logits, dim=1).flatten()"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "source": [
    "models"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "{}"
      ]
     },
     "metadata": {},
     "execution_count": 17
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "source": [
    "del models['BiLSTMAttn']"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 170,
   "source": [
    "X = input_ids\n",
    "y = labels\n",
    "kf = KFold(n_splits=5, shuffle=True, random_state=31)\n",
    "\n",
    "\n",
    "for k, (train_index, test_index) in enumerate(kf.split(X)):\n",
    "    X_train, X_test = X[train_index], X[test_index]\n",
    "    y_train, y_test = y[train_index], y[test_index]\n",
    "\n",
    "    nn_trans_train = np.array([nn_predict(model, X_train) for model in models.values()]).T\n",
    "    nn_trans_test = np.array([nn_predict(model, X_test) for model in models.values()]).T\n",
    "\n",
    "\n",
    "\n",
    "    eclf1 = VotingClassifier(estimators=[('gb', gb), ('mlp', mlp),\n",
    "                                         ('linsvm', linsvm),\n",
    "                                         ('ada', ada),\n",
    "                                         ('etc', etc),\n",
    "                                         ('dt', dt),\n",
    "                                         ('svc', svc),\n",
    "                                         ('rf', rf), \n",
    "                                         ('cnb', cnb), \n",
    "                                         ('lr', lr), \n",
    "                                         ('knn', knn),\n",
    "                                         ('lgb', lgbm)\n",
    "                                         ],\n",
    "                             n_jobs=-1, verbose=True)\n",
    "    eclf1 = eclf1.fit(nn_trans_train, y_train)\n",
    "    y_pred = eclf1.predict(nn_trans_test)\n",
    "    f1 = f1_score(y_test, y_pred, average='macro')\n",
    "    print('[{}] f1: {}'.format(k, f1))\n",
    "    # save_model(eclf1, './voting_model_{}.pkl'.format(k))\n",
    "\n",
    "# [0] f1: 0.7451720931259481\n",
    "# [1] f1: 0.7166397128535958\n",
    "# [2] f1: 0.7155343590697362\n",
    "# [3] f1: 0.7358101693481943\n",
    "# [4] f1: 0.6986469405995048\n",
    "\n",
    "# [0] f1: 0.6887550961532423\n",
    "# [1] f1: 0.6596611472242127\n",
    "# [2] f1: 0.6742436663988278\n",
    "# [3] f1: 0.690392245889996\n",
    "# [4] f1: 0.6748518325883921"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[0] f1: 0.7212099336850472\n",
      "[1] f1: 0.6958660531615133\n",
      "[2] f1: 0.7177110101210198\n",
      "[3] f1: 0.7233755541588586\n",
      "[4] f1: 0.6927858222645139\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "output = pd.DataFrame(columns=['id', 'label'])\n",
    "for i, tokens in tqdm(enumerate(test_texts)):\n",
    "    nn_labels = []\n",
    "    for model in models.values():\n",
    "        nn_labels.append(predict(tokens, model, word2idx, flip=True, max_len=64).numpy()[0])\n",
    "    pred = reversed_label[eclf1.predict([nn_labels])[0]]\n",
    "    output.loc[i] = [i, pred]\n",
    "output.to_csv(r'./ensemble_res3.csv', index=False)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 190,
   "source": [
    "kf = KFold(n_splits=5, shuffle=True, random_state=256)\n",
    "kdict = {}\n",
    "for k, (train_index, test_index) in enumerate(kf.split(train_texts)):\n",
    "    kdict[k] = {'train_index':train_index.tolist(), 'valid_index':test_index.tolist()}\n",
    "save_json(r'index.json', kdict)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "index.json saved.\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.7.6",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.7.6 64-bit ('base': conda)"
  },
  "interpreter": {
   "hash": "7b4c34fa5edc2b5c200e84280da452af41185f443fff3e767a73b82cf30c2550"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}