{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "source": [
    "import os\n",
    "from common.configs.tools import reversed_label, set_seed, predict, weights_init_uniform_rule, seed_num, save_json, load_json\n",
    "# from common.configs import Helper\n",
    "from torch.autograd import Variable\n",
    "from data import to_data_loader, load_data\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import random\n",
    "import torch.nn.init as init\n",
    "from tqdm import tqdm\n",
    "from train import EarlyStopping\n",
    "from train import evaluate\n",
    "from torch.optim import lr_scheduler\n",
    "from tensorboardX import SummaryWriter\n",
    "import time\n",
    "\n",
    "writer = SummaryWriter('./Resultlog')\n",
    "\n",
    "torch.manual_seed(seed_num)\n",
    "random.seed(seed_num)\n",
    "\n",
    "if torch.cuda.is_available():\n",
    "    print('gpu is available: {}'.format(torch.cuda.get_device_name(0)))\n",
    "    print('device count: {}'.format(torch.cuda.device_count()))\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print('Using device:', device)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "gpu is available: TITAN RTX\n",
      "device count: 3\n",
      "Using device: cuda\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "source": [
    "def load_models(models_path):\n",
    "    models = {}\n",
    "    for root, dirs, files in os.walk(models_path):\n",
    "        for file in files:\n",
    "            model = torch.load(root+'/'+file)\n",
    "            models[model.__class__.__name__] = model\n",
    "    return models\n",
    "\n",
    "def get_data(batch_size):\n",
    "    train_texts, input_ids, test_texts, labels, word2idx, embeddings = load_data(\n",
    "            gram=1, max_len=64)\n",
    "\n",
    "    X_train, X_val, y_train, y_val = train_test_split(\n",
    "        input_ids, labels, test_size=0.1, random_state=42)\n",
    "\n",
    "\n",
    "    train_dataloader, val_dataloader = to_data_loader(\n",
    "                X_train.astype(float), X_val.astype(float), y_train, y_val, batch_size=batch_size)\n",
    "    return train_dataloader, val_dataloader, test_texts, word2idx\n",
    "\n",
    "def train(model, optimizer, loss_fn, train_dataloader, val_dataloader=None, device=torch.device('cpu'), epochs=10, patience=5):\n",
    "    # Tracking best validation accuracy\n",
    "    best_accuracy = 0\n",
    "    best_f1 = 0\n",
    "    early_stopping = EarlyStopping(\n",
    "        path=None, savecp=False, patience=patience, verbose=False)\n",
    "\n",
    "    # Start training loop\n",
    "    print(f\"{'Epoch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Val F1':^10} | {'Learning Rate':^10} | {'Elapsed':^9}\")\n",
    "    print(\"-\"*87)\n",
    "\n",
    "    valid_epochs_loss = []\n",
    "\n",
    "    scheduler = lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, 'min', factor=0.5, patience=5, min_lr=0.0001)\n",
    "\n",
    "    for epoch_i in range(epochs):\n",
    "        # =======================================\n",
    "        #               Training\n",
    "        # =======================================\n",
    "\n",
    "        # Tracking time and loss\n",
    "        t0_epoch = time.time()\n",
    "        total_loss = 0\n",
    "\n",
    "        # Put the model into the training mode\n",
    "        model.train()\n",
    "\n",
    "        for step, batch in enumerate(train_dataloader):\n",
    "            # Load batch to GPU\n",
    "            b_input_ids, b_labels = tuple(t.to(device) for t in batch)\n",
    "\n",
    "            # Zero out any previously calculated gradients\n",
    "            model.zero_grad()\n",
    "\n",
    "            # Perform a forward pass. This will return logits.\n",
    "            # b_input_ids = b_input_ids.type(torch.LongTensor)\n",
    "            # b_labels = b_labels.type(torch.LongTensor)\n",
    "            logits = model(b_input_ids.to(device).long())\n",
    "\n",
    "            # Compute loss and accumulate the loss values\n",
    "\n",
    "            loss = loss_fn(logits, b_labels)\n",
    "            total_loss += loss.item()\n",
    "\n",
    "            # Perform a backward pass to calculate gradients\n",
    "            loss.backward(retain_graph=True)\n",
    "\n",
    "            # Update parameters\n",
    "            optimizer.step()\n",
    "\n",
    "        learning_rate = optimizer.param_groups[-1]['lr']\n",
    "\n",
    "        # Calculate the average loss over the entire training data\n",
    "        avg_train_loss = total_loss / len(train_dataloader)\n",
    "\n",
    "        writer.add_scalar(\"train loss\", avg_train_loss, epoch_i)\n",
    "\n",
    "        for name, weight in model.named_parameters():\n",
    "            writer.add_histogram(name, weight, epoch_i)\n",
    "            # writer.add_histogram(f'{name}.grad', weight.grad, epoch_i)\n",
    "\n",
    "        # =======================================\n",
    "        #               Evaluation\n",
    "        # =======================================\n",
    "        if val_dataloader is not None:\n",
    "            # After the completion of each training epoch, measure the model's\n",
    "            # performance on our validation set.\n",
    "            val_loss, val_accuracy, val_f1 = evaluate(\n",
    "                model, val_dataloader, loss_fn, device)\n",
    "\n",
    "            # Log the real validation loss. Previously avg_train_loss was\n",
    "            # logged under the 'val loss' tag, so both TensorBoard curves\n",
    "            # were identical and the val curve was meaningless.\n",
    "            writer.add_scalar(\"val loss\", val_loss, epoch_i)\n",
    "\n",
    "            # Track the best accuracy\n",
    "            if val_accuracy > best_accuracy:\n",
    "                best_accuracy = val_accuracy\n",
    "\n",
    "            if val_f1 > best_f1:\n",
    "                best_f1 = val_f1\n",
    "\n",
    "            valid_epochs_loss.append(val_loss)\n",
    "\n",
    "            # Print performance over the entire training data\n",
    "            time_elapsed = time.time() - t0_epoch\n",
    "            scheduler.step(val_loss)\n",
    "            print(f\"{epoch_i + 1:^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {val_f1:^9.4f} | {learning_rate:^9.4f} | {time_elapsed:^9.2f}\")\n",
    "\n",
    "        # Guard: early stopping only makes sense when validation ran this\n",
    "        # epoch. With val_dataloader=None, valid_epochs_loss stays empty and\n",
    "        # valid_epochs_loss[-1] would raise IndexError.\n",
    "        if valid_epochs_loss:\n",
    "            early_stopping(\n",
    "                val_loss=valid_epochs_loss[-1], model=model)\n",
    "            if early_stopping.early_stop:\n",
    "                print(\"Early stopping\")\n",
    "                break\n",
    "\n",
    "    print(\"\\n\")\n",
    "    print(\n",
    "        f\"Training complete! Best results: [f1] {best_f1:.2f} [accuracy] {best_accuracy:.2f}%.\")\n",
    "    return best_f1, best_accuracy"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "source": [
    "# train_dataloader, val_dataloader, test_texts, word2idx = get_data(16)\n",
    "models = load_models(r'common/ms')\n",
    "bert_result = load_json(r'bert_res.json')\n",
    "train_valid_index = load_json(r'index.json')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "source": [
    "bert_result.keys()"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "dict_keys(['test_res', 'valid_res'])"
      ]
     },
     "metadata": {},
     "execution_count": 20
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "models_names = models.keys()\n",
    "models_names"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "dict_keys(['TextCNN1d', 'BiLSTM', 'RCNN', 'AttentionCNN', 'TextCNN2d', 'DeepCNN'])"
      ]
     },
     "metadata": {},
     "execution_count": 6
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "source": [
    "model_list = [Helper.freeze_parameters(model) for model in models.values()]\n",
    "# Move the *frozen* models to the device. Iterating models.values() again\n",
    "# here would silently discard the freeze step on the previous line.\n",
    "model_list = [model.to(device) for model in model_list]\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "source": [
    "import pandas as pd\n",
    "output = pd.DataFrame(columns=['id', 'label'])\n",
    "\n",
    "for i, text in tqdm(enumerate(test_texts)):\n",
    "    label = reversed_label[predict(text, model=models_, word2idx=word2idx).numpy()[0]]\n",
    "    output.loc[i] = [i, label]\n",
    "output.to_csv(r'out.csv', index=False)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "6004it [00:30, 197.08it/s]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "source": [
    "train_texts, input_ids, test_texts, labels, word2idx, embeddings = load_data(\n",
    "            gram=1, max_len=64)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "100%|██████████| 3456/3456 [00:00<00:00, 131993.39it/s]"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Loading pretrained vectors...\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "source": [
    "from sklearn.ensemble import VotingClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.naive_bayes import CategoricalNB\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "from sklearn.experimental import enable_hist_gradient_boosting\n",
    "from sklearn.ensemble import ExtraTreesClassifier\n",
    "from sklearn.svm import LinearSVC\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "\n",
    "import lightgbm as lgb\n",
    "\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn import metrics\n",
    "import matplotlib.pyplot as plt \n",
    "\n",
    "import pickle"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "source": [
    "\n",
    "weighted = {i: v for i, v in zip(range(0, 35), (input_ids.shape[0] / (35 * np.bincount(labels))))}\n",
    "mlp = MLPClassifier(random_state=31)\n",
    "linsvm = LinearSVC(random_state=31, multi_class=\"ovr\", class_weight=weighted)\n",
    "etc = ExtraTreesClassifier(n_estimators=100, min_samples_split=2, random_state=31, class_weight=weighted)\n",
    "ada = AdaBoostClassifier(n_estimators=100, random_state=31)\n",
    "gb = GradientBoostingClassifier(random_state=31, n_estimators=100)\n",
    "dt = DecisionTreeClassifier(random_state=31, class_weight=weighted)\n",
    "svc = SVC(class_weight=weighted)\n",
    "rf = RandomForestClassifier(max_depth=5, \n",
    "                            random_state=31, class_weight=weighted,\n",
    "                            n_jobs=-1)\n",
    "cnb = CategoricalNB()\n",
    "lr = LogisticRegression(random_state=31,\n",
    "                        multi_class='ovr', n_jobs=-1, class_weight=weighted)\n",
    "knn = KNeighborsClassifier(n_neighbors=10, n_jobs=-1)\n",
    "lgbm = lgb.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,\n",
    "                         importance_type='split', learning_rate=0.01, max_depth=-1,\n",
    "                         min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,\n",
    "                         n_estimators=100, n_jobs=-1, num_leaves=31, objective=None,\n",
    "                         random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,\n",
    "                         subsample=1.0, subsample_for_bin=2000, subsample_freq=0)\n",
    "\n",
    "def nn_predict(model, input_):\n",
    "    return torch.argmax(model.to(\"cpu\")(torch.tensor(input_)), dim=1).flatten().numpy()\n",
    "\n",
    "def predict(tokens, model, word2idx, flip=True, max_len=64):\n",
    "    \"\"\"Predict probability that a review is positive.\"\"\"\n",
    "    model = model.to(\"cpu\")\n",
    "\n",
    "    # Tokenize, pad and encode text\n",
    "    padded_tokens = tokens + ['<pad>'] * (max_len - len(tokens))\n",
    "\n",
    "    if len(padded_tokens) > max_len:\n",
    "        padded_tokens = padded_tokens[:max_len]\n",
    "\n",
    "    if flip:\n",
    "        padded_tokens += ['<pad>'] * 2 + padded_tokens[::-1]\n",
    "\n",
    "    input_id = [word2idx.get(token, word2idx['<unk>'])\n",
    "                for token in padded_tokens]\n",
    "\n",
    "    # Compute logits\n",
    "    logits = model(torch.tensor([input_id]))\n",
    "\n",
    "    return torch.argmax(logits, dim=1).flatten()"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "del models['BiLSTMAttn']"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "source": [
    "bert_result.keys()"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "dict_keys(['test_res', 'valid_res'])"
      ]
     },
     "metadata": {},
     "execution_count": 23
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "source": [
    "X = input_ids\n",
    "y = labels\n",
    "\n",
    "\n",
    "for k, index in train_valid_index.items():\n",
    "    train_index = index['train_index']\n",
    "    valid_index = index['valid_index']\n",
    "\n",
    "    X_train, X_test = X[train_index], X[valid_index]\n",
    "    y_train, y_test = y[train_index], y[valid_index]\n",
    "\n",
    "    bert_train = bert_result['train_valid'][\"train\"][str(k)]\n",
    "    bert_valid = bert_result['train_valid'][\"valid\"][str(k)]\n",
    "    print(len(bert_train))\n",
    "\n",
    "\n",
    "    # list.append() returns None, so np.array(...).T was np.array(None)\n",
    "    # and the BERT predictions were never stacked as a feature column.\n",
    "    # Use list concatenation, which returns the combined list.\n",
    "    nn_trans_train = np.array([nn_predict(model, X_train) for model in models.values()] + [bert_train]).T\n",
    "    nn_trans_test = np.array([nn_predict(model, X_test) for model in models.values()] + [bert_valid]).T\n",
    "    \n",
    "    eclf1 = VotingClassifier(estimators=[('gb', gb), ('mlp', mlp),\n",
    "                                         ('linsvm', linsvm),\n",
    "                                         ('ada', ada),\n",
    "                                         ('etc', etc),\n",
    "                                         ('dt', dt),\n",
    "                                         ('svc', svc),\n",
    "                                         ('rf', rf), \n",
    "                                         ('cnb', cnb), \n",
    "                                         ('lr', lr), \n",
    "                                         ('knn', knn),\n",
    "                                         ('lgb', lgbm)\n",
    "                                         ],\n",
    "                             n_jobs=-1, verbose=True)\n",
    "    eclf1 = eclf1.fit(nn_trans_train, y_train)\n",
    "    y_pred = eclf1.predict(nn_trans_test)\n",
    "    f1 = f1_score(y_test, y_pred, average='macro')\n",
    "    print('[{}] f1: {}'.format(k, f1))\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "11207\n",
      "(11207, 6)\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 170,
   "source": [
    "X = input_ids\n",
    "y = labels\n",
    "kf = KFold(n_splits=5, shuffle=True, random_state=31)\n",
    "\n",
    "\n",
    "for k, (train_index, test_index) in enumerate(kf.split(X)):\n",
    "    X_train, X_test = X[train_index], X[test_index]\n",
    "    y_train, y_test = y[train_index], y[test_index]\n",
    "\n",
    "    nn_trans_train = np.array([nn_predict(model, X_train) for model in models.values()]).T\n",
    "    nn_trans_test = np.array([nn_predict(model, X_test) for model in models.values()]).T\n",
    "\n",
    "\n",
    "\n",
    "    eclf1 = VotingClassifier(estimators=[('gb', gb), ('mlp', mlp),\n",
    "                                         ('linsvm', linsvm),\n",
    "                                         ('ada', ada),\n",
    "                                         ('etc', etc),\n",
    "                                         ('dt', dt),\n",
    "                                         ('svc', svc),\n",
    "                                         ('rf', rf), \n",
    "                                         ('cnb', cnb), \n",
    "                                         ('lr', lr), \n",
    "                                         ('knn', knn),\n",
    "                                         ('lgb', lgbm)\n",
    "                                         ],\n",
    "                             n_jobs=-1, verbose=True)\n",
    "    eclf1 = eclf1.fit(nn_trans_train, y_train)\n",
    "    y_pred = eclf1.predict(nn_trans_test)\n",
    "    f1 = f1_score(y_test, y_pred, average='macro')\n",
    "    print('[{}] f1: {}'.format(k, f1))\n",
    "    # save_model(eclf1, './voting_model_{}.pkl'.format(k))\n",
    "\n",
    "# [0] f1: 0.7451720931259481\n",
    "# [1] f1: 0.7166397128535958\n",
    "# [2] f1: 0.7155343590697362\n",
    "# [3] f1: 0.7358101693481943\n",
    "# [4] f1: 0.6986469405995048\n",
    "\n",
    "# [0] f1: 0.6887550961532423\n",
    "# [1] f1: 0.6596611472242127\n",
    "# [2] f1: 0.6742436663988278\n",
    "# [3] f1: 0.690392245889996\n",
    "# [4] f1: 0.6748518325883921"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[0] f1: 0.7212099336850472\n",
      "[1] f1: 0.6958660531615133\n",
      "[2] f1: 0.7177110101210198\n",
      "[3] f1: 0.7233755541588586\n",
      "[4] f1: 0.6927858222645139\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "output = pd.DataFrame(columns=['id', 'label'])\n",
    "for i, tokens in tqdm(enumerate(test_texts)):\n",
    "    nn_labels = []\n",
    "    for model in models.values():\n",
    "        nn_labels.append(predict(tokens, model, word2idx, flip=True, max_len=64).numpy()[0])\n",
    "    pred = reversed_label[eclf1.predict([nn_labels])[0]]\n",
    "    output.loc[i] = [i, pred]\n",
    "output.to_csv(r'./ensemble_res3.csv', index=False)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 190,
   "source": [
    "kf = KFold(n_splits=5, shuffle=True, random_state=256)\n",
    "kdict = {}\n",
    "for k, (train_index, test_index) in enumerate(kf.split(train_texts)):\n",
    "    kdict[k] = {'train_index':train_index.tolist(), 'valid_index':test_index.tolist()}\n",
    "save_json(r'index.json', kdict)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "index.json saved.\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.6.12",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.6.12 64-bit ('py36': conda)"
  },
  "interpreter": {
   "hash": "2e2ff3a457722a20f87dbf10c05994872f65588779806bce29ecd514429d1c22"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}