{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 30,
   "source": [
    "import os\n",
    "from common.configs.tools import label_map, reversed_label, set_seed, predict, weights_init_uniform_rule, seed_num, save_json\n",
    "from common.configs.path import paths \n",
    "from torch.autograd import Variable\n",
    "from data import to_data_loader, load_data\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import random\n",
    "import torch.nn.init as init\n",
    "from tqdm import tqdm\n",
    "from train import EarlyStopping\n",
    "from train import evaluate\n",
    "from torch.optim import lr_scheduler\n",
    "from tensorboardX import SummaryWriter\n",
    "import time\n",
    "from gensim.models import FastText\n",
    "import gensim\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "# TensorBoard writer; scalars and histograms land in ./Resultlog\n",
    "writer = SummaryWriter('./Resultlog')\n",
    "\n",
    "# Seed torch and the random module for reproducibility.\n",
    "# NOTE(review): numpy's global RNG is not seeded here -- confirm no\n",
    "# downstream code relies on np.random.\n",
    "torch.manual_seed(seed_num)\n",
    "random.seed(seed_num)\n",
    "\n",
    "if torch.cuda.is_available():\n",
    "    print('gpu is available: {}'.format(torch.cuda.get_device_name(0)))\n",
    "    print('device count: {}'.format(torch.cuda.device_count()))\n",
    "\n",
    "# Prefer GPU when present; tensors below are moved to `device`.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print('Using device:', device)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "gpu is available: NVIDIA GeForce GTX 1050 with Max-Q Design\n",
      "device count: 1\n",
      "Using device: cuda\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "source": [
    "def load_models(models_path):\n",
    "    \"\"\"Load every TorchScript model found under `models_path`.\n",
    "\n",
    "    Walks the directory tree and maps each model's class name to the\n",
    "    loaded module. NOTE(review): two files containing the same class\n",
    "    silently overwrite each other -- confirm that is acceptable.\n",
    "    \"\"\"\n",
    "    models = {}\n",
    "    for root, dirs, files in os.walk(models_path):\n",
    "        for file in files:\n",
    "            # os.path.join is portable, unlike manual '/' concatenation\n",
    "            model = torch.jit.load(os.path.join(root, file))\n",
    "            models[model.__class__.__name__] = model\n",
    "    return models\n",
    "\n",
    "def get_data(batch_size):\n",
    "    # Load encoded texts plus the pretrained embedding matrix\n",
    "    # (gram=1 -> unigram tokens, sequences padded/cut to 64).\n",
    "    train_texts, input_ids, test_texts, labels, word2idx, embeddings = load_data(\n",
    "            gram=1, max_len=64)\n",
    "\n",
    "    # Hold out 10% for validation; fixed random_state keeps the split\n",
    "    # reproducible across runs.\n",
    "    X_train, X_val, y_train, y_val = train_test_split(\n",
    "        input_ids, labels, test_size=0.1, random_state=42)\n",
    "\n",
    "\n",
    "    train_dataloader, val_dataloader = to_data_loader(\n",
    "                X_train.astype(float), X_val.astype(float), y_train, y_val, batch_size=batch_size)\n",
    "    return train_dataloader, val_dataloader, test_texts, word2idx\n",
    "\n",
    "def train(model, optimizer, loss_fn, train_dataloader, val_dataloader=None, device=torch.device('cpu'), epochs=10, patience=5):\n",
    "    \"\"\"Train `model` on `train_dataloader`, optionally validating each epoch.\n",
    "\n",
    "    Returns (best_f1, best_accuracy) observed on the validation set;\n",
    "    both stay 0 when no val_dataloader is supplied.\n",
    "    \"\"\"\n",
    "    # Tracking best validation metrics\n",
    "    best_accuracy = 0\n",
    "    best_f1 = 0\n",
    "    early_stopping = EarlyStopping(\n",
    "        path=None, savecp=False, patience=patience, verbose=False)\n",
    "\n",
    "    # Start training loop\n",
    "    print(f\"{'Epoch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Val F1':^10} | {'Learning Rate':^10} | {'Elapsed':^9}\")\n",
    "    print(\"-\"*87)\n",
    "\n",
    "    valid_epochs_loss = []\n",
    "\n",
    "    # Halve the LR after 5 stagnant epochs of val loss, floor at 1e-4.\n",
    "    scheduler = lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, 'min', factor=0.5, patience=5, min_lr=0.0001)\n",
    "\n",
    "    for epoch_i in range(epochs):\n",
    "        # =======================================\n",
    "        #               Training\n",
    "        # =======================================\n",
    "\n",
    "        # Tracking time and loss\n",
    "        t0_epoch = time.time()\n",
    "        total_loss = 0\n",
    "\n",
    "        # Put the model into the training mode\n",
    "        model.train()\n",
    "\n",
    "        for step, batch in enumerate(train_dataloader):\n",
    "            # Load batch to GPU\n",
    "            b_input_ids, b_labels = tuple(t.to(device) for t in batch)\n",
    "\n",
    "            # Zero out any previously calculated gradients\n",
    "            model.zero_grad()\n",
    "\n",
    "            # Forward pass; embedding layers need integer token ids.\n",
    "            logits = model(b_input_ids.to(device).long())\n",
    "\n",
    "            # Compute loss and accumulate the loss values\n",
    "            loss = loss_fn(logits, b_labels)\n",
    "            total_loss += loss.item()\n",
    "\n",
    "            # BUGFIX: dropped retain_graph=True -- it kept every batch's\n",
    "            # autograd graph alive and steadily grew memory; a plain\n",
    "            # backward pass is all that is needed here.\n",
    "            loss.backward()\n",
    "\n",
    "            # Update parameters\n",
    "            optimizer.step()\n",
    "\n",
    "        learning_rate = optimizer.param_groups[-1]['lr']\n",
    "\n",
    "        # Calculate the average loss over the entire training data\n",
    "        avg_train_loss = total_loss / len(train_dataloader)\n",
    "\n",
    "        writer.add_scalar(\"train loss\", avg_train_loss, epoch_i)\n",
    "\n",
    "        for name, weight in model.named_parameters():\n",
    "            writer.add_histogram(name, weight, epoch_i)\n",
    "\n",
    "        # =======================================\n",
    "        #               Evaluation\n",
    "        # =======================================\n",
    "        if val_dataloader is not None:\n",
    "            # After the completion of each training epoch, measure the\n",
    "            # model's performance on our validation set.\n",
    "            val_loss, val_accuracy, val_f1 = evaluate(\n",
    "                model, val_dataloader, loss_fn, device)\n",
    "\n",
    "            # BUGFIX: the train loss used to be logged under the\n",
    "            # \"val loss\" tag; log the real validation loss instead.\n",
    "            writer.add_scalar(\"val loss\", val_loss, epoch_i)\n",
    "\n",
    "            # Track the best metrics\n",
    "            best_accuracy = max(best_accuracy, val_accuracy)\n",
    "            best_f1 = max(best_f1, val_f1)\n",
    "\n",
    "            valid_epochs_loss.append(val_loss)\n",
    "\n",
    "            # Print performance over the entire training data\n",
    "            time_elapsed = time.time() - t0_epoch\n",
    "            scheduler.step(val_loss)\n",
    "            print(f\"{epoch_i + 1:^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {val_f1:^9.4f} | {learning_rate:^9.4f} | {time_elapsed:^9.2f}\")\n",
    "\n",
    "            # BUGFIX: early stopping moved inside the validation branch;\n",
    "            # the old code indexed valid_epochs_loss[-1] unconditionally,\n",
    "            # raising IndexError when no validation loader was given.\n",
    "            early_stopping(val_loss=val_loss, model=model)\n",
    "            if early_stopping.early_stop:\n",
    "                print(\"Early stopping\")\n",
    "                break\n",
    "\n",
    "    print(\"\\n\")\n",
    "    print(\n",
    "        f\"Training complete! Best results: [f1] {best_f1:.2f} [accuracy] {best_accuracy:.2f}%.\")\n",
    "    return best_f1, best_accuracy\n",
    "\n",
    "def most_similar(word, w2v_model, ftt_model):\n",
    "    \"\"\"Return the nearest neighbour of `word` from whichever of the\n",
    "    two embedding models reports the higher similarity score.\"\"\"\n",
    "    w2v_word, w2v_score = w2v_model.wv.most_similar(positive=[word])[0]\n",
    "    ftt_word, ftt_score = ftt_model.wv.most_similar(positive=[word])[0]\n",
    "    return w2v_word if w2v_score > ftt_score else ftt_word\n",
    "\n",
    "# Load the raw CSVs and map string labels to integer ids.\n",
    "train_df = pd.read_csv(paths['train_data'])\n",
    "test_df = pd.read_csv(paths['test_data'])\n",
    "train_df.label = train_df.label.apply(lambda e: label_map[e])\n",
    "Y = train_df.label.values\n",
    "\n",
    "ftt_model = FastText.load(r'common/output/ftt_1gram_100.model')\n",
    "# NOTE(review): the w2v checkpoint is loaded with FastText.load -- if it\n",
    "# was saved by gensim's Word2Vec this will fail or misbehave; confirm\n",
    "# the file really is a FastText model.\n",
    "w2v_model = FastText.load(r'common/output/w2v_1gram_100.model')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "source": [
    "# Reload the full dataset (unigrams, sequences capped at 64 tokens).\n",
    "train_texts, input_ids, test_texts, labels, word2idx, embeddings = load_data(1, 64)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "100%|██████████| 3456/3456 [00:00<00:00, 163591.49it/s]"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Loading pretrained vectors...\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "source": [
    "# Peek at one encoded sample from class 2 (tokens are integer ids).\n",
    "train_df.loc[train_df.label==2].text.values[32]"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "'12163 8224 13343 17281 4529 15475 803 24058 8750 10935 13554 23743 12163 19897 2281 12772 10449 14051 24965 10274 1242 21871 13814 20548 15927 274'"
      ]
     },
     "metadata": {},
     "execution_count": 46
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import nltk\n",
    "def augment(label, text_df, w2v_model, ftt_model):\n",
    "    \"\"\"Oversample class `label` by synonym-replacement augmentation.\n",
    "\n",
    "    Draws random sentences of the target class, swaps up to 10 random\n",
    "    tokens for their nearest embedding neighbour, and appends the new\n",
    "    rows to `text_df` in place.\n",
    "    \"\"\"\n",
    "    # BUGFIX: read from the `text_df` argument -- the original ignored\n",
    "    # its parameter and read the global train_df.\n",
    "    label_text = text_df.loc[text_df.label == label].text.values\n",
    "    if len(label_text) == 0:\n",
    "        return\n",
    "\n",
    "    # BUGFIX: random.sample(range(300, 450)) is a TypeError (k missing);\n",
    "    # we want a single random count of sentences to generate.\n",
    "    generate_num = random.randint(300, 450)\n",
    "    for _ in range(generate_num):\n",
    "        # BUGFIX: random.randint(n) is a TypeError; randrange(n) yields\n",
    "        # a valid index in [0, n).\n",
    "        org_text = label_text[random.randrange(len(label_text))]\n",
    "        tokens = list(nltk.word_tokenize(org_text))\n",
    "        n_replace = min(10, len(tokens))\n",
    "        for idx in random.sample(range(len(tokens)), n_replace):\n",
    "            tokens[idx] = most_similar(tokens[idx], w2v_model, ftt_model)\n",
    "        # BUGFIX: generate_text was never assigned (NameError); build it\n",
    "        # from the mutated token list.\n",
    "        generate_text = ' '.join(tokens)\n",
    "        # BUGFIX: .iloc cannot enlarge a DataFrame (IndexError); .loc can.\n",
    "        # assumes text_df columns are [id, text, label] -- TODO confirm.\n",
    "        text_df.loc[len(text_df)] = [len(text_df), generate_text, label]"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "source": [
    "# text_1_gram = [nltk.word_tokenize(line) for line in text.tolist()]\n",
    "# Class distribution of the 25 rarest labels -- shows the imbalance\n",
    "# that motivates the augment() oversampling above.\n",
    "train_df.label.value_counts()[-25:]"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "12    591\n",
       "3     377\n",
       "23    358\n",
       "7     323\n",
       "27    293\n",
       "17    281\n",
       "8     247\n",
       "2     239\n",
       "34    223\n",
       "5     205\n",
       "22    185\n",
       "32    178\n",
       "4     176\n",
       "9     159\n",
       "19    156\n",
       "31    134\n",
       "11    103\n",
       "25     62\n",
       "15     49\n",
       "20     33\n",
       "33     32\n",
       "28     20\n",
       "30     19\n",
       "24     18\n",
       "1      16\n",
       "Name: label, dtype: int64"
      ]
     },
     "metadata": {},
     "execution_count": 34
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "source": [
    "# Fancy-indexing scratch check: select elements 0 and 4.\n",
    "# BUGFIX: removed a stray, incomplete 'for' line that made the cell a\n",
    "# SyntaxError (the saved output array([1, 5]) predates it).\n",
    "a = np.array([1,2,3,4,5])\n",
    "a[[0, 4]]"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "array([1, 5])"
      ]
     },
     "metadata": {},
     "execution_count": 37
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "source": [
    "# Scratch check: 10 distinct random positions out of 64.\n",
    "random.sample(range(0, 64), 10)"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[35, 15, 14, 8, 47, 6, 43, 59, 34, 5]"
      ]
     },
     "metadata": {},
     "execution_count": 38
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.8.8",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.8 64-bit (conda)"
  },
  "interpreter": {
   "hash": "1b89aa55be347d0b8cc51b3a166e8002614a385bd8cff32165269c80e70c12a7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}