{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "[2021-09-13 22:36] 1 gram best: 71.59%\n",
    "[2021-09-14 00:25] 2 gram best: 71.52%"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "import random\n",
    "import time\n",
    "\n",
    "from common.configs.path import paths\n",
    "from common.configs.tools import label_map\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "import pickle\n",
    "\n",
    "import json\n",
    "\n",
    "import nltk\n",
    "from nltk import ngrams\n",
    "\n",
    "from gensim.models import Word2Vec\n",
    "from gensim.models.fasttext import FastText\n",
    "\n",
    "from tqdm import tqdm\n",
    "\n",
    "%matplotlib inline"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "source": [
    "if torch.cuda.is_available():\n",
    "    print('gpu is available: {}'.format(torch.cuda.get_device_name(0)))\n",
    "    print('device count: {}'.format(torch.cuda.device_count()))\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print('Using device:', device)\n",
    "print()\n",
    "\n",
    "def save_model(model, path):\n",
    "    \"\"\"Pickle `model` to `path`.\"\"\"\n",
    "    with open(path, 'wb') as f:\n",
    "        pickle.dump(model, f)\n",
    "\n",
    "\n",
    "def load_model(path):\n",
    "    \"\"\"Unpickle and return the object stored at `path`.\n",
    "    WARNING: only load files produced by this notebook -- pickle can execute\n",
    "    arbitrary code when deserializing untrusted data.\n",
    "    \"\"\"\n",
    "    with open(path, 'rb') as f:\n",
    "        model = pickle.load(f)\n",
    "    return model\n",
    "\n",
    "def load_json(path):\n",
    "    \"\"\"Read a JSON file and return the parsed object.\"\"\"\n",
    "    with open(path, 'r') as f:\n",
    "        dict_ = json.load(f)\n",
    "    return dict_\n",
    "\n",
    "def save_json(path, dt):\n",
    "    \"\"\"Serialize `dt` as JSON to `path` and print a confirmation.\"\"\"\n",
    "    with open(path, 'w') as f:\n",
    "        json.dump(dt, f)\n",
    "    print(path, 'saved.')\n",
    "\n",
    "def load_npy(path):\n",
    "    \"\"\"Load a NumPy array previously saved with np.save.\"\"\"\n",
    "    return np.load(path)\n",
    "\n",
    "def save_text(path, line):\n",
    "    \"\"\"Write `line` to the text file at `path` (overwrites existing content).\"\"\"\n",
    "    # BUGFIX: the file was opened with mode 'r' (read-only); f.write() on a\n",
    "    # read-mode handle raises io.UnsupportedOperation. Open for writing.\n",
    "    with open(path, 'w') as f:\n",
    "        f.write(line)\n",
    "\n",
    "def train_word_vector(text, gram):\n",
    "    \"\"\"Train 50-d Word2Vec and FastText models on the tokenized corpus\n",
    "    `text` (list of token lists), save both under common/output/, and\n",
    "    return the pair (w2v, ftt). `gram` tags the output filenames.\n",
    "    \"\"\"\n",
    "    # BUGFIX: gensim does not interpret workers=-1 as 'all cores' (that is a\n",
    "    # scikit-learn convention); a non-positive worker count spawns no training\n",
    "    # threads, silently leaving the vectors at their random initialization.\n",
    "    w2v = Word2Vec(sentences=text, vector_size=50, window=5, min_count=1, workers=4)\n",
    "    ftt = FastText(sentences=text, vector_size=50, window=5, min_count=1, workers=4)\n",
    "    w2v.save('common/output/w2v_{}gram_100.model'.format(gram))\n",
    "    ftt.save('common/output/ftt_{}gram_100.model'.format(gram))\n",
    "    return w2v, ftt\n",
    "\n",
    "def comb_vector(w2v_m, ftt_m):\n",
    "    # Concatenate the 50-d Word2Vec and 50-d FastText vectors into one 100-d\n",
    "    # embedding per vocabulary word (gensim 4.x API: wv.index_to_key replaces\n",
    "    # the removed wv.vocab). Returns {word: list[float]} so it is JSON-serializable.\n",
    "    return {w: (np.concatenate((w2v_m.wv[w], ftt_m.wv[w]))).tolist() for w in list(w2v_m.wv.index_to_key)}\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "gpu is available: NVIDIA GeForce GTX 1050 with Max-Q Design\n",
      "device count: 1\n",
      "Using device: cuda\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
    "train_df = pd.read_csv(paths['train_data'])\n",
    "test_df = pd.read_csv(paths['test_data'])\n",
    "train_df.label = train_df.label.apply(lambda e: label_map[e])\n",
    "Y = train_df.label.values"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "source": [
    "def count_len(text_):\n",
    "    \"\"\"Return a DataFrame with one row per document: its index and length.\"\"\"\n",
    "    # (pandas is already imported in the setup cell at the top of the\n",
    "    # notebook; the duplicate mid-notebook import was removed.)\n",
    "    return pd.DataFrame({'id': range(len(text_)), 'len': [len(l) for l in text_]})"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "source": [
    "\n",
    "# text = pd.concat([train_df['text'], test_df['text']])\n",
    "# text_1_gram = [nltk.word_tokenize(line) for line in text.tolist()]\n",
    "# text_2_gram = [[' '.join(grams) for grams in ngrams(nltk.word_tokenize(line), n=2)] for line in text.to_list()]\n",
    "# text_3_gram = [[' '.join(grams) for grams in ngrams(nltk.word_tokenize(line), n=3)] for line in text.to_list()]\n",
    "\n",
    "\n",
    "# save_json(r'./common/output/corpus_1_gram.json', text_1_gram)\n",
    "# save_json(r'./common/output/corpus_2_gram.json', text_2_gram)\n",
    "# save_json(r'./common/output/corpus_3_gram.json', text_3_gram)\n",
    "\n",
    "text_1_gram = load_json('common/output/corpus_1_gram.json')\n",
    "# text_2_gram = load_json('common/output/corpus_2_gram.json')\n",
    "# text_3_gram = load_json('common/output/corpus_3_gram.json')\n",
    "\n",
    "w2c_1_gram, ftt_1_gram = train_word_vector(text_1_gram, 1)\n",
    "word_embeddings_1_gram = comb_vector(w2c_1_gram, ftt_1_gram)\n",
    "save_json('common/output/word_embeddings_1_gram_100.json', word_embeddings_1_gram)\n",
    "\n",
    "\n",
    "# w2c_2_gram, ftt_2_gram = train_word_vector(text_2_gram, 2)\n",
    "# word_embeddings_2_gram = comb_vector(w2c_2_gram, ftt_2_gram)\n",
    "# save_json('common/output/word_embeddings_2_gram_100.json', word_embeddings_2_gram)\n",
    "\n",
    "# w2c_3_gram, ftt_3_gram = train_word_vector(text_3_gram, 3)\n",
    "# word_embeddings_3_gram = comb_vector(w2c_3_gram, ftt_3_gram)\n",
    "# save_json('common/output/word_embeddings_3_gram_100.json', word_embeddings_3_gram)\n",
    "\n",
    "# word_embeddings_1_gram = load_json(r'./output/word_embeddings_1_gram.json')\n",
    "# word_embeddings_2_gram = load_json(r'./output/word_embeddings_2_gram.json')\n",
    "# word_embeddings_3_gram = load_json(r'./output/word_embeddings_3_gram.json')"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "common/output/word_embeddings_1_gram_100.json saved.\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "source": [
    "# NOTE(review): most_similar is defined in a LATER cell (execution counts\n",
    "# are out of order: this cell ran as [21], the definition as [18]). This\n",
    "# breaks Restart & Run All -- run the definition cell below before this one,\n",
    "# or move the definition cell above this call.\n",
    "most_similar('7057')"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "'4740'"
      ]
     },
     "metadata": {},
     "execution_count": 21
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "source": [
    "def most_similar(word):\n",
    "    \"\"\"Return the nearest neighbour of `word` from whichever model\n",
    "    (Word2Vec or FastText) reports the higher top-1 similarity score.\n",
    "    NOTE(review): depends on the globals w2c_1_gram / ftt_1_gram created by\n",
    "    the training cell -- run that cell first.\n",
    "    \"\"\"\n",
    "    w2v_most = w2c_1_gram.wv.most_similar(positive=[word])[0]\n",
    "    ftt_most = ftt_1_gram.wv.most_similar(positive=[word])[0]\n",
    "    if w2v_most[1] > ftt_most[1]:\n",
    "        return w2v_most[0]\n",
    "    else:\n",
    "        return ftt_most[0]"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "source": [
    "w2c_1_gram.wv.most_similar(positive=['7442'])[0][1]\n",
    "# ftt_1_gram.wv.most_similar(positive=['7442'])"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "0.4544706344604492"
      ]
     },
     "metadata": {},
     "execution_count": 16
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "source": [
    "ftt_1_gram.wv.most_similar(positive=['7442'])"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[('27442', 0.6191792488098145),\n",
       " ('7057', 0.46636179089546204),\n",
       " ('8528', 0.444609671831131),\n",
       " ('19447', 0.4425199329853058),\n",
       " ('16240', 0.4255737364292145),\n",
       " ('2970', 0.42535200715065),\n",
       " ('2856', 0.41807541251182556),\n",
       " ('25442', 0.4103670120239258),\n",
       " ('17701', 0.4001133441925049),\n",
       " ('17488', 0.39631959795951843)]"
      ]
     },
     "metadata": {},
     "execution_count": 12
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.8.8",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.8 64-bit (conda)"
  },
  "interpreter": {
   "hash": "1b89aa55be347d0b8cc51b3a166e8002614a385bd8cff32165269c80e70c12a7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}