{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.svm import LinearSVC, SVC\n",
    "import torch\n",
    "import transformers as tfs\n",
    "import warnings\n",
    "\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Imports and environment setup for the preprocessing utilities.\n",
    "# Fix: the original imported tqdm twice and interleaved imports with statements.\n",
    "import os\n",
    "import re\n",
    "import string\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import joblib\n",
    "import pymongo\n",
    "from tqdm import tqdm\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "from nltk.corpus import stopwords\n",
    "from nltk.stem import SnowballStemmer\n",
    "from nltk.stem.wordnet import WordNetLemmatizer\n",
    "\n",
    "# Allow duplicate OpenMP runtimes (works around a common torch/MKL clash).\n",
    "os.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n",
    "stop = stopwords.words('english')\n",
    "\n",
    "# Provenance: the raw test sentences originally came from this spreadsheet;\n",
    "# a pickled copy is loaded in the next cell instead of re-parsing the Excel file.\n",
    "# test_data = pd.read_excel('../data/test/MBFC-sentences-Dataset2.xlsx',engine='openpyxl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the cached 711k-sentence test set. The pickle was created once via the\n",
    "# commented-out joblib.dump below to avoid re-reading the source spreadsheet.\n",
    "# test_data\n",
    "# joblib.dump(test_data,'../data/test/711ktest_data.data')\n",
    "test_data = joblib.load('../data/test/711ktest_data.data')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Self-contained pipeline: clean each sentence, embed it with BERT, classify\n",
    "# the [CLS] embedding with a pre-trained logistic-regression model, and save\n",
    "# the predictions chunk by chunk.\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import re\n",
    "import os\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.svm import LinearSVC, SVC\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import transformers as tfs\n",
    "\n",
    "from nltk.corpus import stopwords\n",
    "from nltk.stem import SnowballStemmer\n",
    "from nltk.stem.wordnet import WordNetLemmatizer\n",
    "import joblib\n",
    "import string\n",
    "import xlrd\n",
    "import nltk\n",
    "\n",
    "nltk.download('wordnet')\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "os.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n",
    "\n",
    "# Stopword membership is checked once per token below; a set makes each check\n",
    "# O(1) instead of scanning the whole list (same matching behavior).\n",
    "stop = set(stopwords.words('english'))\n",
    "test_data = joblib.load('../data/test/711ktest_data.data')\n",
    "\n",
    "# ---- configuration -------------------------------------------------------\n",
    "splits = 400                      # sentences scored per mini-batch\n",
    "cuda = True                      # move model/tensors onto the GPU\n",
    "start = 0\n",
    "end = len(test_data)              # was hard-coded 711580; derive from the data\n",
    "sets = range(start, end, splits)  # chunk start offsets\n",
    "sents = {}                        # chunk index -> DataFrame of predictions\n",
    "\n",
    "# Pre-trained classifier applied on top of the BERT [CLS] embeddings.\n",
    "lr_clf = joblib.load('../models/lr_clf.clf')\n",
    "model_class, tokenizer_class, pretrained_weights = (tfs.BertModel, tfs.BertTokenizer, 'bert-base-uncased')\n",
    "tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n",
    "model = model_class.from_pretrained(pretrained_weights)\n",
    "model.eval()  # inference only; make the no-dropout state explicit\n",
    "\n",
    "if cuda:\n",
    "    model = model.cuda()\n",
    "    model = nn.DataParallel(model)\n",
    "\n",
    "# Loop-invariant cleaning helpers, hoisted out of the chunk loop: the original\n",
    "# built a new WordNetLemmatizer per WORD and recompiled both regexes on every\n",
    "# iteration. One instance / one compile each gives identical output.\n",
    "lemmatizer = WordNetLemmatizer()\n",
    "num_re = re.compile(r\"\\d+\\.?\\d*\")\n",
    "punct_re = re.compile('[%s]' % re.escape(string.punctuation + '”“’�—–≥≤€‘…°'))\n",
    "\n",
    "def clean_sentence(w):\n",
    "    \"\"\"Lowercase + lemmatize each token, strip digits and punctuation,\n",
    "    and drop English stopwords; returns the cleaned sentence string.\"\"\"\n",
    "    return ' '.join(\n",
    "        punct_re.sub('', num_re.sub('', ' '.join(lemmatizer.lemmatize(word.lower()).split())))\n",
    "        for word in str(w).split() if word not in stop\n",
    "    )\n",
    "\n",
    "for index, i in enumerate(tqdm(sets)):\n",
    "    demo = test_data['sentences'][i:i + splits].apply(clean_sentence)\n",
    "    # BERT sub-word ids, truncated to 77 positions (incl. [CLS]/[SEP]).\n",
    "    test_tokenized = demo.apply(lambda x: tokenizer.encode(x, add_special_tokens=True, max_length=77, truncation=True))\n",
    "    # Right-pad every sequence to the longest one in this chunk (0 = pad id).\n",
    "    test_max_len = max((len(j) for j in test_tokenized.values), default=0)\n",
    "    test_padded = np.array([j + [0] * (test_max_len - len(j)) for j in test_tokenized.values])\n",
    "    print(\"test set shape:\", test_padded.shape)\n",
    "\n",
    "    test_attention_mask = np.where(test_padded != 0, 1, 0)  # mask out padding\n",
    "    test_input_ids = torch.tensor(test_padded).long()\n",
    "    test_attention_mask = torch.tensor(test_attention_mask).long()\n",
    "    if cuda:\n",
    "        test_input_ids = test_input_ids.cuda()\n",
    "        test_attention_mask = test_attention_mask.cuda()\n",
    "    with torch.no_grad():\n",
    "        test_last_hidden_states = model(test_input_ids, attention_mask=test_attention_mask)\n",
    "    print(test_last_hidden_states[0].size())\n",
    "    # [CLS] embedding (sequence position 0) of the last layer as the feature.\n",
    "    test_features = test_last_hidden_states[0][:, 0, :].cpu().numpy()\n",
    "    y_pred = lr_clf.predict(test_features)\n",
    "    sents[index] = pd.DataFrame({'sentences': test_data['sentences'][i:i + splits], 'polit': y_pred})\n",
    "\n",
    "c_set = pd.concat(sents, axis=0, ignore_index=True)\n",
    "c_set.to_csv('../data/test/testv3dataset.csv', index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-assemble the per-chunk prediction frames into one DataFrame; depends on\n",
    "# `sents` populated by the loop cell above (duplicates its final concat).\n",
    "c_set = pd.concat(sents,axis=0,ignore_index=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>sentences</th>\n",
       "      <th>polit</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Last week, Kim took to Instagram following Kan...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>The rapper tweeted: 'I would like to apologize...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>I did not cover her like she has covered me.to...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Kim I want to say I know I hurt you.</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Please forgive me.</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>100195</th>\n",
       "      <td>She opened up about her weeks-long battle with...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>100196</th>\n",
       "      <td>Long hauler':</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>100197</th>\n",
       "      <td>On Saturday she shared a selfie wearing a rain...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>100198</th>\n",
       "      <td>This was me on April 2nd after being sick for ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>100199</th>\n",
       "      <td>I had never been this kind of sick.</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>100200 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                                                sentences  polit\n",
       "0       Last week, Kim took to Instagram following Kan...      1\n",
       "1       The rapper tweeted: 'I would like to apologize...      1\n",
       "2       I did not cover her like she has covered me.to...      1\n",
       "3                    Kim I want to say I know I hurt you.      2\n",
       "4                                      Please forgive me.      2\n",
       "...                                                   ...    ...\n",
       "100195  She opened up about her weeks-long battle with...      2\n",
       "100196                                      Long hauler':      1\n",
       "100197  On Saturday she shared a selfie wearing a rain...      2\n",
       "100198  This was me on April 2nd after being sick for ...      2\n",
       "100199                I had never been this kind of sick.      1\n",
       "\n",
       "[100200 rows x 2 columns]"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rich display of the combined predictions for a quick sanity check.\n",
    "c_set "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this writes to '../data/', while the loop cell saved the same\n",
    "# frame to '../data/test/' — confirm which location downstream code expects.\n",
    "c_set.to_csv('../data/testv3dataset.csv',index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
