{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Core data/ML stack plus NLTK text utilities and the MongoDB client.\n",
     "import pandas as pd\n",
     "import numpy as np\n",
     "from tqdm import tqdm\n",
     "import re\n",
     "from sklearn.feature_extraction.text import TfidfVectorizer\n",
     "from sklearn.model_selection import train_test_split\n",
     "from nltk.corpus import stopwords\n",
     "from nltk.stem import SnowballStemmer\n",
     "import pymongo\n",
     "import os\n",
     "# Work around the duplicate-OpenMP-runtime abort (common when MKL and PyTorch\n",
     "# both ship libomp); without it importing torch alongside sklearn can crash.\n",
     "os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                               Sentence  polit\n",
      "2249  Preliminary data Centers Disease Control Preve...      1\n",
      "6345  OnePlus revealed specs OnePlus Buds, promising...      2\n",
      "4795  It scientific fact (probably) Shaun Dead Hot F...      1\n",
      "8395  Real Housewives New York star Sonja Morgan upf...      2\n",
      "3558  Image copyright Drew Feeback Image caption Cle...      1\n",
      "...                                                 ...    ...\n",
      "1289  Education Secretary Betsy DeVos met heavy crit...      1\n",
      "1393  LaChina Robinson sends congratulatory video Ka...      1\n",
      "4069  A horror movie friction eroding public trust a...      1\n",
      "5323  It's hard imagine world without Pikachu point ...      2\n",
      "1851  (CNN) White House adviser President's daughter...      1\n",
      "\n",
      "[9497 rows x 2 columns]\n"
     ]
    }
   ],
   "source": [
    "myclient = pymongo.MongoClient(\"mongodb://192.168.0.217:27017/\")\n",
    "mydb = myclient[\"news\"]\n",
    "mycol = mydb[\"political\"]\n",
    "left_query = { \"nutrition.political_bias.subfeatures.value\": 0}\n",
    "right_query = { \"nutrition.political_bias.subfeatures.value\": 4}\n",
    "\n",
    "\n",
    "lr = mycol.find(left_query).limit(4800)\n",
    "rr = mycol.find(right_query).limit(4697)\n",
    "left_data = pd.DataFrame(columns=['Sentence'],data=[i['article']['text'] for i in list(lr)] )\n",
    "right_data = pd.DataFrame(columns=['Sentence'],data=[i['article']['text'] for i in list(rr)] )\n",
    "left_data['polit'] = 1\n",
    "right_data['polit'] = 2\n",
    "pc = r\"\\d+\\.?\\d*\"\n",
    "\n",
    "stop = stopwords.words('english')\n",
    "b_set = pd.concat( [left_data,right_data], axis=0,ignore_index=True)\n",
    "b_set['Sentence'] = b_set['Sentence'].apply(lambda w:' '.join([re.sub(pc,\"\",word) for word in str(w).split() if word not in (stop)]))\n",
    "b_set = b_set.sample(frac = 1)\n",
    "print(b_set)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 800/800 [00:01<00:00, 644.38it/s]\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Sentence</th>\n",
       "      <th>SUBJpolit</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>An exciting possibility theyre nanoflares like...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>I went said I want make rock roll record said ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>The total number tests reached 130404 increase...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Buick Enlarge Image Buick Two new SUVs electri...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Perry said wearing bright yellow hoodie brand...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>821</th>\n",
       "      <td>March 2020  Boone comes forward set record str...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>822</th>\n",
       "      <td>Its major problem isnt going away source told ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>823</th>\n",
       "      <td>In related news Facebook managed use 86 perce...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>824</th>\n",
       "      <td>Yet boost scheduled end states except New York...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>825</th>\n",
       "      <td>The entire Steelers offense torpedoed Ben Roet...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>826 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                                              Sentence  SUBJpolit\n",
       "0    An exciting possibility theyre nanoflares like...          1\n",
       "1    I went said I want make rock roll record said ...          1\n",
       "2    The total number tests reached 130404 increase...          2\n",
       "3    Buick Enlarge Image Buick Two new SUVs electri...          1\n",
       "4     Perry said wearing bright yellow hoodie brand...          1\n",
       "..                                                 ...        ...\n",
       "821  March 2020  Boone comes forward set record str...          1\n",
       "822  Its major problem isnt going away source told ...          1\n",
       "823   In related news Facebook managed use 86 perce...          2\n",
       "824  Yet boost scheduled end states except New York...          2\n",
       "825  The entire Steelers offense torpedoed Ben Roet...          1\n",
       "\n",
       "[826 rows x 2 columns]"
      ]
     },
     "execution_count": 93,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Assemble the final sentence dataset:\n",
     "#   1) load the hand-labeled spreadsheet + five extra CSVs and merge them,\n",
     "#   2) discard that merged set and rebuild b_set from fresh MongoDB articles split\n",
     "#      into sentence fragments (see the drop below),\n",
     "#   3) strip stopwords/punctuation, 4) keep only sentences with > 10 tokens.\n",
     "from tqdm import tqdm\n",
     "import joblib\n",
     "df = pd.read_excel('../data/data.xlsx')\n",
     "df.to_csv('../data/target_origin51.csv',index=None)\n",
     "# Load files in different encoding formats\n",
     "df_extra_1 = pd.read_csv('../data/extra1.csv',encoding='utf-8')\n",
     "df_extra_2 = pd.read_csv('../data/extra2.csv',encoding='utf-8')\n",
     "df_extra_3 = pd.read_csv('../data/extra3.csv',encoding='utf-8')\n",
     "df_extra_4 = pd.read_csv('../data/extra4.csv',encoding='utf-8')\n",
     "df_extra_5 = pd.read_csv('../data/extra5.csv',encoding='utf-8')\n",
     "# df_extra_6 = pd.read_csv('../data/extra6.csv',encoding='utf-8')\n",
     "# Remove extra spaces in the text\n",
     "df_obj = df_extra_4.select_dtypes(['object'])\n",
     "df_extra_4[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())\n",
     "# define the stopwords\n",
     "stop = stopwords.words('english')\n",
     "# remove the empty data, without this step, we can't go on.\n",
     "df = df.dropna(subset=['Sentence'])\n",
     "# --- debug ----\n",
     "# Binarize labels: original codes 3 and 5 collapse to class 2, all others to 1.\n",
     "df['SUBJpolit'] = np.where((df['SUBJpolit'] == 3) | (df['SUBJpolit'] == 5), 2,1)\n",
     "res=pd.concat([df_extra_1,df_extra_2,df_extra_3,df_extra_4,df_extra_5],ignore_index=True,axis=0)\n",
     "res = res.drop(columns=['Unnamed: 2'])\n",
     "res.rename(columns={'polit':'SUBJpolit'},inplace = True)\n",
     "# --- save target_origin_sentence\n",
     "target_origin=pd.concat([res,df],ignore_index=True,axis=0)\n",
     "target_origin.to_csv('../data/target_origin571.csv',encoding='utf_8_sig',index=None)\n",
     "target_origin.to_csv('../data/target_origin.csv',index=None)\n",
     "# --- save target_without_stopwords ---\n",
     "# NOTE(review): this is an alias, not a copy — mutating target_without_stopwords\n",
     "# would also mutate target_origin.\n",
     "target_without_stopwords = target_origin\n",
     "# Pattern matching any character that is not an ASCII letter, digit or space.\n",
     "comp = re.compile('[^A-Z^a-z^0-9^ ]')\n",
     "\n",
     "# Balance classes by truncating class 1 down to ~49% of its original size.\n",
     "t_set = target_without_stopwords[target_without_stopwords['SUBJpolit']==1]\n",
     "f_set = target_without_stopwords[target_without_stopwords['SUBJpolit']==2]\n",
     "b_t_set = target_without_stopwords[target_without_stopwords['SUBJpolit']==1].iloc[0:int(len(t_set)*0.49)]\n",
     "\n",
     "b_set = pd.concat( [b_t_set,f_set], axis=0,ignore_index=True)\n",
     "b_set = b_set.sample(frac = 1)\n",
     "\n",
     "\n",
     "# add more data from tanbih\n",
     "# NOTE(review): this empties b_set, discarding everything balanced above — only\n",
     "# the MongoDB sentences collected below end up in the final set. Confirm intent.\n",
     "b_set.drop(b_set.index, inplace=True)\n",
     "\n",
     "# clear b_set\n",
     "myclient = pymongo.MongoClient(\"mongodb://192.168.0.217:27017/\")\n",
     "mydb = myclient[\"news\"]\n",
     "mycol = mydb[\"political\"]\n",
     "left_query = { \"nutrition.political_bias.subfeatures.value\": 0}\n",
     "right_query = { \"nutrition.political_bias.subfeatures.value\": 4}\n",
     "\n",
     "\n",
     "lr = mycol.find(left_query).limit(400)\n",
     "rr = mycol.find(right_query).limit(400)\n",
     "left_data = pd.DataFrame(columns=['Sentence'],data=[i['article']['text'] for i in list(lr)] )\n",
     "right_data = pd.DataFrame(columns=['Sentence'],data=[i['article']['text'] for i in list(rr)] )\n",
     "left_data['polit'] = 1\n",
     "right_data['polit'] = 2\n",
     "bb = pd.concat( [left_data,right_data], axis=0,ignore_index=True)\n",
     "\n",
     "# Split each article on '.' and keep fragments of 21-99 characters as sentences.\n",
     "# NOTE(review): concatenating inside the loop is quadratic; collecting frames in\n",
     "# a list and concatenating once would be linear.\n",
     "for (index,item) in enumerate(tqdm(bb['Sentence'])):\n",
     "    tmp = pd.DataFrame(columns=['Sentence','SUBJpolit'],data=[(x.strip() ,bb['polit'][index]) for x in item.split('.') if len(x.strip())>20 and len(x)<100] )\n",
     "    b_set = pd.concat([b_set,tmp], axis=0,ignore_index=True)\n",
     "    \n",
     "b_set.to_csv('../data/sentence_set.csv',index=None)\n",
     "b_set['Sentence'] = b_set['Sentence'].apply(lambda w:' '.join([comp.sub(\"\",word) for word in str(w).split() if word not in (stop)]))\n",
     "b_set = b_set.sample(frac = 1)\n",
     "b_set.to_csv('../data/target_without_stopwords.csv',index=None)\n",
     "\n",
     "# print('none bias numbers: %s' % len(t_set))\n",
     "# print('bias numbers: %s' % len(f_set))\n",
     "# print('balanced none bias numbers: %s' % len(b_t_set))\n",
     "\n",
     "# joblib.dump(b_set,'../models/826_b_set.data')\n",
     "\n",
     "# Keep only sentences with more than 10 whitespace-separated tokens.\n",
     "# NOTE(review): the per-item label lookup makes this O(n^2), and duplicate\n",
     "# sentence strings always receive the first matching label.\n",
     "b_set = pd.DataFrame(columns=['Sentence','SUBJpolit'],data=[(item ,b_set[b_set['Sentence'] == item]['SUBJpolit'].values[0]) for item in b_set['Sentence'] if len(item.split(' '))>10] )\n",
     "b_set\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1    458\n",
      "2    368\n",
      "Name: SUBJpolit, dtype: int64\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "['../models/826_clear.data']"
      ]
     },
     "execution_count": 94,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Class balance of the final set, then persist it (CSV for inspection,\n",
     "# joblib for fast reload in later sessions).\n",
     "print(b_set['SUBJpolit'].value_counts())\n",
     "b_set.to_csv('../data/target_without_stopwords_clear.csv',index=None)\n",
     "joblib.dump(b_set,'../models/826_clear.data')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "8687"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Quick row count of the current sentence set (stale execution count — this ran\n",
     "# against an earlier 8687-row kernel state, not the 826-row set above).\n",
     "len(b_set['Sentence'].values)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['(Bloomberg) -- Luxshare Precision Industry Co',\n",
       " ' is acquiring Wistron Corp',\n",
       " '’s iPhone production business in China via a $472 million deal, potentially becoming the first mainland company to win a coveted role assembling Apple Inc',\n",
       " '’s device during U',\n",
       " ' trade tensions',\n",
       " '\\n\\nTaiwan-based Wistron, one of just three contract manufacturers that assemble Apple’s iPhones, said Friday it’s agreed to sell two subsidiaries in eastern China for 3',\n",
       " '3 billion yuan to Luxshare',\n",
       " ' One of those units, based in the city of Kunshan, is Wistron’s sole iPhone manufacturing site, according to people familiar with the company’s business',\n",
       " '\\n\\nThe deal is a win for Luxshare, which produces accessories and components from cables and chargers to antennas but is also the world’s biggest manufacturer of Apple’s AirPods -- a lucrative business that helped the company become one of 2019’s top Asian stock performers',\n",
       " ' Apple in turn has sought a mainland Chinese partner in part to broaden local sources during a trade war',\n",
       " '\\n\\nA Wistron representative said the Kunshan site being sold to Luxshare makes smartphones, notebooks and connected devices, and the company will shift production of some of those products elsewhere in China',\n",
       " ' She declined to comment on the its iPhone business',\n",
       " ' An Apple representative didn’t immediately respond to an emailed request for comment',\n",
       " '\\n\\nRead more: Apple’s AirPods Fire Up One of Asia’s Top Stocks in 2019\\n\\nLuxshare and other contract manufacturers have long sought to compete in assembling Apple’s most profitable device',\n",
       " ' Wistron is the smallest of three iPhone assemblers, lagging behind Pegatron Corp',\n",
       " ' and Hon Hai Precision Industry Co',\n",
       " ', the company also known as Foxconn',\n",
       " ' Wistron continues to assemble cheaper iPhones in India',\n",
       " '\\n\\n“With the acquisition of the Wistron’s iPhone unit, Luxshare can now become an iPhone assembler,” GF Securities analyst Jeff Pu said',\n",
       " ' “This will first pose a threat to Pegatron’s iPhone business in China',\n",
       " '”\\n\\nThe deal is slated to be completed by year’s end pending regulatory approval, Wistron said in an emailed statement',\n",
       " '\\n\\nRead more: IPhone Makers Look Beyond China in Supply-Chain Rethink\\n\\nFor more articles like this, please visit us at bloomberg',\n",
       " 'com\\n\\nSubscribe now to stay ahead with the most trusted business news source',\n",
       " '\\n\\n©2020 Bloomberg L']"
      ]
     },
     "execution_count": 122,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "q = right_data['Sentence'][0].split('.')\n",
    "bbb=[]\n",
    "for i in q:\n",
    "    if(len(i)>10):\n",
    "        bbb.append(i)\n",
    "len(bbb)\n",
    "bbb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Sentence</th>\n",
       "      <th>SUBJpolit</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Second, it is incredibly narrow-minded to assu...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>Libraries have a multitude of benefits, and m...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>Some of these benefits include acting as a qu...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>One neighborhood found that, after a local li...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Similarly, a Pew survey conducted in 2015 fou...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5595</th>\n",
       "      <td>Some of these benefits include acting as a qu...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5596</th>\n",
       "      <td>One neighborhood found that, after a local li...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5597</th>\n",
       "      <td>Similarly, a Pew survey conducted in 2015 fou...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5598</th>\n",
       "      <td>People see libraries as a way to connect with...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5599</th>\n",
       "      <td></td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5600 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                                               Sentence SUBJpolit\n",
       "0     Second, it is incredibly narrow-minded to assu...         2\n",
       "1      Libraries have a multitude of benefits, and m...         2\n",
       "2      Some of these benefits include acting as a qu...         2\n",
       "3      One neighborhood found that, after a local li...         2\n",
       "4      Similarly, a Pew survey conducted in 2015 fou...         2\n",
       "...                                                 ...       ...\n",
       "5595   Some of these benefits include acting as a qu...         1\n",
       "5596   One neighborhood found that, after a local li...         1\n",
       "5597   Similarly, a Pew survey conducted in 2015 fou...         1\n",
       "5598   People see libraries as a way to connect with...         1\n",
       "5599                                                            1\n",
       "\n",
       "[5600 rows x 2 columns]"
      ]
     },
     "execution_count": 112,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Drop rows with any missing values from sentence_set.\n",
     "# NOTE(review): sentence_set is not defined in any visible cell — this relies on\n",
     "# kernel state (presumably loaded from '../data/sentence_set.csv' in a removed\n",
     "# cell; confirm). Empty-string sentences (see row 5599 in the output) are not\n",
     "# NaN, so they survive this dropna.\n",
     "sentence_set.dropna(axis=0, how='any', inplace=True)\n",
     "sentence_set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Modeling stack: sklearn classifiers plus PyTorch and HuggingFace transformers\n",
     "# for extracting BERT sentence features.\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "from sklearn.model_selection import train_test_split\n",
     "from sklearn.linear_model import LogisticRegression\n",
     "from sklearn.model_selection import GridSearchCV\n",
     "from sklearn.model_selection import cross_val_score\n",
     "from sklearn.svm import LinearSVC, SVC\n",
     "import torch\n",
     "import transformers as tfs\n",
     "import warnings\n",
     "import joblib\n",
     "# Silence sklearn/transformers deprecation chatter in the notebook output.\n",
     "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertModel: ['cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "# read model from lib\n",
    "model_class, tokenizer_class, pretrained_weights = (tfs.BertModel, tfs.BertTokenizer, 'bert-base-uncased')\n",
    "tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n",
    "model = model_class.from_pretrained(pretrained_weights)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train set shape: (826, 2)\n",
      "1    458\n",
      "2    368\n",
      "Name: SUBJpolit, dtype: int64\n"
     ]
    }
   ],
   "source": [
     "# Sanity check: dataset shape and class balance before feature extraction.\n",
     "print(\"Train set shape:\", b_set.shape)\n",
     "print(b_set['SUBJpolit'].value_counts())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Means to add [CLS] and [SEP] symbols at the beginning and end of the sentence\n",
    "train_tokenized = b_set['Sentence'].apply((lambda x: tokenizer.encode(x, add_special_tokens=True,max_length=1024,truncation=True)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0      [101, 2019, 10990, 6061, 2027, 2890, 28991, 10...\n",
      "1      [101, 1045, 2253, 2056, 1045, 2215, 2191, 2600...\n",
      "2      [101, 1996, 2561, 2193, 5852, 2584, 7558, 1274...\n",
      "3      [101, 28865, 4372, 8017, 3351, 3746, 28865, 20...\n",
      "4      [101, 6890, 2056, 4147, 4408, 3756, 7415, 2666...\n",
      "                             ...                        \n",
      "821    [101, 2233, 12609, 15033, 3310, 2830, 2275, 25...\n",
      "822    [101, 2049, 2350, 3291, 3475, 2102, 2183, 2185...\n",
      "823    [101, 1999, 3141, 2739, 9130, 3266, 2224, 6564...\n",
      "824    [101, 2664, 12992, 5115, 2203, 2163, 3272, 204...\n",
      "825    [101, 1996, 2972, 15280, 10048, 9862, 2098, 38...\n",
      "Name: Sentence, Length: 826, dtype: object\n"
     ]
    }
   ],
   "source": [
     "# Inspect the token-id sequences (every row starts with [CLS] = id 101).\n",
     "print(train_tokenized)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train set shape: (826, 28)\n"
     ]
    }
   ],
   "source": [
    "#process the sentences into the same length\n",
    "train_max_len = 0\n",
    "for i in train_tokenized.values:\n",
    "    if len(i) > train_max_len:\n",
    "        train_max_len = len(i)\n",
    "train_padded = np.array([i + [0] * (train_max_len-len(i)) for i in train_tokenized.values]) #add 0 sufix\n",
    "print(\"train set shape:\",train_padded.shape)\n",
    "\n",
    "#output：train set shape: (3000, 66)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [],
   "source": [
    "#let the model know which words are not to be processed \"the [PAD] symbol\"\n",
    "train_attention_mask = np.where(train_padded != 0, 1, 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([826, 28, 768])\n"
     ]
    }
   ],
   "source": [
     "#in order to improve the training speed, add a series of [PAD] symbols at the end of short sentences:\n",
     "# Run the frozen BERT encoder over the whole padded batch; no_grad() because BERT\n",
     "# is used purely as a fixed feature extractor (no fine-tuning, no backprop).\n",
     "train_input_ids = torch.tensor(train_padded).long()\n",
     "train_attention_mask = torch.tensor(train_attention_mask).long()\n",
     "with torch.no_grad():\n",
     "    train_last_hidden_states = model(train_input_ids, attention_mask=train_attention_mask)\n",
     "# Output [0] is the last hidden state — (batch, seq_len, 768) per the printed size.\n",
     "print(train_last_hidden_states[0].size())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0      1\n",
      "1      1\n",
      "2      2\n",
      "3      1\n",
      "4      1\n",
      "      ..\n",
      "821    1\n",
      "822    1\n",
      "823    2\n",
      "824    2\n",
      "825    1\n",
      "Name: SUBJpolit, Length: 826, dtype: int64\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Sentence</th>\n",
       "      <th>SUBJpolit</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>An exciting possibility theyre nanoflares like...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>I went said I want make rock roll record said ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>The total number tests reached 130404 increase...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Buick Enlarge Image Buick Two new SUVs electri...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Perry said wearing bright yellow hoodie brand...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>821</th>\n",
       "      <td>March 2020  Boone comes forward set record str...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>822</th>\n",
       "      <td>Its major problem isnt going away source told ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>823</th>\n",
       "      <td>In related news Facebook managed use 86 perce...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>824</th>\n",
       "      <td>Yet boost scheduled end states except New York...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>825</th>\n",
       "      <td>The entire Steelers offense torpedoed Ben Roet...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>826 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                                              Sentence  SUBJpolit\n",
       "0    An exciting possibility theyre nanoflares like...          1\n",
       "1    I went said I want make rock roll record said ...          1\n",
       "2    The total number tests reached 130404 increase...          2\n",
       "3    Buick Enlarge Image Buick Two new SUVs electri...          1\n",
       "4     Perry said wearing bright yellow hoodie brand...          1\n",
       "..                                                 ...        ...\n",
       "821  March 2020  Boone comes forward set record str...          1\n",
       "822  Its major problem isnt going away source told ...          1\n",
       "823   In related news Facebook managed use 86 perce...          2\n",
       "824  Yet boost scheduled end states except New York...          2\n",
       "825  The entire Steelers offense torpedoed Ben Roet...          1\n",
       "\n",
       "[826 rows x 2 columns]"
      ]
     },
     "execution_count": 103,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "import joblib\n",
     "\n",
     "# Cache the BERT hidden states to disk so later sessions can skip the expensive\n",
     "# forward pass, then re-display the labels/dataset for reference.\n",
     "joblib.dump(train_last_hidden_states,'../models/826_clear.bert')\n",
     "print(b_set['SUBJpolit'])\n",
     "b_set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.datasets import load_iris\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.naive_bayes import GaussianNB\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.naive_bayes import ComplementNB\n",
    "from sklearn.naive_bayes import BernoulliNB \n",
    "from sklearn.naive_bayes import CategoricalNB\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import roc_curve                \n",
    "from sklearn.metrics import recall_score   \n",
    "from sklearn.metrics import f1_score                 \n",
    "\n",
    "training_accuracy = []\n",
    "gnb_test_accuracy = []\n",
    "gnb_test_recall = []\n",
    "gnb_test_fscore = []\n",
    "bnb_test_accuracy = []\n",
    "bnb_test_recall = []\n",
    "bnb_test_fscore = []\n",
    "svm_test_accuracy = []\n",
    "svm_test_recall = []\n",
    "svm_test_fscore = []\n",
    "lr_test_accuracy=[]\n",
    "lr_test_recall=[]\n",
    "lr_test_fscore=[]\n",
    "gnb = GaussianNB()\n",
    "bnb = BernoulliNB()\n",
    "lr_clf = LogisticRegression()\n",
    "svm_clf = SVC(C=0.8, kernel='linear', gamma=20, decision_function_shape='ovr',probability=True)\n",
    "sets = [0+(i/100)*2 for i in range(1,10)]\n",
    "r_set = range(1,10)\n",
    "\n",
    "for i in r_set:\n",
    "    X_train, X_test, Y_train, Y_test = train_test_split(train_features, train_labels.astype('int'), random_state=i)\n",
    "    y_pred1 = gnb.fit(X_train, Y_train).predict(X_test)\n",
    "    y_pred4 = bnb.fit(X_train, Y_train).predict(X_test)\n",
    "    svm_clf.fit(X_train, Y_train.ravel())\n",
    "    lr_clf.fit(X_train, Y_train)\n",
    "    y_pred = svm_clf.predict(X_test)\n",
    "    svm_acu = precision_score(Y_test, y_pred,average='weighted')\n",
    "    svm_recall = recall_score(Y_test, y_pred,average='weighted')\n",
    "    svm_fscore = f1_score(Y_test, y_pred,average='weighted')\n",
    "    svm_test_accuracy.append(svm_acu)\n",
    "    svm_test_recall.append(svm_recall)\n",
    "    svm_test_fscore.append(svm_fscore)\n",
    "    y_pred2 = lr_clf.predict(X_test)\n",
    "    lr_accuracy = precision_score(Y_test, y_pred2,average='weighted')\n",
    "    lr_recall = recall_score(Y_test, y_pred2,average='weighted')\n",
    "    lr_fscore = f1_score(Y_test, y_pred2,average='weighted')\n",
    "    lr_test_accuracy.append(lr_test_accuracy)\n",
    "    lr_test_recall.append(lr_recall)\n",
    "    lr_test_fscore.append(lr_fscore)\n",
    "    gnb_test_accuracy.append((Y_test == y_pred1).sum()/X_test.shape[0])\n",
    "    bnb_test_accuracy.append((Y_test == y_pred4).sum()/X_test.shape[0])\n",
    "grid = plt.GridSpec(3, 3, wspace=0.5, hspace=0.5)\n",
    "plt.subplot(grid[0,0:3])\n",
    "plt.plot(r_set, lr_test_accuracy, label=\"LR accuracy\")\n",
    "plt.plot(r_set, lr_test_recall, label=\"LR recall\")\n",
    "plt.plot(r_set, lr_test_fscore, label=\"LR f1-score\")\n",
    "plt.ylabel(\"value\")\n",
    "plt.xlabel(\"random_state\")\n",
    "plt.legend()\n",
    "plt.subplot(grid[1,0:3])\n",
    "plt.plot(r_set, svm_test_accuracy, label=\"SVM accuracy\")\n",
    "plt.plot(r_set, svm_test_recall, label=\"SVM recall\")\n",
    "plt.plot(r_set, svm_test_fscore, label=\"SVM f1-score\")\n",
    "plt.ylabel(\"value\")\n",
    "plt.xlabel(\"random_state\")\n",
    "plt.legend()\n",
    "plt.subplot(grid[2,0:3])\n",
    "plt.plot(r_set, gnb_test_accuracy, label=\"GaussianNB accuracy\")\n",
    "plt.plot(r_set, bnb_test_accuracy, label=\"BernoulliNB accuracy\")\n",
    "plt.ylabel(\"value\")\n",
    "plt.xlabel(\"random_state\")\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "best lr: 0.633065, avg: 0.608627\n",
      "best svm: 0.635753, avg: 0.608504\n",
      "best gnb: 0.654570, avg: 0.624267\n",
      "best bb: 0.645161, avg: 0.627566\n"
     ]
    }
   ],
   "source": [
    "print(\"best lr: %f, avg: %f\" % (max(lr_test_accuracy),(sum(lr_test_accuracy)/len(lr_test_accuracy))))\n",
    "print(\"best svm: %f, avg: %f\" % (max(svm_test_accuracy),(sum(svm_test_accuracy)/len(svm_test_accuracy))))\n",
    "print(\"best gnb: %f, avg: %f\" % (max(gnb_test_accuracy),(sum(gnb_test_accuracy)/len(gnb_test_accuracy))))\n",
    "print(\"best bb: %f, avg: %f\" % (max(bnb_test_accuracy),(sum(bnb_test_accuracy)/len(bnb_test_accuracy))))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.6182795698924731,\n",
       " 0.6196236559139785,\n",
       " 0.6021505376344086,\n",
       " 0.6075268817204301,\n",
       " 0.5873655913978495,\n",
       " 0.5940860215053764,\n",
       " 0.6236559139784946,\n",
       " 0.635752688172043,\n",
       " 0.6196236559139785,\n",
       " 0.5846774193548387,\n",
       " 0.6008064516129032]"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "svm_test_accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.6406396989651929,\n",
       " 0.6199435559736595,\n",
       " 0.6077140169332079,\n",
       " 0.6180620884289746,\n",
       " 0.632173095014111,\n",
       " 0.6142991533396049,\n",
       " 0.625587958607714,\n",
       " 0.6312323612417686,\n",
       " 0.6180620884289746]"
      ]
     },
     "execution_count": 89,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lr_test_fscore"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.6406396989651929,\n",
       " 0.6199435559736595,\n",
       " 0.6077140169332079,\n",
       " 0.6180620884289746,\n",
       " 0.632173095014111,\n",
       " 0.6142991533396049,\n",
       " 0.625587958607714,\n",
       " 0.6312323612417686,\n",
       " 0.6180620884289746]"
      ]
     },
     "execution_count": 88,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lr_test_recall"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.31586021505376344]"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "svm_test_accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.3333333333333333,\n",
       " 1.0,\n",
       " 0.6666666666666666,\n",
       " 1.0,\n",
       " 0.5,\n",
       " 1.0,\n",
       " 0.8333333333333334,\n",
       " 0.5,\n",
       " 0.6666666666666666,\n",
       " 1.0,\n",
       " 0.5]"
      ]
     },
     "execution_count": 108,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lr_test_accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.3333333333333333,\n",
       " 0.8333333333333334,\n",
       " 0.6666666666666666,\n",
       " 0.8333333333333334,\n",
       " 0.6666666666666666,\n",
       " 0.8333333333333334,\n",
       " 0.8333333333333334,\n",
       " 0.6666666666666666,\n",
       " 0.8333333333333334,\n",
       " 0.8333333333333334,\n",
       " 0.5]"
      ]
     },
     "execution_count": 109,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gnb_test_accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.3333333333333333,\n",
       " 0.8333333333333334,\n",
       " 0.6666666666666666,\n",
       " 1.0,\n",
       " 0.5,\n",
       " 0.6666666666666666,\n",
       " 0.6666666666666666,\n",
       " 0.3333333333333333,\n",
       " 0.5,\n",
       " 0.6666666666666666,\n",
       " 0.6666666666666666]"
      ]
     },
     "execution_count": 110,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bnb_test_accuracy"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
