{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Packages\n",
    "import nltk \n",
    "import pandas as pd\n",
    "import glob\n",
    "import re\n",
    "from nltk.corpus import stopwords\n",
    "from nltk.tag import pos_tag\n",
    "from collections import Counter\n",
    "from nltk import sent_tokenize\n",
    "# nltk.download('wordnet')\n",
    "# nltk.download('stopwords')\n",
    "# nltk.download('averaged_perceptron_tagger')\n",
    "    \n",
    "class features:\n",
    "    \"\"\"Readability feature extractor for article text.\n",
    "\n",
    "    Builds one numeric feature row per article (sentence/word statistics,\n",
    "    syllable buckets, Dale-Chall uncommon-word count, and Penn Treebank\n",
    "    POS-tag counts) for downstream classification.\n",
    "    \"\"\"\n",
    "    # Dale-Chall easy-word list, loaded once at class-definition time.\n",
    "    # NOTE(review): read_csv on a .txt file -- presumably one word per line\n",
    "    # with a header row; confirm the actual file layout.\n",
    "    Dale_Chall_List = pd.read_csv(\"./data/Dale Chall List.txt\")\n",
    "    # Output column names of the feature DataFrame (class label first).\n",
    "    columns = ['Class','Total Number Of Sentences', 'Average Sentence Length', 'Average Word Length', \\\n",
    "               'Number of Uncommon Words', 'Number of Unique Words', 'Words with 1 to 3 syllables', \\\n",
    "               \"Words with 4 syllables\", \"Words with 5 syllables\", \"Words with 6 syllables\", \\\n",
    "               \"Words with more than 7 syllables\", \"Average number of syllables\"]\n",
    "    # Penn Treebank POS tags whose per-text counts become features.\n",
    "    ptcol = ['CC', 'CD', 'NNS', 'VBP', 'NN', 'RB', 'MD', 'VB', 'VBZ', 'VBD', 'VBG', 'IN', 'JJ',  'FW', 'WDT', \\\n",
    "     'RBR', 'PRP$', 'VBN', 'PRP', 'DT', 'JJS', 'RP', 'JJR', 'WRB',  'WP', 'NNP', 'WP$',\\\n",
    "     'PDT', 'RBS', \"''\", 'NNPS', 'SYM', 'EX','TO','UH']\n",
    "    # Sort so the POS columns have a stable, reproducible order.\n",
    "    ptcol.sort()\n",
    "    columns.extend(ptcol)\n",
    "\n",
    "#Preprocessing\n",
    "    def preprocessing(self, text1):\n",
    "        \"\"\"Lowercase, strip non-letters, tokenize, and drop English stopwords.\n",
    "\n",
    "        text1 -- raw article text.\n",
    "        Returns the list of remaining lowercase word tokens.\n",
    "        \"\"\"\n",
    "        # Replace every non-alphabetic character with a space so punctuation\n",
    "        # and digits never glue words together.\n",
    "        text1 = re.sub('[^a-zA-Z]', ' ', text1)\n",
    "        # Build the stopword set ONCE; the original rebuilt it for every\n",
    "        # single word inside the comprehension (accidental O(n*m)).\n",
    "        stop_words = set(stopwords.words('english'))\n",
    "        return [word for word in text1.lower().split() if word not in stop_words]\n",
    " \n",
    "#Feature extraction\n",
    "    def avg_sentence_length(self, text, num_sents):\n",
    "        \"\"\"Average number of (stopword-filtered) tokens per sentence.\n",
    "\n",
    "        text      -- list of preprocessed tokens for the article\n",
    "        num_sents -- total sentence count of the article\n",
    "        Returns 0.0 when there are no sentences (the original raised\n",
    "        ZeroDivisionError).\n",
    "        \"\"\"\n",
    "        # True division already yields a float in Python 3, so the old\n",
    "        # float() wrapper was redundant.\n",
    "        return len(text) / num_sents if num_sents else 0.0\n",
    " \n",
    "    def avg_word_length(self, text):\n",
    "        \"\"\"Average character length of the tokens; 0.0 for an empty list.\n",
    "\n",
    "        Fixes two defects: the original performed the division inside the\n",
    "        loop on every iteration, and raised UnboundLocalError on empty input\n",
    "        because the accumulator was only bound inside the loop body.\n",
    "        \"\"\"\n",
    "        if not text:\n",
    "            return 0.0\n",
    "        return sum(len(w) for w in text) / len(text)\n",
    " \n",
    "    def syllable_count_single_word(self, word):\n",
    "        \"\"\"Heuristic syllable count for one word.\n",
    "\n",
    "        Counts vowel groups (a e i o u y), discounts a trailing 'e', and\n",
    "        reports at least one syllable for any non-empty word. Returns 0 for\n",
    "        an empty string (the original raised IndexError on word[0]).\n",
    "        \"\"\"\n",
    "        word = word.lower()\n",
    "        if not word:\n",
    "            return 0\n",
    "        vowels = \"aeiouy\"\n",
    "        # Count the first letter, then every vowel that starts a new vowel group.\n",
    "        count = 1 if word[0] in vowels else 0\n",
    "        for index in range(1, len(word)):\n",
    "            if word[index] in vowels and word[index - 1] not in vowels:\n",
    "                count += 1\n",
    "        # A final 'e' is usually silent and does not add a syllable.\n",
    "        if word.endswith(\"e\"):\n",
    "            count -= 1\n",
    "        # Every non-empty word has at least one syllable.\n",
    "        return max(count, 1)\n",
    " \n",
    "    def avg_syllables(self, text):\n",
    "        \"\"\"Mean syllable count per token; 0.0 for an empty token list.\n",
    "\n",
    "        The original raised ZeroDivisionError when `text` was empty.\n",
    "        \"\"\"\n",
    "        if not text:\n",
    "            return 0.0\n",
    "        total = sum(self.syllable_count_single_word(w) for w in text)\n",
    "        return total / len(text)\n",
    " \n",
    "    def pos_count_in_list(self, list1):\n",
    "        \"\"\"Count, per tag in `self.ptcol`, how many distinct words carry that POS tag.\n",
    "\n",
    "        Returns the counts in the same (sorted) order as `self.ptcol`.\n",
    "        \"\"\"\n",
    "        tagged = pos_tag(list1)\n",
    "        # NOTE(review): dict() keeps only the LAST tag seen for each distinct\n",
    "        # word, so repeated words contribute a single count -- confirm this\n",
    "        # collapsing is intentional (the trained model depends on it).\n",
    "        tag_counts = Counter(dict(tagged).values())\n",
    "        return [tag_counts.get(tag, 0) for tag in self.ptcol]\n",
    " \n",
    "    def dif_words(self, text):\n",
    "        \"\"\"Vocabulary size: the number of distinct tokens in `text`.\"\"\"\n",
    "        # len(set(x)) == len(Counter(x)) -- same result, no count bookkeeping.\n",
    "        return len(set(text))\n",
    " \n",
    "    def freq_syl(self, text):\n",
    "        \"\"\"Bucket the UNIQUE words of `text` by syllable count.\n",
    "\n",
    "        Returns [1-3 syllables, 4, 5, 6, 7-or-more] counts, matching the\n",
    "        'Words with ...' feature columns.\n",
    "\n",
    "        Bug fix: the old condition `x > 1` dropped every 1-syllable word into\n",
    "        the 'more than 7 syllables' bucket. Feature files generated with the\n",
    "        old code will differ and should be regenerated before retraining.\n",
    "        \"\"\"\n",
    "        count = [0, 0, 0, 0, 0]\n",
    "        for word in set(text):\n",
    "            x = self.syllable_count_single_word(word)\n",
    "            if 1 <= x <= 3:\n",
    "                count[0] += 1\n",
    "            elif x == 4:\n",
    "                count[1] += 1\n",
    "            elif x == 5:\n",
    "                count[2] += 1\n",
    "            elif x == 6:\n",
    "                count[3] += 1\n",
    "            else:\n",
    "                count[4] += 1\n",
    "        return count\n",
    " \n",
    "    def not_in_dale_chall(self, text):\n",
    "        \"\"\"Count tokens that are NOT on the Dale-Chall easy-word list.\n",
    "\n",
    "        Bug fix: `w not in DataFrame` tests COLUMN LABELS, not cell values,\n",
    "        so the original flagged nearly every token as uncommon. Membership\n",
    "        is now tested against the set of words in the list's first column.\n",
    "        \"\"\"\n",
    "        # Assumes the word list is one word per row in the first column --\n",
    "        # TODO confirm against './data/Dale Chall List.txt'.\n",
    "        easy_words = set(self.Dale_Chall_List.iloc[:, 0].astype(str).str.strip().str.lower())\n",
    "        return sum(1 for w in text if w not in easy_words)\n",
    "    \n",
    "#     def total_polysyllables(self):\n",
    "#         \"\"\"多音节总数\"\"\"\n",
    "#         return\n",
    "    \n",
    "#     def total_words(self):\n",
    "#         \"\"\"总词数\"\"\"\n",
    "#         return len(nltk.word_tokenize(self))\n",
    "    \n",
    "    \n",
    "\n",
    "#CREATING THE DATAFRAMES\n",
    "    def fextr(self, txt_article, txt_class):\n",
    "        \"\"\"Build one feature row for a single article.\n",
    "\n",
    "        txt_article -- raw article text\n",
    "        txt_class   -- class label to place first in the row\n",
    "        Returns a list ordered exactly like `self.columns`.\n",
    "        \"\"\"\n",
    "        tokens = self.preprocessing(txt_article)\n",
    "        num_sents = len(sent_tokenize(txt_article))  # total sentence count\n",
    "        row = [\n",
    "            txt_class,\n",
    "            num_sents,\n",
    "            self.avg_sentence_length(tokens, num_sents),\n",
    "            self.avg_word_length(tokens),\n",
    "            self.not_in_dale_chall(tokens),\n",
    "            self.dif_words(tokens),\n",
    "        ]\n",
    "        row.extend(self.freq_syl(tokens))\n",
    "        row.append(self.avg_syllables(tokens))\n",
    "        row.extend(self.pos_count_in_list(tokens))\n",
    "        return row\n",
    "    \n",
    "    def create_dataframe(self, txt):\n",
    "        \"\"\"Extract features for every article of a corpus.\n",
    "\n",
    "        txt -- DataFrame with an 'article' text column and a 'class' label column.\n",
    "        Returns a DataFrame with one feature row per article, using\n",
    "        `self.columns` as the header.\n",
    "        \"\"\"\n",
    "        rows = []\n",
    "        for index, record in txt.iterrows():\n",
    "            rows.append(self.fextr(record[\"article\"], record[\"class\"]))\n",
    "            # Progress report every 1000 articles.\n",
    "            if index % 1000 == 0:\n",
    "                print(\"当前已处理文本数量为：\", index)\n",
    "        return pd.DataFrame(rows, columns=self.columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "       Total Number Of Sentences  Average Sentence Length  \\\n",
      "25132                         13                 4.846154   \n",
      "\n",
      "       Average Word Length  Number of Uncommon Words  Number of Unique Words  \\\n",
      "25132             4.587302                        63                      36   \n",
      "\n",
      "       Words with 1 to 3 syllables  Words with 4 syllables  \\\n",
      "25132                           10                       0   \n",
      "\n",
      "       Words with 5 syllables  Words with 6 syllables  \\\n",
      "25132                       0                       0   \n",
      "\n",
      "       Words with more than 7 syllables  ...  VB  VBD  VBG  VBN  VBP  VBZ  \\\n",
      "25132                                26  ...   0    8    0    0    0    0   \n",
      "\n",
      "       WDT  WP  WP$  WRB  \n",
      "25132    0   0    0    0  \n",
      "\n",
      "[1 rows x 46 columns] ['middle']\n"
     ]
    }
   ],
   "source": [
    "## Packages\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Reading the precomputed feature dataset\n",
    "Dataframe_Final = pd.read_csv('./data/train_features.csv')\n",
    "\n",
    "# Splitting into features and class label\n",
    "X = Dataframe_Final.drop('Class', axis=1)\n",
    "y = Dataframe_Final['Class']\n",
    "\n",
    "# Train / test split (shuffle=False keeps the held-out rows reproducible)\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.21, random_state=0, shuffle=False)\n",
    "\n",
    "# Select one precomputed feature row to classify (row 25132)\n",
    "f2 = features()\n",
    "col = f2.columns[1:47]\n",
    "data = Dataframe_Final.iloc[25132:25133, 1:47]\n",
    "df = pd.DataFrame(data, columns=col)\n",
    "\n",
    "# Scaling features (fit on the training set only, to avoid leakage)\n",
    "sc = StandardScaler()\n",
    "X_train2 = sc.fit_transform(X_train)\n",
    "X_test2 = sc.transform(X_test)\n",
    "df2 = sc.transform(df)\n",
    "\n",
    "# Training the SVM classifier\n",
    "svclassifier = SVC(kernel='rbf', C=0.4, random_state=0, gamma=0.1)\n",
    "svclassifier.fit(X_train2, y_train)\n",
    "y_p = svclassifier.predict(X_test2)  # test-set predictions, kept for later evaluation\n",
    "\n",
    "# Predicting the class of the selected row\n",
    "pred = svclassifier.predict(df2)\n",
    "col2 = [\"Name of text\", \"Predicted class\"]\n",
    "print(data, pred)\n",
    "\n",
    "# Bug fix: `data2` was never defined (its assignment was commented out),\n",
    "# so building df3 raised NameError. Pair each true label of the selected\n",
    "# row(s) with its predicted class instead.\n",
    "data2 = list(zip(y.iloc[25132:25133], pred))\n",
    "df3 = pd.DataFrame(data2, columns=col2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Name of text</th>\n",
       "      <th>Predicted class</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "  Name of text Predicted class\n",
       "0         high            high"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py_torch",
   "language": "python",
   "name": "py_torch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
