{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import jieba  # NOTE(review): imported but never used anywhere in this notebook\n",
    "import warnings\n",
    "# Suppresses ALL warnings, including deprecation notices; consider narrowing the filter.\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 读取新闻数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>category</th>\n",
       "      <th>content</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>宇航学-报-JOURNAL-OF-ASTRONAUTICS-1999-卷-期-vol20-N...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>航空工程-维修-AVIATION-ENGINEERING-MAINTENANCE1999-期...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>宇航学-报-JOURNAL-OF-ASTRONAUTICS-1999-卷-期-vol20-N...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>航空工程-维修-AVIATION-ENGINEERING-MAINTENANCE1999-期...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>宇航学-报-JOURNAL-OF-ASTRONAUTICS-1999-卷-期-vol20-N...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    category                                            content\n",
       "0  C11-Space  宇航学-报-JOURNAL-OF-ASTRONAUTICS-1999-卷-期-vol20-N...\n",
       "1  C11-Space  航空工程-维修-AVIATION-ENGINEERING-MAINTENANCE1999-期...\n",
       "2  C11-Space  宇航学-报-JOURNAL-OF-ASTRONAUTICS-1999-卷-期-vol20-N...\n",
       "3  C11-Space  航空工程-维修-AVIATION-ENGINEERING-MAINTENANCE1999-期...\n",
       "4  C11-Space  宇航学-报-JOURNAL-OF-ASTRONAUTICS-1999-卷-期-vol20-N..."
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the pre-cleaned corpus; the CSV has no header row, so column names are supplied.\n",
    "df_news = pd.read_csv('data/corpus_clean.csv',names=['category','content'],encoding='utf-8')\n",
    "# Drop rows with a missing category or content so downstream splitting never sees NaN.\n",
    "df_news = df_news.dropna()\n",
    "df_news.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>category</th>\n",
       "      <th>content</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>[宇航学, 报, JOURNAL, OF, ASTRONAUTICS, 1999, 卷, 期...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>[航空工程, 维修, AVIATION, ENGINEERING, MAINTENANCE1...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>[宇航学, 报, JOURNAL, OF, ASTRONAUTICS, 1999, 卷, 期...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>[航空工程, 维修, AVIATION, ENGINEERING, MAINTENANCE1...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>C11-Space</td>\n",
       "      <td>[宇航学, 报, JOURNAL, OF, ASTRONAUTICS, 1999, 卷, 期...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    category                                            content\n",
       "0  C11-Space  [宇航学, 报, JOURNAL, OF, ASTRONAUTICS, 1999, 卷, 期...\n",
       "1  C11-Space  [航空工程, 维修, AVIATION, ENGINEERING, MAINTENANCE1...\n",
       "2  C11-Space  [宇航学, 报, JOURNAL, OF, ASTRONAUTICS, 1999, 卷, 期...\n",
       "3  C11-Space  [航空工程, 维修, AVIATION, ENGINEERING, MAINTENANCE1...\n",
       "4  C11-Space  [宇航学, 报, JOURNAL, OF, ASTRONAUTICS, 1999, 卷, 期..."
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Each document is stored as one '-'-joined string; split it back into a token list.\n",
    "# Vectorized equivalent of looping over rows and calling str.split('-') on each.\n",
    "df_news.content = df_news.content.str.split('-')\n",
    "df_news.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(19636, 2)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# (n_rows, n_cols) after dropna — sanity check on corpus size\n",
    "df_news.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 划分训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>content</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>category</th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>C11-Space</th>\n",
       "      <td>1282</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C15-Energy</th>\n",
       "      <td>65</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C16-Electronics</th>\n",
       "      <td>55</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C17-Communication</th>\n",
       "      <td>52</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C19-Computer</th>\n",
       "      <td>2714</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C23-Mine</th>\n",
       "      <td>67</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C29-Transport</th>\n",
       "      <td>116</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C3-Art</th>\n",
       "      <td>1482</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C31-Enviornment</th>\n",
       "      <td>2435</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C32-Agriculture</th>\n",
       "      <td>2043</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C34-Economy</th>\n",
       "      <td>3201</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C35-Law</th>\n",
       "      <td>103</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C36-Medical</th>\n",
       "      <td>104</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C37-Military</th>\n",
       "      <td>150</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C38-Politics</th>\n",
       "      <td>2050</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C39-Sports</th>\n",
       "      <td>2507</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C4-Literature</th>\n",
       "      <td>67</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C5-Education</th>\n",
       "      <td>120</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C6-Philosophy</th>\n",
       "      <td>89</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>C7-History</th>\n",
       "      <td>934</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                   content\n",
       "category                  \n",
       "C11-Space             1282\n",
       "C15-Energy              65\n",
       "C16-Electronics         55\n",
       "C17-Communication       52\n",
       "C19-Computer          2714\n",
       "C23-Mine                67\n",
       "C29-Transport          116\n",
       "C3-Art                1482\n",
       "C31-Enviornment       2435\n",
       "C32-Agriculture       2043\n",
       "C34-Economy           3201\n",
       "C35-Law                103\n",
       "C36-Medical            104\n",
       "C37-Military           150\n",
       "C38-Politics          2050\n",
       "C39-Sports            2507\n",
       "C4-Literature           67\n",
       "C5-Education           120\n",
       "C6-Philosophy           89\n",
       "C7-History             934"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "# Inspect class balance before splitting. (The split below uses test_size=0.2,\n",
    "# i.e. 80% train / 20% test, applied per category.)\n",
    "df_news.groupby('category').count()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Encode category names as integer labels --------------------------------\n",
    "cat=df_news['category']\n",
    "cat=cat.drop_duplicates().values.tolist()\n",
    "def convert_y_lable(catogory):\n",
    "    \"\"\"Map each category name to an integer label, preserving first-seen order.\"\"\"\n",
    "    return {c: i for i, c in enumerate(catogory)}\n",
    "label_mapping=convert_y_lable(cat)\n",
    "df_news['category']= df_news['category'].map(label_mapping)\n",
    "# --- Group the rows by the (now numeric) category ----------------------------\n",
    "cat=df_news['category']\n",
    "cat=cat.drop_duplicates().values.tolist()\n",
    "def splitDatabycat(datasource,cat):\n",
    "    \"\"\"Return one sub-DataFrame per label in `cat`, keeping original row order.\"\"\"\n",
    "    return [datasource.loc[datasource['category'] == label] for label in cat]\n",
    "data=splitDatabycat(df_news,cat)\n",
    "# --- Per-category 80/20 train/test split -------------------------------------\n",
    "def train_test_split_data(datasource):\n",
    "    \"\"\"Split each per-category frame into train/test parts (80/20).\n",
    "\n",
    "    NOTE: shuffle=False keeps the original row order, so random_state has no\n",
    "    effect here; the split is simply the first 80% vs the last 20% of each class.\n",
    "    \"\"\"\n",
    "    data_x_train=[]\n",
    "    data_x_test=[]\n",
    "    data_y_train=[]\n",
    "    data_y_test=[]\n",
    "    for dataiter in datasource:\n",
    "        x_train,x_test,y_train,y_test = train_test_split(dataiter['content'],dataiter['category'],test_size=0.2,random_state = 1,shuffle=False)\n",
    "        data_x_train.append(x_train)\n",
    "        data_x_test.append(x_test)\n",
    "        data_y_train.append(y_train)\n",
    "        data_y_test.append(y_test)\n",
    "    return data_x_train,data_x_test,data_y_train,data_y_test\n",
    "x_train,x_test,y_train,y_test = train_test_split_data(data)\n",
    "# Test set: concatenate the per-category test slices into single pandas objects.\n",
    "data_x_test=pd.concat(x_test)\n",
    "data_y_test=pd.concat(y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_train_split_data(x_train_source,y_train_source,test_size=0.1):\n",
    "    \"\"\"Sub-sample the per-category training lists and concatenate the result.\n",
    "\n",
    "    NOTE(review): despite its name, `test_size` is the FRACTION OF EACH CLASS\n",
    "    TO KEEP for training. It is inverted below before being handed to\n",
    "    sklearn's train_test_split, whose 'train' part is what gets returned.\n",
    "    Returns (x_train_concat, y_train_concat) as pandas objects.\n",
    "    \"\"\"\n",
    "    test_size=1-test_size  # now the fraction to DISCARD\n",
    "    data_x_train=[]\n",
    "    data_y_train=[]\n",
    "    if test_size<0.03:\n",
    "        # Keeping (almost) everything: skip splitting entirely.\n",
    "        return pd.concat(x_train_source),pd.concat(y_train_source)\n",
    "    else:\n",
    "        for i in range(len(x_train_source)):\n",
    "            # shuffle=False -> deterministic head slice; random_state is inert here.\n",
    "            x_train,x_test,y_train,y_test = train_test_split(x_train_source[i],y_train_source[i],test_size=test_size,random_state = 1,shuffle=False)\n",
    "            data_x_train.append(x_train)\n",
    "            data_y_train.append(y_train)\n",
    "        return pd.concat(data_x_train),pd.concat(data_y_train)\n",
    "# Build the training set: keep 4% of each class.\n",
    "data_x_train,data_y_train=train_train_split_data(x_train,y_train,0.04)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 使用one-hot词袋模型生成向量并用算法进行分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "def words_array_linesepspace(datasource):\n",
    "    \"\"\"Join each token list into one space-separated string (vectorizer input).\"\"\"\n",
    "    return [' '.join(tokens) for tokens in datasource]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.metrics import precision_score,recall_score,f1_score,accuracy_score\n",
    "def cal_score(y_true,y_pred):\n",
    "    \"\"\"Return [macro-precision, macro-recall, macro-F1, accuracy] as a list.\"\"\"\n",
    "    return [\n",
    "        precision_score(y_true, y_pred, average='macro'),\n",
    "        recall_score(y_true, y_pred, average='macro'),\n",
    "        f1_score(y_true, y_pred, average='macro'),\n",
    "        accuracy_score(y_true, y_pred),\n",
    "    ]\n",
    "# Test documents as space-joined strings; one-hot (count) features capped at 4000 words.\n",
    "wordstest=words_array_linesepspace(data_x_test)\n",
    "vec = CountVectorizer(analyzer='word', max_features=4000,  lowercase = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def NBtest(percent,x_train_source=x_train,y_train_source=y_train,x_test_source=wordstest,y_test_source=data_y_test,vectorizer=vec):\n",
    "    \"\"\"Train Multinomial Naive Bayes on a `percent` sub-sample of each class\n",
    "    and score it on the held-out test set.\n",
    "\n",
    "    Returns ([precision, recall, f1, accuracy, n_train_docs], y_pred).\n",
    "    \"\"\"\n",
    "    sub_x, sub_y = train_train_split_data(x_train_source, y_train_source, percent)\n",
    "    train_docs = words_array_linesepspace(sub_x)\n",
    "    vectorizer.fit(train_docs)\n",
    "    nb_clf = MultinomialNB()\n",
    "    nb_clf.fit(vectorizer.transform(train_docs), sub_y)\n",
    "    y_pred = nb_clf.predict(vectorizer.transform(x_test_source))\n",
    "    score = cal_score(y_test_source, y_pred)\n",
    "    score.append(len(train_docs))\n",
    "    return score, y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.svm import SVC\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "def svmtest(percent,x_train_source=x_train,y_train_source=y_train,x_test_source=wordstest,y_test_source=data_y_test,vectorizer=vec):\n",
    "    \"\"\"Train a class-weighted SVC on a `percent` sub-sample of each class and\n",
    "    score it on the held-out test set.\n",
    "\n",
    "    Returns ([precision, recall, f1, accuracy, n_train_docs], y_pred).\n",
    "    \"\"\"\n",
    "    scaler = StandardScaler()\n",
    "    data_x_train,data_y_train=train_train_split_data(x_train_source,y_train_source,percent)\n",
    "    wordstrain=words_array_linesepspace(data_x_train)\n",
    "    vectorizer.fit(wordstrain)\n",
    "    svc= SVC(class_weight='balanced')\n",
    "    svc.fit(scaler.fit_transform(vectorizer.transform(wordstrain).toarray()), data_y_train)\n",
    "    # BUG FIX: the original called scaler.fit_transform on the TEST features,\n",
    "    # re-fitting the scaler on test-set statistics. Use transform() so the test\n",
    "    # set is scaled with the mean/std learned from the training data.\n",
    "    y_pred=svc.predict(scaler.transform(vectorizer.transform(x_test_source).toarray()))\n",
    "    score=cal_score(y_test_source,y_pred)\n",
    "    score.append(len(wordstrain))\n",
    "    return score,y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 使用TF-IDF建立词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "# TF-IDF alternative to the count vectorizer above; same 4000-feature vocabulary cap.\n",
    "vectorizer = TfidfVectorizer(analyzer='word', max_features=4000,  lowercase = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "   "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 基于规则的算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.utils import check_array"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def rulebased_train(x_train_source,y_train_source,catogory=cat):\n",
    "    \"\"\"Build a {label: word_set} rule table from the training documents.\n",
    "\n",
    "    Words seen in more than one category are moved to 'word_exclude' and never\n",
    "    assigned to any label; each remaining word belongs to exactly one category.\n",
    "    \"\"\"\n",
    "    word_cat_set_dic={'word_exclude':set()}\n",
    "    for catiter in catogory:\n",
    "        word_cat_set_dic[catiter]=set()\n",
    "    data_x_train=np.asarray(x_train_source, order='C')\n",
    "    data_y_train=np.asarray(y_train_source, order='C')\n",
    "    for index in range(len(data_x_train)):\n",
    "        # De-duplicate the document's tokens, then drop globally excluded words.\n",
    "        lineset=set({}.fromkeys(data_x_train[index]).keys())\n",
    "        lineset=lineset-word_cat_set_dic['word_exclude']\n",
    "        y_label=data_y_train[index]\n",
    "        insecset=set()\n",
    "        # Any word already claimed by a DIFFERENT category is ambiguous:\n",
    "        # remove it from this document and collect it in insecset.\n",
    "        for inseciter in word_cat_set_dic.keys():\n",
    "            if inseciter!='word_exclude' and inseciter!=y_label:\n",
    "                tempset=lineset&word_cat_set_dic[inseciter]\n",
    "                lineset=lineset-tempset\n",
    "                insecset.update(tempset)\n",
    "        # Strip the ambiguous words from every other category as well.\n",
    "        for inseciter in word_cat_set_dic.keys():\n",
    "            if inseciter!='word_exclude' and inseciter!=y_label:\n",
    "                word_cat_set_dic[inseciter]=word_cat_set_dic[inseciter]-insecset\n",
    "        word_cat_set_dic['word_exclude'].update(insecset)\n",
    "        word_cat_set_dic[y_label].update(lineset)\n",
    "    return word_cat_set_dic\n",
    "def rulebased_predict(rule,x_test_source,y_test_source):\n",
    "    \"\"\"Predict by maximum word-overlap with each category's rule set.\n",
    "\n",
    "    `y_test_source` is accepted but unused. A document overlapping no category\n",
    "    at all keeps the fallback label 20 (one past the real labels 0..19).\n",
    "    \"\"\"\n",
    "    x_test=np.asarray(x_test_source, order='C')\n",
    "    y_pred=[]\n",
    "    for xiter in x_test:\n",
    "        xset=set({}.fromkeys(xiter).keys())\n",
    "        y=20  # fallback label when no category's word set matches\n",
    "        insecnum=0\n",
    "        for inseciter in rule.keys():\n",
    "            if inseciter!='word_exclude':\n",
    "                insecset=rule[inseciter]&xset\n",
    "                inseclen=len(insecset)\n",
    "                # Strict '>' keeps the earliest category on overlap ties.\n",
    "                if inseclen>insecnum:\n",
    "                    y=inseciter\n",
    "                    insecnum=inseclen\n",
    "        y_pred.append(y)\n",
    "    return y_pred\n",
    "def rulebased_test(percent,x_train_source=x_train,y_train_source=y_train,x_test_source=data_x_test,y_test_source=data_y_test):\n",
    "    \"\"\"Train the rule table on a `percent` sub-sample and score it on the test set.\n",
    "\n",
    "    Returns ([precision, recall, f1, accuracy, n_train_docs], y_pred).\n",
    "    \"\"\"\n",
    "    data_x_train,data_y_train=train_train_split_data(x_train_source,y_train_source,percent)\n",
    "    ruledic=rulebased_train(x_train_source=data_x_train,y_train_source=data_y_train)\n",
    "    y_pred=rulebased_predict(rule=ruledic,x_test_source=x_test_source,y_test_source=y_test_source)\n",
    "    score=cal_score(y_test_source,y_pred)\n",
    "    score.append(len(data_x_train))\n",
    "    return score,y_pred\n",
    "def ensemble_predict(y_predlist):\n",
    "    \"\"\"Column-wise majority vote over the classifiers' prediction lists.\n",
    "\n",
    "    Ties are broken in favour of the classifier appearing earlier in\n",
    "    `y_predlist`; an empty input yields an empty prediction list.\n",
    "    \"\"\"\n",
    "    y_pred=[]\n",
    "    if len(y_predlist)>0:\n",
    "        for j in range(len(y_predlist[0])):\n",
    "            # Tally the votes for sample j, preserving first-seen label order.\n",
    "            tally={}\n",
    "            for preds in y_predlist:\n",
    "                label=preds[j]\n",
    "                tally[label]=tally.get(label,0)+1\n",
    "            winner=0\n",
    "            top=0\n",
    "            for label,count in tally.items():\n",
    "                if count>top:\n",
    "                    winner=label\n",
    "                    top=count\n",
    "            y_pred.append(winner)\n",
    "    return y_pred\n",
    "def ensemble_test(y_predlist,y_test_source=data_y_test):\n",
    "    \"\"\"Score the majority-vote ensemble of the given prediction lists.\n",
    "\n",
    "    Returns ([precision, recall, f1, accuracy, n_test_samples], y_pred).\n",
    "    \"\"\"\n",
    "    y_pred=ensemble_predict(y_predlist)\n",
    "    score=cal_score(y_test_source,y_pred)\n",
    "    score.append(len(y_pred))\n",
    "    return score,y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = pd.DataFrame(columns =('feature_model','algoritm','precision','recall','f1_measure','accuracy','train_sample_num'))\n",
    "def record_result(result,score,algoritm,feature_model):\n",
    "    \"\"\"Return `result` extended with one evaluation row (the input frame is not mutated).\"\"\"\n",
    "    row = pd.DataFrame({'feature_model':[feature_model],'algoritm':[algoritm],'precision':[score[0]],'recall':[score[1]],'f1_measure':[score[2]],'accuracy':[score[3]],'train_sample_num':[score[4]]})\n",
    "    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; pd.concat replaces it.\n",
    "    return pd.concat([result, row], ignore_index=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.03 end!\n",
      "0.13 end!\n",
      "0.23 end!\n",
      "0.33000000000000007 end!\n",
      "0.43000000000000005 end!\n",
      "0.53 end!\n",
      "0.6300000000000001 end!\n",
      "0.7300000000000001 end!\n",
      "0.8300000000000001 end!\n",
      "0.93 end!\n"
     ]
    }
   ],
   "source": [
    "# Sweep the training fraction from 3% to 93% in 10% steps; at each fraction train\n",
    "# five models, record their scores, and majority-vote their predictions.\n",
    "for per in np.arange(0.03,1.03,0.1):\n",
    "    y_pred=[]\n",
    "    \n",
    "    score,y_predi=NBtest(percent=per)\n",
    "    y_pred.append(y_predi)\n",
    "    result=record_result(result,score,'Naive Bayesian','one-hot')\n",
    "    \n",
    "    score,y_predi=NBtest(percent=per,vectorizer=vectorizer)\n",
    "    y_pred.append(y_predi)\n",
    "    result=record_result(result,score,'Naive Bayesian','TF-IDF')\n",
    "    \n",
    "    score,y_predi=svmtest(percent=per)\n",
    "    y_pred.append(y_predi)\n",
    "    result=record_result(result,score,'svm','one-hot')\n",
    "    \n",
    "    score,y_predi=svmtest(percent=per,vectorizer=vectorizer)\n",
    "    y_pred.append(y_predi)\n",
    "    result=record_result(result,score,'svm','TF-IDF')\n",
    "    \n",
    "    score,y_predi=rulebased_test(percent=per)\n",
    "    y_pred.append(y_predi)\n",
    "    result=record_result(result,score,'rulebased','-')\n",
    "    \n",
    "    # Ensemble combines the five prediction lists collected above.\n",
    "    score,y_predi=ensemble_test(y_pred)\n",
    "    result=record_result(result,score,'ensemble','-')\n",
    "    print(per,'end!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the benchmark table (Excel output requires an engine such as openpyxl).\n",
    "result.to_excel('result.xlsx', sheet_name='result') \n",
    "result.to_csv('result.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 234,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'是'}\n",
      "{'是', '我', '我是', 'Google'}\n",
      "{'我是', 'Google'}\n",
      "{'是', '我'}\n"
     ]
    }
   ],
   "source": [
    "# Scratch cell: quick demo of Python set operators (difference, union,\n",
    "# intersection, symmetric difference); not used by the analysis above.\n",
    "a = set(('Google','我是','是'))\n",
    "b = set(('Google','我是','我' ))\n",
    "print(a-b)\n",
    "print(a| b)\n",
    "print(a& b)\n",
    "print(a^ b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 235,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n"
     ]
    }
   ],
   "source": [
    "# Scratch cell: confirms string inequality comparison; not used by the analysis.\n",
    "a='abc'\n",
    "b='可用'\n",
    "print(a!=b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "range(1, 3)\n"
     ]
    }
   ],
   "source": [
    "# Scratch cell. Renamed the variable (was `list`), which shadowed the built-in type.\n",
    "nums = range(1, 3, 1)\n",
    "print(nums)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
