{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-04-16T08:09:07.299726Z",
     "start_time": "2022-04-16T08:09:06.019735Z"
    },
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
     "import time  # NOTE(review): 'time' appears unused in this notebook — confirm before removing\n",
     "import pandas as pd\n",
     "from sklearn.feature_extraction.text import TfidfVectorizer\n",
     "from sklearn.model_selection import train_test_split\n",
     "import sys\n",
     "# Add the directory containing the custom helper module to the module search path\n",
     "sys.path.append('../lib/')\n",
     "from lib import text_classification_utils as utils"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.数据预处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 1.1清华数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\16287\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 2.851 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    }
   ],
   "source": [
    "data = utils.load_thucnews()\n",
    "THUCNews_data = pd.DataFrame(data, columns=[\"words\", \"sentiment\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "                                               words sentiment\n0  马晓旭 意外 受伤 让 国奥 警惕 无奈 大雨 格外 青睐 殷家 军 记者 傅亚雨 沈阳 报...        体育\n1  商瑞华 首战 复仇 心切 中国 玫瑰 要 用 美国 方式 攻克 瑞典 多曼来 了 瑞典 来 ...        体育\n2  冠军 球队 迎新 欢乐 派对 黄旭获 大奖 张军 赢 下 PK 赛 新浪 体育讯 月 日晚 ...        体育\n3  辽足 签约 危机 引 注册 难关 高层 威逼利诱 合同 笑里藏刀 新浪 体育讯 月 日 辽足...        体育\n4  揭秘 谢亚龙 被 带走 总局 电话 骗局 复制 南杨 轨迹 体坛周报 特约记者 张锐 北京 ...        体育",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>words</th>\n      <th>sentiment</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>马晓旭 意外 受伤 让 国奥 警惕 无奈 大雨 格外 青睐 殷家 军 记者 傅亚雨 沈阳 报...</td>\n      <td>体育</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>商瑞华 首战 复仇 心切 中国 玫瑰 要 用 美国 方式 攻克 瑞典 多曼来 了 瑞典 来 ...</td>\n      <td>体育</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>冠军 球队 迎新 欢乐 派对 黄旭获 大奖 张军 赢 下 PK 赛 新浪 体育讯 月 日晚 ...</td>\n      <td>体育</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>辽足 签约 危机 引 注册 难关 高层 威逼利诱 合同 笑里藏刀 新浪 体育讯 月 日 辽足...</td>\n      <td>体育</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>揭秘 谢亚龙 被 带走 总局 电话 骗局 复制 南杨 轨迹 体坛周报 特约记者 张锐 北京 ...</td>\n      <td>体育</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "THUCNews_data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "THUCNews_copy = THUCNews_data.copy()\n",
    "index_THUCNews = THUCNews_copy['words'].count()\n",
    "for i in range(int(index_THUCNews)):\n",
    "        res = 0\n",
    "        temp = THUCNews_copy['sentiment'][i]\n",
    "        if temp == '体育':\n",
    "            res = 1\n",
    "        elif temp =='娱乐':\n",
    "            res = 2\n",
    "        elif temp =='家具':\n",
    "            res = 3\n",
    "        elif temp =='房产':\n",
    "            res = 4\n",
    "        elif temp =='教育':\n",
    "            res = 5\n",
    "        elif temp =='时尚':\n",
    "            res = 6\n",
    "        elif temp =='时政':\n",
    "            res = 7\n",
    "        elif temp =='游戏':\n",
    "            res = 8\n",
    "        elif temp =='科技':\n",
    "            res = 8\n",
    "        else:\n",
    "            res = 10\n",
    "        THUCNews_copy['sentiment'][i] = res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "THUCNews_copy['sentiment'] = THUCNews_copy['sentiment'].astype('int')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "#TfidfVectorizer()函数\n",
    "#统计某训练文本中，某个词的出现次数\n",
    "THUC_vectorizer =TfidfVectorizer(token_pattern='\\[?\\w+\\]?', stop_words=utils.chinese_stopwords)\n",
    "THUCNews_words= THUC_vectorizer.fit_transform(THUCNews_copy[\"words\"])\n",
    "THUCNews_sentiment = THUCNews_copy[\"sentiment\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 1.2 IMDB数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "IMDB_data = utils.load_IMDBDatas()\n",
    "IMDB_data.columns = ['words','sentiment']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "                                                   words sentiment\n0      One of the other reviewers has mentioned that ...  positive\n1      A wonderful little production br br The filmin...  positive\n2      I thought this was a wonderful way to spend ti...  positive\n3      Basically there s a family where a little boy ...  negative\n4      Petter Mattei s Love in the Time of Money is a...  positive\n...                                                  ...       ...\n49995  I thought this movie did a down right good job...  positive\n49996  Bad plot bad dialogue bad acting idiotic direc...  negative\n49997  I am a Catholic taught in parochial elementary...  negative\n49998  I m going to have to disagree with the previou...  negative\n49999  No one expects the Star Trek movies to be high...  negative\n\n[50000 rows x 2 columns]",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>words</th>\n      <th>sentiment</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>One of the other reviewers has mentioned that ...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>A wonderful little production br br The filmin...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>I thought this was a wonderful way to spend ti...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>Basically there s a family where a little boy ...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>Petter Mattei s Love in the Time of Money is a...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>...</th>\n      <td>...</td>\n      <td>...</td>\n    </tr>\n    <tr>\n      <th>49995</th>\n      <td>I thought this movie did a down right good job...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>49996</th>\n      <td>Bad plot bad dialogue bad acting idiotic direc...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>49997</th>\n      <td>I am a Catholic taught in parochial elementary...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>49998</th>\n      <td>I m going to have to disagree with the previou...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>49999</th>\n      <td>No one expects the Star Trek movies to be high...</td>\n      <td>negative</td>\n    </tr>\n  </tbody>\n</table>\n<p>50000 rows × 2 columns</p>\n</div>"
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "IMDB_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "IMDB_copy = IMDB_data.copy()\n",
    "index_IMDB = IMDB_copy['words'].count()\n",
    "for i in range(int(index_IMDB)):\n",
    "        res = 0\n",
    "        temp = IMDB_copy['sentiment'][i]\n",
    "        if temp == 'positive':\n",
    "            res = 1\n",
    "        else:\n",
    "            res = 2\n",
    "        IMDB_copy['sentiment'][i] = res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "IMDB_copy['sentiment'] = IMDB_copy['sentiment'].astype('int')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\SoftWare\\anaconda3\\envs\\fnlp\\lib\\site-packages\\sklearn\\feature_extraction\\text.py:396: UserWarning: Your stop_words may be inconsistent with your preprocessing. Tokenizing the stop words generated tokens ['art', 'daren', 'lrb', 'lsb', 'mayn', 'oughtn', 'rrb', 'rsb', 'tween', 'twere', 'twill', 'twixt', 'twould', 'usedn', 'usen', 'vis', 'viser', 'visest'] not in stop_words.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "#TfidfVectorizer()函数\n",
    "#统计某训练文本中，某个词的出现次数\n",
    "IMDB_vectorizer =TfidfVectorizer(token_pattern='\\[?\\w+\\]?', stop_words=utils.english_stopwords)\n",
    "IMDB_words= IMDB_vectorizer.fit_transform(IMDB_copy[\"words\"])\n",
    "IMDB_sentiment = IMDB_copy[\"sentiment\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.数据划分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "THUCNews_train_words,THUCNews_test_words,THUCNews_train_sentiment,THUCNews_test_sentiment = train_test_split(THUCNews_words,THUCNews_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "IMDBData_train_words,IMDBData_test_words,IMDBData_train_sentiment,IMDBData_test_sentiment = train_test_split(IMDB_words,IMDB_sentiment)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.model_selection import cross_val_predict\n",
    "from sklearn.metrics import recall_score,precision_score,f1_score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 3.1逻辑回归"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn import  linear_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LogisticRegression()"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "###清华数据集\n",
    "lr_THUCNews = linear_model.LogisticRegression()\n",
    "lr_THUCNews.fit(THUCNews_train_words,THUCNews_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# average参数定义了该指标的计算方法，二分类时average参数默认是binary；多分类时，可选参数有micro、macro、weighted和samples。\n",
    "lr_THUCNews_pred = cross_val_predict(lr_THUCNews,THUCNews_test_words,THUCNews_test_sentiment)\n",
    "lr_precision_THUCNews = precision_score(THUCNews_test_sentiment,lr_THUCNews_pred,average='micro')\n",
    "lr_recall_THUCNews = recall_score(THUCNews_test_sentiment,lr_THUCNews_pred,average='micro')\n",
    "lr_f1_THUCNews = f1_score(THUCNews_test_sentiment,lr_THUCNews_pred,average='micro')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.902 0.902 0.902\n"
     ]
    }
   ],
   "source": [
    "print(lr_precision_THUCNews,lr_recall_THUCNews,lr_f1_THUCNews)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\SoftWare\\anaconda3\\envs\\fnlp\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:814: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "  n_iter_i = _check_optimize_result(\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LogisticRegression()"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### IMDB数据集\n",
    "lr_IMDBData = linear_model.LogisticRegression()\n",
    "lr_IMDBData.fit(IMDBData_train_words,IMDBData_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "lr_IMDBData_pred = cross_val_predict(lr_IMDBData,IMDBData_test_words,IMDBData_test_sentiment)\n",
    "lr_precision_IMDBData = precision_score(IMDBData_test_sentiment,lr_IMDBData_pred)\n",
    "lr_recall_IMDBData = recall_score(IMDBData_test_sentiment,lr_IMDBData_pred)\n",
    "lr_f1_IMDBData = f1_score(IMDBData_test_sentiment,lr_IMDBData_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8309859154929577 0.8822961890979257 0.8558727187646233\n"
     ]
    }
   ],
   "source": [
    "print(lr_precision_IMDBData,lr_recall_IMDBData,lr_f1_IMDBData)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 3.2贝叶斯模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.naive_bayes import MultinomialNB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MultinomialNB()"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 清华数据集\n",
    "bayes_THUCNews = MultinomialNB()\n",
    "bayes_THUCNews.fit(THUCNews_train_words,THUCNews_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# average参数定义了该指标的计算方法，二分类时average参数默认是binary；多分类时，可选参数有micro、macro、weighted和samples。\n",
    "bayes_THUCNews_pred = cross_val_predict(bayes_THUCNews,THUCNews_test_words,THUCNews_test_sentiment)\n",
    "bayes_precision_THUCNews = precision_score(THUCNews_test_sentiment,bayes_THUCNews_pred,average='micro')\n",
    "bayes_recall_THUCNews = recall_score(THUCNews_test_sentiment,bayes_THUCNews_pred,average='micro')\n",
    "bayes_f1_THUCNews = f1_score(THUCNews_test_sentiment,bayes_THUCNews_pred,average='micro')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8496 0.8496 0.8496\n"
     ]
    }
   ],
   "source": [
    "print(bayes_precision_THUCNews,bayes_recall_THUCNews,bayes_f1_THUCNews)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MultinomialNB()"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "### IMDB数据集\n",
    "bayes_IMDBData = MultinomialNB()\n",
    "bayes_IMDBData.fit(IMDBData_train_words,IMDBData_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "bayes_IMDBData_pred = cross_val_predict(bayes_IMDBData,IMDBData_test_words,IMDBData_test_sentiment)\n",
    "bayes_precision_IMDBData = precision_score(IMDBData_test_sentiment,bayes_IMDBData_pred)\n",
    "bayes_recall_IMDBData = recall_score(IMDBData_test_sentiment,bayes_IMDBData_pred)\n",
    "bayes_f1_IMDBData = f1_score(IMDBData_test_sentiment,bayes_IMDBData_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8547843665768194 0.8158867985206625 0.834882764294529\n"
     ]
    }
   ],
   "source": [
    "print(bayes_precision_IMDBData,bayes_recall_IMDBData,bayes_f1_IMDBData)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 3.3svm模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.svm import SVC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "SVC()"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 清华数据集\n",
    "svc_THUCNews =SVC(kernel = 'rbf')\n",
    "svc_THUCNews.fit(THUCNews_train_words,THUCNews_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# average参数定义了该指标的计算方法，二分类时average参数默认是binary；多分类时，可选参数有micro、macro、weighted和samples。\n",
    "svc_THUCNews_pred = cross_val_predict(svc_THUCNews,THUCNews_test_words,THUCNews_test_sentiment)\n",
    "svc_precision_THUCNews = precision_score(THUCNews_test_sentiment,svc_THUCNews_pred,average='micro')\n",
    "svc_recall_THUCNews = recall_score(THUCNews_test_sentiment,svc_THUCNews_pred,average='micro')\n",
    "svc_f1_THUCNews = f1_score(THUCNews_test_sentiment,svc_THUCNews_pred,average='micro')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8696 0.8696 0.8695999999999999\n"
     ]
    }
   ],
   "source": [
    "print(svc_precision_THUCNews,svc_recall_THUCNews,svc_f1_THUCNews)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "SVC()"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# IMDB数据集\n",
    "svc_IMDBData =SVC(kernel = 'rbf')\n",
    "svc_IMDBData.fit(IMDBData_train_words,IMDBData_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "svc_IMDBData_pred = cross_val_predict(svc_IMDBData,IMDBData_test_words,IMDBData_test_sentiment)\n",
    "svc_precision_IMDBData = precision_score(IMDBData_test_sentiment,svc_IMDBData_pred)\n",
    "svc_recall_IMDBData = recall_score(IMDBData_test_sentiment,svc_IMDBData_pred)\n",
    "svc_f1_IMDBData = f1_score(IMDBData_test_sentiment,svc_IMDBData_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8288625805484789 0.8893712815565203 0.858051504809184\n"
     ]
    }
   ],
   "source": [
    "print(svc_precision_IMDBData,svc_recall_IMDBData,svc_f1_IMDBData)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 3.4GBDT模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.ensemble import GradientBoostingClassifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GradientBoostingClassifier()"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 清华数据集\n",
    "gbdt_THUCNews =GradientBoostingClassifier()\n",
    "gbdt_THUCNews.fit(THUCNews_train_words,THUCNews_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# average参数定义了该指标的计算方法，二分类时average参数默认是binary；多分类时，可选参数有micro、macro、weighted和samples。\n",
    "gbdt_THUCNews_pred = cross_val_predict(gbdt_THUCNews,THUCNews_test_words,THUCNews_test_sentiment)\n",
    "gbdt_precision_THUCNews = precision_score(THUCNews_test_sentiment,gbdt_THUCNews_pred,average='micro')\n",
    "gbdt_recall_THUCNews = recall_score(THUCNews_test_sentiment,gbdt_THUCNews_pred,average='micro')\n",
    "gbdt_f1_THUCNews = f1_score(THUCNews_test_sentiment,gbdt_THUCNews_pred,average='micro')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9016 0.9016 0.9016\n"
     ]
    }
   ],
   "source": [
    "print(gbdt_precision_THUCNews,gbdt_recall_THUCNews,gbdt_f1_THUCNews)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GradientBoostingClassifier()"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# IMDB数据集\n",
    "gbdt_IMDBData =GradientBoostingClassifier()\n",
    "gbdt_IMDBData.fit(IMDBData_train_words,IMDBData_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "gbdt_IMDBData_pred = cross_val_predict(gbdt_IMDBData,IMDBData_test_words,IMDBData_test_sentiment)\n",
    "gbdt_precision_IMDBData = precision_score(IMDBData_test_sentiment,gbdt_IMDBData_pred)\n",
    "gbdt_recall_IMDBData = recall_score(IMDBData_test_sentiment,gbdt_IMDBData_pred)\n",
    "gbdt_f1_IMDBData = f1_score(IMDBData_test_sentiment,gbdt_IMDBData_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.742121553179516 0.8482071072519698 0.7916260223606212\n"
     ]
    }
   ],
   "source": [
    "print(gbdt_precision_IMDBData,gbdt_recall_IMDBData,gbdt_f1_IMDBData)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 3.5SGD模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.linear_model import SGDClassifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "SGDClassifier(random_state=1234)"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sgd_THUCNews=SGDClassifier(random_state=1234)\n",
    "sgd_THUCNews.fit(THUCNews_train_words,THUCNews_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# average参数定义了该指标的计算方法，二分类时average参数默认是binary；多分类时，可选参数有micro、macro、weighted和samples。\n",
    "sgd_THUCNews_pred = cross_val_predict(sgd_THUCNews,THUCNews_test_words,THUCNews_test_sentiment)\n",
    "sgd_precision_THUCNews = precision_score(THUCNews_test_sentiment,sgd_THUCNews_pred,average='micro')\n",
    "sgd_recall_THUCNews = recall_score(THUCNews_test_sentiment,sgd_THUCNews_pred,average='micro')\n",
    "sgd_f1_THUCNews = f1_score(THUCNews_test_sentiment,sgd_THUCNews_pred,average='micro')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9532 0.9532 0.9532\n"
     ]
    }
   ],
   "source": [
    "print(sgd_precision_THUCNews,sgd_recall_THUCNews,sgd_f1_THUCNews)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "SGDClassifier(random_state=1234)"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# IMDB数据集\n",
    "sgd_IMDBData =SGDClassifier(random_state=1234)\n",
    "sgd_IMDBData.fit(IMDBData_train_words,IMDBData_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "sgd_IMDBData_pred = cross_val_predict(sgd_IMDBData,IMDBData_test_words,IMDBData_test_sentiment)\n",
    "sgd_precision_IMDBData = precision_score(IMDBData_test_sentiment,sgd_IMDBData_pred)\n",
    "sgd_recall_IMDBData = recall_score(IMDBData_test_sentiment,sgd_IMDBData_pred)\n",
    "sgd_f1_IMDBData = f1_score(IMDBData_test_sentiment,sgd_IMDBData_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8385705483672212 0.8753818941952083 0.8565809141688302\n"
     ]
    }
   ],
   "source": [
    "print(sgd_precision_IMDBData,sgd_recall_IMDBData,sgd_f1_IMDBData)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 3.6随机森林"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from sklearn.ensemble import RandomForestClassifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "RandomForestClassifier()"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "rfc_THUCNews=RandomForestClassifier()\n",
    "rfc_THUCNews.fit(THUCNews_train_words,THUCNews_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# average参数定义了该指标的计算方法，二分类时average参数默认是binary；多分类时，可选参数有micro、macro、weighted和samples。\n",
    "rfc_THUCNews_pred = cross_val_predict(rfc_THUCNews,THUCNews_test_words,THUCNews_test_sentiment)\n",
    "rfc_precision_THUCNews = precision_score(THUCNews_test_sentiment,rfc_THUCNews_pred,average='micro')\n",
    "rfc_recall_THUCNews = recall_score(THUCNews_test_sentiment,rfc_THUCNews_pred,average='micro')\n",
    "rfc_f1_THUCNews = f1_score(THUCNews_test_sentiment,rfc_THUCNews_pred,average='micro')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9212 0.9212 0.9212\n"
     ]
    }
   ],
   "source": [
    "print(rfc_precision_THUCNews,rfc_recall_THUCNews,rfc_f1_THUCNews)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "RandomForestClassifier()"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# IMDB数据集\n",
    "rfc_IMDBData =RandomForestClassifier()\n",
    "rfc_IMDBData.fit(IMDBData_train_words,IMDBData_train_sentiment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# NOTE(review): cross_val_predict refits the estimator via cross-validation on the test set; the fitted rfc_IMDBData is not reused here -- confirm this is intended.\n",
    "rfc_IMDBData_pred = cross_val_predict(rfc_IMDBData,IMDBData_test_words,IMDBData_test_sentiment)\n",
    "rfc_precision_IMDBData = precision_score(IMDBData_test_sentiment,rfc_IMDBData_pred)\n",
    "rfc_recall_IMDBData = recall_score(IMDBData_test_sentiment,rfc_IMDBData_pred)\n",
    "rfc_f1_IMDBData = f1_score(IMDBData_test_sentiment,rfc_IMDBData_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8101440392277046 0.8501366779224956 0.8296586896822282\n"
     ]
    }
   ],
   "source": [
    "print(rfc_precision_IMDBData,rfc_recall_IMDBData,rfc_f1_IMDBData)"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
     "##### 3.7 CNN"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import jieba\n",
    "import keras\n",
    "from keras.layers.merge import concatenate\n",
    "from keras.preprocessing.text import Tokenizer\n",
    "from keras.preprocessing.sequence import pad_sequences\n",
    "from keras.layers.embeddings import Embedding\n",
    "from keras.layers import Conv1D, MaxPooling1D, Flatten, Dropout, Dense, Input\n",
    "from keras.models import Model\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import metrics\n",
    "import numpy as np\n",
    "from keras.models import Sequential\n",
    "from keras.layers import BatchNormalization"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "outputs": [],
   "source": [
    "# THUCNews dataset\n",
    "# Preprocessing: segment, index, split and pad the corpus; max_len is the fixed sequence length.\n",
    "def THUCNewsdata_process(max_len=200):           # max_len: fixed length every sentence is padded/truncated to\n",
    "    dataset = THUCNews_copy.astype('str')\n",
    "    cw = lambda x: list(jieba.cut(x))         # word-segmentation helper\n",
    "    dataset['words'] = dataset['words'].apply(cw)  # segment every sentence into words\n",
    "    tokenizer = Tokenizer()                   # maps each word to a positive integer id\n",
    "    tokenizer.fit_on_texts(dataset['words'])  # assigns ids; more frequent words get smaller ids\n",
    "    vocab = tokenizer.word_index              # word -> id mapping\n",
    "    x_train, x_test, y_train, y_test = train_test_split(dataset['words'], dataset['sentiment'], test_size=0.1)  # NOTE(review): no random_state, split is not reproducible -- confirm\n",
    "    x_train_word_ids = tokenizer.texts_to_sequences(x_train)     # training sentences -> id sequences\n",
    "    x_test_word_ids = tokenizer.texts_to_sequences(x_test)       # test sentences -> id sequences\n",
    "    x_train_padded_seqs = pad_sequences(x_train_word_ids, maxlen=max_len)  # pad/truncate each sequence to max_len (default 200)\n",
    "    x_test_padded_seqs = pad_sequences(x_test_word_ids, maxlen=max_len)    # longer sequences are truncated; shorter ones are zero-padded at the front\n",
    "    return x_train_padded_seqs,y_train,x_test_padded_seqs,y_test,vocab"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "outputs": [],
   "source": [
    "# CNN classifier (LeNet-5 style) for THUCNews.\n",
    "# Architecture: embedding - (conv + pool) x2 - conv - flatten - dropout - BN - dense - dropout - dense.\n",
    "# Returns (precision, recall, f1), all weighted-averaged over the 11 classes.\n",
    "def CNN_model_THUCNews(x_train_padded_seqs, y_train, x_test_padded_seqs, y_test,vocab):\n",
    "    model = Sequential()\n",
    "    model.add(Embedding(len(vocab) + 1, 300, input_length=200)) # map each word id to a 300-d vector\n",
    "    model.add(Conv1D(256, 5, padding='same'))\n",
    "    model.add(MaxPooling1D(11, 11, padding='same'))\n",
    "    model.add(Conv1D(128, 5, padding='same'))\n",
    "    model.add(MaxPooling1D(11, 11, padding='same'))\n",
    "    model.add(Conv1D(64, 11, padding='same'))\n",
    "    model.add(Flatten())\n",
    "    model.add(Dropout(0.1))\n",
    "    model.add(BatchNormalization())  # batch-normalization layer\n",
    "    model.add(Dense(256, activation='relu'))\n",
    "    model.add(Dropout(0.1))\n",
    "    model.add(Dense(11, activation='softmax'))\n",
    "    model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
    "    one_hot_labels = keras.utils.np_utils.to_categorical(y_train, num_classes=11)  # one-hot encode the labels\n",
    "    model.fit(x_train_padded_seqs, one_hot_labels,epochs=5, batch_size=800)\n",
    "    predict_x=model.predict(x_test_padded_seqs)\n",
    "    y_predict=np.argmax(predict_x,axis=1)  # predicted class index per sample\n",
    "    y_predict = list(map(str, y_predict))  # labels are stored as strings, so cast predictions to match\n",
    "    # Fixed: precision was previously computed with metrics.accuracy_score, which is why the\n",
    "    # reported precision exactly equalled the weighted recall.\n",
    "    cnn_precision_THUCNews=metrics.precision_score(y_test, y_predict,average='weighted')\n",
    "    cnn_recall_THUCNews= metrics.recall_score(y_test, y_predict,average='weighted')\n",
    "    cnn_f1_THUCNews = metrics.f1_score(y_test, y_predict,average='weighted')\n",
    "    return cnn_precision_THUCNews,cnn_recall_THUCNews,cnn_f1_THUCNews"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "12/12 [==============================] - 89s 7s/step - loss: 2.1171 - accuracy: 0.2507\n",
      "Epoch 2/5\n",
      "12/12 [==============================] - 71s 6s/step - loss: 1.1587 - accuracy: 0.6634\n",
      "Epoch 3/5\n",
      "12/12 [==============================] - 68s 6s/step - loss: 0.2277 - accuracy: 0.9573\n",
      "Epoch 4/5\n",
      "12/12 [==============================] - 87s 7s/step - loss: 0.0262 - accuracy: 0.9950\n",
      "Epoch 5/5\n",
      "12/12 [==============================] - 68s 6s/step - loss: 0.0052 - accuracy: 0.9992\n"
     ]
    }
   ],
   "source": [
    "THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test,THUCNews_vocab =  THUCNewsdata_process()\n",
    "cnn_precision_THUCNews,cnn_recall_THUCNews,cnn_f1_THUCNews = CNN_model_THUCNews(THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test,THUCNews_vocab)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.897 0.897 0.8973162097544206\n"
     ]
    }
   ],
   "source": [
    "print(cnn_precision_THUCNews,cnn_recall_THUCNews,cnn_f1_THUCNews)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "outputs": [],
   "source": [
    "# IMDB dataset\n",
    "# Preprocessing: segment, index, split and pad the corpus; max_len is the fixed sequence length.\n",
    "def IMDBdata_process( max_len=200):           # max_len: fixed length every sentence is padded/truncated to\n",
    "    dataset = IMDB_copy.astype('str')\n",
    "    cw = lambda x: list(jieba.cut(x))         # word-segmentation helper\n",
    "    dataset['words'] = dataset['words'].apply(cw)  # segment every sentence into words\n",
    "    tokenizer = Tokenizer()                   # maps each word to a positive integer id\n",
    "    tokenizer.fit_on_texts(dataset['words'])  # assigns ids; more frequent words get smaller ids\n",
    "    vocab = tokenizer.word_index              # word -> id mapping\n",
    "    x_train, x_test, y_train, y_test = train_test_split(dataset['words'], dataset['sentiment'], test_size=0.1)  # NOTE(review): no random_state, split is not reproducible -- confirm\n",
    "    x_train_word_ids = tokenizer.texts_to_sequences(x_train)     # training sentences -> id sequences\n",
    "    x_test_word_ids = tokenizer.texts_to_sequences(x_test)       # test sentences -> id sequences\n",
    "    x_train_padded_seqs = pad_sequences(x_train_word_ids, maxlen=max_len)  # pad/truncate each sequence to max_len (default 200)\n",
    "    x_test_padded_seqs = pad_sequences(x_test_word_ids, maxlen=max_len)    # longer sequences are truncated; shorter ones are zero-padded at the front\n",
    "    return x_train_padded_seqs,y_train,x_test_padded_seqs,y_test,vocab"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "outputs": [],
   "source": [
    "# TextCNN for the IMDB dataset: three parallel conv branches (kernel sizes 3/4/5) merged before the classifier.\n",
    "# Returns (precision, recall, f1), weighted-averaged.\n",
    "def CNN_model_IMDB(x_train, y_train, x_test, y_test):\n",
    "    main_input = Input(shape=(200,), dtype='float64')\n",
    "    # Embedding layer. NOTE(review): the original comment claimed pretrained word vectors,\n",
    "    # but no weights are loaded, so trainable=False freezes a randomly initialized\n",
    "    # embedding -- confirm whether pretrained weights should be supplied.\n",
    "    # Fixed: input_length must match the Input shape (200); it was 50.\n",
    "    embedder = Embedding(len(IMDB_vocab) + 1, 300, input_length=200, trainable=False)\n",
    "    embed = embedder(main_input)\n",
    "    # Convolution + max-pooling branches with kernel sizes 3, 4 and 5\n",
    "    cnn1 = Conv1D(256, 3, padding='same', strides=1, activation='relu')(embed)\n",
    "    cnn1 = MaxPooling1D(pool_size=48)(cnn1)\n",
    "    cnn2 = Conv1D(256, 4, padding='same', strides=1, activation='relu')(embed)\n",
    "    cnn2 = MaxPooling1D(pool_size=47)(cnn2)\n",
    "    cnn3 = Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)\n",
    "    cnn3 = MaxPooling1D(pool_size=46)(cnn3)\n",
    "    # Merge the three branch outputs into one feature vector\n",
    "    cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)\n",
    "    flat = Flatten()(cnn)\n",
    "    drop = Dropout(0.2)(flat) # dropout between pooling and dense layers to reduce overfitting\n",
    "    main_output = Dense(3, activation='softmax')(drop)\n",
    "    model = Model(inputs=main_input, outputs=main_output)\n",
    "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
    "    one_hot_labels = keras.utils.np_utils.to_categorical(y_train, num_classes=3)  # one-hot encode the labels\n",
    "    model.fit(x_train, one_hot_labels, batch_size=800, epochs=5)\n",
    "    result = model.predict(x_test)  # per-class probabilities for each test sample\n",
    "    result_labels = np.argmax(result, axis=1)  # label with the highest probability\n",
    "    y_predict = list(map(str, result_labels))  # labels are stored as strings, so cast predictions to match\n",
    "    # Fixed: precision was previously computed with metrics.accuracy_score, which is why the\n",
    "    # reported precision exactly equalled the weighted recall.\n",
    "    cnn_precision_IMDB=metrics.precision_score(y_test, y_predict,average='weighted')\n",
    "    cnn_recall_IMDB= metrics.recall_score(y_test, y_predict,average='weighted')\n",
    "    cnn_f1_IMDB = metrics.f1_score(y_test, y_predict,average='weighted')\n",
    "    return cnn_precision_IMDB,cnn_recall_IMDB,cnn_f1_IMDB"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "57/57 [==============================] - 347s 6s/step - loss: 0.6908 - accuracy: 0.5603\n",
      "Epoch 2/5\n",
      "57/57 [==============================] - 378s 7s/step - loss: 0.5998 - accuracy: 0.6830\n",
      "Epoch 3/5\n",
      "57/57 [==============================] - 366s 6s/step - loss: 0.5310 - accuracy: 0.7379\n",
      "Epoch 4/5\n",
      "57/57 [==============================] - 370s 7s/step - loss: 0.4837 - accuracy: 0.7724\n",
      "Epoch 5/5\n",
      "57/57 [==============================] - 362s 6s/step - loss: 0.4397 - accuracy: 0.8018\n"
     ]
    }
   ],
   "source": [
    "IMDB_x_train, IMDB_y_train, IMDB_x_test, IMDB_y_test,IMDB_vocab = IMDBdata_process()\n",
    "cnn_precision_IMDB,cnn_recall_IMDB,cnn_f1_IMDB=CNN_model_IMDB(IMDB_x_train, IMDB_y_train, IMDB_x_test, IMDB_y_test)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.7936 0.7936 0.7936049555822329\n"
     ]
    }
   ],
   "source": [
    "print(cnn_precision_IMDB,cnn_recall_IMDB,cnn_f1_IMDB)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 4.汇总"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "THUCNews_precision_list = [lr_precision_THUCNews,bayes_precision_THUCNews,svc_precision_THUCNews,gbdt_precision_THUCNews,sgd_precision_THUCNews,rfc_precision_THUCNews,cnn_precision_THUCNews]\n",
    "THUCNews_recall_list = [lr_recall_THUCNews,bayes_recall_THUCNews,svc_recall_THUCNews,gbdt_recall_THUCNews,sgd_recall_THUCNews,rfc_recall_THUCNews,cnn_recall_THUCNews]\n",
    "# Fixed: the f1 list previously used sgd_recall_THUCNews in the SGD slot by mistake\n",
    "THUCNews_f1_list = [lr_f1_THUCNews,bayes_f1_THUCNews,svc_f1_THUCNews,gbdt_f1_THUCNews,sgd_f1_THUCNews,rfc_f1_THUCNews,cnn_f1_THUCNews]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "IMDBData_precision_list = [lr_precision_IMDBData,bayes_precision_IMDBData,svc_precision_IMDBData,gbdt_precision_IMDBData,sgd_precision_IMDBData,rfc_precision_IMDBData,cnn_precision_IMDB]\n",
    "IMDBData_recall_list = [lr_recall_IMDBData,bayes_recall_IMDBData,svc_recall_IMDBData,gbdt_recall_IMDBData,sgd_recall_IMDBData,rfc_recall_IMDBData,cnn_recall_IMDB]\n",
    "# Fixed: the f1 list previously used sgd_recall_IMDBData in the SGD slot by mistake\n",
    "IMDBData_f1_list = [lr_f1_IMDBData,bayes_f1_IMDBData,svc_f1_IMDBData,gbdt_f1_IMDBData,sgd_f1_IMDBData,rfc_f1_IMDBData,cnn_f1_IMDB]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "score=pd.DataFrame({'modelname':['LogisticRegression','MultinomialNB','SVC','GradientBoostingClassifier','SGDCLassifier','RandomForestClassifier','CNN'],\n",
    "                    'THUCNews_precision':THUCNews_precision_list,\n",
    "                    'THUCNews_recall':THUCNews_recall_list,\n",
    "                    'THUCNews_f1':THUCNews_f1_list,\n",
    "                    'IMDBData_precision':IMDBData_precision_list,\n",
    "                    'IMDBData_recall':IMDBData_recall_list,\n",
    "                    'IMDBData_f1':IMDBData_f1_list\n",
    "                    })"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "score.to_excel('../db/text_classification_db/score.xlsx')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fnlp",
   "language": "python",
   "name": "fnlp"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.12"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}