{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import jieba\n",
    "import re\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.naive_bayes import ComplementNB\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 一、THUCNews新闻文本"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "## 1.数据读取\n",
    "\n",
    "本文所用的数据集是清华提供的THUCNews新闻文本分类数据集的一个子集。该数据集共有体育、财经、房产、家居、教育、科技、时尚、时政、游戏、娱乐10个分类，每个分类6500条，总共65000条新闻数据。\n",
    "数据集划分如下：训练集50000条  验证集5000条 测试集10000条"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "'''\n",
    "自定义读取新闻数据方法\n",
    "returns:\n",
    "        -content 文本数据\n",
    "        -label 文本数据标签\n",
    "'''\n",
    "#load data\n",
    "def load_data(filepath,nleft,nright):\n",
    "    categories = os.listdir(filepath)\n",
    "    content = []\n",
    "    label = []\n",
    "    for category in categories:\n",
    "        filelist = os.listdir(filepath+'/'+category)[nleft:nright]\n",
    "        for file in filelist:\n",
    "            with open(filepath+'/'+category+'/'+file,encoding='utf8')as f:\n",
    "                content.append(''.join(jieba.cut(f.read())))\n",
    "                label.append(category)\n",
    "    return content,label\n",
    "\n",
    "'''\n",
    "读取训练数据集\n",
    "本文将每类的前5000条数据作为训练数据\n",
    "'''\n",
    "def train_data(filepath, train_num):\n",
    "    content, label = load_data(filepath,0,train_num)\n",
    "    return content, label\n",
    "\n",
    "'''\n",
    "读取测试集数据\n",
    "本文将每类训练集后的1000条数据作为测试数据\n",
    "'''\n",
    "def test_data(filepath, train_num, test_num):\n",
    "    content, label = load_data(filepath,train_num, test_num+train_num)\n",
    "    return content, label\n",
    "\n",
    "'''\n",
    "读取验证集数据\n",
    "本文将各类的最后500条数据作为验证集数据\n",
    "'''\n",
    "def val_data(filepath, test_num, val_num):\n",
    "    content, label = load_data(filepath, test_num, val_num+test_num)\n",
    "    return content, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\hp\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 1.604 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    }
   ],
   "source": [
    "#数据源文件目录\n",
    "filepath = r'E:\\StudySource\\FNLP\\DataSets\\清华语料库\\THUCNewsMini\\thucnews'\n",
    "#训练集数据量\n",
    "train_num = 5000\n",
    "#测试集数据量\n",
    "test_num = 1000\n",
    "#验证集数据量\n",
    "val_num = 500\n",
    "train_contents, train_labels = train_data(filepath,train_num)\n",
    "test_contents, test_labels = test_data(filepath,train_num,test_num)\n",
    "val_contents, val_labels = val_data(filepath,test_num,val_num)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "## 2.数据预处理\n",
    "\n",
    "本文数据预处理主要是去除新闻文本数据中的特殊字符以及停用词，使最后的数据仅保留中英文"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 2.1移除特殊字符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "#去除文本中的表情字符（只保留中英文和数字）\n",
    "def clear_character(sentence):\n",
    "    pattern1 = '\\[.*?\\]'\n",
    "    pattern2 = re.compile('[^\\u4e00-\\u9fa5^a-z^A-Z^0-9]')\n",
    "    line1 = re.sub(pattern1, '',sentence)\n",
    "    line2 = re.sub(pattern2,'',line1)\n",
    "    new_sentence = ''.join(line2.split())   #去除空白\n",
    "    return new_sentence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "train_text = list(map(lambda s: clear_character(s), train_contents))\n",
    "test_text = list(map(lambda s: clear_character(s), test_contents))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 2.2分词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "#使用jieba分词\n",
    "train_seg_text = list(map(lambda s: jieba.lcut(s),train_text))\n",
    "test_seg_text = list(map(lambda s: jieba.lcut(s),test_text))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 2.3去除停用词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "#本文使用百度的停用词列表\n",
    "stop_words_path = r'E:\\StudySource\\FNLP\\停用词\\baidu_stopwords.txt'\n",
    "def get_stop_words():\n",
    "    file = open(stop_words_path, 'rb').read().decode('utf8').split('\\r\\n')\n",
    "    return set(file)\n",
    "stopwprds = get_stop_words()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "#去除文本中的停用词\n",
    "def drop_stop_words(line, stopwords):\n",
    "    line_clean = []\n",
    "    for word in line:\n",
    "        if word in stopwords:\n",
    "            continue\n",
    "        line_clean.append(word)\n",
    "    return line_clean"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "train_st_text = list(map(lambda s: drop_stop_words(s,stopwprds),train_seg_text))\n",
    "test_st_text = list(map(lambda s: drop_stop_words(s,stopwprds),test_seg_text))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 2.4标签映射"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "LabelEncoder()"
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "le = LabelEncoder()\n",
    "le.fit(train_labels)\n",
    "LabelEncoder()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "label_train_id = le.transform(train_labels)\n",
    "label_test_id = le.transform(test_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "train_c_text = list(map(lambda s: ' '.join(s),train_st_text))\n",
    "test_c_text = list(map(lambda s: ' '.join(s),test_st_text))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "## 3.计算文本特征值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "tfidf_model = TfidfVectorizer(binary=False,max_df=0.5,token_pattern=r\"(?u)\\b\\w+\\b\")\n",
    "train_Data = tfidf_model.fit_transform(train_c_text)\n",
    "test_Data = tfidf_model.transform(test_c_text)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "## 4.建立模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 4.1 ComplementNB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.9940    0.9990    0.9965      1000\n",
      "           1     0.9420    0.9900    0.9654      1000\n",
      "           2     0.9820    0.7080    0.8228      1000\n",
      "           3     0.7929    0.9190    0.8513      1000\n",
      "           4     0.9384    0.9300    0.9342      1000\n",
      "           5     0.9713    0.9810    0.9761      1000\n",
      "           6     0.9495    0.9220    0.9356      1000\n",
      "           7     0.9879    0.9760    0.9819      1000\n",
      "           8     0.9468    0.9960    0.9708      1000\n",
      "           9     0.9468    0.9960    0.9708      1000\n",
      "\n",
      "    accuracy                         0.9417     10000\n",
      "   macro avg     0.9452    0.9417    0.9405     10000\n",
      "weighted avg     0.9452    0.9417    0.9405     10000\n",
      "\n"
     ]
    }
   ],
   "source": [
    "clf = ComplementNB()\n",
    "clf.fit(train_Data, label_train_id)\n",
    "pred = clf.predict(test_Data)\n",
    "print(classification_report(label_test_id, pred, digits=4))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 4.2MultinomialNB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.9980    0.9970    0.9975      1000\n",
      "           1     0.9003    0.9930    0.9444      1000\n",
      "           2     0.9848    0.2600    0.4114      1000\n",
      "           3     0.5721    0.9360    0.7102      1000\n",
      "           4     0.8859    0.9470    0.9154      1000\n",
      "           5     0.9897    0.9620    0.9757      1000\n",
      "           6     0.9630    0.8840    0.9218      1000\n",
      "           7     0.9715    0.9550    0.9632      1000\n",
      "           8     0.9553    0.9840    0.9695      1000\n",
      "           9     0.9639    0.9890    0.9763      1000\n",
      "\n",
      "    accuracy                         0.8907     10000\n",
      "   macro avg     0.9185    0.8907    0.8785     10000\n",
      "weighted avg     0.9185    0.8907    0.8785     10000\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from sklearn.naive_bayes import MultinomialNB\n",
    "mnb = MultinomialNB()\n",
    "mnb.fit(train_Data, label_train_id)\n",
    "pred = mnb.predict(test_Data)\n",
    "print(classification_report(label_test_id, pred, digits=4))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "### 4.3LogisticRegression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.9980    0.9930    0.9955      1000\n",
      "           1     0.9870    0.9870    0.9870      1000\n",
      "           2     0.9644    0.8390    0.8973      1000\n",
      "           3     0.8813    0.8980    0.8895      1000\n",
      "           4     0.9649    0.9080    0.9356      1000\n",
      "           5     0.9657    0.9860    0.9758      1000\n",
      "           6     0.9255    0.9690    0.9468      1000\n",
      "           7     0.9626    0.9790    0.9707      1000\n",
      "           8     0.9455    0.9880    0.9663      1000\n",
      "           9     0.9474    0.9900    0.9682      1000\n",
      "\n",
      "    accuracy                         0.9537     10000\n",
      "   macro avg     0.9542    0.9537    0.9533     10000\n",
      "weighted avg     0.9542    0.9537    0.9533     10000\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "F:\\Anaconda\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:814: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "  n_iter_i = _check_optimize_result(\n"
     ]
    }
   ],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "classfier = LogisticRegression()\n",
    "classfier.fit(train_Data,label_train_id)\n",
    "pred = classfier.predict(test_Data)\n",
    "print(classification_report(label_test_id,pred,digits=4))"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 二、IMDB影评数据"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 1.数据读取"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "imdb_df = pd.read_csv(r'E:\\StudySource\\FNLP\\DataSets\\IMDB\\IMDB.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "outputs": [
    {
     "data": {
      "text/plain": "                                               review sentiment\n0   One of the other reviewers has mentioned that ...  positive\n1   A wonderful little production. <br /><br />The...  positive\n2   I thought this was a wonderful way to spend ti...  positive\n3   Basically there's a family where a little boy ...  negative\n4   Petter Mattei's \"Love in the Time of Money\" is...  positive\n5   Probably my all-time favorite movie, a story o...  positive\n6   I sure would like to see a resurrection of a u...  positive\n7   This show was an amazing, fresh & innovative i...  negative\n8   Encouraged by the positive comments about this...  negative\n9   If you like original gut wrenching laughter yo...  positive\n10  Phil the Alien is one of those quirky films wh...  negative\n11  I saw this movie when I was about 12 when it c...  negative\n12  So im not a big fan of Boll's work but then ag...  negative\n13  The cast played Shakespeare.<br /><br />Shakes...  negative\n14  This a fantastic movie of three prisoners who ...  positive\n15  Kind of drawn in by the erotic scenes, only to...  negative\n16  Some films just simply should not be remade. T...  positive\n17  This movie made it into one of my top 10 most ...  negative\n18  I remember this film,it was the first film i h...  positive\n19  An awful film! It must have been up against so...  negative",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>review</th>\n      <th>sentiment</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>One of the other reviewers has mentioned that ...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>A wonderful little production. &lt;br /&gt;&lt;br /&gt;The...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>I thought this was a wonderful way to spend ti...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>Basically there's a family where a little boy ...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>Petter Mattei's \"Love in the Time of Money\" is...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>5</th>\n      <td>Probably my all-time favorite movie, a story o...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>6</th>\n      <td>I sure would like to see a resurrection of a u...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>7</th>\n      <td>This show was an amazing, fresh &amp; innovative i...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>8</th>\n      <td>Encouraged by the positive comments about this...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>9</th>\n      <td>If you like original gut wrenching laughter yo...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>10</th>\n      <td>Phil the Alien is one of those quirky films wh...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>11</th>\n      <td>I saw this movie when I was about 12 when it c...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>12</th>\n      <td>So im not a big fan of Boll's work but then ag...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>13</th>\n      <td>The cast played Shakespeare.&lt;br /&gt;&lt;br /&gt;Shakes...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>14</th>\n      <td>This a fantastic movie of three prisoners who ...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>15</th>\n      <td>Kind of drawn in by the erotic scenes, only to...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>16</th>\n      <td>Some films just simply should not be remade. T...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>17</th>\n      <td>This movie made it into one of my top 10 most ...</td>\n      <td>negative</td>\n    </tr>\n    <tr>\n      <th>18</th>\n      <td>I remember this film,it was the first film i h...</td>\n      <td>positive</td>\n    </tr>\n    <tr>\n      <th>19</th>\n      <td>An awful film! It must have been up against so...</td>\n      <td>negative</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 查看前20行数据\n",
    "imdb_df.head(20)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.数据预处理\n",
    "\n",
    "- 将sentiment转化为0，1数值型数据\n",
    "- 将review评论数据转化为符合格式的词序列"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "outputs": [],
   "source": [
    "# 将sentiment转化为0，1\n",
    "# 0 -positive\n",
    "# 1 -negative\n",
    "def transfer_emossion(str):\n",
    "    if str == 'positive':\n",
    "        return 0\n",
    "    else:\n",
    "        return 1\n",
    "imdb_df['sentiment'] = imdb_df['sentiment'].map(lambda a : transfer_emossion(a))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "outputs": [
    {
     "data": {
      "text/plain": "                                              review  sentiment\n0  One of the other reviewers has mentioned that ...          0\n1  A wonderful little production. <br /><br />The...          0\n2  I thought this was a wonderful way to spend ti...          0\n3  Basically there's a family where a little boy ...          1\n4  Petter Mattei's \"Love in the Time of Money\" is...          0",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>review</th>\n      <th>sentiment</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>One of the other reviewers has mentioned that ...</td>\n      <td>0</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>A wonderful little production. &lt;br /&gt;&lt;br /&gt;The...</td>\n      <td>0</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>I thought this was a wonderful way to spend ti...</td>\n      <td>0</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>Basically there's a family where a little boy ...</td>\n      <td>1</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>Petter Mattei's \"Love in the Time of Money\" is...</td>\n      <td>0</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#查看转化之后的数据\n",
    "imdb_df.head()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 50000 entries, 0 to 49999\n",
      "Data columns (total 2 columns):\n",
      " #   Column     Non-Null Count  Dtype \n",
      "---  ------     --------------  ----- \n",
      " 0   review     50000 non-null  object\n",
      " 1   sentiment  50000 non-null  int64 \n",
      "dtypes: int64(1), object(1)\n",
      "memory usage: 781.4+ KB\n"
     ]
    }
   ],
   "source": [
    "# 查看imdb_df数据集基本信息\n",
    "imdb_df.info()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "outputs": [],
   "source": [
    "from bs4 import BeautifulSoup\n",
    "#将IMDB的评论转化为此序列\n",
    "def review_to_wordlist(review):\n",
    "    #去掉html标签,拿到内容\n",
    "    review_text = BeautifulSoup(review, \"html.parser\").get_text()\n",
    "    #用正则表达式取出符合规范的数据\n",
    "    review_text = re.sub(\"[^a-zA-Z]\",\" \",review_text)\n",
    "    #小写化所有的词，并转成词序列\n",
    "    words = review_text.lower().split()\n",
    "    return words"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "outputs": [],
   "source": [
    "imdb_train_label = imdb_df['sentiment']\n",
    "imdb_train_data = []\n",
    "for i in range(len(imdb_df['review'])):\n",
    "    imdb_train_data.append(' '.join(review_to_wordlist(imdb_df['review'][i])))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "outputs": [
    {
     "data": {
      "text/plain": "'one of the other reviewers has mentioned that after watching just oz episode you ll be hooked they are right as this is exactly what happened with me the first thing that struck me about oz was its brutality and unflinching scenes of violence which set in right from the word go trust me this is not a show for the faint hearted or timid this show pulls no punches with regards to drugs sex or violence its is hardcore in the classic use of the word it is called oz as that is the nickname given to the oswald maximum security state penitentary it focuses mainly on emerald city an experimental section of the prison where all the cells have glass fronts and face inwards so privacy is not high on the agenda em city is home to many aryans muslims gangstas latinos christians italians irish and more so scuffles death stares dodgy dealings and shady agreements are never far away i would say the main appeal of the show is due to the fact that it goes where other shows wouldn t dare forget pretty pictures painted for mainstream audiences forget charm forget romance oz doesn t mess around the first episode i ever saw struck me as so nasty it was surreal i couldn t say i was ready for it but as i watched more i developed a taste for oz and got accustomed to the high levels of graphic violence not just violence but injustice crooked guards who ll be sold out for a nickel inmates who ll kill on order and get away with it well mannered middle class inmates being turned into prison bitches due to their lack of street skills or prison experience watching oz you may become comfortable with what is uncomfortable viewing thats if you can get in touch with your darker side'"
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 查看review处理之后的数据\n",
    "imdb_train_data[0]"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.计算词TFIDF向量"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 切分数据集\n",
    "imdb_train_Data , imdb_test_Data, imdb_train_label, imdb_test_label =\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}