{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np # linear algebra\n",
    "import pandas as pd \n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Embedding, LSTM, Dropout\n",
    "import tensorflow.keras as keras\n",
    "from tensorflow.keras.preprocessing.text import Tokenizer\n",
    "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(array(['text_id', 'full_text', 'cohesion', 'syntax', 'vocabulary',\n",
       "        'phraseology', 'grammar', 'conventions'], dtype=object),\n",
       " 7)"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the competition data: training essays, test essays, and the\n",
    "# sample submission file that defines the expected output format.\n",
    "df=pd.read_csv('../数据集/train.csv')\n",
    "data_test=pd.read_csv('../数据集/test.csv')\n",
    "df0=pd.read_csv('../数据集/sample_submission.csv')\n",
    "# Keep only the essay text column of the test set for inference.\n",
    "data_test=data_test['full_text']\n",
    "# Inspect the training columns and the submission's column count.\n",
    "df.columns.values,len(df0.columns.values)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3911,)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Number of training essays.\n",
    "df['full_text'].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建Tokenizer实例对象，num_words表根据单词频率排序，保留前num_words个单词；split分词分隔符\n",
    "#  fit_on_texts 方法学习出文本的字典\n",
    "# 通过texts_to_sequences将文本中的每个词转成数字\n",
    "# 完成分词和编码后通过padding把所有词向量补成同样长度，再在embedding层进行一个向量化，就可以输入到各种模型中了。\n",
    "X_train=df['full_text']\n",
    "y_train=np.array(df.iloc[:,2:])\n",
    "tokenizer = Tokenizer(num_words=15000, split=' ') \n",
    "tokenizer.fit_on_texts(X_train.values)\n",
    "X_train = tokenizer.texts_to_sequences(X_train.values)\n",
    "data_test= tokenizer.texts_to_sequences(data_test.values)\n",
    "X_train = pad_sequences(X_train)\n",
    "data_test = pad_sequences(data_test)\n",
    "X_train,X_val,y_train,y_val=train_test_split(X_train,y_train,test_size=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3519, 1238)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Training matrix after padding and the 90/10 split: (rows, sequence length).\n",
    "X_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3519, 6)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Targets: six scores per essay.\n",
    "y_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3519, 1238)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): duplicate of the X_train.shape inspection cell above;\n",
    "# consider deleting it to keep the notebook linear.\n",
    "X_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3519, 6)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): duplicate of the y_train.shape inspection cell above;\n",
    "# consider deleting it to keep the notebook linear.\n",
    "y_train.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Sequential模型，即顺序模型（神经网络）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_3\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " embedding_3 (Embedding)     (None, None, 550)         8250000   \n",
      "                                                                 \n",
      " lstm_3 (LSTM)               (None, 128)               347648    \n",
      "                                                                 \n",
      " dropout_24 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_24 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_25 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_25 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_26 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_26 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_27 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_27 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_28 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_28 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_29 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_29 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_30 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_30 (Dense)            (None, 128)               16512     \n",
      "                                                                 \n",
      " dropout_31 (Dropout)        (None, 128)               0         \n",
      "                                                                 \n",
      " dense_31 (Dense)            (None, 6)                 774       \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 8,714,006\n",
      "Trainable params: 8,714,006\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n"
     ]
    }
   ],
   "source": [
    "# Sequential model: a plain linear stack of layers, no branching.\n",
    "model=Sequential()\n",
    "# Embedding layer: vocabulary size 15000, embedding dimension 550.\n",
    "model.add(Embedding(input_dim=15000, output_dim=550))\n",
    "# LSTM layer with 128 internal units.\n",
    "model.add(LSTM(128))\n",
    "# Dropout layer that zeroes 70% of the activations during training.\n",
    "model.add(Dropout(0.7))\n",
    "for i in range(7):\n",
    "    # Fully connected (Dense) layer with 128 units and ReLU activation.\n",
    "    # (The original comment called this a convolutional layer; it is Dense.)\n",
    "    model.add(Dense(128,activation='relu'))\n",
    "    # Dropout layer dropping 70% of the units.\n",
    "    model.add(Dropout(0.7))\n",
    "# Output layer: 6 units, one per score column; ReLU keeps outputs non-negative.\n",
    "model.add(Dense(6,activation='relu'))\n",
    "# Compile with mean-squared-error loss and the Adam optimizer.\n",
    "model.compile(\n",
    "    loss = 'mse',  # mean squared error\n",
    "    optimizer ='adam'\n",
    ")\n",
    "print(model.summary())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "训练模型并调参"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/50\n",
      "110/110 [==============================] - 236s 2s/step - loss: 1.4699\n",
      "Epoch 2/50\n",
      "110/110 [==============================] - 250s 2s/step - loss: 1.0481\n",
      "Epoch 3/50\n",
      "110/110 [==============================] - 240s 2s/step - loss: 0.8949\n",
      "Epoch 4/50\n",
      "110/110 [==============================] - 251s 2s/step - loss: 0.7957\n",
      "Epoch 5/50\n",
      "110/110 [==============================] - 249s 2s/step - loss: 0.7696\n",
      "Epoch 6/50\n",
      "110/110 [==============================] - 256s 2s/step - loss: 0.7264\n",
      "Epoch 7/50\n",
      "110/110 [==============================] - 255s 2s/step - loss: 0.7028\n",
      "Epoch 8/50\n",
      "110/110 [==============================] - 248s 2s/step - loss: 0.6743\n",
      "Epoch 9/50\n",
      "110/110 [==============================] - 238s 2s/step - loss: 0.6443\n",
      "Epoch 10/50\n",
      "110/110 [==============================] - 225s 2s/step - loss: 0.6309\n",
      "Epoch 11/50\n",
      "110/110 [==============================] - 242s 2s/step - loss: 0.6395\n",
      "Epoch 12/50\n",
      "110/110 [==============================] - 257s 2s/step - loss: 0.6134\n",
      "Epoch 13/50\n",
      "110/110 [==============================] - 275s 2s/step - loss: 0.6040\n",
      "Epoch 14/50\n",
      "110/110 [==============================] - 231s 2s/step - loss: 0.6187\n",
      "Epoch 15/50\n",
      "110/110 [==============================] - 260s 2s/step - loss: 0.5965\n",
      "Epoch 16/50\n",
      "110/110 [==============================] - 250s 2s/step - loss: 0.5830\n",
      "Epoch 17/50\n",
      "110/110 [==============================] - 249s 2s/step - loss: 0.5676\n",
      "Epoch 18/50\n",
      "110/110 [==============================] - 254s 2s/step - loss: 0.5727\n",
      "Epoch 19/50\n",
      "110/110 [==============================] - 275s 2s/step - loss: 0.5609\n",
      "Epoch 20/50\n",
      "110/110 [==============================] - 248s 2s/step - loss: 0.5426\n",
      "Epoch 21/50\n",
      "110/110 [==============================] - 239s 2s/step - loss: 0.5471\n",
      "Epoch 22/50\n",
      "110/110 [==============================] - 217s 2s/step - loss: 0.5470\n",
      "Epoch 23/50\n",
      "110/110 [==============================] - 205s 2s/step - loss: 0.5485\n",
      "Epoch 24/50\n",
      "110/110 [==============================] - 218s 2s/step - loss: 0.5705\n",
      "Epoch 25/50\n",
      "110/110 [==============================] - 221s 2s/step - loss: 0.5271\n",
      "Epoch 26/50\n",
      "110/110 [==============================] - 219s 2s/step - loss: 0.5287\n",
      "Epoch 27/50\n",
      "110/110 [==============================] - 264s 2s/step - loss: 0.5299\n",
      "Epoch 28/50\n",
      "110/110 [==============================] - 248s 2s/step - loss: 0.5273\n",
      "Epoch 29/50\n",
      "110/110 [==============================] - 258s 2s/step - loss: 0.5405\n",
      "Epoch 30/50\n",
      "110/110 [==============================] - 263s 2s/step - loss: 0.5021\n",
      "Epoch 31/50\n",
      "110/110 [==============================] - 260s 2s/step - loss: 0.4973\n",
      "Epoch 32/50\n",
      "110/110 [==============================] - 224s 2s/step - loss: 0.4943\n",
      "Epoch 33/50\n",
      "110/110 [==============================] - 255s 2s/step - loss: 0.4885\n",
      "Epoch 34/50\n",
      "110/110 [==============================] - 244s 2s/step - loss: 0.4972\n",
      "Epoch 35/50\n",
      "110/110 [==============================] - 260s 2s/step - loss: 0.4786\n",
      "Epoch 36/50\n",
      "110/110 [==============================] - 266s 2s/step - loss: 0.4793\n",
      "Epoch 37/50\n",
      "110/110 [==============================] - 227s 2s/step - loss: 0.4825\n",
      "Epoch 38/50\n",
      "110/110 [==============================] - 204s 2s/step - loss: 0.4576\n",
      "Epoch 39/50\n",
      "110/110 [==============================] - 203s 2s/step - loss: 0.5692\n",
      "Epoch 40/50\n",
      "110/110 [==============================] - 208s 2s/step - loss: 0.4798\n",
      "Epoch 41/50\n",
      "110/110 [==============================] - 213s 2s/step - loss: 0.4772\n",
      "Epoch 42/50\n",
      "110/110 [==============================] - 207s 2s/step - loss: 0.4587\n",
      "Epoch 43/50\n",
      "110/110 [==============================] - 206s 2s/step - loss: 0.4847\n",
      "Epoch 44/50\n",
      "110/110 [==============================] - 207s 2s/step - loss: 0.4691\n",
      "Epoch 45/50\n",
      "110/110 [==============================] - 218s 2s/step - loss: 0.4512\n",
      "Epoch 46/50\n",
      "110/110 [==============================] - 214s 2s/step - loss: 0.5296\n",
      "Epoch 47/50\n",
      "110/110 [==============================] - 215s 2s/step - loss: 0.4594\n",
      "Epoch 48/50\n",
      "110/110 [==============================] - 212s 2s/step - loss: 0.4351\n",
      "Epoch 49/50\n",
      "110/110 [==============================] - 214s 2s/step - loss: 0.4541\n",
      "Epoch 50/50\n",
      "110/110 [==============================] - 214s 2s/step - loss: 0.4465\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x254002c3f10>"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.fit(X_train, y_train, epochs = 50)\n",
    "# Feed the data to the model.\n",
    "# epochs: number of full passes over the training data;\n",
    "# batch_size: mini-batch size, default 32;\n",
    "# verbose: 0 = silent, 1 = progress bar (default), 2 = one line per epoch."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13/13 [==============================] - 8s 477ms/step - loss: 0.4477\n",
      "110/110 [==============================] - 54s 486ms/step - loss: 0.4369\n"
     ]
    }
   ],
   "source": [
    "val_loss=model.evaluate(X_val,y_val)\n",
    "val_loss=model.evaluate(X_train,y_train)\n",
    "# 评估模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "110/110 [==============================] - 275s 3s/step - loss: 3.4110\n",
      "Epoch 2/5\n",
      "110/110 [==============================] - 242s 2s/step - loss: 1.6565\n",
      "Epoch 3/5\n",
      "110/110 [==============================] - 237s 2s/step - loss: 1.1603\n",
      "Epoch 4/5\n",
      "110/110 [==============================] - 249s 2s/step - loss: 0.9040\n",
      "Epoch 5/5\n",
      "110/110 [==============================] - 241s 2s/step - loss: 0.8660\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x16c06d85ee0>"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Second training run with 5 epochs. NOTE(review): the execution count (7)\n",
    "# and the loss restarting at 3.41 indicate this was run out of order against\n",
    "# a freshly rebuilt model, not as a continuation of the 50-epoch run above.\n",
    "model.fit(X_train, y_train, epochs = 5)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13/13 [==============================] - 7s 500ms/step - loss: 9.6977\n",
      "110/110 [==============================] - 53s 482ms/step - loss: 10.1034\n"
     ]
    }
   ],
   "source": [
    "val_loss=model.evaluate(X_val,y_val)\n",
    "val_loss=model.evaluate(X_train,y_train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "数据预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>text_id</th>\n",
       "      <th>cohesion</th>\n",
       "      <th>syntax</th>\n",
       "      <th>vocabulary</th>\n",
       "      <th>phraseology</th>\n",
       "      <th>grammar</th>\n",
       "      <th>conventions</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0000C359D63E</td>\n",
       "      <td>3.019878</td>\n",
       "      <td>2.914380</td>\n",
       "      <td>3.141343</td>\n",
       "      <td>3.001769</td>\n",
       "      <td>2.923383</td>\n",
       "      <td>2.988626</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>000BAD50D026</td>\n",
       "      <td>3.022939</td>\n",
       "      <td>2.917268</td>\n",
       "      <td>3.144409</td>\n",
       "      <td>3.004750</td>\n",
       "      <td>2.926442</td>\n",
       "      <td>2.991700</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>00367BB2546B</td>\n",
       "      <td>3.019752</td>\n",
       "      <td>2.914262</td>\n",
       "      <td>3.141214</td>\n",
       "      <td>3.001645</td>\n",
       "      <td>2.923257</td>\n",
       "      <td>2.988500</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        text_id  cohesion    syntax  vocabulary  phraseology   grammar  \\\n",
       "0  0000C359D63E  3.019878  2.914380    3.141343     3.001769  2.923383   \n",
       "1  000BAD50D026  3.022939  2.917268    3.144409     3.004750  2.926442   \n",
       "2  00367BB2546B  3.019752  2.914262    3.141214     3.001645  2.923257   \n",
       "\n",
       "   conventions  \n",
       "0     2.988626  \n",
       "1     2.991700  \n",
       "2     2.988500  "
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Predictions from the model trained for 50 epochs: fill the six score\n",
    "# columns (everything after text_id) of the sample-submission frame and\n",
    "# write the submission file.\n",
    "y_pr=model.predict(data_test)\n",
    "df0.iloc[:,1:]=y_pr\n",
    "df0.to_csv('submission.csv',index=False)\n",
    "df0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.7 ('base')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "618d854d3ece49025d2e8a2744aa8fe4aa4e4aad5ecf6d52bf8d246deb8fa2ef"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
