{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "a8121920-766b-4963-94f1-29c45dfc8b9a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "from collections import Counter\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import cross_val_predict\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import classification_report\n",
     "\n",
     "from xgboost import XGBClassifier\n",
     "\n",
     "from gensim.models import Word2Vec\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f23b0083-61c4-454a-85ba-313c5bb657ac",
   "metadata": {},
   "source": [
    "# 任务一：报名比赛，下载比赛数据集并完成读取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "948f8f46-87d2-44f6-99e5-871c2f09a65c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>name</th>\n",
       "      <th>label</th>\n",
       "      <th>content</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>[4509 3181 1253 2278  290 3562 2051  599 3125 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>[ 263 1325 2563 4160 2196  169 3125 2563 2619 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3</td>\n",
       "      <td>0</td>\n",
       "      <td>[3635  177 3125 1251 3839 5212 2109 1171 1194 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>4</td>\n",
       "      <td>1</td>\n",
       "      <td>[3037  266  246 3547 1253 2278 3125  649  697 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>[ 177 3125 1547 4060 5212 4687 5164 3125 3974 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13995</th>\n",
       "      <td>13996</td>\n",
       "      <td>0</td>\n",
       "      <td>[5212 1759 1953  139 1953 3180 3187 5212 3414 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13996</th>\n",
       "      <td>13997</td>\n",
       "      <td>1</td>\n",
       "      <td>[ 699  778 2777 1333  224 3543  998  139 4411 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13997</th>\n",
       "      <td>13998</td>\n",
       "      <td>1</td>\n",
       "      <td>[ 506  211  139 3333 3293  286 4358  272 5212 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13998</th>\n",
       "      <td>13999</td>\n",
       "      <td>1</td>\n",
       "      <td>[1583  169  123 2969  998 5212 1759  266 1435 ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13999</th>\n",
       "      <td>14000</td>\n",
       "      <td>0</td>\n",
       "      <td>[1759  266 4399  205 5212 1759  266 4399  205 ...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>14000 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        name  label                                            content\n",
       "0          1      0  [4509 3181 1253 2278  290 3562 2051  599 3125 ...\n",
       "1          2      1  [ 263 1325 2563 4160 2196  169 3125 2563 2619 ...\n",
       "2          3      0  [3635  177 3125 1251 3839 5212 2109 1171 1194 ...\n",
       "3          4      1  [3037  266  246 3547 1253 2278 3125  649  697 ...\n",
       "4          5      0  [ 177 3125 1547 4060 5212 4687 5164 3125 3974 ...\n",
       "...      ...    ...                                                ...\n",
       "13995  13996      0  [5212 1759 1953  139 1953 3180 3187 5212 3414 ...\n",
       "13996  13997      1  [ 699  778 2777 1333  224 3543  998  139 4411 ...\n",
       "13997  13998      1  [ 506  211  139 3333 3293  286 4358  272 5212 ...\n",
       "13998  13999      1  [1583  169  123 2969  998 5212 1759  266 1435 ...\n",
       "13999  14000      0  [1759  266 4399  205 5212 1759  266 4399  205 ...\n",
       "\n",
       "[14000 rows x 3 columns]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_df = pd.read_csv('./data/train.csv')\n",
    "test_df = pd.read_csv('./data/test.csv')\n",
    "train_df"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "329bb5ce-9058-40a9-9e9f-a7c13cada941",
   "metadata": {},
   "source": [
    "# 任务二：对数据集字符进行可视化，统计标签和字符分布"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a84fba31-98b8-4536-93fb-d9d72e639563",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('', 1242675),\n",
       " ('\\n', 195874),\n",
       " ('5212', 173863),\n",
       " ('3125', 116815),\n",
       " ('1759', 72557),\n",
       " ('123', 69893),\n",
       " ('0', 61434),\n",
       " ('139', 58299),\n",
       " ('205', 43617),\n",
       " ('2113', 35667)]"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 对输入的内容进行处理\n",
    "train_df['content'] = train_df['content'].apply(lambda x: x[1:-1].strip().replace('\\n', ' \\n '))\n",
    "test_df['content'] = test_df['content'].apply(lambda x: x[1:-1].strip().replace('\\n', ' \\n '))\n",
    "\n",
     "train_df['content'] = train_df['content'].apply(lambda x: [t for t in x.split(' ') if t])\n",
     "test_df['content'] = test_df['content'].apply(lambda x: [t for t in x.split(' ') if t])\n",
    "\n",
    "# 统计字符出现的频次并输出前10个最常见的字符\n",
    "c = Counter()\n",
    "for content in train_df['content']:\n",
    "    c.update(content)\n",
    "c.most_common(10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "bc921b03-dad4-41b6-83ff-8ac41a930cb0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    11836\n",
       "1     2164\n",
       "Name: label, dtype: int64"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 标签统计\n",
    "train_df['label'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "489b9241-f475-470e-bbb6-650bed54aba6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "322.0"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 长度统计\n",
    "df = pd.concat([train_df, test_df])\n",
    "df['len'] = df['content'].apply(lambda x: len(x))\n",
    "df['len'].quantile(q=0.95)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e4030a3d-18ee-4d92-b5b8-ee0f401e963d",
   "metadata": {},
   "source": [
    "# 任务三：使用TFIDF提取文本特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "84120f93-6980-4485-93d2-f3362bfdd742",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<14000x5000 sparse matrix of type '<class 'numpy.float64'>'\n",
       "\twith 2548907 stored elements in Compressed Sparse Row format>"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tfidf = TfidfVectorizer(ngram_range=(1,2), max_features=5000)\n",
    "tfidf.fit(df['content'].apply(lambda x: ' '.join(x)))\n",
    "train_tfidf_feat = tfidf.transform(train_df['content'].apply(lambda x: ' '.join(x)))\n",
    "test_tfidf_feat = tfidf.transform(test_df['content'].apply(lambda x: ' '.join(x)))\n",
    "train_tfidf_feat"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e4b8e55c-9c83-4029-9daa-f58d03a5223f",
   "metadata": {},
   "source": [
    "# 任务四：使用TFIDF特征和线性模型完成训练和预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "05804d4d-d394-4ece-86db-a33636f7a695",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0      0.985     1.000     0.992     11836\n",
      "           1      0.997     0.917     0.955      2164\n",
      "\n",
      "    accuracy                          0.987     14000\n",
      "   macro avg      0.991     0.958     0.974     14000\n",
      "weighted avg      0.987     0.987     0.987     14000\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 使用cross_val_predict()函数进行交叉验证\n",
    "val_pred = cross_val_predict(\n",
    "    LogisticRegression(),\n",
    "    train_tfidf_feat,\n",
    "    train_df['label']\n",
    ")\n",
    "# 计算并输出分类报告\n",
    "print(classification_report(train_df['label'], val_pred, digits=3))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d66c8388-c9b6-47f3-8e0c-6d6df5de18da",
   "metadata": {},
   "source": [
    "# 任务五：使用TFIDF特征和XGBoost完成训练和预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "7c5bf891-cefe-4dc5-a386-70063eee1328",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0      0.992     0.996     0.994     11836\n",
      "           1      0.980     0.954     0.967      2164\n",
      "\n",
      "    accuracy                          0.990     14000\n",
      "   macro avg      0.986     0.975     0.980     14000\n",
      "weighted avg      0.990     0.990     0.990     14000\n",
      "\n"
     ]
    }
   ],
   "source": [
    "val_pred = cross_val_predict(\n",
    "    XGBClassifier(n_estimators=50),\n",
    "    train_tfidf_feat,\n",
    "    train_df['label']\n",
    ")\n",
    "print(classification_report(train_df['label'], val_pred, digits=3))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e0321f3a-ae51-47c1-b422-0bc66e848f53",
   "metadata": {},
   "source": [
    "# 任务六：学会训练Word2Vec词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "b14a4e70-cd70-4f6a-aa26-cdacfd5c76d7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<gensim.models.keyedvectors.KeyedVectors at 0x202b10efee0>"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "seq_list = df['content'].tolist()\n",
    "\n",
     "model = Word2Vec(sentences=seq_list, vector_size=100, window=5, min_count=1, workers=4)\n",
    "model.wv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "be63eb9a-df42-411e-939b-f46fce20ed75",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-8.6196875e-03,  3.6657380e-03,  5.1898835e-03,  5.7419371e-03,\n",
       "        7.4669169e-03, -6.1676763e-03,  1.1056137e-03,  6.0472824e-03,\n",
       "       -2.8400517e-03, -6.1735227e-03, -4.1022300e-04, -8.3689503e-03,\n",
       "       -5.6000138e-03,  7.1045374e-03,  3.3525396e-03,  7.2256685e-03,\n",
       "        6.8002464e-03,  7.5307419e-03, -3.7891555e-03, -5.6180713e-04,\n",
       "        2.3483753e-03, -4.5190332e-03,  8.3887316e-03, -9.8581649e-03,\n",
       "        6.7646410e-03,  2.9144168e-03, -4.9328329e-03,  4.3981862e-03,\n",
       "       -1.7395759e-03,  6.7113829e-03,  9.9648498e-03, -4.3624449e-03,\n",
       "       -5.9933902e-04, -5.6956387e-03,  3.8508223e-03,  2.7866268e-03,\n",
       "        6.8910765e-03,  6.1010956e-03,  9.5384959e-03,  9.2734173e-03,\n",
       "        7.8980681e-03, -6.9895051e-03, -9.1558648e-03, -3.5575390e-04,\n",
       "       -3.0998420e-03,  7.8943158e-03,  5.9385728e-03, -1.5456629e-03,\n",
       "        1.5109634e-03,  1.7900396e-03,  7.8175711e-03, -9.5101884e-03,\n",
       "       -2.0553112e-04,  3.4691954e-03, -9.3897345e-04,  8.3817719e-03,\n",
       "        9.0107825e-03,  6.5365052e-03, -7.1162224e-04,  7.7104042e-03,\n",
       "       -8.5343365e-03,  3.2071066e-03, -4.6379971e-03, -5.0889566e-03,\n",
       "        3.5896183e-03,  5.3703380e-03,  7.7695129e-03, -5.7665063e-03,\n",
       "        7.4333595e-03,  6.6254949e-03, -3.7098003e-03, -8.7456414e-03,\n",
       "        5.4374672e-03,  6.5097548e-03, -7.8755140e-04, -6.7098569e-03,\n",
       "       -7.0859264e-03, -2.4970602e-03,  5.1432536e-03, -3.6652375e-03,\n",
       "       -9.3700597e-03,  3.8267397e-03,  4.8844791e-03, -6.4285635e-03,\n",
       "        1.2085581e-03, -2.0748782e-03,  2.4402141e-05, -9.8835090e-03,\n",
       "        2.6920033e-03, -4.7501065e-03,  1.0876465e-03, -1.5762257e-03,\n",
       "        2.1966719e-03, -7.8815771e-03, -2.7171851e-03,  2.6631975e-03,\n",
       "        5.3466819e-03, -2.3915148e-03, -9.5100952e-03,  4.5058774e-03],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.wv[1]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b302a2d3-05ab-470c-bd45-aa238c02f807",
   "metadata": {},
   "source": [
    "# 任务七：使用Word2Vec词向量，搭建TextCNN模型进行训练和预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "616e6503-808e-4064-a2e3-1526a8e672dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "word_list = [j  for i in seq_list for j in i]\n",
    "word_list = list(set(word_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "02df7602-3dc9-4452-87d3-64a5e58b1461",
   "metadata": {},
   "outputs": [],
   "source": [
    "embeddings = np.array([model.wv[word] for word in word_list])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82690b06-3e29-4f06-9c34-0c34124f6599",
   "metadata": {},
   "outputs": [],
   "source": [
     "import torch\n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
     "\n",
     "class TextCNNModel(nn.Module):\n",
     "    def __init__(self, embeddings):\n",
     "        super(TextCNNModel, self).__init__()\n",
     "        self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(embeddings), freeze=False)\n",
    "        self.convs = nn.ModuleList(\n",
    "            [nn.Conv2d(1, 256, (k, 100)) for k in (2, 3, 4)])\n",
    "        self.dropout = nn.Dropout(0.5)\n",
    "        self.fc = nn.Linear(256 * 3, 2)\n",
    "\n",
    "    def conv_and_pool(self, x, conv):\n",
    "        x = F.relu(conv(x)).squeeze(3)\n",
    "        x = F.max_pool1d(x, x.size(2)).squeeze(2)\n",
    "        return x\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.embedding(x[0])\n",
    "        out = out.unsqueeze(1)\n",
    "        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)\n",
    "        out = self.dropout(out)\n",
    "        out = self.fc(out)\n",
    "        return out\n",
    "model = TextCNNModel(embeddings)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2455e452-ba81-412a-a86c-20db08c8af2a",
   "metadata": {},
   "source": [
    "# 任务八：使用Word2Vec词向量，搭建BILSTM模型进行训练和预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7e48315-eacf-449b-acbe-8739731a7da7",
   "metadata": {},
   "outputs": [],
   "source": [
     "import paddle\n",
     "\n",
     "class SentimentClassifier(paddle.nn.Layer):\n",
    "    \n",
    "    def __init__(self, hidden_size, vocab_size, embedding_size=100, class_num=2, num_steps=128, num_layers=1, init_scale=0.1, dropout_rate=None):\n",
    "        super(SentimentClassifier, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.vocab_size = vocab_size\n",
    "        self.embedding_size = embedding_size\n",
    "        self.class_num = class_num\n",
    "        self.num_steps = num_steps\n",
    "        self.num_layers = num_layers\n",
    "        self.dropout_rate = dropout_rate\n",
    "        self.init_scale = init_scale\n",
    "       \n",
    "        # 声明一个LSTM模型，用来把每个句子抽象成向量\n",
    "        self.simple_lstm_rnn = paddle.nn.LSTM(input_size=hidden_size, hidden_size=hidden_size, num_layers=num_layers)\n",
    "        # 声明一个embedding层，用来把句子中的每个词转换为向量\n",
    "        self.embedding = paddle.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_size, sparse=False, \n",
    "                                    weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform(low=-init_scale, high=init_scale)))\n",
    "        # 声明使用上述语义向量映射到具体分类\n",
    "        self.cls_fc = paddle.nn.Linear(in_features=self.hidden_size, out_features=self.class_num, \n",
    "                             weight_attr=None, bias_attr=None)\n",
    "        \n",
    "        # 一般在获取单词的embedding后，会使用dropout层，防止过拟合，提升模型泛化能力\n",
    "        self.dropout_layer = paddle.nn.Dropout(p=self.dropout_rate, mode='upscale_in_train')\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # 获取输入数据的batch_size\n",
    "        batch_size = inputs.shape[0]\n",
    "\n",
    "        # 本实验默认使用1层的LSTM，首先我们需要定义LSTM的初始hidden和cell，这里我们使用0来初始化这个序列的记忆\n",
    "        init_hidden_data = np.zeros(\n",
    "            (self.num_layers, batch_size, self.hidden_size), dtype='float32')\n",
    "        init_cell_data = np.zeros(\n",
    "            (self.num_layers, batch_size, self.hidden_size), dtype='float32')\n",
    "\n",
    "        # 将这些初始记忆转换为飞桨可计算的向量，并且设置stop_gradient=True，避免这些向量被更新，从而影响训练效果\n",
    "        init_hidden = paddle.to_tensor(init_hidden_data)\n",
    "        init_hidden.stop_gradient = True\n",
    "        init_cell = paddle.to_tensor(init_cell_data)\n",
    "        init_cell.stop_gradient = True\n",
    "\n",
    "        # 对应以上第2步，将输入的句子的mini-batch转换为词向量表示，转换后输入数据shape为[batch_size, max_seq_len, embedding_size]\n",
    "        x_emb = self.embedding(inputs)\n",
    "        x_emb = paddle.reshape(x_emb, shape=[-1, self.num_steps, self.embedding_size])\n",
    "        # 在获取的词向量后添加dropout层\n",
    "        if self.dropout_rate is not None and self.dropout_rate > 0.0:\n",
    "            x_emb = self.dropout_layer(x_emb)\n",
    "        \n",
    "        # 对应以上第3步，使用LSTM网络，把每个句子转换为语义向量\n",
    "        # 返回的last_hidden即为最后一个时间步的输出，其shape为[self.num_layers, batch_size, hidden_size]\n",
    "        rnn_out, (last_hidden, last_cell) = self.simple_lstm_rnn(x_emb, (init_hidden, init_cell))\n",
    "        # 提取最后一层隐状态作为文本的语义向量，其shape为[batch_size, hidden_size]\n",
    "        last_hidden = paddle.reshape(last_hidden[-1], shape=[-1, self.hidden_size])\n",
    "        logits = self.cls_fc(last_hidden)\n",
    "        return logits"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d42856c8-77d3-4593-810a-669c07e9fa1c",
   "metadata": {},
   "source": [
    "# 任务九：学会Bert基础，transformer库基础使用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "da1cd46f-893e-482d-8b5e-80fe0457d67f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m[2023-07-11 20:54:47,737] [    INFO]\u001b[0m - We are using <class 'paddlenlp.transformers.ernie.tokenizer.ErnieTokenizer'> to load 'ernie-3.0-base-zh'.\u001b[0m\n",
      "\u001b[32m[2023-07-11 20:54:47,741] [    INFO]\u001b[0m - Already cached D:\\bert\\models\\ernie-3.0-base-zh\\ernie_3.0_base_zh_vocab.txt\u001b[0m\n",
      "\u001b[32m[2023-07-11 20:54:47,762] [    INFO]\u001b[0m - tokenizer config file saved in D:\\bert\\models\\ernie-3.0-base-zh\\tokenizer_config.json\u001b[0m\n",
      "\u001b[32m[2023-07-11 20:54:47,764] [    INFO]\u001b[0m - Special tokens file saved in D:\\bert\\models\\ernie-3.0-base-zh\\special_tokens_map.json\u001b[0m\n",
      "\u001b[32m[2023-07-11 20:54:47,767] [    INFO]\u001b[0m - We are using <class 'paddlenlp.transformers.ernie.modeling.ErnieForSequenceClassification'> to load 'ernie-3.0-base-zh'.\u001b[0m\n",
      "\u001b[32m[2023-07-11 20:54:47,770] [    INFO]\u001b[0m - Model config ErnieConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"enable_recompute\": false,\n",
      "  \"fuse\": false,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 2048,\n",
      "  \"model_type\": \"ernie\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"paddlenlp_version\": null,\n",
      "  \"pool_act\": \"tanh\",\n",
      "  \"task_id\": 0,\n",
      "  \"task_type_vocab_size\": 3,\n",
      "  \"type_vocab_size\": 4,\n",
      "  \"use_task_id\": true,\n",
      "  \"vocab_size\": 40000\n",
      "}\n",
      "\u001b[0m\n",
      "\u001b[33m[2023-07-11 20:54:49,054] [ WARNING]\u001b[0m - Some weights of the model checkpoint at ernie-3.0-base-zh were not used when initializing ErnieForSequenceClassification: ['cls.predictions.layer_norm.bias', 'cls.predictions.decoder_bias', 'cls.predictions.transform.weight', 'cls.predictions.layer_norm.weight', 'cls.predictions.transform.bias']\n",
      "- This IS expected if you are initializing ErnieForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing ErnieForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\u001b[0m\n",
      "\u001b[33m[2023-07-11 20:54:49,055] [ WARNING]\u001b[0m - Some weights of ErnieForSequenceClassification were not initialized from the model checkpoint at ernie-3.0-base-zh and are newly initialized: ['classifier.weight', 'classifier.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "import paddle\n",
    "from paddlenlp.transformers import AutoModelForSequenceClassification, AutoTokenizer\n",
    "tokenizer = AutoTokenizer.from_pretrained('ernie-3.0-base-zh')\n",
    "model = AutoModelForSequenceClassification.from_pretrained('ernie-3.0-base-zh')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "7ece5326-e825-44bf-94f7-cf62817c45cb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Tensor(shape=[1, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,\n",
       "       [[-0.16138557, -0.20234782]])"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "inputs = tokenizer('今天的天气真的好')\n",
    "input_ids = paddle.to_tensor([inputs['input_ids']], dtype='int64')\n",
    "outputs = model(input_ids=input_ids)\n",
    "outputs"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "43c025ae-0b71-4570-bf81-5636f36a0309",
   "metadata": {},
   "source": [
    "# 任务十：使用Bert在比赛数据集中完成预训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "25e477ee-f582-4b16-a430-3eee4199557a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "import re\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, f1_score\n",
    "\n",
    "import torch\n",
    "from torch import optim\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "\n",
    "from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdamW\n",
    "\n",
    "import logging\n",
    "\n",
    "log = logging.getLogger()\n",
    "log.setLevel(logging.INFO)\n",
    "\n",
    "class Config():\n",
    "    train_data = 'data/train.csv' # 训练集\n",
    "    predict_data = 'data/test.csv' # 测试集\n",
    "    result_data_save = 'result/submission.csv' # 预测结果\n",
    "    device = 'cpu' # 训练驱动\n",
    "\n",
    "    model_path = 'D:/env/bert_model/hfl/chinese-roberta-wwm-ext' # 预训练模型\n",
    "    model_save_path = 'result/model' # 保存模型\n",
    "    \n",
    "    tokenizer = None # 预训练模型的tokenizer\n",
    "    \n",
    "    # 数据标签\n",
    "    label_dict = {0: 0, 1: 1}\n",
    "    num_labels = len(label_dict) # 标签数量\n",
    "    \n",
    "    max_seq_len = 210 # 最大句子长度\n",
    "    test_size = 0.15 # 校验集大小\n",
    "    random_seed = 42 # 随机种子\n",
    "    batch_size = 32 # 训练数据批大小\n",
    "    val_batch_size = 8 # 校验/预测批大小\n",
    "    epochs = 7 # 训练次数\n",
    "    learning_rate = 1e-5 # 学习率\n",
    "    l2_weight_decay = 0.05\n",
    "    ema_decay = 0.99 # ema衰减的比例\n",
    "    \n",
    "    \n",
    "    print_log = 20 # 日志打印步骤\n",
    "\n",
    "config = Config()\n",
    "config.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "# 自定义dataset\n",
    "class MyDataset(Dataset):\n",
    "    def __init__(self, config: Config, data: list, label: list = None):\n",
    "        self.data = data\n",
    "        self.tokenizer = config.tokenizer \n",
    "        self.max_seq_len = config.max_seq_len\n",
    "        self.len = len(data)\n",
    "        self.label = label\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        text = self.data[idx]\n",
    "        # tokenizer\n",
    "        inputs = self.tokenizer.encode_plus(text, return_token_type_ids=True, return_attention_mask=True,\n",
    "                                            max_length=self.max_seq_len, padding='max_length', truncation=True)\n",
    "\n",
    "        # 打包预处理结果\n",
    "        result = {'input_ids': torch.tensor(inputs['input_ids'], dtype=torch.long),\n",
    "                  'token_type_ids': torch.tensor(inputs['token_type_ids'], dtype=torch.long),\n",
    "                  'attention_mask': torch.tensor(inputs['attention_mask'], dtype=torch.long)}\n",
    "        if self.label is not None:\n",
    "            result['labels'] = torch.tensor([self.label[idx]], dtype=torch.long)\n",
    "        # 返回\n",
    "        return result\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len\n",
    "\n",
    "    \n",
    "data = pd.read_csv(config.train_data)\n",
    "data['text'] = data['content'].apply(lambda x: x.replace('[','').replace(']','').replace('\\n','').replace('  ',' '))\n",
    "\n",
    "data.head(5)\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(config.model_path)\n",
    "model = AutoModelForSequenceClassification.from_pretrained(config.model_path, num_labels=config.num_labels)\n",
    "\n",
    "config.tokenizer = tokenizer\n",
    "\n",
    "\n",
    "# 拼接生成最终的文本\n",
    "# train_data['text'] = train_data['header'] + '[SEP]' + train_data['title'] + '[SEP]' + train_data['paragraph'] + '[SEP]' + train_data['footer']\n",
    "\n",
    "\n",
    "# 切分数据\n",
    "X_train, X_val, y_train, y_val = train_test_split(data['text'].tolist(), data['label'].tolist(),\n",
    "                                                          test_size=config.test_size,\n",
    "                                                          random_state=config.random_seed)\n",
    "# 构建数据\n",
    "train_dataloader = DataLoader(MyDataset(config, X_train, y_train), batch_size=config.batch_size, shuffle=True)\n",
    "val_dataloader = DataLoader(MyDataset(config, X_val, y_val), batch_size=config.val_batch_size, shuffle=True)\n",
    "\n",
    "class EMA():\n",
    "    def __init__(self, mu):\n",
    "        self.mu = mu\n",
    "        self.shadow = {}\n",
    " \n",
    "    def register(self, name, val):\n",
    "        self.shadow[name] = val.clone()\n",
    " \n",
    "    def get(self, name):\n",
    "        return self.shadow[name]\n",
    " \n",
    "    def update(self, name, x):\n",
    "        assert name in self.shadow\n",
    "        new_average = (1.0 - self.mu) * x + self.mu * self.shadow[name]\n",
    "        self.shadow[name] = new_average.clone()\n",
    "        \n",
    "# 校验方法\n",
    "def val(model, val_dataloader: DataLoader):\n",
    "    model.eval()\n",
    "    total_acc, total_f1, total_loss, test_num_batch = 0., 0., 0., 0\n",
    "    for iter_id, batch in enumerate(val_dataloader):\n",
    "        # 转GPU\n",
    "        batch_cuda = {item: value.to(config.device) for item, value in batch.items()}\n",
    "        # 模型计算\n",
    "        output = model(**batch_cuda)\n",
    "        # 获取结果\n",
    "        loss = output[0]\n",
    "        logits = torch.argmax(output[1], dim=1)\n",
    "\n",
    "        y_pred = [[i] for i in logits.cpu().detach().numpy()]\n",
    "        y_true = batch_cuda['labels'].cpu().detach().numpy()\n",
    "        # 计算指标\n",
    "        acc = accuracy_score(y_true, y_pred)\n",
    "        f1 = f1_score(y_true, y_pred, average='weighted')  \n",
    "        total_loss += loss.item()\n",
    "        total_acc += acc\n",
    "        total_f1 += f1\n",
    "        test_num_batch += 1\n",
    "\n",
    "    return total_loss/test_num_batch, total_acc/test_num_batch, total_f1/test_num_batch\n",
    "\n",
    "# Training loop\n",
    "def train(model, config: Config, train_dataloader: DataLoader, val_dataloader: DataLoader):\n",
    "    \"\"\"Fine-tune `model` on `train_dataloader`, validating after every epoch on `val_dataloader`.\n",
    "\n",
    "    The best checkpoint (by validation weighted-F1) is saved to `config.model_save_path + '/best'`\n",
    "    together with its tokenizer; the final (last-epoch) model is saved to `config.model_save_path`.\n",
    "    Per-batch and per-epoch metrics are reported via `logging`.\n",
    "    \"\"\"\n",
    "    # Move the model to the configured device (GPU if available)\n",
    "    model.to(config.device)\n",
    "\n",
    "    # Collect all named parameters of the model\n",
    "    params = list(model.named_parameters())\n",
    "    # Apply L2 weight decay to every parameter except biases and LayerNorm parameters\n",
    "    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n",
    "    optimizer_grouped_parameters = [\n",
    "        {'params': [p for n, p in params if not any(nd in n for nd in no_decay)],\n",
    "         'weight_decay': config.l2_weight_decay},\n",
    "        {'params': [p for n, p in params if any(nd in n for nd in no_decay)],\n",
    "         'weight_decay': 0.0}\n",
    "    ]\n",
    "    # AdamW optimizer over the grouped (decay / no-decay) parameters\n",
    "    opt = torch.optim.AdamW(optimizer_grouped_parameters, lr=config.learning_rate)\n",
    "    # Cosine-annealing LR schedule over the total number of training steps\n",
    "    scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, len(train_dataloader) * config.epochs)\n",
    "    \n",
    "    # Exponential moving average of trainable parameters (decay 0.999)\n",
    "    # NOTE(review): the EMA shadow weights are updated below but never copied back into\n",
    "    # the model before saving -- confirm an EMA apply step happens elsewhere if intended.\n",
    "    ema = EMA(0.999)\n",
    "    for name, param in model.named_parameters():\n",
    "        if param.requires_grad:\n",
    "            ema.register(name, param.data)\n",
    "    \n",
    "    # Epoch loop\n",
    "    best_f1 = 0\n",
    "    for epoch in range(config.epochs):\n",
    "        total_acc, total_f1, total_loss, train_num_batch = 0., 0., 0., 0\n",
    "        model.train()\n",
    "        zero_step = 0  # NOTE(review): assigned but never used in this function\n",
    "        for iter_id, batch in enumerate(train_dataloader):\n",
    "            # Move the batch tensors to the device\n",
    "            batch_cuda = {item: value.to(config.device) for item, value in batch.items()}\n",
    "            # Forward pass\n",
    "            output = model(**batch_cuda)\n",
    "            # Unpack the loss and the predicted class index per sample\n",
    "            loss = output[0]\n",
    "            logits = torch.argmax(output[1], dim=1)\n",
    "\n",
    "            y_pred = [[i] for i in logits.cpu().detach().numpy()]\n",
    "            y_true = batch_cuda['labels'].cpu().detach().numpy()\n",
    "\n",
    "            # Batch-level metrics\n",
    "            acc = accuracy_score(y_true, y_pred)\n",
    "            f1 = f1_score(y_true, y_pred, average='weighted')  \n",
    "            total_loss += loss.item()\n",
    "            total_acc += acc\n",
    "            total_f1 += f1\n",
    "\n",
    "            # Backward pass and parameter/LR update\n",
    "            opt.zero_grad() \n",
    "            loss.backward()\n",
    "            opt.step()\n",
    "            scheduler.step()\n",
    "            # Update the EMA shadow weights once batch accuracy exceeds 0.97\n",
    "            if acc > 0.97:\n",
    "                for name, param in model.named_parameters():\n",
    "                    if param.requires_grad:\n",
    "                        ema.update(name, param.data)\n",
    "\n",
    "            # Periodic progress logging\n",
    "            if iter_id % config.print_log == 0:\n",
    "                logging.info('epoch:{}, iter_id:{}, loss:{}, acc:{}, f1:{}'.format(epoch, iter_id, loss.item(), acc, f1))\n",
    "                \n",
    "            train_num_batch += 1\n",
    "        # End-of-epoch validation\n",
    "        val_loss, val_acc, val_f1 = val(model, val_dataloader)\n",
    "        if val_f1 > best_f1:\n",
    "            best_f1 = val_f1\n",
    "            # Checkpoint the best model (by validation F1) together with its tokenizer\n",
    "            config.tokenizer.save_pretrained(config.model_save_path + \"/best\")\n",
    "            model.save_pretrained(config.model_save_path + \"/best\")\n",
    "        logging.info('-' * 15+str(epoch)+'-' * 15)\n",
    "        logging.info('avg_train_loss:{}, avg_train_acc:{}, avg_train_f1:{}'.format(total_loss/train_num_batch, total_acc/train_num_batch, total_f1/train_num_batch))\n",
    "        logging.info('val_loss:{}, val_acc:{}, val_f1:{}, best_f1:{}'.format(val_loss, val_acc, val_f1, best_f1))\n",
    "       \n",
    "        logging.info('-' * 30)\n",
    "        \n",
    "    # Save the final (last-epoch) model and tokenizer\n",
    "    config.tokenizer.save_pretrained(config.model_save_path)\n",
    "    model.save_pretrained(config.model_save_path)\n",
    "\n",
    "# Run training\n",
    "train(model, config, train_dataloader, val_dataloader)\n",
    "print('train done.')\n",
    "\n",
    "# Inference: predict labels and per-class softmax scores for the test set\n",
    "def predict(config:Config):\n",
    "    \"\"\"Load the best checkpoint and run inference over `config.predict_data`.\n",
    "\n",
    "    Returns a DataFrame with columns ['name', 'label'] plus one softmax-probability\n",
    "    column per class (keys of `config.label_dict`).\n",
    "    \"\"\"\n",
    "    # Load the best checkpoint saved during training\n",
    "    config.tokenizer = AutoTokenizer.from_pretrained(config.model_save_path+'/best')\n",
    "    model = AutoModelForSequenceClassification.from_pretrained(config.model_save_path+'/best')\n",
    "    model.to(config.device)\n",
    "    model.eval()\n",
    "    # Load and normalise the raw test data (strip brackets/newlines from the token string)\n",
    "    test_data = pd.read_csv(config.predict_data)\n",
    "    test_data['text'] = test_data['content'].apply(lambda x: x.replace('[','').replace(']','').replace('\\n','').replace('  ',' '))\n",
    "    # Build the prediction DataLoader (no shuffling, to keep row order aligned with test_data)\n",
    "    predict_dataloader = DataLoader(MyDataset(config, test_data['text'].tolist()), batch_size=config.batch_size, shuffle=False)\n",
    "    \n",
    "    predict_result = []\n",
    "    predict_softmax = []\n",
    "    softmax = None  # NOTE(review): assigned but never used\n",
    "    # Batch-wise inference\n",
    "    # NOTE(review): the loop runs without torch.no_grad(); wrapping it would avoid\n",
    "    # building autograd graphs and reduce memory use during prediction.\n",
    "    for iter_id, batch in enumerate(predict_dataloader):\n",
    "        batch_cuda = {item: value.to(config.device) for item, value in batch.items()}\n",
    "        # Forward pass\n",
    "        output = model(**batch_cuda)\n",
    "        # Predicted class index per sample\n",
    "        logits = torch.argmax(output[0], dim=1)\n",
    "        y_pred = [[i] for i in logits.cpu().detach().numpy()]\n",
    "        # Per-class softmax probabilities\n",
    "        y_softmax = [i for i in F.softmax(output.logits, dim=1).cpu().detach().numpy()]\n",
    "        # Accumulate batch results\n",
    "        predict_result += y_pred\n",
    "        predict_softmax += y_softmax\n",
    "    # Attach predicted labels to the test frame\n",
    "    test_data['label'] = [i[0] for i in predict_result]\n",
    "    \n",
    "    softmax_df = pd.DataFrame(predict_softmax, columns=list(config.label_dict.keys()))\n",
    "    \n",
    "    # Combine ids, labels and per-class probabilities into one result frame\n",
    "    result_df = pd.concat([test_data[['name', 'label']], softmax_df], axis=1)\n",
    "    return result_df\n",
    "\n",
    "# Run inference and write the submission file\n",
    "result_df = predict(config)\n",
    "result_df.to_csv(config.result_data_save, index=False, encoding='utf-8')\n",
    "print('predict done.')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c3881770-e72d-49c5-94aa-d585779541d3",
   "metadata": {},
   "source": [
    "# 任务十一：使用Bert在比赛数据集上完成微调"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7e804e13-4964-43d6-b5a2-1932fa20c570",
   "metadata": {},
   "source": [
    "线上分数：0.9968，步骤如下：\n",
    "1. 使用roberta对token进行decode，注意要绕过一些roberta的关键字或无用字token\n",
    "2. 对decode完的训练和测试数据进行再次预训练\n",
    "3. 对新的预训练模型进行ft\n",
    "4. 使用995+的结果进行合并，生成伪标签训练数据\n",
    "5. 重新训练，得到最终模型\n",
    "\n",
    "其他trick:\n",
    "1. 样本不是很均匀，加入对抗训练\n",
    "2. 由于样本不均匀，导致全部1个值acc的不差，所以加入ema\n",
    "3. 多模型融合（未尝试）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aca6d78a-b114-4e64-8979-6c0acc63ec15",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
