{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a46d5532",
   "metadata": {},
   "source": [
    "# 导入依赖"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "fb5a58c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, f1_score\n",
    "\n",
    "import torch\n",
    "from torch import optim\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "\n",
    "# NOTE: transformers' AdamW was deprecated and later removed (transformers>=4.38);\n",
    "# training below uses torch.optim.AdamW, so it is not imported here.\n",
    "from transformers import AutoTokenizer, AutoModelForSequenceClassification\n",
    "\n",
    "import logging\n",
    "\n",
    "log = logging.getLogger()\n",
    "log.setLevel(logging.INFO)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "012dc773",
   "metadata": {},
   "source": [
    "# 初始化配置文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "355cca84",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Config():\n",
    "    # Central configuration: data paths, pretrained model, and training hyper-parameters.\n",
    "    train_data = 'data/train_dataset.npy' # training set path\n",
    "    predict_data = 'data/eval_dataset.npy' # test set path\n",
    "    result_data_save = 'result/submission.csv' # prediction output file\n",
    "    device = 'cpu' # training device (switched to 'cuda' below when available)\n",
    "\n",
    "    model_path = 'hfl/chinese-roberta-wwm-ext' # pretrained model checkpoint\n",
    "    model_save_path = 'result/model' # directory for the fine-tuned model\n",
    "    \n",
    "    tokenizer = None # tokenizer of the pretrained model (assigned after loading)\n",
    "    \n",
    "    # mapping from report-category name to label id (keys are Chinese category names)\n",
    "    label_dict = {'晨会早报': 0, '宏观研报': 1, '策略研报': 2, '行业研报': 3, '公司研报': 4, '基金研报': 5, '债券研报': 6, '金融工程': 7, '其他研报': 8, '个股研报': 9}\n",
    "    num_labels = len(label_dict) # number of classes\n",
    "    \n",
    "    max_seq_len = 128 # maximum tokenized sequence length\n",
    "    test_size = 0.15 # fraction of data held out for validation\n",
    "    random_seed = 42 # random seed for the train/val split\n",
    "    batch_size = 64 # training batch size\n",
    "    val_batch_size = 8 # validation/prediction batch size\n",
    "    epochs = 10 # number of training epochs\n",
    "    learning_rate = 1e-5 # optimizer learning rate\n",
    "    l2_weight_decay = 0.05 # L2 weight decay (skipped for bias/LayerNorm params in train())\n",
    "    \n",
    "    print_log = 20 # log every N training steps\n",
    "\n",
    "config = Config()\n",
    "config.device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "00fe8c53",
   "metadata": {},
   "source": [
    "# 自定义dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d4f69787",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset wrapping raw texts (and optional labels) with BERT tokenization\n",
    "class MyDataset(Dataset):\n",
    "    def __init__(self, config: Config, data: list, label: list = None):\n",
    "        self.data = data\n",
    "        self.tokenizer = config.tokenizer\n",
    "        self.max_seq_len = config.max_seq_len\n",
    "        self.len = len(data)\n",
    "        self.label = label\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # Tokenize one sample, padding/truncating to the fixed sequence length.\n",
    "        encoded = self.tokenizer.encode_plus(self.data[idx],\n",
    "                                             return_token_type_ids=True,\n",
    "                                             return_attention_mask=True,\n",
    "                                             max_length=self.max_seq_len,\n",
    "                                             padding='max_length',\n",
    "                                             truncation=True)\n",
    "\n",
    "        # Package the encoder outputs as long tensors.\n",
    "        item = {key: torch.tensor(encoded[key], dtype=torch.long)\n",
    "                for key in ('input_ids', 'token_type_ids', 'attention_mask')}\n",
    "        if self.label is not None:\n",
    "            # Label is kept as a one-element list so each item has shape (1,).\n",
    "            item['labels'] = torch.tensor([self.label[idx]], dtype=torch.long)\n",
    "        return item\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d8a115ce",
   "metadata": {},
   "source": [
    "# 加载数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "93f459e3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>label</th>\n",
       "      <th>header</th>\n",
       "      <th>title</th>\n",
       "      <th>paragraph</th>\n",
       "      <th>footer</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>仅供内部参考，请勿外传</td>\n",
       "      <td>证券研究报告 | 浙商早知道,报告日期：2022 年 11 月 09 日,重要点评</td>\n",
       "      <td>❑ 【浙商互联网  谢晨】快手（01024.HK）Q3 业绩前瞻：竞争趋缓盈利改善，Q4 或...</td>\n",
       "      <td>http://www.stocke.com.cn 1/5 请务必阅读正文之后的免责条款部分</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0</td>\n",
       "      <td>仅供内部参考，请勿外传</td>\n",
       "      <td>中信证券研, 2023 年海外宏观与大类资产配置展望—东边日出西边雨 ,▍ 美股科技板块 2...</td>\n",
       "      <td>部 重点推荐 ,▍,崔嵘 海外宏观经济首席分析师 ,S1010517040001,宏观经济｜...</td>\n",
       "      <td>证券研究报告 具体分析内容（包括相关风险提示等）详见后文，  请务必阅读正文之后第 19 页...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0</td>\n",
       "      <td>仅供内部参考，请勿外传</td>\n",
       "      <td>2022 年 11 月 10 日星期四,晨会纪要,【今日焦点】,以注册制为抓手提高直融比重...</td>\n",
       "      <td>党的二十大报告提出，“健全资本市场功能，提高直接融资比重。”新形势下，提高直接融资比重是资本...</td>\n",
       "      <td>请务必阅读最后一页股票评级说明和免责声明</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0</td>\n",
       "      <td>仅供内部参考，请勿外传证券研究报告</td>\n",
       "      <td>东吴证券晨会纪要 ,东吴证券晨会纪要 2022-11-11  [Table_Tag] ,宏观...</td>\n",
       "      <td>晨会编辑  曾朵红执业证书：S0600516080001021-60199793zengdh...</td>\n",
       "      <td>请务必阅读正文之后的免责声明部分</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0</td>\n",
       "      <td>仅供内部参考，请勿外传晨会纪要（2022/11/16）</td>\n",
       "      <td>渤海证券研究所晨会, 宏观及策略分析, 行业专题评述, 金融工程研究</td>\n",
       "      <td>崔健  022-28451618  SACNO: S1150511010016 cuijia...</td>\n",
       "      <td>请务必阅读正文之后的声明        渤海证券股份有限公司具备证券投资咨询业务资格    ...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   label                       header  \\\n",
       "0      0                  仅供内部参考，请勿外传   \n",
       "1      0                  仅供内部参考，请勿外传   \n",
       "2      0                  仅供内部参考，请勿外传   \n",
       "3      0            仅供内部参考，请勿外传证券研究报告   \n",
       "4      0  仅供内部参考，请勿外传晨会纪要（2022/11/16）   \n",
       "\n",
       "                                               title  \\\n",
       "0          证券研究报告 | 浙商早知道,报告日期：2022 年 11 月 09 日,重要点评   \n",
       "1  中信证券研, 2023 年海外宏观与大类资产配置展望—东边日出西边雨 ,▍ 美股科技板块 2...   \n",
       "2  2022 年 11 月 10 日星期四,晨会纪要,【今日焦点】,以注册制为抓手提高直融比重...   \n",
       "3  东吴证券晨会纪要 ,东吴证券晨会纪要 2022-11-11  [Table_Tag] ,宏观...   \n",
       "4              渤海证券研究所晨会, 宏观及策略分析, 行业专题评述, 金融工程研究   \n",
       "\n",
       "                                           paragraph  \\\n",
       "0  ❑ 【浙商互联网  谢晨】快手（01024.HK）Q3 业绩前瞻：竞争趋缓盈利改善，Q4 或...   \n",
       "1  部 重点推荐 ,▍,崔嵘 海外宏观经济首席分析师 ,S1010517040001,宏观经济｜...   \n",
       "2  党的二十大报告提出，“健全资本市场功能，提高直接融资比重。”新形势下，提高直接融资比重是资本...   \n",
       "3  晨会编辑  曾朵红执业证书：S0600516080001021-60199793zengdh...   \n",
       "4  崔健  022-28451618  SACNO: S1150511010016 cuijia...   \n",
       "\n",
       "                                              footer  \n",
       "0    http://www.stocke.com.cn 1/5 请务必阅读正文之后的免责条款部分    \n",
       "1  证券研究报告 具体分析内容（包括相关风险提示等）详见后文，  请务必阅读正文之后第 19 页...  \n",
       "2                               请务必阅读最后一页股票评级说明和免责声明  \n",
       "3                                 请务必阅读正文之后的免责声明部分    \n",
       "4  请务必阅读正文之后的声明        渤海证券股份有限公司具备证券投资咨询业务资格    ...  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the pickled training records (an object array of dicts) into a DataFrame.\n",
    "raw_records = np.load(config.train_data, allow_pickle=True)\n",
    "train_data = pd.DataFrame(raw_records.tolist())\n",
    "train_data.head(5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0cef9771",
   "metadata": {},
   "source": [
    "# 加载预训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1c46c690",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.dense.bias', 'cls.seq_relationship.bias', 'cls.predictions.decoder.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.seq_relationship.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.weight']\n",
      "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at hfl/chinese-roberta-wwm-ext and are newly initialized: ['classifier.weight', 'classifier.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "# Load the pretrained tokenizer and the classification model (fresh 10-way head).\n",
    "tokenizer = AutoTokenizer.from_pretrained(config.model_path)\n",
    "model = AutoModelForSequenceClassification.from_pretrained(config.model_path, num_labels=config.num_labels)\n",
    "\n",
    "# Share the tokenizer through the config so MyDataset can use it.\n",
    "config.tokenizer = tokenizer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3cfbdd27",
   "metadata": {},
   "source": [
    "# 创建dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "f847eed9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the final input text by joining the fields with the BERT [SEP] token\n",
    "train_data['text'] = train_data['header'] + '[SEP]' + train_data['title'] + '[SEP]' + train_data['paragraph'] + '[SEP]' + train_data['footer']\n",
    "# Split into train/validation sets\n",
    "# NOTE(review): the split is not stratified by label — consider stratify=train_data['label'] if classes are imbalanced\n",
    "X_train, X_val, y_train, y_val = train_test_split(train_data['text'].tolist(), train_data['label'].tolist(),\n",
    "                                                          test_size=config.test_size,\n",
    "                                                          random_state=config.random_seed)\n",
    "# Wrap the splits in DataLoaders\n",
    "train_dataloader = DataLoader(MyDataset(config, X_train, y_train), batch_size=config.batch_size, shuffle=True)\n",
    "val_dataloader = DataLoader(MyDataset(config, X_val, y_val), batch_size=config.val_batch_size, shuffle=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6eb8c3aa",
   "metadata": {},
   "source": [
    "# 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "a34f2bf0",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:root:epoch:0, iter_id:0, loss:2.2925097942352295, acc:0.125, f1:0.12116228070175439\n",
      "INFO:root:epoch:0, iter_id:20, loss:1.6396788358688354, acc:0.3125, f1:0.2135964912280702\n",
      "INFO:root:epoch:0, iter_id:40, loss:1.0177865028381348, acc:0.734375, f1:0.65247042326546\n",
      "INFO:root:---------------0---------------\n",
      "INFO:root:avg_train_loss:1.5650783805620103, avg_train_acc:0.4922388136288999, avg_train_f1:0.4080102334879386\n",
      "INFO:root:val_loss:1.0458162820945351, val_acc:0.6788135593220338, val_acc:0.6313647981020862, best_f1:0.6313647981020862\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:1, iter_id:0, loss:1.0913031101226807, acc:0.734375, f1:0.6735634157509158\n",
      "INFO:root:epoch:1, iter_id:20, loss:0.9109288454055786, acc:0.765625, f1:0.7347834967320261\n",
      "INFO:root:epoch:1, iter_id:40, loss:0.5402162075042725, acc:0.859375, f1:0.8479111615904069\n",
      "INFO:root:---------------1---------------\n",
      "INFO:root:avg_train_loss:0.7638342607588995, avg_train_acc:0.8119612068965518, avg_train_f1:0.7883159272240182\n",
      "INFO:root:val_loss:0.558816951462778, val_acc:0.8601694915254238, val_acc:0.8495998116760828, best_f1:0.8495998116760828\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:2, iter_id:0, loss:0.812037467956543, acc:0.765625, f1:0.7491474701065218\n",
      "INFO:root:epoch:2, iter_id:20, loss:0.45078736543655396, acc:0.90625, f1:0.8921522556390977\n",
      "INFO:root:epoch:2, iter_id:40, loss:0.292633980512619, acc:0.96875, f1:0.9604166666666666\n",
      "INFO:root:---------------2---------------\n",
      "INFO:root:avg_train_loss:0.45413881966045927, avg_train_acc:0.892844314449918, avg_train_f1:0.8855554954495695\n",
      "INFO:root:val_loss:0.3750258767503803, val_acc:0.9021186440677966, val_acc:0.8989827145547488, best_f1:0.8989827145547488\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:3, iter_id:0, loss:0.3189634084701538, acc:0.90625, f1:0.9119902612207673\n",
      "INFO:root:epoch:3, iter_id:20, loss:0.33030834794044495, acc:0.921875, f1:0.9201883012820512\n",
      "INFO:root:epoch:3, iter_id:40, loss:0.3076692223548889, acc:0.9375, f1:0.9370915032679739\n",
      "INFO:root:---------------3---------------\n",
      "INFO:root:avg_train_loss:0.308211869427136, avg_train_acc:0.9293026477832513, avg_train_f1:0.9284582064750327\n",
      "INFO:root:val_loss:0.2981670295535508, val_acc:0.9152542372881356, val_acc:0.9082079878690048, best_f1:0.9082079878690048\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:4, iter_id:0, loss:0.16035234928131104, acc:0.984375, f1:0.9844450280112044\n",
      "INFO:root:epoch:4, iter_id:20, loss:0.22028644382953644, acc:0.921875, f1:0.9236424721915794\n",
      "INFO:root:epoch:4, iter_id:40, loss:0.16299688816070557, acc:0.953125, f1:0.9519786031578485\n",
      "INFO:root:---------------4---------------\n",
      "INFO:root:avg_train_loss:0.2366646627585093, avg_train_acc:0.9418103448275862, avg_train_f1:0.941041911921206\n",
      "INFO:root:val_loss:0.2725198914186429, val_acc:0.9161016949152542, val_acc:0.9113654706875048, best_f1:0.9113654706875048\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:5, iter_id:0, loss:0.1591169387102127, acc:0.96875, f1:0.9758833827322503\n",
      "INFO:root:epoch:5, iter_id:20, loss:0.24135752022266388, acc:0.953125, f1:0.9524612312030076\n",
      "INFO:root:epoch:5, iter_id:40, loss:0.11816520243883133, acc:0.984375, f1:0.984337637494022\n",
      "INFO:root:---------------5---------------\n",
      "INFO:root:avg_train_loss:0.1980056686415559, avg_train_acc:0.9526760057471264, avg_train_f1:0.9519183107680349\n",
      "INFO:root:val_loss:0.257260720103474, val_acc:0.9173728813559322, val_acc:0.9154262051507813, best_f1:0.9154262051507813\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:6, iter_id:0, loss:0.22741706669330597, acc:0.9375, f1:0.9298878205128205\n",
      "INFO:root:epoch:6, iter_id:20, loss:0.1444770246744156, acc:0.96875, f1:0.9690068786069921\n",
      "INFO:root:epoch:6, iter_id:40, loss:0.10873167216777802, acc:0.984375, f1:0.9843070947414168\n",
      "INFO:root:---------------6---------------\n",
      "INFO:root:avg_train_loss:0.17401388571375892, avg_train_acc:0.9570633210180624, avg_train_f1:0.956190846471874\n",
      "INFO:root:val_loss:0.25264419331136395, val_acc:0.9216101694915254, val_acc:0.9235174077335094, best_f1:0.9235174077335094\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:7, iter_id:0, loss:0.12694011628627777, acc:0.96875, f1:0.9686789772727273\n",
      "INFO:root:epoch:7, iter_id:20, loss:0.13447785377502441, acc:0.984375, f1:0.9830357142857142\n",
      "INFO:root:epoch:7, iter_id:40, loss:0.17595680058002472, acc:0.953125, f1:0.953063725490196\n",
      "INFO:root:---------------7---------------\n",
      "INFO:root:avg_train_loss:0.16122685532484735, avg_train_acc:0.9630157019704433, avg_train_f1:0.9621441791549052\n",
      "INFO:root:val_loss:0.25679370483099406, val_acc:0.9216101694915254, val_acc:0.9209851236334285, best_f1:0.9235174077335094\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:8, iter_id:0, loss:0.13517944514751434, acc:0.96875, f1:0.9699284105534106\n",
      "INFO:root:epoch:8, iter_id:20, loss:0.21480195224285126, acc:0.9375, f1:0.9229525862068966\n",
      "INFO:root:epoch:8, iter_id:40, loss:0.10893161594867706, acc:0.984375, f1:0.9843180091185411\n",
      "INFO:root:---------------8---------------\n",
      "INFO:root:avg_train_loss:0.15199961406844004, avg_train_acc:0.9627206486042693, avg_train_f1:0.962064884967356\n",
      "INFO:root:val_loss:0.2542808538271209, val_acc:0.923728813559322, val_acc:0.9228241861716437, best_f1:0.9235174077335094\n",
      "INFO:root:------------------------------\n",
      "INFO:root:epoch:9, iter_id:0, loss:0.17214752733707428, acc:0.953125, f1:0.9507113821138211\n",
      "INFO:root:epoch:9, iter_id:20, loss:0.08061660081148148, acc:1.0, f1:1.0\n",
      "INFO:root:epoch:9, iter_id:40, loss:0.1884879171848297, acc:0.953125, f1:0.9515177039941191\n",
      "INFO:root:---------------9---------------\n",
      "INFO:root:avg_train_loss:0.15217520101439386, avg_train_acc:0.9645037972085385, avg_train_f1:0.9633848042858114\n",
      "INFO:root:val_loss:0.25419466266945256, val_acc:0.923728813559322, val_acc:0.9231840193704601, best_f1:0.9235174077335094\n",
      "INFO:root:------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train done.\n"
     ]
    }
   ],
   "source": [
    "# Evaluation pass over the validation set\n",
    "def val(model, val_dataloader: DataLoader):\n",
    "    \"\"\"Evaluate `model` on `val_dataloader`.\n",
    "\n",
    "    Uses the global `config` for the device. Returns per-batch averages\n",
    "    of (loss, accuracy, weighted F1).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    total_acc, total_f1, total_loss, test_num_batch = 0., 0., 0., 0\n",
    "    # Inference only: disable autograd to save memory and time.\n",
    "    with torch.no_grad():\n",
    "        for batch in val_dataloader:\n",
    "            # Move the batch to the training device\n",
    "            batch_cuda = {item: value.to(config.device) for item, value in batch.items()}\n",
    "            output = model(**batch_cuda)\n",
    "            # With labels present, the model output is (loss, logits, ...)\n",
    "            loss = output[0]\n",
    "            logits = torch.argmax(output[1], dim=1)\n",
    "\n",
    "            # Labels are shaped (batch, 1); mirror that shape for predictions\n",
    "            y_pred = [[i] for i in logits.cpu().numpy()]\n",
    "            y_true = batch_cuda['labels'].cpu().numpy()\n",
    "            total_loss += loss.item()\n",
    "            total_acc += accuracy_score(y_true, y_pred)\n",
    "            total_f1 += f1_score(y_true, y_pred, average='weighted')\n",
    "            test_num_batch += 1\n",
    "\n",
    "    return total_loss/test_num_batch, total_acc/test_num_batch, total_f1/test_num_batch\n",
    "\n",
    "# Fine-tuning loop\n",
    "def train(model, config: Config, train_dataloader: DataLoader, val_dataloader: DataLoader):\n",
    "    \"\"\"Fine-tune `model`, checkpointing the best validation-F1 model to\n",
    "    `config.model_save_path`/best and the final model to `config.model_save_path`.\n",
    "    \"\"\"\n",
    "    model.to(config.device)\n",
    "\n",
    "    # Apply L2 weight decay to all parameters except biases and LayerNorm weights\n",
    "    params = list(model.named_parameters())\n",
    "    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n",
    "    optimizer_grouped_parameters = [\n",
    "        {'params': [p for n, p in params if not any(nd in n for nd in no_decay)],\n",
    "         'weight_decay': config.l2_weight_decay},\n",
    "        {'params': [p for n, p in params if any(nd in n for nd in no_decay)],\n",
    "         'weight_decay': 0.0}\n",
    "    ]\n",
    "    opt = torch.optim.AdamW(optimizer_grouped_parameters, lr=config.learning_rate)\n",
    "    # Cosine learning-rate annealing over the total number of training steps\n",
    "    scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, len(train_dataloader) * config.epochs)\n",
    "    \n",
    "    best_f1 = 0\n",
    "    for epoch in range(config.epochs):\n",
    "        total_acc, total_f1, total_loss, train_num_batch = 0., 0., 0., 0\n",
    "        model.train()\n",
    "        for iter_id, batch in enumerate(train_dataloader):\n",
    "            # Move the batch to the training device\n",
    "            batch_cuda = {item: value.to(config.device) for item, value in batch.items()}\n",
    "            output = model(**batch_cuda)\n",
    "            loss = output[0]\n",
    "            logits = torch.argmax(output[1], dim=1)\n",
    "\n",
    "            y_pred = [[i] for i in logits.cpu().detach().numpy()]\n",
    "            y_true = batch_cuda['labels'].cpu().detach().numpy()\n",
    "\n",
    "            # Track per-batch metrics\n",
    "            acc = accuracy_score(y_true, y_pred)\n",
    "            f1 = f1_score(y_true, y_pred, average='weighted')\n",
    "            total_loss += loss.item()\n",
    "            total_acc += acc\n",
    "            total_f1 += f1\n",
    "\n",
    "            # Backward pass, parameter update, LR schedule step\n",
    "            opt.zero_grad()\n",
    "            loss.backward()\n",
    "            opt.step()\n",
    "            scheduler.step()\n",
    "\n",
    "            if iter_id % config.print_log == 0:\n",
    "                logging.info('epoch:{}, iter_id:{}, loss:{}, acc:{}, f1:{}'.format(epoch, iter_id, loss.item(), acc, f1))\n",
    "                \n",
    "            train_num_batch += 1\n",
    "        # Validate and checkpoint the best model so far\n",
    "        val_loss, val_acc, val_f1 = val(model, val_dataloader)\n",
    "        if val_f1 > best_f1:\n",
    "            best_f1 = val_f1\n",
    "            config.tokenizer.save_pretrained(config.model_save_path + \"/best\")\n",
    "            model.save_pretrained(config.model_save_path + \"/best\")\n",
    "        logging.info('-' * 15 + str(epoch) + '-' * 15)\n",
    "        logging.info('avg_train_loss:{}, avg_train_acc:{}, avg_train_f1:{}'.format(total_loss/train_num_batch, total_acc/train_num_batch, total_f1/train_num_batch))\n",
    "        # BUGFIX: the third field was mislabelled 'val_acc'; it is the validation F1\n",
    "        logging.info('val_loss:{}, val_acc:{}, val_f1:{}, best_f1:{}'.format(val_loss, val_acc, val_f1, best_f1))\n",
    "        logging.info('-' * 30)\n",
    "        \n",
    "    # Save the final (last-epoch) model\n",
    "    config.tokenizer.save_pretrained(config.model_save_path)\n",
    "    model.save_pretrained(config.model_save_path)\n",
    "\n",
    "# Run training\n",
    "train(model, config, train_dataloader, val_dataloader)\n",
    "print('train done.')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "570fa335",
   "metadata": {},
   "source": [
    "# 预测模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "1a9d03b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "predict done.\n"
     ]
    }
   ],
   "source": [
    "# Inference: load the saved model, predict test-set labels, write the submission CSV\n",
    "def predict(config: Config):\n",
    "    \"\"\"Predict labels for `config.predict_data` with the model saved at\n",
    "    `config.model_save_path` and write ['uid', 'label'] to `config.result_data_save`.\n",
    "    \"\"\"\n",
    "    # Reload the fine-tuned tokenizer and model\n",
    "    config.tokenizer = AutoTokenizer.from_pretrained(config.model_save_path)\n",
    "    model = AutoModelForSequenceClassification.from_pretrained(config.model_save_path)\n",
    "    model.to(config.device)\n",
    "    model.eval()\n",
    "    # Load the test records and assemble the input text exactly as in training\n",
    "    test_data = pd.DataFrame(list(np.load(config.predict_data, allow_pickle=True)))\n",
    "    test_data['text'] = test_data['header'] + '[SEP]' + test_data['title'] + '[SEP]' + test_data['paragraph'] + '[SEP]' + test_data['footer']\n",
    "    # shuffle=False keeps predictions aligned with test_data rows\n",
    "    predict_dataloader = DataLoader(MyDataset(config, test_data['text'].tolist()), batch_size=config.val_batch_size, shuffle=False)\n",
    "    \n",
    "    predict_result = []\n",
    "    # Inference only: disable autograd\n",
    "    with torch.no_grad():\n",
    "        for batch in predict_dataloader:\n",
    "            batch_cuda = {item: value.to(config.device) for item, value in batch.items()}\n",
    "            output = model(**batch_cuda)\n",
    "            # No labels in the batch, so output[0] is the logits tensor\n",
    "            predict_result += torch.argmax(output[0], dim=1).cpu().numpy().tolist()\n",
    "    # Attach predictions and save (assumes the eval data provides a 'uid' column — verify)\n",
    "    test_data['label'] = predict_result\n",
    "    test_data[['uid', 'label']].to_csv(config.result_data_save, index=False, encoding='utf-8')\n",
    "\n",
    "predict(config)\n",
    "print('predict done.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72ebe064",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
