{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "cf26af6c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:23:00.137827Z",
     "start_time": "2024-07-06T13:22:57.079323Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_dataset 的样本数量：9665\n",
      "单样本示例：至道无难，唯嫌拣择。但莫憎爱，洞然明白。山僧则不然，至道最难，须是拣择。若无憎爱，争见明白。\n"
     ]
    }
   ],
   "source": [
    "from paddlenlp.datasets import load_dataset\n",
    "\n",
    "def read(data_path):\n",
    "    with open(data_path, 'r', encoding='utf-8') as f:\n",
    "        # 跳过列名\n",
    "        next(f)\n",
    "        for line in f:\n",
    "            yield line.replace(\"\\n\",\"\")\n",
    "\n",
    "# data_path为read()方法的参数\n",
    "train_dataset = load_dataset(read, data_path='./datasets/train2.txt',lazy=False)\n",
    "test_dataset = load_dataset(read, data_path='./datasets/test.txt',lazy=False)\n",
    "dev_dataset = load_dataset(read, data_path='./datasets/dev.txt',lazy=False)\n",
    "\n",
    "print('train_dataset 的样本数量：%d'%len(train_dataset))\n",
    "print('单样本示例：%s'%train_dataset[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8e4dbb70",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:23:45.332741Z",
     "start_time": "2024-07-06T13:23:45.317947Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m[2024-07-06 21:23:45,317] [    INFO]\u001b[0m - Already cached C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\bert-base-chinese-vocab.txt\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:23:45,327] [    INFO]\u001b[0m - tokenizer config file saved in C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\tokenizer_config.json\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:23:45,327] [    INFO]\u001b[0m - Special tokens file saved in C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\special_tokens_map.json\u001b[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "大道分明在眼前，时人不会悮归泉。黄芽本是乾坤气，神水根基与汞连。\n",
      "[101, 1920, 6887, 1146, 3209, 1762, 4706, 1184, 8024, 3198, 782, 679, 833, 100, 2495, 3787, 511, 7942, 5715, 3315, 3221, 746, 1787, 3698, 8024, 4868, 3717, 3418, 1825, 680, 3735, 6825, 511, 102]\n",
      "[CLS]大道分明在眼前，时人不会[UNK]归泉。黄芽本是乾坤气，神水根基与汞连。[SEP]\n",
      "蹑足封韩信，剖心嗔比干。河山千古泪，风雨一番寒。世指鹿为马，人呼鸟作鸾。江头潮汹汹，城脚水漫漫。\n",
      "[101, 6692, 6639, 2196, 7506, 928, 8024, 1189, 2552, 1625, 3683, 2397, 511, 3777, 2255, 1283, 1367, 3801, 8024, 7599, 7433, 671, 4528, 2170, 511, 686, 2900, 7922, 711, 7716, 8024, 782, 1461, 7881, 868, 7895, 511, 3736, 1928, 4060, 3747, 3747, 8024, 1814, 5558, 3717, 4035, 4035, 511, 102]\n",
      "[CLS]蹑足封韩信，剖心嗔比干。河山千古泪，风雨一番寒。世指鹿为马，人呼鸟作鸾。江头潮汹汹，城脚水漫漫。[SEP]\n"
     ]
    }
   ],
   "source": [
    "from paddlenlp.transformers import BertTokenizer\n",
    "\n",
    "bert_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "\n",
    "# 处理效果展示\n",
    "for poem in test_dataset[0:2]:\n",
    "    token_poem, _ = bert_tokenizer.encode(poem).values()\n",
    "    print(poem)\n",
    "    print(token_poem)\n",
    "    print(''.join(bert_tokenizer.convert_ids_to_tokens(token_poem)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "8d4ea90f",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:35:56.040320Z",
     "start_time": "2024-07-06T13:35:56.027244Z"
    }
   },
   "outputs": [],
   "source": [
    "import paddle\n",
    "from paddle.io import Dataset\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "class PoemData(Dataset):\n",
    "    \"\"\"\n",
    "    构造诗歌数据集，继承paddle.io.Dataset\n",
    "    Parameters:\n",
    "        poems (list): 诗歌数据列表，每一个元素为一首诗歌，诗歌未经编码\n",
    "        max_len: 接收诗歌的最大长度\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, poems, tokenizer, max_len=128):\n",
    "        super(PoemData, self).__init__()\n",
    "        self.poems = poems\n",
    "        self.tokenizer = tokenizer\n",
    "        self.max_len = max_len\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        line = self.poems[idx]\n",
    "        token_line = self.tokenizer.encode(line)\n",
    "        token, token_type = token_line['input_ids'], token_line['token_type_ids']\n",
    "        if len(token) > self.max_len + 1:\n",
    "            token = token[:self.max_len] + token[-1:]\n",
    "            token_type = token_type[:self.max_len] + token_type[-1:]\n",
    "        input_token, input_token_type = token[:-1], token_type[:-1]\n",
    "        label_token = np.array((token[1:] + [0] * self.max_len)[:self.max_len], dtype='int64')\n",
    "        # 输入填充\n",
    "        input_token = np.array((input_token + [0] * self.max_len)[:self.max_len], dtype='int64')\n",
    "        input_token_type = np.array((input_token_type + [0] * self.max_len)[:self.max_len], dtype='int64')\n",
    "        input_pad_mask = (input_token != 0).astype('float32')\n",
    "        return input_token, input_token_type, input_pad_mask, label_token, input_pad_mask\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.poems)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "ca9c86b5",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:43:23.792738Z",
     "start_time": "2024-07-06T13:43:23.781892Z"
    }
   },
   "outputs": [],
   "source": [
    "from paddlenlp.transformers import BertModel, BertForTokenClassification\n",
    "from paddle.nn import Layer, Linear, Softmax\n",
    "\n",
    "class PoetryBertModel(Layer):\n",
    "    \"\"\"\n",
    "    基于BERT预训练模型的诗歌生成模型\n",
    "    \"\"\"\n",
    "    def __init__(self, pretrained_bert_model: str, input_length: int):\n",
    "        super(PoetryBertModel, self).__init__()\n",
    "        bert_model = BertModel.from_pretrained(pretrained_bert_model)\n",
    "        self.vocab_size, self.hidden_size = bert_model.embeddings.word_embeddings.parameters()[0].shape\n",
    "        self.bert_for_class = BertForTokenClassification.from_pretrained(pretrained_bert_model)\n",
    "        # 生成下三角矩阵，用来mask句子后边的信息\n",
    "        self.sequence_length = input_length\n",
    "        # lower_triangle_mask为input_length * input_length的下三角矩阵（包含主对角线），该掩码作为注意力掩码的一部分（在forward的\n",
    "        # 处理中为0的部分会被处理成无穷小量，以方便在计算注意力权重的时候保证被掩盖的部分权重约等于0）。而之所以写为下三角矩阵的形式，与\n",
    "        # transformer的多头注意力计算的机制有关，细节可以了解相关论文获悉。\n",
    "        self.lower_triangle_mask = paddle.tril(paddle.tensor.full((input_length, input_length), 1, 'float32'))\n",
    "        '''\n",
    "        tril    返回输入矩阵 input 的下三角部分，其余部分被设为0。 矩形的下三角部分被定义为对角线上和下方的元素。\n",
    "        full    创建形状大小为 shape 并且数据类型为 dtype 的 Tensor，其中元素值均为 fill_value \n",
    "        cast    将 x 的数据类型转换为 dtype 并输出。支持输出和输入的数据类型相同。\n",
    "        matmul  计算两个 Tensor 的乘积，遵循完整的广播规则，关于广播规则\n",
    "        \n",
    "        '''\n",
    "\n",
    "    def forward(self, token, token_type, input_mask, input_length=None):\n",
    "        # 计算attention mask\n",
    "        mask_left = paddle.reshape(input_mask, input_mask.shape + [1])\n",
    "        mask_right = paddle.reshape(input_mask, [input_mask.shape[0], 1, input_mask.shape[1]])\n",
    "        # 输入句子中有效的位置\n",
    "        mask_left = paddle.cast(mask_left, 'float32')\n",
    "        mask_right = paddle.cast(mask_right, 'float32')\n",
    "        attention_mask = paddle.matmul(mask_left, mask_right)\n",
    "        # 注意力机制计算中有效的位置\n",
    "        if input_length is not None:\n",
    "            # 之所以要再计算一次，是因为用于推理预测时，可能输入的长度不为实例化时设置的长度。这里的模型在训练时假设输入的\n",
    "            # 长度是被填充成一致的——这一步不是必须的，但是处理成一致长度比较方便处理（对应地，增加了显存的用度）。\n",
    "            lower_triangle_mask = paddle.tril(paddle.tensor.full((input_length, input_length), 1, 'float32'))\n",
    "        else:\n",
    "            lower_triangle_mask = self.lower_triangle_mask\n",
    "        attention_mask = attention_mask * lower_triangle_mask\n",
    "        # 无效的位置设为极小值\n",
    "        attention_mask = (1 - paddle.unsqueeze(attention_mask, axis=[1])) * -1e10\n",
    "        attention_mask = paddle.cast(attention_mask, self.bert_for_class.parameters()[0].dtype)\n",
    "\n",
    "        output_logits = self.bert_for_class(token, token_type_ids=token_type, attention_mask=attention_mask)\n",
    "        \n",
    "        return output_logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "d4e5f20a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:43:25.167818Z",
     "start_time": "2024-07-06T13:43:25.165135Z"
    }
   },
   "outputs": [],
   "source": [
    "class PoetryBertModelLossCriterion(Layer):\n",
    "    def forward(self, pred_logits, label, input_mask):\n",
    "        loss = paddle.nn.functional.cross_entropy(pred_logits, label, ignore_index=0, reduction='none')\n",
    "        masked_loss = paddle.mean(loss * input_mask, axis=0)\n",
    "        return paddle.sum(masked_loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "f69a6e1e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:43:28.042239Z",
     "start_time": "2024-07-06T13:43:25.635435Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m[2024-07-06 21:43:25,635] [    INFO]\u001b[0m - Already cached C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\model_state.pdparams\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:43:25,635] [    INFO]\u001b[0m - Loading weights file model_state.pdparams from cache at C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\model_state.pdparams\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:43:25,831] [    INFO]\u001b[0m - Loaded weights file from disk, setting weights to model.\u001b[0m\n",
      "\u001b[33m[2024-07-06 21:43:26,879] [ WARNING]\u001b[0m - Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertModel: ['cls.seq_relationship.bias', 'cls.predictions.decoder_weight', 'cls.predictions.layer_norm.bias', 'cls.predictions.decoder_bias', 'cls.predictions.transform.weight', 'cls.predictions.layer_norm.weight', 'cls.predictions.transform.bias', 'cls.seq_relationship.weight']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:43:26,879] [    INFO]\u001b[0m - All the weights of BertModel were initialized from the model checkpoint at bert-base-chinese.\n",
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use BertModel for predictions without further training.\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:43:26,899] [    INFO]\u001b[0m - Already cached C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\model_state.pdparams\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:43:26,899] [    INFO]\u001b[0m - Loading weights file model_state.pdparams from cache at C:\\Users\\16152\\.paddlenlp\\models\\bert-base-chinese\\model_state.pdparams\u001b[0m\n",
      "\u001b[32m[2024-07-06 21:43:27,129] [    INFO]\u001b[0m - Loaded weights file from disk, setting weights to model.\u001b[0m\n",
      "\u001b[33m[2024-07-06 21:43:27,393] [ WARNING]\u001b[0m - Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertForTokenClassification: ['cls.seq_relationship.bias', 'cls.predictions.decoder_weight', 'cls.predictions.layer_norm.bias', 'cls.predictions.decoder_bias', 'cls.predictions.transform.weight', 'cls.predictions.layer_norm.weight', 'cls.predictions.transform.bias', 'cls.seq_relationship.weight']\n",
      "- This IS expected if you are initializing BertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\u001b[0m\n",
      "\u001b[33m[2024-07-06 21:43:27,393] [ WARNING]\u001b[0m - Some weights of BertForTokenClassification were not initialized from the model checkpoint at bert-base-chinese and are newly initialized: ['classifier.weight', 'classifier.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\u001b[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----------------------------------------------------------------------------------------------------------------------------------------------\n",
      "        Layer (type)                                      Input Shape                                    Output Shape            Param #    \n",
      "==============================================================================================================================================\n",
      "        Embedding-4                                       [[1, 128]]                                     [1, 128, 768]         16,226,304   \n",
      "        Embedding-5                                       [[1, 128]]                                     [1, 128, 768]           393,216    \n",
      "        Embedding-6                                       [[1, 128]]                                     [1, 128, 768]            1,536     \n",
      "        LayerNorm-26                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Dropout-38                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "      BertEmbeddings-2                                        []                                         [1, 128, 768]              0       \n",
      "         Linear-74                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-75                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-76                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-77                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-13     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-40                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-27                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-78                                      [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-39                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-79                                     [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-41                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-28                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-13                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-80                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-81                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-82                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-83                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-14     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-43                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-29                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-84                                      [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-42                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-85                                     [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-44                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-30                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-14                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-86                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-87                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-88                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-89                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-15     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-46                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-31                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-90                                      [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-45                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-91                                     [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-47                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-32                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-15                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-92                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-93                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-94                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-95                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-16     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-49                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-33                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-96                                      [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-48                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-97                                     [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-50                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-34                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-16                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-98                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-99                                      [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-100                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-101                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-17     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-52                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-35                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-102                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-51                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-103                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-53                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-36                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-17                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-104                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-105                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-106                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-107                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-18     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-55                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-37                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-108                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-54                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-109                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-56                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-38                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-18                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-110                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-111                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-112                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-113                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-19     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-58                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-39                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-114                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-57                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-115                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-59                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-40                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-19                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-116                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-117                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-118                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-119                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-20     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-61                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-41                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-120                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-60                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-121                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-62                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-42                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-20                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-122                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-123                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-124                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-125                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-21     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-64                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-43                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-126                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-63                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-127                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-65                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-44                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-21                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-128                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-129                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-130                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-131                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-22     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-67                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-45                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-132                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-66                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-133                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-68                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-46                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-22                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-134                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-135                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-136                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-137                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-23     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-70                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-47                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-138                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-69                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-139                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-71                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-48                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-23                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-140                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-141                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-142                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "         Linear-143                                     [[1, 128, 768]]                                  [1, 128, 768]           590,592    \n",
      "   MultiHeadAttention-24     [[1, 128, 768], [1, 128, 768], [1, 128, 768], [1, 1, 128, 128], None]       [1, 128, 768]              0       \n",
      "         Dropout-73                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-49                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      "         Linear-144                                     [[1, 128, 768]]                                 [1, 128, 3072]          2,362,368   \n",
      "         Dropout-72                                    [[1, 128, 3072]]                                 [1, 128, 3072]              0       \n",
      "         Linear-145                                    [[1, 128, 3072]]                                  [1, 128, 768]          2,360,064   \n",
      "         Dropout-74                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "        LayerNorm-50                                    [[1, 128, 768]]                                  [1, 128, 768]            1,536     \n",
      " TransformerEncoderLayer-24                             [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "    TransformerEncoder-2                                [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-146                                       [[1, 768]]                                       [1, 768]              590,592    \n",
      "           Tanh-3                                         [[1, 768]]                                       [1, 768]                 0       \n",
      "        BertPooler-2                                    [[1, 128, 768]]                                    [1, 768]                 0       \n",
      "        BertModel-2                                       [[1, 128]]                               [[1, 128, 768], [1, 768]]        0       \n",
      "         Dropout-75                                     [[1, 128, 768]]                                  [1, 128, 768]              0       \n",
      "         Linear-147                                     [[1, 128, 768]]                                   [1, 128, 2]             1,538     \n",
      "BertForTokenClassification-1                              [[1, 128]]                                      [1, 128, 2]               0       \n",
      "==============================================================================================================================================\n",
      "Total params: 102,269,186\n",
      "Trainable params: 102,269,186\n",
      "Non-trainable params: 0\n",
      "----------------------------------------------------------------------------------------------------------------------------------------------\n",
      "Input size (MB): 0.00\n",
      "Forward/backward pass size (MB): 177.78\n",
      "Params size (MB): 390.13\n",
      "Estimated Total Size (MB): 567.90\n",
      "----------------------------------------------------------------------------------------------------------------------------------------------\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'total_params': 102269186, 'trainable_params': 102269186}"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from paddle.static import InputSpec\n",
    "from paddlenlp.metrics import Perplexity\n",
    "from paddle.optimizer import AdamW\n",
    "\n",
    "# Build the poetry-generation network on top of bert-base-chinese,\n",
    "# with a maximum sequence length of 128 tokens.\n",
    "net = PoetryBertModel('bert-base-chinese', 128)\n",
    "\n",
    "# Static input specs: batch dimension is variable (-1), sequence length is 128.\n",
    "token_ids = InputSpec((-1, 128), 'int64', 'token')\n",
    "token_type_ids = InputSpec((-1, 128), 'int64', 'token_type')\n",
    "input_mask = InputSpec((-1, 128), 'float32', 'input_mask')\n",
    "label = InputSpec((-1, 128), 'int64', 'label')\n",
    "\n",
    "inputs = [token_ids, token_type_ids, input_mask]\n",
    "# input_mask is passed as a second \"label\" so the loss criterion can mask padding.\n",
    "labels = [label, input_mask]\n",
    "\n",
    "model = paddle.Model(net, inputs, labels)\n",
    "model.prepare(optimizer=AdamW(learning_rate=0.001, parameters=model.parameters()), loss=PoetryBertModelLossCriterion(), metrics=[Perplexity()])\n",
    "\n",
    "# Loop variable renamed from 'input' to 'spec' to avoid shadowing the builtin input().\n",
    "model.summary(inputs, [spec.dtype for spec in inputs])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "58b45c8a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-06T13:43:32.682950Z",
     "start_time": "2024-07-06T13:43:30.727038Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The loss value printed in the log is the current step, and the metric is the average value of previous steps.\n",
      "Epoch 1/1\n"
     ]
    },
    {
     "ename": "OSError",
     "evalue": "(External) CUDA error(719), unspecified launch failure. \n  [Hint: 'cudaErrorLaunchFailure'. An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointerand accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases canbe found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work willreturn the same error. To continue using CUDA, the process must be terminated and relaunched.] (at ..\\paddle\\phi\\backends\\gpu\\cuda\\cuda_info.cc:272)\n",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mOSError\u001b[0m                                   Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[19], line 5\u001b[0m\n\u001b[0;32m      3\u001b[0m train_loader \u001b[38;5;241m=\u001b[39m DataLoader(PoemData(train_dataset, bert_tokenizer, \u001b[38;5;241m128\u001b[39m), batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m32\u001b[39m, shuffle\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m      4\u001b[0m dev_loader \u001b[38;5;241m=\u001b[39m DataLoader(PoemData(dev_dataset, bert_tokenizer, \u001b[38;5;241m128\u001b[39m), batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m32\u001b[39m, shuffle\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m----> 5\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_data\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrain_loader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msave_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m./checkpoint\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msave_freq\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43meval_data\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdev_loader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43meval_freq\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mD:\\ProgramData\\anaconda3\\envs\\paddle312\\Lib\\site-packages\\paddle\\hapi\\model.py:1986\u001b[0m, in \u001b[0;36mModel.fit\u001b[1;34m(self, train_data, eval_data, batch_size, epochs, eval_freq, log_freq, save_dir, save_freq, verbose, drop_last, shuffle, num_workers, callbacks, accumulate_grad_batches, num_iters)\u001b[0m\n\u001b[0;32m   1984\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(epochs):\n\u001b[0;32m   1985\u001b[0m     cbks\u001b[38;5;241m.\u001b[39mon_epoch_begin(epoch)\n\u001b[1;32m-> 1986\u001b[0m     logs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run_one_epoch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_loader\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcbks\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1987\u001b[0m     cbks\u001b[38;5;241m.\u001b[39mon_epoch_end(epoch, logs)\n\u001b[0;32m   1989\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m do_eval \u001b[38;5;129;01mand\u001b[39;00m epoch \u001b[38;5;241m%\u001b[39m eval_freq \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
      "File \u001b[1;32mD:\\ProgramData\\anaconda3\\envs\\paddle312\\Lib\\site-packages\\paddle\\hapi\\model.py:2333\u001b[0m, in \u001b[0;36mModel._run_one_epoch\u001b[1;34m(self, data_loader, callbacks, mode, logs)\u001b[0m\n\u001b[0;32m   2327\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m mode \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrain\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[0;32m   2328\u001b[0m     _inputs\u001b[38;5;241m.\u001b[39mappend(\n\u001b[0;32m   2329\u001b[0m         (step \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m%\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_accumulate \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m   2330\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m step \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mlen\u001b[39m(data_loader)\n\u001b[0;32m   2331\u001b[0m     )\n\u001b[1;32m-> 2333\u001b[0m outs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m_batch\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m_inputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   2335\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_metrics \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_loss:\n\u001b[0;32m   2336\u001b[0m     metrics \u001b[38;5;241m=\u001b[39m [[\u001b[38;5;28mfloat\u001b[39m(l) \u001b[38;5;28;01mfor\u001b[39;00m l \u001b[38;5;129;01min\u001b[39;00m outs[\u001b[38;5;241m0\u001b[39m]]]\n",
      "File \u001b[1;32mD:\\ProgramData\\anaconda3\\envs\\paddle312\\Lib\\site-packages\\paddle\\hapi\\model.py:1247\u001b[0m, in \u001b[0;36mModel.train_batch\u001b[1;34m(self, inputs, labels, update)\u001b[0m\n\u001b[0;32m   1196\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mtrain_batch\u001b[39m(\u001b[38;5;28mself\u001b[39m, inputs, labels\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, update\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n\u001b[0;32m   1197\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m   1198\u001b[0m \n\u001b[0;32m   1199\u001b[0m \u001b[38;5;124;03m    Run one training step on one batch of data. And using `update` indicates\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   1245\u001b[0m \n\u001b[0;32m   1246\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m-> 1247\u001b[0m     loss \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_adapter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_batch\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlabels\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mupdate\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1248\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m in_dynamic_mode() \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_input_info \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m   1249\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_update_inputs()\n",
      "File \u001b[1;32mD:\\ProgramData\\anaconda3\\envs\\paddle312\\Lib\\site-packages\\paddle\\hapi\\model.py:846\u001b[0m, in \u001b[0;36mDynamicGraphAdapter.train_batch\u001b[1;34m(self, inputs, labels, update)\u001b[0m\n\u001b[0;32m    843\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    844\u001b[0m         outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel\u001b[38;5;241m.\u001b[39mnetwork(\u001b[38;5;241m*\u001b[39m[to_variable(x) \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m inputs])\n\u001b[1;32m--> 846\u001b[0m losses \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_loss\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mto_list\u001b[49m\u001b[43m(\u001b[49m\u001b[43moutputs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mlabels\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    847\u001b[0m losses \u001b[38;5;241m=\u001b[39m to_list(losses)\n\u001b[0;32m    848\u001b[0m final_loss \u001b[38;5;241m=\u001b[39m paddle\u001b[38;5;241m.\u001b[39madd_n(losses)\n",
      "File \u001b[1;32mD:\\ProgramData\\anaconda3\\envs\\paddle312\\Lib\\site-packages\\paddle\\nn\\layer\\layers.py:1429\u001b[0m, in \u001b[0;36mLayer.__call__\u001b[1;34m(self, *inputs, **kwargs)\u001b[0m\n\u001b[0;32m   1420\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[0;32m   1421\u001b[0m     (\u001b[38;5;129;01mnot\u001b[39;00m in_to_static_mode())\n\u001b[0;32m   1422\u001b[0m     \u001b[38;5;129;01mand\u001b[39;00m (\u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks)\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   1426\u001b[0m     \u001b[38;5;129;01mand\u001b[39;00m (\u001b[38;5;129;01mnot\u001b[39;00m in_profiler_mode())\n\u001b[0;32m   1427\u001b[0m ):\n\u001b[0;32m   1428\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_build_once(\u001b[38;5;241m*\u001b[39minputs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m-> 1429\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1430\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m   1431\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dygraph_call_func(\u001b[38;5;241m*\u001b[39minputs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "Cell \u001b[1;32mIn[17], line 4\u001b[0m, in \u001b[0;36mPoetryBertModelLossCriterion.forward\u001b[1;34m(self, pred_logits, label, input_mask)\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, pred_logits, label, input_mask):\n\u001b[0;32m      3\u001b[0m     loss \u001b[38;5;241m=\u001b[39m paddle\u001b[38;5;241m.\u001b[39mnn\u001b[38;5;241m.\u001b[39mfunctional\u001b[38;5;241m.\u001b[39mcross_entropy(pred_logits, label, ignore_index\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m, reduction\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mnone\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m----> 4\u001b[0m     masked_loss \u001b[38;5;241m=\u001b[39m paddle\u001b[38;5;241m.\u001b[39mmean(\u001b[43mloss\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m \u001b[49m\u001b[43minput_mask\u001b[49m, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[0;32m      5\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m paddle\u001b[38;5;241m.\u001b[39msum(masked_loss)\n",
      "\u001b[1;31mOSError\u001b[0m: (External) CUDA error(719), unspecified launch failure. \n  [Hint: 'cudaErrorLaunchFailure'. An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointerand accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases canbe found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work willreturn the same error. To continue using CUDA, the process must be terminated and relaunched.] (at ..\\paddle\\phi\\backends\\gpu\\cuda\\cuda_info.cc:272)\n"
     ]
    }
   ],
   "source": [
    "from paddle.io import DataLoader\n",
    "\n",
    "# Training data is shuffled each epoch; evaluation data keeps a fixed order\n",
    "# (shuffling does not change eval metrics and a fixed order makes eval runs reproducible).\n",
    "train_loader = DataLoader(PoemData(train_dataset, bert_tokenizer, 128), batch_size=32, shuffle=True)\n",
    "dev_loader = DataLoader(PoemData(dev_dataset, bert_tokenizer, 128), batch_size=32, shuffle=False)\n",
    "# Train for one epoch, saving checkpoints to ./checkpoint_1_web and evaluating on dev each epoch.\n",
    "model.fit(train_data=train_loader, epochs=1, save_dir='checkpoint_1_web', save_freq=1, verbose=1, eval_data=dev_loader, eval_freq=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68333c9be21db0ef",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
