{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "jupyter": {
     "is_executing": true
    }
   },
   "source": [
    "from paddlenlp.datasets import load_dataset\n",
    "\n",
    "def read(data_path):\n",
    "    \"\"\"Yield one poem per line from a UTF-8 text file (header row skipped).\"\"\"\n",
    "    with open(data_path, 'r', encoding='utf-8') as f:\n",
    "        # Skip the header (column-name) line.\n",
    "        next(f)\n",
    "        for line in f:\n",
    "            yield line.replace(\"\\n\",\"\")\n",
    "\n",
    "# data_path is forwarded by load_dataset as the argument of read()\n",
    "train_dataset = load_dataset(read, data_path='./datasets/train2.txt',lazy=False)\n",
    "test_dataset = load_dataset(read, data_path='./datasets/test.txt',lazy=False)\n",
    "dev_dataset = load_dataset(read, data_path='./datasets/dev.txt',lazy=False)\n",
    "\n",
    "print('train_dataset 的样本数量：%d'%len(train_dataset))\n",
    "print('单样本示例：%s'%train_dataset[0])"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "2f64037b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "大道分明在眼前，时人不会悮归泉。黄芽本是乾坤气，神水根基与汞连。\n",
      "[101, 1920, 6887, 1146, 3209, 1762, 4706, 1184, 8024, 3198, 782, 679, 833, 100, 2495, 3787, 511, 7942, 5715, 3315, 3221, 746, 1787, 3698, 8024, 4868, 3717, 3418, 1825, 680, 3735, 6825, 511, 102]\n",
      "[CLS]大道分明在眼前，时人不会[UNK]归泉。黄芽本是乾坤气，神水根基与汞连。[SEP]\n",
      "蹑足封韩信，剖心嗔比干。河山千古泪，风雨一番寒。世指鹿为马，人呼鸟作鸾。江头潮汹汹，城脚水漫漫。\n",
      "[101, 6692, 6639, 2196, 7506, 928, 8024, 1189, 2552, 1625, 3683, 2397, 511, 3777, 2255, 1283, 1367, 3801, 8024, 7599, 7433, 671, 4528, 2170, 511, 686, 2900, 7922, 711, 7716, 8024, 782, 1461, 7881, 868, 7895, 511, 3736, 1928, 4060, 3747, 3747, 8024, 1814, 5558, 3717, 4035, 4035, 511, 102]\n",
      "[CLS]蹑足封韩信，剖心嗔比干。河山千古泪，风雨一番寒。世指鹿为马，人呼鸟作鸾。江头潮汹汹，城脚水漫漫。[SEP]\n"
     ]
    }
   ],
   "source": [
    "from datasets import load_dataset,Dataset\n",
    "from transformers import BertModel, BertTokenizer\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "# NOTE(review): this `load_dataset` import shadows paddlenlp's load_dataset\n",
    "# from the previous cell; it is not used below, but confirm before reuse.\n",
    "model_name = './bert-base-chinese'\n",
    "model_path = './bert-base-chinese'\n",
    "\n",
    "bert_tokenizer = BertTokenizer.from_pretrained(model_name)\n",
    "bert = BertModel.from_pretrained(model_path)\n",
    "\n",
    "\n",
    "# Show the tokenization round-trip for the first two test samples.\n",
    "for poem in test_dataset[0:2]:\n",
    "    token_poem = bert_tokenizer.encode(poem)\n",
    "    print(poem)\n",
    "    print(token_poem)\n",
    "    print(''.join(bert_tokenizer.convert_ids_to_tokens(token_poem)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "49ade8ac",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "import torchvision.transforms as transforms\n",
    "from torch.utils.data import Dataset\n",
    "\n",
    "class PoemData(Dataset):\n",
    "    \"\"\"Poem dataset for causal language modelling, built on torch Dataset.\n",
    "\n",
    "    Parameters:\n",
    "        poems (list): list of poems; each element is one raw (un-encoded) poem string\n",
    "        tokenizer: HuggingFace tokenizer used to encode each poem\n",
    "        max_len (int): maximum accepted sequence length\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, poems, tokenizer, max_len=128):\n",
    "        super(PoemData, self).__init__()\n",
    "        self.poems = poems\n",
    "        self.tokenizer = tokenizer\n",
    "        self.max_len = max_len\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        line = self.poems[idx]\n",
    "        # BUG FIX: tokenizer.encode() returns a plain id list, so indexing it\n",
    "        # with 'input_ids' raised TypeError. Calling the tokenizer itself\n",
    "        # returns the encoding dict with input_ids / token_type_ids.\n",
    "        token_line = self.tokenizer(line)\n",
    "        token, token_type = token_line['input_ids'], token_line['token_type_ids']\n",
    "        # Truncate over-long sequences but keep the trailing [SEP] token.\n",
    "        if len(token) > self.max_len + 1:\n",
    "            token = token[:self.max_len] + token[-1:]\n",
    "            token_type = token_type[:self.max_len] + token_type[-1:]\n",
    "        # Shift by one: inputs are tokens[:-1], labels are tokens[1:].\n",
    "        input_token, input_token_type = token[:-1], token_type[:-1]\n",
    "        label_token = np.array((token[1:] + [0] * self.max_len)[:self.max_len], dtype='int64')\n",
    "        # Pad inputs to max_len with 0 (the [PAD] id).\n",
    "        input_token = np.array((input_token + [0] * self.max_len)[:self.max_len], dtype='int64')\n",
    "        input_token_type = np.array((input_token_type + [0] * self.max_len)[:self.max_len], dtype='int64')\n",
    "        input_pad_mask = (input_token != 0).astype('float32')\n",
    "        return input_token, input_token_type, input_pad_mask, label_token, input_pad_mask\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.poems)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "eed63b65",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<paddlenlp.datasets.dataset.MapDataset at 0x29d90381340>"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# BUG FIX: PoemData expects the whole dataset (the original passed a single\n",
    "# sample, train_dataset[0], so PoemData would index into one string), and\n",
    "# __getitem__ requires an index (the original called it with no argument,\n",
    "# which raised TypeError).\n",
    "ddemo11 = PoemData(train_dataset, bert_tokenizer, 128)\n",
    "ddemo11[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "67d609e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import BertModel, BertForTokenClassification\n",
    "\n",
    "class PoetryBertModel(nn.Module):\n",
    "    \"\"\"Left-to-right poetry language model built on BERT token classification.\n",
    "\n",
    "    Parameters:\n",
    "        pretrained_bert_model: name/path of the pretrained BERT checkpoint\n",
    "        input_length: maximum sequence length (size of the causal mask)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, pretrained_bert_model: str, input_length: int):\n",
    "        super(PoetryBertModel, self).__init__()\n",
    "\n",
    "        # Load the pre-trained BERT model\n",
    "        self.bert_model = BertModel.from_pretrained(pretrained_bert_model)\n",
    "        # BUG FIX: nn.Embedding has no .size(); the (vocab, hidden) shape\n",
    "        # lives on its weight matrix.\n",
    "        self.vocab_size, self.hidden_size = self.bert_model.embeddings.word_embeddings.weight.size()\n",
    "        # BUG FIX: BertForTokenClassification was never imported anywhere in\n",
    "        # the notebook; imported above.\n",
    "        self.bert_for_class = BertForTokenClassification.from_pretrained(pretrained_bert_model, num_labels=self.vocab_size)\n",
    "\n",
    "        # Lower-triangular matrix: position i may only attend to positions <= i.\n",
    "        self.sequence_length = input_length\n",
    "        self.lower_triangle_mask = torch.tril(torch.ones((input_length, input_length), dtype=torch.float32))\n",
    "\n",
    "    def forward(self, token, token_type, input_mask):\n",
    "        # Pairwise valid-token mask of shape (batch, seq, seq).\n",
    "        mask_left = input_mask.unsqueeze(-1)\n",
    "        mask_right = input_mask.unsqueeze(1)\n",
    "        attention_mask = torch.matmul(mask_left, mask_right)\n",
    "\n",
    "        # Combine the padding mask with the causal (lower-triangular) mask.\n",
    "        lower_triangle_mask = self.lower_triangle_mask[:input_mask.size(1), :input_mask.size(1)]\n",
    "        attention_mask = attention_mask * lower_triangle_mask\n",
    "\n",
    "        # Additive mask: blocked positions get a very large negative value.\n",
    "        attention_mask = (1 - attention_mask) * -1e10\n",
    "        # BUG FIX: parameters() returns a generator, which is not subscriptable\n",
    "        # (the original used parameters()[0]); use next() for the first one.\n",
    "        attention_mask = attention_mask.to(next(self.bert_for_class.parameters()).device)\n",
    "\n",
    "        output_logits = self.bert_for_class(token, token_type_ids=token_type, attention_mask=attention_mask)\n",
    "        return output_logits\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "eac7124d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class PoetryBertModelLossCriterion(nn.Module):\n",
    "    \"\"\"Masked token-level cross-entropy loss for the poetry LM.\"\"\"\n",
    "\n",
    "    def forward(self, pred_logits, label, input_mask):\n",
    "        # BUG FIX: the original called paddle.nn.functional.cross_entropy /\n",
    "        # paddle.mean / paddle.sum in this PyTorch notebook, where `paddle`\n",
    "        # is never imported (NameError). torch's cross_entropy expects the\n",
    "        # class dimension on axis 1, so transpose (batch, seq, vocab)\n",
    "        # -> (batch, vocab, seq).\n",
    "        loss = F.cross_entropy(pred_logits.transpose(1, 2), label, ignore_index=0, reduction='none')\n",
    "        # Zero out padding positions, average over the batch, sum over positions.\n",
    "        masked_loss = torch.mean(loss * input_mask, dim=0)\n",
    "        return torch.sum(masked_loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c49891f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:paddle312] *",
   "language": "python",
   "name": "conda-env-paddle312-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
