{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Hugging Face BERT的常见使用"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1. Tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 101, 2400, 2408, 3793, 1220, 1447, 4852,  833, 1392, 3175, 7481, 4638,\n",
      "         1213, 7030,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from   transformers import AutoTokenizer\n",
    "\n",
    "\n",
    "# Load the tokenizer matching the 'bert-base-chinese' checkpoint.\n",
    "tokenizer = AutoTokenizer.from_pretrained('bert-base-chinese')\n",
    "# Encode one Chinese sentence; return_tensors=\"pt\" yields PyTorch tensors.\n",
    "result    = tokenizer([\"并广泛动员社会各方面的力量\"], return_tensors=\"pt\")\n",
    "print(result)    # Some special tokens are added automatically (101 = [CLS], 102 = [SEP]; see the next cell)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 查看特殊符号"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[100, 102, 0, 101, 103]\n",
      "['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]']\n"
     ]
    }
   ],
   "source": [
    "# Uses the `tokenizer` created in the previous cell.\n",
    "print(tokenizer.all_special_ids)           #! Inspect the special-token IDs\n",
    "print(tokenizer.all_special_tokens)        #! Inspect the special-token strings"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 多个句子处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 101, 5018,  671,  702, 1368, 2094,  102],\n",
      "        [ 101, 5018,  753,  702, 1368, 2094,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1],\n",
      "        [1, 1, 1, 1, 1, 1, 1]])}\n",
      "{'input_ids': tensor([[ 101, 5018,  671,  702, 1368, 2094,  102, 5018,  753,  702, 1368, 2094,\n",
      "          102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "{'input_ids': tensor([[ 101, 5018,  671,  702, 1368, 2094,  102,    0,    0],\n",
      "        [ 101, 6821, 3221, 5018,  753,  702, 1368, 2094,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "{'input_ids': tensor([[ 101, 5018,  671,  702, 1368, 2094,  102, 6821, 3221, 5018,  753,  702,\n",
      "         1368, 2094,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "{'input_ids': tensor([[ 101, 5018,  671,  702, 1368, 2094,  102, 6821, 3221, 5018,  753,  702,\n",
      "         1368, 2094,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n"
     ]
    }
   ],
   "source": [
    "# A LIST of strings -> each string is encoded as an independent sample (a batch of 2).\n",
    "result    = tokenizer([\"第一个句子\", \"第二个句子\"], return_tensors=\"pt\")\n",
    "print(result)   \n",
    "\n",
    "# TWO positional strings -> encoded as a single sentence PAIR;\n",
    "# token_type_ids switch from 0 to 1 at the second sentence (see output).\n",
    "result    = tokenizer(\"第一个句子\", \"第二个句子\", return_tensors=\"pt\")\n",
    "print(result)  \n",
    "\n",
    "# Batch of two sentences of unequal length: padding=True pads the shorter one\n",
    "# with id 0 ([PAD]) and marks those positions with attention_mask = 0.\n",
    "result    = tokenizer([\"第一个句子\", \"这是第二个句子\"], return_tensors=\"pt\", padding=True)\n",
    "print(result)   \n",
    "\n",
    "# A sentence pair is one single sequence, so padding=True has nothing to pad...\n",
    "result    = tokenizer(\"第一个句子\", \"这是第二个句子\", return_tensors=\"pt\", padding=True)\n",
    "print(result)  \n",
    "\n",
    "# ...and the result is identical to the same call without padding (compare the last two outputs).\n",
    "result    = tokenizer(\"第一个句子\", \"这是第二个句子\", return_tensors=\"pt\")\n",
    "print(result)  "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. BERT\n",
    "\n",
    "### 2.1 Masked LM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(1.9522, grad_fn=<NllLossBackward0>) torch.Size([1, 15, 21128])\n",
      "tensor([[ 101, 2400, 2408, 3793, 1220, 1447, 4852,  833,  103, 3175, 7481, 4638,\n",
      "         1213, 7030,  102]])\n",
      "tensor([[ 101, 2400, 2408, 3793, 1220, 1447, 4852,  833, 1392, 3175, 7481, 4638,\n",
      "         1213, 7030,  102]])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from transformers import BertTokenizer, BertForMaskedLM\n",
    "\n",
    "\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "model     = BertForMaskedLM.from_pretrained('bert-base-chinese')\n",
    "\n",
    "# Input: the sentence with one character replaced by [MASK] (id 103, see output);\n",
    "# labels: the input_ids of the original, unmasked sentence.\n",
    "inputs    = tokenizer([\"并广泛动员社会[MASK]方面的力量\"], return_tensors=\"pt\")\n",
    "labels    = tokenizer([\"并广泛动员社会各方面的力量\"], return_tensors=\"pt\")['input_ids']\n",
    "\n",
    "# Passing labels makes the model also return the masked-LM loss.\n",
    "outputs   = model(**inputs, labels=labels)\n",
    "loss      = outputs.loss\n",
    "\n",
    "\"\"\"\n",
    "    In deep learning, \"logits\" are the raw prediction values produced by the model's\n",
    "final linear layer, before any normalization such as softmax or sigmoid has been\n",
    "applied. In a classification problem the logits are per-class scores; pushing them\n",
    "through softmax turns them into probabilities that sum to 1, which makes the class\n",
    "decision easy to read off.\n",
    "\n",
    "    Put simply: for a classification task the model ends with one number (a logit)\n",
    "per class. The higher that number, the more confident the model is that the input\n",
    "belongs to that class — but before softmax these numbers are not probabilities.\n",
    "\"\"\"\n",
    "#! Summary: before normalization they are scores (logits); after normalization, probabilities\n",
    "logits    = outputs.logits\n",
    "\n",
    "# logits shape: [batch, seq_len, vocab_size] — here [1, 15, 21128] (see output)\n",
    "print(loss, logits.shape)\n",
    "print(inputs['input_ids'])\n",
    "print(labels)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.2 BertForNextSentencePrediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 5.8735, -5.4668]], grad_fn=<AddmmBackward0>)\n",
      "tensor([[0.9966, 3.5735]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from transformers import BertTokenizer, BertForNextSentencePrediction\n",
    "\n",
    "\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "model     = BertForNextSentencePrediction.from_pretrained('bert-base-chinese')\n",
    "\n",
    "prompt    = \"在我的后园，可以看见有两个树, \"\n",
    "next_sen1 = \"一棵是枣树, 另一棵也是枣树\"  # a coherent continuation of the prompt\n",
    "next_sen2 = \"一九四九年九月十五日\"  # an unrelated sentence (a date)\n",
    "\n",
    "# Encode (prompt, candidate) as a sentence pair. NOTE(review): `labels` only feeds\n",
    "# outputs.loss, which is never printed here, so it does not affect the printed logits;\n",
    "# per the HF convention label 1 means \"B is a random sentence\" — confirm against docs.\n",
    "encoding  = tokenizer(prompt, next_sen1, return_tensors=\"pt\")\n",
    "outputs   = model(**encoding, labels=torch.LongTensor([1]))\n",
    "# Comparing the two printed results: the coherent pair gets a much larger first\n",
    "# logit, i.e. index 0 scores \"B follows A\" and index 1 scores \"B is unrelated\".\n",
    "print(outputs.logits)\n",
    "\n",
    "encoding  = tokenizer(prompt, next_sen2, return_tensors=\"pt\")\n",
    "outputs   = model(**encoding, labels=torch.LongTensor([1]))\n",
    "print(outputs.logits)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
