{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c2168723-82a6-40a5-864b-1b516415480c",
   "metadata": {},
   "source": [
    "## 1. BertForMaskedLM"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9b5b89f0-a44a-425c-af4e-61cf9a826a60",
   "metadata": {},
   "source": [
    "预训练任务之一，实现了 Masked Language Model（掩码语言模型）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6735f11d-388a-4f5c-9893-77ac75913121",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import BertTokenizer, BertForMaskedLM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "de615eca-ca03-4ae8-8b75-acd1ce56a655",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "124f8476-48cc-45d0-835a-d158aeb4ad16",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "86f86dc26bf046c1b726570155e2bdc9",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Downloading pytorch_model.bin:   0%|          | 0.00/393M [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\")\n",
    "model = BertForMaskedLM.from_pretrained(\"bert-base-chinese\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "87f4c1c1-4eb0-4eed-ba27-53d2721b1ce0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 101, 2400, 2408, 3793, 1220, 1447, 4852,  833,  103, 3175, 7481, 4638,\n",
      "         1213, 7030,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "\n",
      "{'input_ids': tensor([[ 101, 2400, 2408, 3793, 1220, 1447, 4852,  833, 1392, 3175, 7481, 4638,\n",
      "         1213, 7030,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n",
      "\n",
      "tensor([[ 101, 2400, 2408, 3793, 1220, 1447, 4852,  833, 1392, 3175, 7481, 4638,\n",
      "         1213, 7030,  102]])\n"
     ]
    }
   ],
   "source": [
    "inputs = tokenizer([\"并广泛动员社会[MASK]方面的力量\"], return_tensors=\"pt\")\n",
    "labels = tokenizer([\"并广泛动员社会各方面的力量\"], return_tensors=\"pt\")[\"input_ids\"]  #返回Tensor对象\n",
    "print(inputs)\n",
    "print()\n",
    "print(tokenizer([\"并广泛动员社会各方面的力量\"], return_tensors=\"pt\"))\n",
    "print()\n",
    "print(labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "cf7dacd2-e64d-41f9-93cf-1d17090ef2b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "outputs = model(**inputs, labels=labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "fad03334-aef9-4385-8f2f-d0df9c1bd8b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss = outputs.loss\n",
    "logits = outputs.logits"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "da647e41-bdcd-4c08-a27c-0ed9d05b77fd",
   "metadata": {},
   "source": [
    "## 2. BertForNextSentencePrediction"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "987adbde-ddda-4fec-83b0-d7eb2e3624d9",
   "metadata": {},
   "source": [
    "预训练任务之一，预测第二个句子是否是第一个句子的下一句"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "00a286cc-26f4-4a4b-a7ae-b6a506059e30",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import BertTokenizer, BertForNextSentencePrediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "d6981870-3261-4cff-80e0-d03cd64ffd0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "1dd04c4c-957a-47c5-905d-bc6c41db7377",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at bert-base-chinese were not used when initializing BertForNextSentencePrediction: ['cls.predictions.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.weight']\n",
      "- This IS expected if you are initializing BertForNextSentencePrediction from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForNextSentencePrediction from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\")\n",
    "model = BertForNextSentencePrediction.from_pretrained(\"bert-base-chinese\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "bef8cd80-629b-4c17-9247-2b1b35e2ca89",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt = \"在我的后花园，可以看见墙外有两棵树\"\n",
    "next_sentence1 = \"一棵是枣树，另一科也是枣树\"\n",
    "next_sentence2 = \"一九二四年九月十五日\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "4cc90195-0c91-4a10-9d45-3bb519b64846",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 101, 1762, 2769, 4638, 1400, 5709, 1736, 8024, 1377,  809, 4692, 6224,\n",
      "         1870, 1912, 3300,  697, 3484, 3409,  102,  671, 3484, 3221, 3365, 3409,\n",
      "         8024, 1369,  671, 4906,  738, 3221, 3365, 3409,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1]])}\n"
     ]
    }
   ],
   "source": [
    "encoding = tokenizer(prompt, next_sentence1, return_tensors=\"pt\")\n",
    "print(encoding)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "111cea4a-991c-4886-909d-ee913424b422",
   "metadata": {},
   "outputs": [],
   "source": [
    "outputs = model(**encoding, labels=torch.LongTensor([1]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "fdf89d08-1f37-4503-b5e5-50ec89506acf",
   "metadata": {},
   "outputs": [],
   "source": [
    "logits = outputs.logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "d9659991-097b-44e6-9828-611cb04de6ad",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(5.8255, grad_fn=<SelectBackward>) \n",
      " tensor(-5.0126, grad_fn=<SelectBackward>) torch.Size([1, 2])\n"
     ]
    }
   ],
   "source": [
    "print(logits[0, 0], '\\n', logits[0, 1], logits.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "1ee234a4-bcb4-43f1-ae92-437221966dda",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[ 101, 1762, 2769, 4638, 1400, 5709, 1736, 8024, 1377,  809, 4692, 6224,\n",
      "         1870, 1912, 3300,  697, 3484, 3409,  102,  671,  736,  753, 1724, 2399,\n",
      "          736, 3299, 1282,  758, 3189,  102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1]])}\n"
     ]
    }
   ],
   "source": [
    "encoding = tokenizer(prompt, next_sentence2, return_tensors=\"pt\")\n",
    "print(encoding)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "94ef9fd9-63fd-4ea0-adfb-9ff7ac796344",
   "metadata": {},
   "outputs": [],
   "source": [
    "outputs = model(**encoding, labels=torch.LongTensor([1]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "a7cb2a9e-5de5-4bb2-9136-468c2b535aa9",
   "metadata": {},
   "outputs": [],
   "source": [
    "logits = outputs.logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "3330e85d-2273-47cf-a1cd-d4ade29cf2b5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(1.0638, grad_fn=<SelectBackward>) \n",
      " tensor(3.4684, grad_fn=<SelectBackward>) torch.Size([1, 2])\n"
     ]
    }
   ],
   "source": [
    "print(logits[0, 0], '\\n', logits[0, 1], logits.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a203919b-58a9-43d9-89fb-36a267162748",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "state": {
     "0bfeffe58c9b475e92fafe7eb570acda": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {}
     },
     "3069045a268a42299b8b31bb697ab3df": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "DescriptionStyleModel",
      "state": {
       "description_width": ""
      }
     },
     "4a653c1f90904e37951938219aeb1f8a": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "FloatProgressModel",
      "state": {
       "bar_style": "success",
       "layout": "IPY_MODEL_7cba6bc791cd47b7a278939c76096bb8",
       "max": 411577189,
       "style": "IPY_MODEL_a6f1e6da5f554cc1b9c9a494d5452c12",
       "value": 411577189
      }
     },
     "717d43d5139845cb9db4c07ea86163c6": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "DescriptionStyleModel",
      "state": {
       "description_width": ""
      }
     },
     "78b09c8531784e77abd69e7ae5f9bb35": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "HTMLModel",
      "state": {
       "layout": "IPY_MODEL_906a609c1101455e9efbbe04dcb6f98e",
       "style": "IPY_MODEL_3069045a268a42299b8b31bb697ab3df",
       "value": "Downloading pytorch_model.bin: 100%"
      }
     },
     "7cba6bc791cd47b7a278939c76096bb8": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {}
     },
     "86f86dc26bf046c1b726570155e2bdc9": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "HBoxModel",
      "state": {
       "children": [
        "IPY_MODEL_78b09c8531784e77abd69e7ae5f9bb35",
        "IPY_MODEL_4a653c1f90904e37951938219aeb1f8a",
        "IPY_MODEL_9ee3325874b14b6cbc4fdf17f1a67826"
       ],
       "layout": "IPY_MODEL_0bfeffe58c9b475e92fafe7eb570acda"
      }
     },
     "906a609c1101455e9efbbe04dcb6f98e": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {}
     },
     "9ee3325874b14b6cbc4fdf17f1a67826": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "HTMLModel",
      "state": {
       "layout": "IPY_MODEL_ba9e9e984179404bbe0a68897c377331",
       "style": "IPY_MODEL_717d43d5139845cb9db4c07ea86163c6",
       "value": " 393M/393M [07:03&lt;00:00, 999kB/s]"
      }
     },
     "a6f1e6da5f554cc1b9c9a494d5452c12": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "ProgressStyleModel",
      "state": {
       "description_width": ""
      }
     },
     "ba9e9e984179404bbe0a68897c377331": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {}
     }
    },
    "version_major": 2,
    "version_minor": 0
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
