{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6b5bcd9f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\yuhon\\.conda\\envs\\nlp\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "c:\\Users\\yuhon\\.conda\\envs\\nlp\\Lib\\site-packages\\huggingface_hub\\file_download.py:144: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in E:\\nlp\\model_cache\\models--google-bert--bert-base-chinese. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.\n",
      "To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development\n",
      "  warnings.warn(message)\n",
      "Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed. Falling back to regular HTTP download. For better performance, install the package with: `pip install huggingface_hub[hf_xet]` or `pip install hf_xet`\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "BertModel(\n",
      "  (embeddings): BertEmbeddings(\n",
      "    (word_embeddings): Embedding(21128, 768, padding_idx=0)\n",
      "    (position_embeddings): Embedding(512, 768)\n",
      "    (token_type_embeddings): Embedding(2, 768)\n",
      "    (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "    (dropout): Dropout(p=0.1, inplace=False)\n",
      "  )\n",
      "  (encoder): BertEncoder(\n",
      "    (layer): ModuleList(\n",
      "      (0-11): 12 x BertLayer(\n",
      "        (attention): BertAttention(\n",
      "          (self): BertSdpaSelfAttention(\n",
      "            (query): Linear(in_features=768, out_features=768, bias=True)\n",
      "            (key): Linear(in_features=768, out_features=768, bias=True)\n",
      "            (value): Linear(in_features=768, out_features=768, bias=True)\n",
      "            (dropout): Dropout(p=0.1, inplace=False)\n",
      "          )\n",
      "          (output): BertSelfOutput(\n",
      "            (dense): Linear(in_features=768, out_features=768, bias=True)\n",
      "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "            (dropout): Dropout(p=0.1, inplace=False)\n",
      "          )\n",
      "        )\n",
      "        (intermediate): BertIntermediate(\n",
      "          (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
      "          (intermediate_act_fn): GELUActivation()\n",
      "        )\n",
      "        (output): BertOutput(\n",
      "          (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
      "          (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (pooler): BertPooler(\n",
      "    (dense): Linear(in_features=768, out_features=768, bias=True)\n",
      "    (activation): Tanh()\n",
      "  )\n",
      ")\n"
     ]
    }
   ],
   "source": [
     "from transformers import AutoModel, AutoTokenizer\n",
    "\n",
    "model_name = \"google-bert/bert-base-chinese\" #模型名称 情感分类\n",
    "cache_dir = \"./model_cache\"\n",
    "\n",
    "model = AutoModel.from_pretrained(model_name,cache_dir= cache_dir)\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name,cache_dir= cache_dir)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f70c9368",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "BertTokenizer(name_or_path='E:\\nlp\\model_cache\\models--google-bert--bert-base-chinese\\snapshots\\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f', vocab_size=21128, model_max_length=512, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}, clean_up_tokenization_spaces=True, added_tokens_decoder={\n",
      "\t0: AddedToken(\"[PAD]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t100: AddedToken(\"[UNK]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t101: AddedToken(\"[CLS]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t102: AddedToken(\"[SEP]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t103: AddedToken(\"[MASK]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "}\n",
      ")\n"
     ]
    }
   ],
   "source": [
     "from transformers import AutoTokenizer, BertTokenizer\n",
    "\n",
    "# token = AutoTokenizer.from_pretrained(\"bert-base-chinese\") #加载bert-base-chinese在线模型\n",
    "\n",
     "#BertTokenizer是bert专属的分词器,不能在gpt中使用。而AutoTokenizer是通用的分词器,可以适配各种模型。\n",
    "\n",
    "token = BertTokenizer.from_pretrained(r\"E:\\nlp\\model_cache\\models--google-bert--bert-base-chinese\\snapshots\\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f\") #本地加载bert-base-chinese模型\n",
    "print(token)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "63840047",
   "metadata": {},
   "source": [
    "使用tokenizer进行文本编码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e67832d7",
   "metadata": {},
    "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_ids : [[101, 791, 1921, 4696, 4638, 1962, 4178, 1557, 8024, 1921, 3698, 4696, 679, 7231, 8013, 102, 0, 0, 0, 0], [101, 3209, 1921, 2769, 6206, 1343, 2110, 3413, 1346, 1217, 6817, 1220, 833, 8024, 2769, 6206, 711, 4408, 677, 102]]\n",
      "token_type_ids : [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n",
      "attention_mask : [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n"
     ]
    }
   ],
   "source": [
    "sents=[\"今天真的好热啊，天气真不错！\",\n",
    "       \"明天我要去学校参加运动会，我要为班上的同学和老师加油。\"]\n",
    "\n",
    "#批量编码句子\n",
    "out = token.batch_encode_plus(batch_text_or_text_pairs=[sents[0],sents[1]],\n",
    "                              add_special_tokens=True,\n",
    "                              max_length=20,\n",
    "                              #当句子长度大于max_length时，截断句子\n",
    "                              truncation=True,\n",
    "                              #长度不足max_length时，填充句子，一律补0\n",
    "                              padding=\"max_length\",\n",
    "                              #可取值为tf,pt,np,默认为list\n",
    "                              return_tensors=None,\n",
     "                              return_token_type_ids=True)\n",
    "# print(out)\n",
    "for k,v in out.items():\n",
    "    print(k,\":\",v)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "b1e8b3ad",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[CLS] 今 天 真 的 好 热 啊 ， 天 气 真 不 错 ！ [SEP] [PAD] [PAD] [PAD] [PAD] [CLS] 明 天 我 要 去 学 校 参 加 运 动 会 ， 我 要 为 班 上 [SEP]\n"
     ]
    }
   ],
   "source": [
    "#解码文本数据\n",
    "print(token.decode(out[\"input_ids\"][0]),token.decode(out[\"input_ids\"][1]))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
