{
 "cells": [
  {
   "cell_type": "code",
   "id": "ac7cda5700b3bd0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-08T11:17:57.719103Z",
     "start_time": "2024-11-08T11:17:57.691544Z"
    }
   },
   "source": [
    "import os\n",
    "\n",
    "# Directory holding the local Qwen2.5-1.5B-Instruct model files\n",
    "model_dir = os.path.join('D:', os.path.sep, 'ModelSpace', 'Qwen2.5', 'Qwen2.5-1.5B-Instruct')\n",
    "model_dir"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'D:\\\\ModelSpace\\\\Qwen2.5\\\\Qwen2.5-1.5B-Instruct'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Initialize the model from the local checkpoint.\n",
    "# torch_dtype=\"auto\" uses the dtype recorded in the checkpoint config;\n",
    "# device_map=\"auto\" lets transformers place the weights automatically.\n",
    "from transformers import AutoModelForCausalLM\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_dir,\n",
    "    torch_dtype=\"auto\",\n",
    "    device_map=\"auto\",\n",
    "    local_files_only=True,\n",
    ")\n",
    "print(model)"
   ],
   "id": "12a0eebef098627d"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-08T11:18:10.035090Z",
     "start_time": "2024-11-08T11:18:05.969176Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Initialize the tokenizer from the same local checkpoint\n",
    "# (local_files_only=True prevents any download attempt).\n",
    "from transformers import Qwen2Tokenizer\n",
    "tokenizer = Qwen2Tokenizer.from_pretrained(\n",
    "    model_dir,\n",
    "    local_files_only=True,\n",
    ")\n",
    "print(tokenizer)"
   ],
   "id": "e4c6504b-2598-4894-a01f-50cd9074454f",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Software\\miniconda3\\pkgs\\Qwen2.5\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Qwen2Tokenizer(name_or_path='D:\\ModelSpace\\Qwen2.5\\Qwen2.5-1.5B-Instruct', vocab_size=151643, model_max_length=131072, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'eos_token': '<|im_end|>', 'pad_token': '<|endoftext|>', 'additional_special_tokens': ['<|im_start|>', '<|im_end|>', '<|object_ref_start|>', '<|object_ref_end|>', '<|box_start|>', '<|box_end|>', '<|quad_start|>', '<|quad_end|>', '<|vision_start|>', '<|vision_end|>', '<|vision_pad|>', '<|image_pad|>', '<|video_pad|>']}, clean_up_tokenization_spaces=False),  added_tokens_decoder={\n",
      "\t151643: AddedToken(\"<|endoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151644: AddedToken(\"<|im_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151645: AddedToken(\"<|im_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151646: AddedToken(\"<|object_ref_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151647: AddedToken(\"<|object_ref_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151648: AddedToken(\"<|box_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151649: AddedToken(\"<|box_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151650: AddedToken(\"<|quad_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151651: AddedToken(\"<|quad_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151652: AddedToken(\"<|vision_start|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151653: AddedToken(\"<|vision_end|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151654: AddedToken(\"<|vision_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151655: AddedToken(\"<|image_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151656: AddedToken(\"<|video_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t151657: AddedToken(\"<tool_call>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151658: AddedToken(\"</tool_call>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151659: AddedToken(\"<|fim_prefix|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151660: AddedToken(\"<|fim_middle|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151661: AddedToken(\"<|fim_suffix|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151662: AddedToken(\"<|fim_pad|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151663: AddedToken(\"<|repo_name|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "\t151664: AddedToken(\"<|file_sep|>\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=False),\n",
      "}\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "id": "7edd5f4b7e2d49c9",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-08T12:48:48.713846Z",
     "start_time": "2024-11-08T12:48:33.967552Z"
    }
   },
   "source": [
    "# Tokenize mixed English/Chinese text. Non-ASCII characters appear as\n",
    "# byte-level BPE pieces (e.g. 'åĪĨ' is the UTF-8 bytes of a Chinese\n",
    "# character remapped to printable characters), as seen in the output below.\n",
    "text = 'Transformers分词：台风又双叒叕来了！'\n",
    "tokens = tokenizer.tokenize(text)\n",
    "\n",
    "print(tokens)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Transform', 'ers', 'åĪĨ', 'è¯į', 'ï¼ļ', 'åı°é£İ', 'åıĪ', 'åıĮ', 'åı', 'Ĵ', 'åıķ', 'æĿ¥äºĨ', 'ï¼ģ']\n"
     ]
    }
   ],
   "execution_count": 14
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "import re\n",
    "PRETOKENIZE_REGEX = r\"\"\"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+\"\"\"\n",
    "pat = re.compile(PRETOKENIZE_REGEX)\n",
    "re.findall(pat, text)"
   ],
   "id": "648dd70a7c63d9b",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-08T12:13:48.656159Z",
     "start_time": "2024-11-08T12:13:48.644502Z"
    }
   },
   "cell_type": "code",
   "source": "# Inspect the raw UTF-8 bytes that byte-level BPE operates on\n'Transformers分词'.encode(\"utf-8\")",
   "id": "80436ef6-ee02-46bc-82be-ca8415002255",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "b'Transformers\\xe5\\x88\\x86\\xe8\\xaf\\x8d'"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 9
  },
  {
   "cell_type": "code",
   "id": "c72b110e-f161-4c31-929d-0c1171684d60",
   "metadata": {},
   "source": [
    "# Map the token strings from the previous cell to their vocabulary ids\n",
    "ids = tokenizer.convert_tokens_to_ids(tokens)\n",
    "\n",
    "print(ids)"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "7557708a-2b14-448f-84cc-6c74957b4166",
   "metadata": {},
   "source": [
    "# encode() combines tokenize + convert_tokens_to_ids in one call\n",
    "# (and may add special tokens depending on tokenizer configuration)\n",
    "token_ids = tokenizer.encode(text)\n",
    "\n",
    "print(token_ids)"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "52df66b3-d415-4034-b016-09f4f3e7a95a",
   "metadata": {},
   "source": [
    "# Decode the ids back to text -- round-trip of encode() above\n",
    "token_text = tokenizer.decode(token_ids)\n",
    "\n",
    "print(token_text)"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "3f93e552-146c-4782-b3ad-263307f45198",
   "metadata": {},
   "source": [
    "# Add regular tokens; tokens already present in the vocabulary are\n",
    "# filtered out first, so only genuinely new ones are added.\n",
    "new_tokens = [\"老牛同学\", \"imxulin\"]\n",
    "new_tokens = set(new_tokens) - set(tokenizer.vocab.keys())\n",
    "\n",
    "num_add_tokens = tokenizer.add_tokens(list(new_tokens))\n",
    "\n",
    "print(f'新增加 {num_add_tokens}个普通Token到词表。')"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "b9b93348-5631-41a1-ba50-dd0935fd8f32",
   "metadata": {},
   "source": [
    "mew_special_tokens = {'cls_token': '[LNTX]'}\n",
    "\n",
    "num_add_spec_tokens = tokenizer.add_special_tokens(mew_special_tokens)\n",
    "\n",
    "print(f'新增加 {num_add_spec_tokens}个特殊Token到词表。')\n",
    "print(f'特殊Token值：{tokenizer.cls_token}')"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "73ff8e4d-780d-44bd-b4e4-50890becad5c",
   "metadata": {},
   "source": [
    "# [LNTX] was registered as a special token above, so it should appear\n",
    "# as a single intact token in the output (special tokens are not split).\n",
    "text = '大家[LNTX]好，我是老牛同学，他是一位大模型[LNTX]爱好者！'\n",
    "tokens = tokenizer.tokenize(text)\n",
    "\n",
    "print(tokens)"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "ed97bfab-1646-43ee-92d8-cbed70da716c",
   "metadata": {},
   "source": [
    "# Resize the embedding matrix to match the enlarged vocabulary --\n",
    "# otherwise the newly added token ids would index past the embedding table.\n",
    "print(f'调整前：{model.model.embed_tokens.weight.size()}')\n",
    "\n",
    "model.resize_token_embeddings(len(tokenizer))\n",
    "\n",
    "print(f'调整后：{model.model.embed_tokens.weight.size()}')"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "9cde065d-7909-4948-a042-48bc133a832a",
   "metadata": {},
   "source": [],
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
