{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "06218222",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): TOKEN_PAD/TOKEN_UNK/TOKEN_START/TOKEN_END are defined in a LATER cell --\n",
    "# this cell fails on Restart & Run All unless that cell is run first.\n",
    "spe_flag = [TOKEN_PAD, TOKEN_UNK, TOKEN_START, TOKEN_END]  # special tokens first -> ids 0-3\n",
    "vocab = spe_flag + [f'{i}' for i in range(10)]  # digit characters '0'-'9' -> ids 4-13\n",
    "\n",
    "# Map each token to its position in the vocab.\n",
    "token_dict_ = {c : i for i, c in enumerate(vocab)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "id": "ab7f62fa",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[2, 4, 5, 6, 7, 8, 9, 10, 11, 3]"
      ]
     },
     "execution_count": 85,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): 'tokenzer' looks like a typo for 'tokenizer'; kept as-is in case later cells use it.\n",
    "tokenzer = Tokenizer(token_dict=token_dict_)\n",
    "# With max_len=10 and both markers, only 8 characters survive truncation: [GO] '0'..'7' [EOS].\n",
    "tokenzer.encode(text=\"0123456789\", max_len=10, start=True, end=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "id": "225c41f8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# file_name = \"cl.txt\"\n",
    "\n",
    "# with open(file_name, 'a', encoding='utf-8') as f:\n",
    "#     for i in range(10):\n",
    "#         print(f\"write {i} to file\")\n",
    "#         f.write(f\"to {i} into file\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ffeec26",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e885019",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "f62f25a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import unicodedata\n",
    "\n",
    "TOKEN_PAD = '[PAD]'  # Token for padding\n",
    "TOKEN_UNK = '[UNK]'  # Token for unknown characters\n",
    "TOKEN_START = '[GO]'  # Token marking sequence start\n",
    "TOKEN_END = '[EOS]'  # Token marking sequence end\n",
    "TOKEN_MASK = '[MASK]'  # Token for masking (unused by Tokenizer below)\n",
    "\n",
    "\n",
    "class Tokenizer(object):\n",
    "    \"\"\"Character-level tokenizer: maps each character of a text to an integer id.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 token_dict,\n",
    "                 token_start=TOKEN_START,\n",
    "                 token_end=TOKEN_END,\n",
    "                 token_unk=TOKEN_UNK,\n",
    "                 token_pad=TOKEN_PAD):\n",
    "        \"\"\"Initialize the tokenizer.\n",
    "\n",
    "        :param token_dict: A dict mapping tokens to indices.\n",
    "        :param token_start: Token marking sequence start.\n",
    "        :param token_end: Token marking sequence end.\n",
    "        :param token_unk: Token substituted for unknown characters.\n",
    "        :param token_pad: Token used for padding.\n",
    "        \"\"\"\n",
    "        self._token_dict = token_dict\n",
    "        self._token_dict_inv = {v: k for k, v in token_dict.items()}\n",
    "        self._token_start = token_start\n",
    "        self._token_end = token_end\n",
    "        self._token_unk = token_unk\n",
    "        self._token_pad = token_pad\n",
    "\n",
    "        # Fall back to the conventional indices 0-3 when a special token\n",
    "        # is missing from token_dict.\n",
    "        self._pad_index = self._token_dict.get(self._token_pad, 0)\n",
    "        self._unk_index = self._token_dict.get(self._token_unk, 1)\n",
    "        self._start_index = self._token_dict.get(self._token_start, 2)\n",
    "        self._end_index = self._token_dict.get(self._token_end, 3)\n",
    "\n",
    "    @staticmethod\n",
    "    def _truncate(tokens, max_len=None, start=True, end=True):\n",
    "        \"\"\"Truncate ``tokens`` in place so the encoded sequence fits ``max_len``.\n",
    "\n",
    "        Reserves one slot each for the start/end markers when requested.\n",
    "        \"\"\"\n",
    "        if max_len is None:\n",
    "            return\n",
    "        n = (1 if start else 0) + (1 if end else 0)\n",
    "        del tokens[max_len - n:]\n",
    "\n",
    "    def _convert_tokens_to_ids(self, tokens):\n",
    "        # Unknown tokens map to the [UNK] index.\n",
    "        return [self._token_dict.get(token, self._unk_index) for token in tokens]\n",
    "\n",
    "    def tokenize(self, text):\n",
    "        \"\"\"Split ``text`` into a list of string tokens.\"\"\"\n",
    "        tokens = self._tokenize(text)\n",
    "        return tokens\n",
    "\n",
    "    def encode(self, text, max_len=None, start=True, end=True):\n",
    "        \"\"\"Convert ``text`` to a list of token ids.\n",
    "\n",
    "        :param text: Input string.\n",
    "        :param max_len: If given, truncate and pad the result to this length.\n",
    "        :param start: Whether to prepend the start token.\n",
    "        :param end: Whether to append the end token.\n",
    "        :return: A list of integer token ids.\n",
    "        \"\"\"\n",
    "        tokens = self._tokenize(text)\n",
    "        self._truncate(tokens, max_len, start, end)\n",
    "        token_ids = self._convert_tokens_to_ids(tokens)\n",
    "        if start:\n",
    "            token_ids = [self._start_index] + token_ids\n",
    "        if end:\n",
    "            token_ids = token_ids + [self._end_index]\n",
    "        if max_len is not None:\n",
    "            pad_len = max_len - len(token_ids)\n",
    "            token_ids += [self._pad_index] * pad_len\n",
    "        return token_ids\n",
    "\n",
    "    def decode(self, ids):\n",
    "        \"\"\"Map a list of ids back to tokens (raises KeyError on unknown ids).\"\"\"\n",
    "        tokens = [self._token_dict_inv[i] for i in ids]\n",
    "        return tokens\n",
    "\n",
    "    def _tokenize(self, text):\n",
    "        # Space-separate every character, then re-split; this drops any\n",
    "        # whitespace originally present in ``text``.\n",
    "        spaced = ''\n",
    "        for ch in text:\n",
    "            spaced += ch + ' '\n",
    "        tokens = []\n",
    "        for word in spaced.strip().split():\n",
    "            tokens += self._word_piece_tokenize(word)\n",
    "        return tokens\n",
    "\n",
    "    def _word_piece_tokenize(self, word):\n",
    "        \"\"\"Greedy longest-match-first WordPiece split of ``word``.\n",
    "\n",
    "        Continuation pieces are prefixed with '##'; an unmatched character\n",
    "        is emitted as-is, one character at a time.\n",
    "        \"\"\"\n",
    "        if word in self._token_dict:\n",
    "            return [word]\n",
    "        tokens = []\n",
    "        start, stop = 0, 0\n",
    "        while start < len(word):\n",
    "            stop = len(word)\n",
    "            while stop > start:\n",
    "                sub = word[start:stop]\n",
    "                if start > 0:\n",
    "                    sub = '##' + sub\n",
    "                if sub in self._token_dict:\n",
    "                    break\n",
    "                stop -= 1\n",
    "            if start == stop:\n",
    "                # No match at all: emit the single character and move on.\n",
    "                stop += 1\n",
    "            tokens.append(sub)\n",
    "            start = stop\n",
    "        return tokens\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_punctuation(ch):\n",
    "        # ASCII punctuation ranges plus any Unicode 'P*' category.\n",
    "        code = ord(ch)\n",
    "        return (33 <= code <= 47 or\n",
    "                58 <= code <= 64 or\n",
    "                91 <= code <= 96 or\n",
    "                123 <= code <= 126 or\n",
    "                unicodedata.category(ch).startswith('P'))\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_cjk_character(ch):\n",
    "        # CJK Unified Ideographs blocks (extensions and compatibility included).\n",
    "        code = ord(ch)\n",
    "        return (0x4E00 <= code <= 0x9FFF or\n",
    "                0x3400 <= code <= 0x4DBF or\n",
    "                0x20000 <= code <= 0x2A6DF or\n",
    "                0x2A700 <= code <= 0x2B73F or\n",
    "                0x2B740 <= code <= 0x2B81F or\n",
    "                0x2B820 <= code <= 0x2CEAF or\n",
    "                0xF900 <= code <= 0xFAFF or\n",
    "                0x2F800 <= code <= 0x2FA1F)\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_space(ch):\n",
    "        return (ch == ' ' or ch == '\\n' or ch == '\\r' or ch == '\\t' or\n",
    "                unicodedata.category(ch) == 'Zs')\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_control(ch):\n",
    "        return unicodedata.category(ch) in ('Cc', 'Cf')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "080ab5f8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['1', '', '1']"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# split(' ') keeps empty strings between consecutive separators, unlike whitespace split()\n",
    "\"1  1\".split(\" \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "290ab0bc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e469cac",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "3d4255ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "text:1234\n",
      "text:['1', '##2', '##3', '##4']\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "['[CLS]', '1', '##2', '##3', '##4', '[SEP]']"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): BertTokenizer is defined in a LATER cell -- this fails on Restart & Run All.\n",
    "btk = BertTokenizer({})\n",
    "\n",
    "# Empty token_dict: every character falls through WordPiece as '##'-prefixed singles.\n",
    "btk.tokenize(\"1234\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "f4f09a0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "TOKEN_PAD = ''  # Token for padding; NOTE(review): shadows the '[PAD]' value defined earlier\n",
    "TOKEN_UNK = '[UNK]'  # Token for unknown words\n",
    "TOKEN_CLS = '[CLS]'  # Token for classification\n",
    "TOKEN_SEP = '[SEP]'  # Token for separation\n",
    "TOKEN_MASK = '[MASK]'  # Token for masking\n",
    "\n",
    "class BertTokenizer(object):\n",
    "    \"\"\"BERT-style WordPiece tokenizer for one or two text segments.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 token_dict,\n",
    "                 token_cls=TOKEN_CLS,\n",
    "                 token_sep=TOKEN_SEP,\n",
    "                 token_unk=TOKEN_UNK,\n",
    "                 pad_index=0,\n",
    "                 cased=False):\n",
    "        \"\"\"Initialize tokenizer.\n",
    "\n",
    "        :param token_dict: A dict maps tokens to indices.\n",
    "        :param token_cls: The token represents classification.\n",
    "        :param token_sep: The token represents separator.\n",
    "        :param token_unk: The token represents unknown token.\n",
    "        :param pad_index: The index to pad.\n",
    "        :param cased: Whether to keep the case.\n",
    "        \"\"\"\n",
    "        self._token_dict = token_dict\n",
    "        self._token_dict_inv = {v: k for k, v in token_dict.items()}\n",
    "        self._token_cls = token_cls\n",
    "        self._token_sep = token_sep\n",
    "        self._token_unk = token_unk\n",
    "        self._pad_index = pad_index\n",
    "        self._cased = cased\n",
    "\n",
    "    @staticmethod\n",
    "    def _truncate(first_tokens, second_tokens=None, max_len=None):\n",
    "        \"\"\"Truncate the token lists in place so the packed sequence fits ``max_len``.\n",
    "\n",
    "        When two segments are given, the currently longer one loses a token\n",
    "        on each round.\n",
    "        \"\"\"\n",
    "        if max_len is None:\n",
    "            return\n",
    "\n",
    "        if second_tokens is not None:\n",
    "            while True:\n",
    "                total_len = len(first_tokens) + len(second_tokens)\n",
    "                if total_len <= max_len - 3:  # 3 for [CLS] .. tokens_a .. [SEP] .. tokens_b [SEP]\n",
    "                    break\n",
    "                if len(first_tokens) > len(second_tokens):\n",
    "                    first_tokens.pop()\n",
    "                else:\n",
    "                    second_tokens.pop()\n",
    "        else:\n",
    "            del first_tokens[max_len - 2:]  # 2 for [CLS] .. tokens .. [SEP]\n",
    "\n",
    "    def _pack(self, first_tokens, second_tokens=None):\n",
    "        # Returns (packed tokens, first-segment length incl. [CLS]+[SEP],\n",
    "        # second-segment length incl. its trailing [SEP], or 0 if absent).\n",
    "        first_packed_tokens = [self._token_cls] + first_tokens + [self._token_sep]\n",
    "        if second_tokens is not None:\n",
    "            second_packed_tokens = second_tokens + [self._token_sep]\n",
    "            return first_packed_tokens + second_packed_tokens, len(first_packed_tokens), len(second_packed_tokens)\n",
    "        else:\n",
    "            return first_packed_tokens, len(first_packed_tokens), 0\n",
    "\n",
    "    def _convert_tokens_to_ids(self, tokens):\n",
    "        # Unknown tokens map to the [UNK] id (None if [UNK] itself is absent).\n",
    "        unk_id = self._token_dict.get(self._token_unk)\n",
    "        return [self._token_dict.get(token, unk_id) for token in tokens]\n",
    "\n",
    "    def tokenize(self, first, second=None):\n",
    "        \"\"\"Split text to tokens.\n",
    "\n",
    "        :param first: First text.\n",
    "        :param second: Second text.\n",
    "        :return: A list of strings.\n",
    "        \"\"\"\n",
    "        first_tokens = self._tokenize(first)\n",
    "        second_tokens = self._tokenize(second) if second is not None else None\n",
    "        tokens, _, _ = self._pack(first_tokens, second_tokens)\n",
    "        return tokens\n",
    "\n",
    "    def encode(self, first, second=None, max_len=None):\n",
    "        \"\"\"Encode one or two texts as (token_ids, segment_ids).\n",
    "\n",
    "        :param first: First text.\n",
    "        :param second: Optional second text.\n",
    "        :param max_len: If given, truncate then pad to this length.\n",
    "        :return: Tuple of (token id list, segment id list).\n",
    "        \"\"\"\n",
    "        first_tokens = self._tokenize(first)\n",
    "        second_tokens = self._tokenize(second) if second is not None else None\n",
    "        self._truncate(first_tokens, second_tokens, max_len)\n",
    "        tokens, first_len, second_len = self._pack(first_tokens, second_tokens)\n",
    "\n",
    "        token_ids = self._convert_tokens_to_ids(tokens)\n",
    "        segment_ids = [0] * first_len + [1] * second_len\n",
    "\n",
    "        if max_len is not None:\n",
    "            pad_len = max_len - first_len - second_len\n",
    "            token_ids += [self._pad_index] * pad_len\n",
    "            segment_ids += [0] * pad_len\n",
    "\n",
    "        return token_ids, segment_ids\n",
    "\n",
    "    def decode(self, ids):\n",
    "        \"\"\"Recover the text segment(s) from ``ids``.\n",
    "\n",
    "        :return: The first segment's tokens, or (first, second) when a\n",
    "                 second segment is present.\n",
    "        \"\"\"\n",
    "        sep = ids.index(self._token_dict[self._token_sep])\n",
    "        try:\n",
    "            stop = ids.index(self._pad_index)\n",
    "        except ValueError:\n",
    "            # No padding present: the sequence runs to the end.\n",
    "            stop = len(ids)\n",
    "        tokens = [self._token_dict_inv[i] for i in ids]\n",
    "        first = tokens[1:sep]\n",
    "        if sep < stop - 1:\n",
    "            second = tokens[sep + 1:stop - 1]\n",
    "            return first, second\n",
    "        return first\n",
    "\n",
    "    def _tokenize(self, text):\n",
    "        # Optionally strip combining marks and lowercase, then space-separate\n",
    "        # punctuation/CJK characters before WordPiece splitting.\n",
    "        if not self._cased:\n",
    "            text = unicodedata.normalize('NFD', text)\n",
    "            text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])\n",
    "            text = text.lower()\n",
    "        spaced = ''\n",
    "        for ch in text:\n",
    "            if self._is_punctuation(ch) or self._is_cjk_character(ch):\n",
    "                spaced += ' ' + ch + ' '\n",
    "            elif self._is_space(ch):\n",
    "                spaced += ' '\n",
    "            elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):\n",
    "                # Drop NUL, the replacement character and control characters.\n",
    "                continue\n",
    "            else:\n",
    "                spaced += ch\n",
    "        tokens = []\n",
    "        for word in spaced.strip().split():\n",
    "            tokens += self._word_piece_tokenize(word)\n",
    "        return tokens\n",
    "\n",
    "    def _word_piece_tokenize(self, word):\n",
    "        \"\"\"Greedy longest-match-first WordPiece split of ``word``.\"\"\"\n",
    "        if word in self._token_dict:\n",
    "            return [word]\n",
    "        tokens = []\n",
    "        start, stop = 0, 0\n",
    "        while start < len(word):\n",
    "            stop = len(word)\n",
    "            while stop > start:\n",
    "                sub = word[start:stop]\n",
    "                if start > 0:\n",
    "                    sub = '##' + sub\n",
    "                if sub in self._token_dict:\n",
    "                    break\n",
    "                stop -= 1\n",
    "            if start == stop:\n",
    "                # No match at all: emit the single character and move on.\n",
    "                stop += 1\n",
    "            tokens.append(sub)\n",
    "            start = stop\n",
    "        return tokens\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_punctuation(ch):\n",
    "        # ASCII punctuation ranges plus any Unicode 'P*' category.\n",
    "        code = ord(ch)\n",
    "        return (33 <= code <= 47 or\n",
    "                58 <= code <= 64 or\n",
    "                91 <= code <= 96 or\n",
    "                123 <= code <= 126 or\n",
    "                unicodedata.category(ch).startswith('P'))\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_cjk_character(ch):\n",
    "        # CJK Unified Ideographs blocks (extensions and compatibility included).\n",
    "        code = ord(ch)\n",
    "        return (0x4E00 <= code <= 0x9FFF or\n",
    "                0x3400 <= code <= 0x4DBF or\n",
    "                0x20000 <= code <= 0x2A6DF or\n",
    "                0x2A700 <= code <= 0x2B73F or\n",
    "                0x2B740 <= code <= 0x2B81F or\n",
    "                0x2B820 <= code <= 0x2CEAF or\n",
    "                0xF900 <= code <= 0xFAFF or\n",
    "                0x2F800 <= code <= 0x2FA1F)\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_space(ch):\n",
    "        return (ch == ' ' or ch == '\\n' or ch == '\\r' or ch == '\\t' or\n",
    "                unicodedata.category(ch) == 'Zs')\n",
    "\n",
    "    @staticmethod\n",
    "    def _is_control(ch):\n",
    "        return unicodedata.category(ch) in ('Cc', 'Cf')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2ae84c6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8885349",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7d2a59c5",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
