{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 文本规范化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import unicodedata\n",
    "class DEL_ASCII(object):\n",
    "    \"\"\" Used by the `refactor_text_vdom` method to filter out the character: b'\\xef\\xb8\\x8f' \"\"\"\n",
    "    def do(self, text):\n",
    "        \"\"\" Normalize `text`: strip accents, split on punctuation, return the tokens. \"\"\"\n",
    "        tokens = []\n",
    "        for word in self.whitespace_tokenize(text):\n",
    "            stripped = self._run_strip_accents(word)\n",
    "            tokens.extend(self._run_split_on_punc(stripped))\n",
    "        return self.whitespace_tokenize(\" \".join(tokens))\n",
    "\n",
    "    def whitespace_tokenize(self, text):\n",
    "        \"\"\" Strip surrounding whitespace and split the text into words. \"\"\"\n",
    "        stripped = text.strip()\n",
    "        return stripped.split() if stripped else []\n",
    "\n",
    "    def _run_strip_accents(self, text):\n",
    "        \"\"\" Remove accent marks (Unicode combining characters, category Mn) from the text. \"\"\"\n",
    "        decomposed = unicodedata.normalize(\"NFD\", text)\n",
    "        kept = [ch for ch in decomposed if unicodedata.category(ch) != \"Mn\"]\n",
    "        return \"\".join(kept)\n",
    "\n",
    "    def _run_split_on_punc(self, text):\n",
    "        \"\"\" Split on punctuation; each punctuation char becomes its own token. \"\"\"\n",
    "        pieces = []\n",
    "        new_word = True\n",
    "        for ch in text:\n",
    "            if self._is_punctuation(ch):\n",
    "                # A punctuation character always forms a standalone token.\n",
    "                pieces.append([ch])\n",
    "                new_word = True\n",
    "            else:\n",
    "                if new_word:\n",
    "                    pieces.append([])\n",
    "                new_word = False\n",
    "                pieces[-1].append(ch)\n",
    "        return [\"\".join(piece) for piece in pieces]\n",
    "\n",
    "    def _is_punctuation(self, char):\n",
    "        \"\"\" Check whether `char` is a punctuation character. \"\"\"\n",
    "        cp = ord(char)\n",
    "        # Treat every non-letter, non-digit, non-space ASCII character as punctuation,\n",
    "        # even though e.g. \"^\", \"$\" and \"`\" are not punctuation in Unicode.\n",
    "        if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):\n",
    "            return True\n",
    "        return unicodedata.category(char).startswith(\"P\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['I', 'like', 'cafe']"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = DEL_ASCII()\n",
    "x.do(\"I like     café\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
