{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1b032014-b3c1-4ea4-ac5a-7646b1c79fdf",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# def print_format_table():\n",
    "#     \"\"\"\n",
    "#     prints table of formatted text format options\n",
    "#     \"\"\"\n",
    "#     for style in range(8):\n",
    "#         for fg in range(30, 38):\n",
    "#             s1 = ''\n",
    "#             for bg in range(40, 48):\n",
    "#                 format = ';'.join([str(style), str(fg), str(bg)])\n",
    "#                 s1 += '\\x1b[%sm %s \\x1b[0m' % (format, format)\n",
    "#             print(s1)\n",
    "#         print('\\n')\n",
    "# print_format_table()\n",
    "\n",
    "# for fg in range(31, 38):\n",
    "#     print('\\x1b[2;%s;40m %s \\x1b[0m' % (fg, \"hello\"))\n",
    "    \n",
    "class RainbowPrinter:\n",
    "    \"\"\"Prints words in a repeating cycle of ANSI foreground colors (31-36).\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.idx = 0\n",
    "        # bold style; fg color code filled in per word; '48' kept from original format\n",
    "        self.format_str = '\\x1b[1;%s;48m%s \\x1b[0m'\n",
    "\n",
    "    def print_word(self, word):\n",
    "        \"\"\"Print a single word (no newline), advancing the color cycle.\"\"\"\n",
    "        self.idx += 1\n",
    "        if self.idx == 7:\n",
    "            self.idx = 1\n",
    "        print(self.format_str % (30 + self.idx, word), end='')\n",
    "\n",
    "    def print_words(self, words):\n",
    "        \"\"\"Print a sentence made up of token words, one color per token.\n",
    "\n",
    "        Raises:\n",
    "            TypeError: if `words` is not a list or tuple.\n",
    "        \"\"\"\n",
    "        if isinstance(words, (list, tuple)):\n",
    "            for token_word in words:\n",
    "                self.print_word(token_word)\n",
    "            print('\\n')\n",
    "        else:\n",
    "            # a bare `raise` here had no active exception and produced an\n",
    "            # opaque RuntimeError; raise an explicit, descriptive error instead\n",
    "            raise TypeError(f\"expected list or tuple of words, got {type(words).__name__}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "83a7952c-2859-4621-990e-0a93407236db",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import re\n",
    "from collections import Counter\n",
    "\n",
    "\"\"\"\n",
    "SentencePiece treats the input text just as a sequence of Unicode characters. \n",
    "Whitespace is also handled as a normal symbol. \n",
    "To handle the whitespace as a basic token explicitly, SentencePiece first \n",
    "escapes the whitespace with a meta symbol \"▁\" (U+2581) as follows.\n",
    "\"\"\"\n",
    "\n",
    "#\n",
    "# corpus = {\n",
    "#     word_0: [token0, token1, ..., tokenm],\n",
    "#     word_1: [token0, token1, ..., tokenm],\n",
    "#     ...\n",
    "#     word_n: [token0, token1, ..., tokenm],\n",
    "# }\n",
    "#\n",
    "# vocab {\n",
    "#     token_0: count_0,\n",
    "#     token_1: count_1,\n",
    "#       ...\n",
    "#     token_m: count_m,\n",
    "# }\n",
    "#\n",
    "\n",
    "class BytePairEncoder:\n",
    "    \"\"\"A minimal byte-pair-encoding tokenizer (SentencePiece-style).\n",
    "\n",
    "    Whitespace is made explicit by prefixing each word with the meta\n",
    "    symbol '▁' (U+2581) before learning merges.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        self.ws_token = '▁'  # meta symbol marking a word boundary\n",
    "        self.unk_token = '<UNK>'  # fallback token for unknown symbols (id 0)\n",
    "        \n",
    "        self.corpus = {}  # word -> current list of sub-tokens for that word\n",
    "        self.word_count = {}  # word -> occurrence count in the training text\n",
    "        self.vocab = Counter()  # token -> frequency (zero-count entries are kept)\n",
    "        \n",
    "        self.id_tokens = {}  # id -> token (filled by gen_id_token_map)\n",
    "        self.token_ids = {}  # token -> id\n",
    "       \n",
    "    \n",
    "    def init_state(self, content):\n",
    "        \"\"\"Build corpus/word_count from training text and seed vocab with characters.\n",
    "\n",
    "        Args:\n",
    "            content: an iterable of lines, or a single string (split on newlines).\n",
    "        \"\"\"\n",
    "        # Accept a plain string too: iterating a str would yield single\n",
    "        # characters rather than lines, so split it explicitly.\n",
    "        if isinstance(content, str):\n",
    "            content = content.splitlines()\n",
    "        for line in content:\n",
    "            sentence = self.preprocess(line.strip())\n",
    "            self.process_sentence(sentence)\n",
    "            \n",
    "        # Seed the vocabulary with single characters, weighted by word frequency.\n",
    "        alphabet = {}\n",
    "        for word, chrs in self.corpus.items():\n",
    "            for ch in chrs:\n",
    "                alphabet[ch] = alphabet.get(ch, 0) + self.word_count[word]\n",
    "        self.vocab.update(alphabet)\n",
    "        \n",
    "        # for debug\n",
    "        self._dump_init()\n",
    "      \n",
    "    \n",
    "    def process_sentence(self, sentence):\n",
    "        \"\"\"Register every word of `sentence` (prefixed with the ws marker).\"\"\"\n",
    "        for raw_word in sentence.split():\n",
    "            marked = self.ws_token + raw_word\n",
    "            if marked in self.corpus:\n",
    "                self.word_count[marked] += 1\n",
    "            else:\n",
    "                # first sighting: start the word off as its individual characters\n",
    "                self.corpus[marked] = list(marked)\n",
    "                self.word_count[marked] = 1\n",
    "            \n",
    "    \n",
    "    def preprocess(self, text):\n",
    "        \"\"\"Collapse runs of whitespace into single spaces.\"\"\"\n",
    "        # raw string: '\\s' in a plain string is an invalid escape (SyntaxWarning)\n",
    "        return re.sub(r'\\s+', ' ', text)\n",
    "        \n",
    "        \n",
    "    def _dump_init(self):\n",
    "        \"\"\"Debug helper: pretty-print corpus, word counts and vocab after init.\"\"\"\n",
    "        separator = '-.' * 20\n",
    "        print(\"=\" * 12 + \" dump initial state \" + \"=\" * 12)\n",
    "        print(\"==> dump corpus <==\")\n",
    "        for word in self.corpus:\n",
    "            print(f\"{word} => {self.corpus[word]}\")\n",
    "        print(separator)\n",
    "        print(\"==> dump wordcnt <==\")\n",
    "        for word in self.word_count:\n",
    "            print(f\"{word} => {self.word_count[word]}\")\n",
    "        print(separator)\n",
    "        print(\"==> dump vocab <==\")\n",
    "        for token in self.vocab:\n",
    "            print(f\"{token} => {self.vocab[token]}\")\n",
    "        print(\"-\" * 40)\n",
    "        \n",
    "        \n",
    "    def gen_bigrams(self):\n",
    "        \"\"\"Count adjacent token pairs over the corpus, weighted by word frequency.\n",
    "\n",
    "        NOTE: a pair is keyed by string concatenation (not a tuple), so e.g.\n",
    "        ('a','aa') and ('aa','a') collapse to the same key 'aaa' — this matches\n",
    "        how merges are matched later.\n",
    "        \"\"\"\n",
    "        pair_counts = Counter()\n",
    "        for word, tokens in self.corpus.items():\n",
    "            weight = self.word_count[word]\n",
    "            for left, right in zip(tokens, tokens[1:]):\n",
    "                pair_counts[left + right] += weight\n",
    "        return pair_counts\n",
    "\n",
    "    \n",
    "    def merge_pair(self):\n",
    "        \"\"\"Merge the most frequent adjacent pair across the corpus; update vocab.\n",
    "\n",
    "        No-op when there are no bigrams at all, or the best pair occurs only once.\n",
    "        \"\"\"\n",
    "        bigrams = self.gen_bigrams()\n",
    "        if not bigrams:\n",
    "            # nothing to merge (empty corpus or every word is a single token);\n",
    "            # without this guard most_common(1)[0] raises IndexError\n",
    "            return\n",
    "        top_bigram, top_count = bigrams.most_common(1)[0]\n",
    "        print(f\"=> top_bigram:{top_bigram}, top_count:{top_count}\")\n",
    "        if top_count == 1:\n",
    "            return\n",
    "        for word, text in self.corpus.items():\n",
    "            merged = False\n",
    "            for i in range(len(text) - 1): \n",
    "                if (text[i] + text[i+1] == top_bigram):\n",
    "                    # merged pieces lose their counts; the new token gains below\n",
    "                    self.update_vocab(text[i], -self.word_count[word])\n",
    "                    self.update_vocab(text[i+1], -self.word_count[word])\n",
    "                    text[i] = top_bigram\n",
    "                    text[i+1] = ''\n",
    "                    merged = True\n",
    "            if merged:\n",
    "                self.corpus[word] = [token for token in text if token]\n",
    "        self.update_vocab(top_bigram, top_count)\n",
    "    \n",
    "    \n",
    "    def update_vocab(self, symbol, count):\n",
    "        \"\"\"Add `count` (possibly negative) to `symbol`'s frequency.\n",
    "\n",
    "        NOTE: entries that reach zero are deliberately kept — deleting them\n",
    "        would cut off the path to combine token words during segmentation.\n",
    "        \"\"\"\n",
    "        # .get covers both the new-symbol and existing-symbol branches\n",
    "        self.vocab[symbol] = self.vocab.get(symbol, 0) + count\n",
    "           \n",
    "           \n",
    "        \n",
    "    def train(self, text, steps=3):\n",
    "        \"\"\"Run `steps` merge iterations over `text`, then assign token ids.\n",
    "\n",
    "        Args:\n",
    "            text: training lines (see init_state).\n",
    "            steps: number of merge operations to attempt.\n",
    "        \"\"\"\n",
    "        self.init_state(text)\n",
    "        \n",
    "        for step in range(steps):\n",
    "            print(\"=\" * 12 + f\" step:{step} \" + \"=\" * 12)\n",
    "            self.merge_pair()\n",
    "            # for debug\n",
    "            # self._dump_merge()\n",
    "            \n",
    "        # Counter.most_common() == sorted(items, key=count, reverse=True)\n",
    "        print(\"==> dump final vocab <==\")\n",
    "        for token, count in self.vocab.most_common():\n",
    "            print(f\"{token} => {count}\")\n",
    "        self.gen_id_token_map()\n",
    "        \n",
    "\n",
    "    def _dump_merge(self):\n",
    "        \"\"\"Debug helper: show vocab (most frequent first) and per-word tokens.\"\"\"\n",
    "        print(\"-\" * 40)\n",
    "        print(\"==> dump vocab <==\")\n",
    "        for token, count in self.vocab.most_common():\n",
    "            print(f\"{token} => {count}\")\n",
    "        print('-' * 40)\n",
    "        print(\"==> dump corpus <==\")\n",
    "        for word, tokens in self.corpus.items():\n",
    "            print(f\"[{self.word_count[word]:3d}] * {word} => {tokens}\")\n",
    "        print(\"-\" * 40)\n",
    "\n",
    "\n",
    "    def gen_id_token_map(self):\n",
    "        \"\"\"Build id<->token maps; ids follow descending frequency, id 0 = <UNK>.\"\"\"\n",
    "        self.id_tokens[0] = self.unk_token\n",
    "        self.token_ids[self.unk_token] = 0\n",
    "        \n",
    "        for idx, (token, _) in enumerate(self.vocab.most_common(), start=1):\n",
    "            self.id_tokens[idx] = token\n",
    "            self.token_ids[token] = idx\n",
    "        \n",
    "        \n",
    "    def encode(self, text):\n",
    "        \"\"\"Segment `text` into learned tokens.\n",
    "\n",
    "        Returns:\n",
    "            (seg_txt, seg_ids): token strings and their ids (0 for unknown).\n",
    "            Empty input yields ([], []) so callers can always unpack the pair\n",
    "            (previously this returned None, breaking tuple unpacking).\n",
    "        \"\"\"\n",
    "        if not text:\n",
    "            return ([], [])\n",
    "        text = self.preprocess(text)\n",
    "        text = self.ws_token + re.sub(' ', self.ws_token, text.strip())\n",
    "        seg_txt = self.segment(text)\n",
    "        seg_ids = [self.token_ids.get(token, 0) for token in seg_txt]\n",
    "        return (seg_txt, seg_ids)\n",
    "    \n",
    "    \n",
    "    def segment(self, text):\n",
    "        \"\"\"Greedily merge characters of `text` into known vocabulary tokens.\n",
    "\n",
    "        Repeatedly picks the adjacent pair whose concatenation exists in the\n",
    "        vocabulary — preferring the highest-frequency candidate — and merges\n",
    "        every occurrence, until no adjacent pair forms a known token.\n",
    "\n",
    "        BUGFIX: the old code ranked rules by self.vocab[i] (an int index into\n",
    "        a str-keyed Counter, always 0) instead of self.vocab[token_word],\n",
    "        so rule priority was effectively arbitrary.\n",
    "        \"\"\"\n",
    "        if len(text) == 1:\n",
    "            # return a list so callers can iterate tokens uniformly\n",
    "            # (a bare str here would make encode iterate '<UNK>' char by char)\n",
    "            return [text] if text in self.vocab else [self.unk_token]\n",
    "        \n",
    "        segments = [ch for ch in text]\n",
    "        while len(segments) > 1:\n",
    "            # pick the known adjacent pair with the highest vocab count;\n",
    "            # zero-count entries stay eligible — they are stepping stones\n",
    "            # toward longer merged tokens (see NOTE in update_vocab)\n",
    "            best = None  # (count, merged token)\n",
    "            for i in range(len(segments) - 1):\n",
    "                candidate = segments[i] + segments[i+1]\n",
    "                if candidate in self.vocab:\n",
    "                    count = self.vocab[candidate]\n",
    "                    if best is None or count > best[0]:\n",
    "                        best = (count, candidate)\n",
    "            if best is None:\n",
    "                break\n",
    "            token_word = best[1]\n",
    "            # apply the chosen merge at every position it occurs\n",
    "            for i in range(len(segments) - 1):\n",
    "                if segments[i] + segments[i+1] == token_word:\n",
    "                    segments[i] = token_word\n",
    "                    segments[i+1] = ''\n",
    "            segments = [seg for seg in segments if seg]\n",
    "        return segments\n",
    "        \n",
    "        \n",
    "    def decode(self, ids):\n",
    "        \"\"\"Reverse of encode: ids -> tokens -> text, ws markers become spaces.\"\"\"\n",
    "        tokens = (self.id_tokens[idx] for idx in ids)\n",
    "        return ''.join(tokens).replace(self.ws_token, ' ')\n",
    "        \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "08e3bb27-4acf-4179-9eca-7bb3016a8523",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "============ dump initial state ============\n",
      "==> dump corpus <==\n",
      "▁这是OpenAI => ['▁', '这', '是', 'O', 'p', 'e', 'n', 'A', 'I']\n",
      "▁团队前一段时间放出来的预印版论文。 => ['▁', '团', '队', '前', '一', '段', '时', '间', '放', '出', '来', '的', '预', '印', '版', '论', '文', '。']\n",
      "▁他们的目标是学习一个通用的表示，能够在大量任务上进行应用。 => ['▁', '他', '们', '的', '目', '标', '是', '学', '习', '一', '个', '通', '用', '的', '表', '示', '，', '能', '够', '在', '大', '量', '任', '务', '上', '进', '行', '应', '用', '。']\n",
      "▁这篇论文的亮点主要在于， => ['▁', '这', '篇', '论', '文', '的', '亮', '点', '主', '要', '在', '于', '，']\n",
      "▁他们利用了Transformer网络代替了LSTM作为语言模型来更好的捕获长距离语言结构。 => ['▁', '他', '们', '利', '用', '了', 'T', 'r', 'a', 'n', 's', 'f', 'o', 'r', 'm', 'e', 'r', '网', '络', '代', '替', '了', 'L', 'S', 'T', 'M', '作', '为', '语', '言', '模', '型', '来', '更', '好', '的', '捕', '获', '长', '距', '离', '语', '言', '结', '构', '。']\n",
      "▁然后在进行具体任务有监督微调时, => ['▁', '然', '后', '在', '进', '行', '具', '体', '任', '务', '有', '监', '督', '微', '调', '时', ',']\n",
      "▁使用了模型作为附属任务训练目标。 => ['▁', '使', '用', '了', '模', '型', '作', '为', '附', '属', '任', '务', '训', '练', '目', '标', '。']\n",
      "-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n",
      "==> dump wordcnt <==\n",
      "▁这是OpenAI => 1\n",
      "▁团队前一段时间放出来的预印版论文。 => 1\n",
      "▁他们的目标是学习一个通用的表示，能够在大量任务上进行应用。 => 1\n",
      "▁这篇论文的亮点主要在于， => 1\n",
      "▁他们利用了Transformer网络代替了LSTM作为语言模型来更好的捕获长距离语言结构。 => 1\n",
      "▁然后在进行具体任务有监督微调时, => 1\n",
      "▁使用了模型作为附属任务训练目标。 => 1\n",
      "-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n",
      "==> dump vocab <==\n",
      "▁ => 7\n",
      "这 => 2\n",
      "是 => 2\n",
      "O => 1\n",
      "p => 1\n",
      "e => 2\n",
      "n => 2\n",
      "A => 1\n",
      "I => 1\n",
      "团 => 1\n",
      "队 => 1\n",
      "前 => 1\n",
      "一 => 2\n",
      "段 => 1\n",
      "时 => 2\n",
      "间 => 1\n",
      "放 => 1\n",
      "出 => 1\n",
      "来 => 2\n",
      "的 => 5\n",
      "预 => 1\n",
      "印 => 1\n",
      "版 => 1\n",
      "论 => 2\n",
      "文 => 2\n",
      "。 => 4\n",
      "他 => 2\n",
      "们 => 2\n",
      "目 => 2\n",
      "标 => 2\n",
      "学 => 1\n",
      "习 => 1\n",
      "个 => 1\n",
      "通 => 1\n",
      "用 => 4\n",
      "表 => 1\n",
      "示 => 1\n",
      "， => 2\n",
      "能 => 1\n",
      "够 => 1\n",
      "在 => 3\n",
      "大 => 1\n",
      "量 => 1\n",
      "任 => 3\n",
      "务 => 3\n",
      "上 => 1\n",
      "进 => 2\n",
      "行 => 2\n",
      "应 => 1\n",
      "篇 => 1\n",
      "亮 => 1\n",
      "点 => 1\n",
      "主 => 1\n",
      "要 => 1\n",
      "于 => 1\n",
      "利 => 1\n",
      "了 => 3\n",
      "T => 2\n",
      "r => 3\n",
      "a => 1\n",
      "s => 1\n",
      "f => 1\n",
      "o => 1\n",
      "m => 1\n",
      "网 => 1\n",
      "络 => 1\n",
      "代 => 1\n",
      "替 => 1\n",
      "L => 1\n",
      "S => 1\n",
      "M => 1\n",
      "作 => 2\n",
      "为 => 2\n",
      "语 => 2\n",
      "言 => 2\n",
      "模 => 2\n",
      "型 => 2\n",
      "更 => 1\n",
      "好 => 1\n",
      "捕 => 1\n",
      "获 => 1\n",
      "长 => 1\n",
      "距 => 1\n",
      "离 => 1\n",
      "结 => 1\n",
      "构 => 1\n",
      "然 => 1\n",
      "后 => 1\n",
      "具 => 1\n",
      "体 => 1\n",
      "有 => 1\n",
      "监 => 1\n",
      "督 => 1\n",
      "微 => 1\n",
      "调 => 1\n",
      ", => 1\n",
      "使 => 1\n",
      "附 => 1\n",
      "属 => 1\n",
      "训 => 1\n",
      "练 => 1\n",
      "----------------------------------------\n",
      "============ step:0 ============\n",
      "=> top_bigram:任务, top_count:3\n",
      "============ step:1 ============\n",
      "=> top_bigram:▁这, top_count:2\n",
      "============ step:2 ============\n",
      "=> top_bigram:论文, top_count:2\n",
      "============ step:3 ============\n",
      "=> top_bigram:▁他, top_count:2\n",
      "============ step:4 ============\n",
      "=> top_bigram:▁他们, top_count:2\n",
      "============ step:5 ============\n",
      "=> top_bigram:目标, top_count:2\n",
      "============ step:6 ============\n",
      "=> top_bigram:进行, top_count:2\n",
      "============ step:7 ============\n",
      "=> top_bigram:用了, top_count:2\n",
      "============ step:8 ============\n",
      "=> top_bigram:作为, top_count:2\n",
      "============ step:9 ============\n",
      "=> top_bigram:语言, top_count:2\n",
      "============ step:10 ============\n",
      "=> top_bigram:模型, top_count:2\n",
      "============ step:11 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:12 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:13 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:14 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:15 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:16 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:17 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:18 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "============ step:19 ============\n",
      "=> top_bigram:▁这是, top_count:1\n",
      "==> dump final vocab <==\n",
      "的 => 5\n",
      "。 => 4\n",
      "▁ => 3\n",
      "在 => 3\n",
      "r => 3\n",
      "任务 => 3\n",
      "是 => 2\n",
      "e => 2\n",
      "n => 2\n",
      "一 => 2\n",
      "时 => 2\n",
      "来 => 2\n",
      "用 => 2\n",
      "， => 2\n",
      "T => 2\n",
      "▁这 => 2\n",
      "论文 => 2\n",
      "▁他们 => 2\n",
      "目标 => 2\n",
      "进行 => 2\n",
      "用了 => 2\n",
      "作为 => 2\n",
      "语言 => 2\n",
      "模型 => 2\n",
      "O => 1\n",
      "p => 1\n",
      "A => 1\n",
      "I => 1\n",
      "团 => 1\n",
      "队 => 1\n",
      "前 => 1\n",
      "段 => 1\n",
      "间 => 1\n",
      "放 => 1\n",
      "出 => 1\n",
      "预 => 1\n",
      "印 => 1\n",
      "版 => 1\n",
      "学 => 1\n",
      "习 => 1\n",
      "个 => 1\n",
      "通 => 1\n",
      "表 => 1\n",
      "示 => 1\n",
      "能 => 1\n",
      "够 => 1\n",
      "大 => 1\n",
      "量 => 1\n",
      "上 => 1\n",
      "应 => 1\n",
      "篇 => 1\n",
      "亮 => 1\n",
      "点 => 1\n",
      "主 => 1\n",
      "要 => 1\n",
      "于 => 1\n",
      "利 => 1\n",
      "了 => 1\n",
      "a => 1\n",
      "s => 1\n",
      "f => 1\n",
      "o => 1\n",
      "m => 1\n",
      "网 => 1\n",
      "络 => 1\n",
      "代 => 1\n",
      "替 => 1\n",
      "L => 1\n",
      "S => 1\n",
      "M => 1\n",
      "更 => 1\n",
      "好 => 1\n",
      "捕 => 1\n",
      "获 => 1\n",
      "长 => 1\n",
      "距 => 1\n",
      "离 => 1\n",
      "结 => 1\n",
      "构 => 1\n",
      "然 => 1\n",
      "后 => 1\n",
      "具 => 1\n",
      "体 => 1\n",
      "有 => 1\n",
      "监 => 1\n",
      "督 => 1\n",
      "微 => 1\n",
      "调 => 1\n",
      ", => 1\n",
      "使 => 1\n",
      "附 => 1\n",
      "属 => 1\n",
      "训 => 1\n",
      "练 => 1\n",
      "这 => 0\n",
      "论 => 0\n",
      "文 => 0\n",
      "他 => 0\n",
      "们 => 0\n",
      "目 => 0\n",
      "标 => 0\n",
      "任 => 0\n",
      "务 => 0\n",
      "进 => 0\n",
      "行 => 0\n",
      "作 => 0\n",
      "为 => 0\n",
      "语 => 0\n",
      "言 => 0\n",
      "模 => 0\n",
      "型 => 0\n",
      "▁他 => 0\n"
     ]
    }
   ],
   "source": [
    "# Train a BPE tokenizer on a small Chinese/English corpus, 20 merge steps.\n",
    "bpe = BytePairEncoder()\n",
    "corpus = [\n",
    "    # \"Alice is running faster than Bob\",\n",
    "    # \"Bob run slower than Alice\",\n",
    "    # \"FloydHub is the fastest way to build, train and deploy deep learning models. Build deep learning models in the cloud. Train deep learning models.\"\n",
    "    # \"old \" * 7 + \"older \" * 3  + \"finest \" * 9 + \"lowest \" * 4\n",
    "    # \"hug \" * 10 + \"pug \" * 5 + \"pun \" * 12 + \"bun \" * 4 + \"hugs \" * 5\n",
    "    \"这是OpenAI 团队前一段时间放出来的预印版论文。 他们的目标是学习一个通用的表示，能够在大量任务上进行应用。\",\n",
    "    \"这篇论文的亮点主要在于， 他们利用了Transformer网络代替了LSTM作为语言模型来更好的捕获长距离语言结构。\",\n",
    "    \"然后在进行具体任务有监督微调时, 使用了模型作为附属任务训练目标。\"\n",
    "]\n",
    "bpe.train(corpus, 20)\n",
    "# bpe.init_state('\\n'.join(corpus))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e3f71ed8-12a0-4fe1-9116-86cea4e217c2",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1;31;48m▁他们 \u001b[0m\u001b[1;32;48m论文 \u001b[0m\u001b[1;33;48m的 \u001b[0m\u001b[1;34;48m亮 \u001b[0m\u001b[1;35;48m点 \u001b[0m\u001b[1;36;48m是 \u001b[0m\u001b[1;31;48m用 \u001b[0m\u001b[1;32;48m语言 \u001b[0m\u001b[1;33;48m模型 \u001b[0m\u001b[1;34;48m完 \u001b[0m\u001b[1;35;48m成 \u001b[0m\u001b[1;36;48m对 \u001b[0m\u001b[1;31;48m应 \u001b[0m\u001b[1;32;48m的 \u001b[0m\u001b[1;33;48m目标 \u001b[0m\u001b[1;34;48m任务 \u001b[0m\n",
      "\n",
      " <UNK>论文的亮点是用模型<UNK><UNK><UNK>应的目标任务\n"
     ]
    }
   ],
   "source": [
    "# Encode a held-out sentence, show the segmentation in color, then round-trip\n",
    "# decode the ids (unknown tokens come back as <UNK>).\n",
    "printer = RainbowPrinter()\n",
    "# segments, seg_ids = bpe.encode(\"huggpnun  what ugg is haasnb\")\n",
    "seg_txt, seg_ids = bpe.encode(\"他们论文的亮点是用语言模型完成对应的目标任务\")\n",
    "printer.print_words(seg_txt)\n",
    "print(bpe.decode(seg_ids))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8d67db3-47ea-4693-b790-6e058e1a7b09",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# NOTE(review): passing a single joined string makes init_state iterate\n",
    "# characters, not lines — likely intended: bpe.train(corpus). This also\n",
    "# re-trains an already-trained instance, accumulating its state; verify.\n",
    "bpe.train('\\n'.join(corpus))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f3c2b0f7-6f74-4f7b-a287-5b1d1b8ecd94",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Manually apply one more merge step to the current encoder state.\n",
    "bpe.merge_pair()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
