import collections
from collections import defaultdict
from pprint import pprint

# The raw training corpus.
corpus = [
    "This is the Hugging Face Course",
    "This chapter is about tokenization",
    "This section shows several tokenizer algorithms",
    "Hopefully, you will be able to understand how they are trained and generate tokens",
]

# Lowercase and whitespace-split each sentence, flatten into a single
# word stream, then keep the distinct words in first-seen order.
tokens = [sentence.lower().split() for sentence in corpus]
data = [word for sentence_words in tokens for word in sentence_words]
keys = list(collections.Counter(data))

# Split every distinct word into its characters plus an explicit
# end-of-word marker — the initial symbol sequence for BPE merging.
# NOTE: the original used defaultdict() with no default_factory, which
# behaves exactly like a plain dict (raises KeyError); a dict
# comprehension states the intent directly.
words = {k: list(k) + ['</w>'] for k in keys}

# Iteratively apply BPE merges: repeatedly find the most frequent
# adjacent symbol pair across all words and fuse it into one symbol.
for _merge_step in range(40):  # fixed budget of merge operations
    # Count the frequency of every adjacent symbol pair.
    pair_freq = defaultdict(int)
    for symbols in words.values():
        # zip over the sequence and its one-step shift yields all
        # adjacent pairs; words of length <= 1 contribute nothing.
        for left, right in zip(symbols, symbols[1:]):
            pair_freq[(left, right)] += 1

    if not pair_freq:
        # Every word has collapsed to a single symbol — stop early
        # instead of letting max() raise ValueError on an empty dict.
        break

    best_pair = max(pair_freq, key=pair_freq.get)
    merged_symbol = ''.join(best_pair)

    # Rebuild each word left-to-right, merging EVERY occurrence of the
    # best pair. (The original sliced the stale pre-merge list on a
    # second match, clobbering earlier merges within the same word.)
    for key, symbols in words.items():
        rebuilt = []
        i = 0
        while i < len(symbols):
            if (i + 1 < len(symbols)
                    and symbols[i] == best_pair[0]
                    and symbols[i + 1] == best_pair[1]):
                rebuilt.append(merged_symbol)
                i += 2  # consume both members of the merged pair
            else:
                rebuilt.append(symbols[i])
                i += 1
        words[key] = rebuilt

# Build the vocabulary once, after all merges: every distinct symbol,
# in first-seen order. (The original rebuilt this on every iteration,
# discarding all but the final result.)
vocab = []
for symbols in words.values():
    for symbol in symbols:
        if symbol not in vocab:
            vocab.append(symbol)

print(vocab)
