import jieba
from collections import defaultdict
from pprint import pprint

# Corpus text used to train the toy BPE tokenizer
corpus = [
    "This is the Hugging Face Course.",
    "This chapter is about tokenization.",
    "This section shows several tokenizer algorithms.",
    "Hopefully, you will be able to understand how they are trained and generate tokens.",
]

# Normalization and pre-tokenization: lowercase each sentence, split it
# into word-level tokens with jieba, and drop bare space tokens.
for raw in corpus:
    tokens = [tok for tok in jieba.cut(raw.lower()) if tok != " "]
    print(tokens)
#['this', 'is', 'the', 'hugging', 'face', 'course', '.']
#['this', 'chapter', 'is', 'about', 'tokenization', '.']
#['this', 'section', 'shows', 'several', 'tokenizer', 'algorithms', '.']
#['hopefully', ',', 'you', 'will', 'be', 'able', 'to', 'understand', 'how', 'they', 'are', 'trained', 'and', 'generate', 'tokens', '.']


# Count how often each word appears across the whole corpus.
word_freqs = defaultdict(int)

for raw in corpus:
    for token in jieba.cut(raw.lower()):
        # Skip the bare-space tokens jieba emits between words.
        if token != " ":
            word_freqs[token] += 1

print(word_freqs)
#defaultdict(<class 'int'>, {'this': 3, 'is': 2, 'the': 1, 'hugging': 1, 'face': 1, 'course': 1, '.': 4, 'chapter': 1, 'about': 1, 'tokenization': 1, 'section': 1, 'shows': 1, 'several': 1, 'tokenizer': 1, 'algorithms':
#1, 'hopefully': 1, ',': 1, 'you': 1, 'will': 1, 'be': 1, 'able': 1, 'to': 1, 'understand': 1, 'how': 1, 'they': 1, 'are': 1, 'trained': 1, 'and': 1, 'generate': 1, 'tokens': 1})


# Initialize the minimal base vocabulary: every distinct character that
# occurs in any word, in sorted order.
vocab = sorted({ch for word in word_freqs for ch in word})
print(vocab)
#[',', '.', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z']

# Prepend the special tokens.
vocab = ["<PAD>", "<UNK>", "<BOS>", "<EOS>"] + vocab


# Initial split of every word into its individual characters.
splits = {word: list(word) for word in word_freqs}
pprint(splits)
## 'able': ['a', 'b', 'l', 'e']
## 'algorithms': ['a', 'l', 'g', 'o', 'r', 'i', 't', 'h', 'm', 's']

# Count how often each adjacent symbol pair occurs across the corpus.
def compute_pair_freqs(splits, freqs=None):
    """Count the frequency of every adjacent symbol pair.

    Args:
        splits: Mapping from word to its current list of symbols.
        freqs: Optional mapping from word to its corpus frequency.
            Defaults to the module-level ``word_freqs`` so existing
            ``compute_pair_freqs(splits)`` calls keep working.

    Returns:
        defaultdict mapping (left, right) symbol pairs to their summed
        frequency, weighted by each word's corpus count.
    """
    if freqs is None:
        # Backward-compatible fallback to the global corpus counts.
        freqs = word_freqs
    pair_freqs = defaultdict(int)
    for word, freq in freqs.items():
        # The word's current decomposition into symbols.
        split = splits[word]
        # Fewer than two symbols means there is no pair to count.
        if len(split) < 2:
            continue
        for i in range(len(split) - 1):
            # Each pair of consecutive symbols in the word,
            # weighted by the word's frequency.
            pair = (split[i], split[i + 1])
            pair_freqs[pair] += freq
    return pair_freqs

pair_freqs = compute_pair_freqs(splits)

# Show the first eleven pairs and their counts.
for pair in list(pair_freqs)[:11]:
    print(f"{pair}: {pair_freqs[pair]}")
#('t', 'h'): 6
#('h', 'i'): 3
#('i', 's'): 5
#('h', 'e'): 2



# Pick the highest-frequency pair to merge into a new symbol.
# Ties keep the first pair encountered; an empty dict yields (None, 0),
# matching a manual max-tracking loop started at max_freq = 0.
best_pair, max_freq = max(
    pair_freqs.items(), key=lambda item: item[1], default=(None, 0)
)

print(best_pair, max_freq)
# ('t', 'h') 6

# So the first merge is ('t', 'h') -> 'th'. The merged subword is added to
# the subword vocabulary, while the two original characters remain in it.
# The first learned merge rule:
merges = {("t", "h"): "th"}
# Add the new symbol to the vocabulary.
vocab.append("th")