"""
算法核心步骤
- 1. 初始化词汇表：将语料分解为字符（或最小单元）
- 2. 统计字符对频率：遍历语料，统计所有相邻字符对的出现次数
- 3. 合并高频字符对：选择频率最高的字符对合并为新单元
- 4. 更新词汇表与语料表示：用新单元替换原字符对，重复步骤 2-3
- 5. 终止条件：达到预设词汇表大小或字符对频率为 0
"""
from collections import Counter


def merge(indices: list[int], pair: tuple[int, int], new_index: int) -> list[int]:
    """
    Replace every occurrence of ``pair`` in ``indices`` with ``new_index``,
    scanning greedily left to right (matched elements are consumed, so
    overlapping occurrences are not double-merged).

    :param indices: sequence of token indices
    :param pair: adjacent index pair to merge
    :param new_index: index that replaces each matched pair
    :return: a new list with all occurrences of ``pair`` merged
    """
    new_indices: list[int] = []
    first, second = pair
    i = 0
    n = len(indices)
    # Index walk instead of pairwise zip: handles the empty list safely
    # (the previous zip-based version appended indices[-1] unconditionally
    # and raised IndexError on empty input).
    while i < n:
        if i + 1 < n and indices[i] == first and indices[i + 1] == second:
            new_indices.append(new_index)
            i += 2  # consume both members of the matched pair
        else:
            new_indices.append(indices[i])
            i += 1
    return new_indices


def index_to_token(index: int, merges: dict[int, tuple[int, int]]) -> str:
    """
    Expand a token index into a printable string.

    A merged index is expanded recursively via ``merges`` (new index ->
    the pair it was built from); a base index is rendered with ``chr``.
    NOTE(review): ``chr`` maps raw byte values straight to code points,
    so multi-byte UTF-8 sequences are not decoded back to the original
    characters — this appears intended for debug output only; confirm.
    """
    children = merges.get(index)
    if children is None:
        return chr(index)
    left, right = children
    return index_to_token(left, merges) + index_to_token(right, merges)


def train_bpe(text: str, num_merges: int) -> dict[int, bytes]:
    """
    Train a byte-level BPE model.

    :param text: training text
    :param num_merges: maximum number of merge operations to perform
    :return: vocabulary mapping index -> byte sequence
             (the 256 base bytes plus any newly merged tokens)
    """
    print(text)
    # Initialize the vocabulary:
    # encode the text as UTF-8 and work on the raw byte values.
    indices = list(map(int, text.encode("utf8")))
    # Merge rules learned so far: (index pair) -> new index.
    merges: dict[tuple[int, int], int] = {}
    # Vocabulary: index -> corresponding byte sequence.
    vocab: dict[int, bytes] = {x: bytes([x]) for x in range(256)}
    print(len(indices), indices)

    # Perform up to num_merges merge operations.
    for i in range(num_merges):
        print("="*50)
        # Count the frequency of every adjacent byte-index pair.
        counts = Counter(zip(indices[:-1], indices[1:]))
        # Fewer than two indices leaves nothing to pair up; without this
        # guard max() would raise ValueError on an empty Counter.
        if not counts:
            break
        # Most frequent pair; ties resolve to the first one encountered.
        pair = max(counts, key=counts.get)  # type: ignore
        print(pair, len(counts), counts[pair], counts)
        # Stop once the best pair occurs only once: merging gains nothing.
        if counts[pair] < 2:
            break
        # Reverse mapping (new index -> original pair), for debug output only.
        merges_reverse = {v: k for k, v in merges.items()}
        # Show which pair is merged and what token it forms.
        print(i, pair, "===>", index_to_token(pair[0], merges_reverse) + index_to_token(pair[1], merges_reverse))
        index1, index2 = pair
        # New token indices start at 256, just past the base byte range.
        new_index = 256 + i
        # Record the merge rule.
        merges[pair] = new_index
        # The new token's bytes are the concatenation of its two parts.
        vocab[new_index] = vocab[index1] + vocab[index2]

        # Apply the merge to the working index sequence.
        indices = merge(indices, pair, new_index)
        print(len(indices), indices)

    print(merges)
    print(len(indices), indices)

    # Render the final segmentation for inspection.
    merges_reverse = {v: k for k, v in merges.items()}
    # Convert each remaining index to a readable token representation.
    tokens = [index_to_token(index, merges_reverse) for index in indices]
    print(tokens)
    return vocab


if __name__ == "__main__":
    num_merges = 100
    print("="*100)
    text = "low, lower, newest, widest"
    vocab = train_bpe(text, num_merges)
    print({k: v.decode("utf8") for k, v in vocab.items() if k >= 256})

    print("="*100)
    text = "hello world, 你好"
    vocab = train_bpe(text, num_merges)
    print({k: v.decode("utf8") for k, v in vocab.items() if k >= 256})
