from collections import defaultdict
import jieba
from  typing import *

class BPETokenizer:
    def __init__(self, special_tokens: Optional[list] = None) -> None:
        '''
        Initialize the tokenizer state and seed the vocabulary with the
        reserved special tokens.

        :param special_tokens: extra special tokens to register after the
            built-in ones; defaults to none.
        '''
        self.word_freqs = defaultdict(int)  # word -> corpus frequency
        self.merges = {}                    # learned BPE merge rules
        self.token_to_id = {}               # token string -> integer id
        self.id_to_token = {}               # integer id -> token string

        # Default to None (not []) to avoid the mutable-default-argument
        # pitfall, where one shared list would leak across instances.
        if special_tokens is None:
            special_tokens = []

        # Reserved tokens are registered first so their ids are stable:
        # <PAD>=0, <UNK>=1, <BOS>=2, <EOS>=3.
        special_tokens = ['<PAD>', '<UNK>', '<BOS>', '<EOS>'] + special_tokens
        for token in special_tokens:
            self._add_token(token)



    def _add_token(self, token: str) -> None:
        '''
        Register *token* in the vocabulary, assigning it the next free id.
        A token that is already present is left untouched.

        :param token: the token string to register
        :return: None
        '''
        if token in self.token_to_id:
            return
        # New tokens are appended, so the id equals the current vocab size.
        new_id = len(self.token_to_id)
        self.token_to_id[token] = new_id
        self.id_to_token[new_id] = token


    @property
    def vocab_size(self) -> int:
        """Number of tokens currently in the vocabulary."""
        return len(self.token_to_id)

    # Backward-compatible alias: the original property name contained a
    # typo ("vobcab_size"); keep it so existing callers do not break.
    vobcab_size = vocab_size


    ## 以下是训练bpe相关函数 start
    def _learn_vocab(self, corpus: list[str]) -> None:
        '''
        Accumulate word frequencies over *corpus*.

        Each sentence is lower-cased, segmented with jieba, and every
        non-empty segment increments its counter in ``self.word_freqs``.

        :param corpus: list of raw sentences
        :return: None
        '''
        for raw in corpus:
            lowered = raw.lower()
            # Segment the sentence and count every non-empty word.
            for word in jieba.cut(lowered):
                if word != "":
                    self.word_freqs[word] += 1

    def _compute_pair_freqs(self,splits) -> dict[Tuple, int]:
        '''
        统计相邻字符的共现频率
        :param splits:
        :return:
        '''
        pair_freqs = defaultdict(int)

        ## 遍历word里面的相关的子字符，统计共现频率
        for word,freq in self.word_freqs.items():
            split = splits[word]
            if len(split) == 1:
                continue

            for i in range(len(split) - 1):
                pair = (split[i], split[i + 1])
                pair_freqs[pair] += freq

        return pair_freqs








