
import os
import json
import datetime
from tqdm import tqdm

class Tokenizer:
    """Frequency-based n-gram tokenizer backed by a count trie.

    Reads a training corpus — a JSON file holding ``[{'text': '...'}, ...]``
    with whitespace-separated tokens — counts every n-gram (length 1..n_gram)
    in a trie, then keeps the n-grams that occur often enough as vocabulary
    entries.

    Args:
        train_path: Path to the JSON training file: [{'text': '...'}, ...].
        n_gram: Maximum number of tokens in an extracted n-gram.
        min_count: A node's children are only considered for extension when
            the node itself was seen more than this many times.
        min_ratio: A child extends the n-gram only when it accounts for more
            than this fraction of its parent's occurrences.
    """
    def __init__(self, train_path: str, n_gram: int = 4,
                 min_count: int = 50, min_ratio: float = 0.3) -> None:
        self.count_key = 'COUNT'  # reserved dict key storing a node's frequency
        self.trie_tree: dict = {}
        self.n_gram = n_gram
        self.min_count = min_count
        self.min_ratio = min_ratio
        self.build_tree(train_path)
        self.vocab = self.extract_legal_tokens()            # token -> id
        self.vocab2 = {v: k for k, v in self.vocab.items()}  # id -> token

    def _add_token_ids_to_tree(self, tokens: list) -> None:
        """Insert one token sequence into the trie, bumping the count of
        every node along the path."""
        tree_node = self.trie_tree
        for token in tokens:
            if tree_node.get(token) is None:
                tree_node[token] = {self.count_key: 0}
            tree_node[token][self.count_key] += 1
            tree_node = tree_node[token]

    def build_tree(self, file_path: str) -> None:
        """Populate the trie with every n-gram (length 1..n_gram) of every
        text record in the JSON training file."""
        print('building tree...')
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        for item in tqdm(data):
            # no-arg split() drops the empty tokens that split(' ') yields
            # on consecutive spaces; those previously polluted the trie and
            # surfaced as a '' vocabulary entry.
            tokens = item['text'].split()
            for i in range(len(tokens)):
                self._add_token_ids_to_tree(tokens[i:i + self.n_gram])
        print('done')

    def extract_legal_tokens(self) -> dict:
        """Walk the trie and collect the frequent n-grams as the vocabulary.

        Returns:
            dict mapping each legal token string to a consecutive integer id.
        """
        print('extracting legal tokens...')
        vocab: dict = {}
        for token in tqdm(self.trie_tree):
            for candidate in self._search_token(token, self.trie_tree):
                candidate = candidate.strip()  # drop the trailing join space
                if candidate not in vocab:
                    vocab[candidate] = len(vocab)
        print('done')
        return vocab

    def _search_token(self, token, trie_tree) -> list:
        """Recursively extend `token` downward through `trie_tree`.

        A child extends the n-gram when the parent is frequent enough
        (count > min_count) and the child covers more than min_ratio of
        the parent's occurrences; otherwise the n-gram ends here.

        Returns:
            List of space-joined n-gram strings, each carrying a trailing
            space (the caller strips it).
        """
        assert token in trie_tree
        token_num = trie_tree[token][self.count_key]
        extensions: list = []
        # Cheap guard first: it is child-independent, so hoisting it out of
        # the loop also avoids computing the ratio for every child.
        if token_num > self.min_count:
            for next_token in trie_tree[token]:
                if next_token == self.count_key:
                    continue
                child_count = trie_tree[token][next_token][self.count_key]
                if child_count / token_num > self.min_ratio:
                    extensions += self._search_token(next_token, trie_tree[token])
        if not extensions:
            extensions.append('')  # terminate the n-gram at this node
        return [token + ' ' + ext for ext in extensions]

    @classmethod
    def from_pretrained(cls, pretrain_path):
        """Alternate constructor from a training-corpus path.

        Bug fix: uses `cls` instead of the hard-coded `Tokenizer`, so
        subclasses construct instances of themselves.
        """
        return cls(pretrain_path)

    @property
    def vocab_size(self) -> int:
        """Number of entries in the vocabulary."""
        return len(self.vocab)

