


class Tokenizer:
    """Simple vocabulary-based tokenizer.

    Supports two modes:
      - token_level='char': every character is a token (any language)
      - token_level='word' with lang='en': whitespace-split words
    """

    def __init__(self, token_level, lang):
        self.word2idx = {}  # token -> integer id
        self.idx2word = {}  # integer id -> token
        self.token_level = token_level  # 'char' or 'word'
        self.lang = lang  # e.g. 'en' or 'zh'

    def cut(self, sent):
        """Split *sent* into a list of tokens.

        Raises ValueError for an unsupported token_level/lang combination
        (the original returned None here, which only surfaced later as an
        opaque TypeError when the caller iterated the result).
        """
        if self.token_level == 'char':
            return list(sent)
        if self.lang == 'en':
            return sent.split()
        raise ValueError(
            f"unsupported tokenizer config: "
            f"token_level={self.token_level!r}, lang={self.lang!r}"
        )

    def build_vocab(self, sentences):
        """Assign consecutive ids to every new token seen in *sentences*.

        Ids continue from the current vocabulary size, so pre-seeding
        word2idx (e.g. with a '<pad>' entry) is preserved.
        """
        for sent in sentences:
            for word in self.cut(sent):
                if word not in self.word2idx:
                    self.word2idx[word] = len(self.word2idx)
        self.idx2word = {idx: word for word, idx in self.word2idx.items()}

    def __len__(self):
        """Vocabulary size."""
        return len(self.word2idx)

    def encode(self, text):
        """Map *text* to a list of token ids.

        Raises KeyError for tokens absent from the vocabulary.
        """
        return [self.word2idx[word] for word in self.cut(text)]

    def decode(self, idxs):
        """Map a list of ids back to text.

        Char-level (or Chinese) tokens are concatenated directly;
        English word-level tokens are joined with spaces.
        """
        word_list = [self.idx2word[idx] for idx in idxs]
        if self.lang == 'zh' or self.token_level == 'char':
            return ''.join(word_list)
        return ' '.join(word_list)

    @staticmethod
    def str_to_list(s):
        """Parse a string like '[1,2,3]' into a list of ints.

        Tolerates whitespace around the numbers and returns [] for '[]'
        (the original raised ValueError on both).
        """
        inner = s.strip().strip('[]').strip()
        if not inner:
            return []
        return [int(tok) for tok in inner.split(',')]


import pandas as pd
# Load the tab-separated name/nationality dataset; the file has no header row.
# NOTE(review): 'classfication' looks misspelled — confirm it matches the actual
# file name on disk before correcting.
df = pd.read_csv('data/name_classfication.txt',sep='\t',header=None)
# Column 0 is the person name, column 1 the country/category label.
df.columns = ['name','label']

# 2. Build the name tokenizer
def get_name_tk():
    """Return a char-level tokenizer fitted on the df['name'] column."""
    # Names are tokenized character by character.
    tk = Tokenizer(token_level='char', lang='en')
    # Reserve id 0 for the padding symbol before fitting the vocabulary.
    tk.word2idx = {'<pad>': 0}
    tk.build_vocab(df['name'].tolist())
    return tk

# 3. Build the country-category tokenizer
def get_cate_tk():
    """Return a word-level tokenizer fitted on the df['label'] column."""
    tokenizer = Tokenizer(token_level='word', lang='en')
    # .tolist() turns the DataFrame column into a plain Python list.
    tokenizer.build_vocab(df['label'].tolist())
    return tokenizer

if __name__ == "__main__":
    name_tk = get_name_tk()
    print(name_tk.word2idx)
    print(name_tk.idx2word)
    #
    cate_tk = get_cate_tk()
    print(cate_tk.word2idx)
    print(cate_tk.idx2word)

    l = Tokenizer.str_to_list('[1,2,3,4]')
    print(l)
    print(type(l))

