from abc import abstractmethod

from nltk.tokenize import word_tokenize,TreebankWordDetokenizer

from tqdm import tqdm


class BaseTokenizer:
    """Base class for vocabulary-backed tokenizers.

    Holds the word<->id lookup tables and fixed-length encoding logic.
    Subclasses must implement the static ``tokenize`` method and ``decode``.
    The vocabulary must contain the four special tokens declared below.
    """

    unk_token = '<unk>'
    pad_token = '<pad>'
    sos_token = '<sos>'
    eos_token = '<eos>'

    def __init__(self, vocab_list):
        """Build lookup tables from an ordered vocabulary list.

        :param vocab_list: ordered list of words; index position is the id.
        :raises KeyError: if any special token is missing from ``vocab_list``.
        """
        self.vocab_list = vocab_list
        self.vocab_size = len(vocab_list)

        self.word2index = {word: index for index, word in enumerate(vocab_list)}
        self.index2word = {index: word for index, word in enumerate(vocab_list)}

        self.pad_token_id = self.word2index[self.pad_token]
        self.unk_token_id = self.word2index[self.unk_token]
        self.sos_token_id = self.word2index[self.sos_token]
        self.eos_token_id = self.word2index[self.eos_token]

    @staticmethod
    @abstractmethod
    def tokenize(text):
        """Split raw text into a list of tokens (subclass responsibility)."""
        pass

    def encode(self, text, seq_len, add_sos_eos=False):
        """Tokenize ``text`` and convert it to exactly ``seq_len`` word ids.

        Sequences are padded/truncated to a fixed length so samples can be
        batched; for single predictions alignment is not strictly required.
        Unknown words map to the <unk> id.

        :param text: raw input text.
        :param seq_len: target sequence length of the returned id list.
        :param add_sos_eos: if True, wrap the tokens as ``<sos> ... <eos>``
            before padding/truncating.
        :return: list of ``seq_len`` integer word ids.
        """
        tokens = self.tokenize(text)
        if add_sos_eos:
            # BUGFIX: the original prepended <pad> instead of <sos> in the
            # pad/truncate branches, inconsistent with the exact-fit branch.
            if len(tokens) + 2 <= seq_len:
                # <sos> tokens <eos>, then right-pad up to seq_len
                tokens = ([self.sos_token] + tokens + [self.eos_token]
                          + [self.pad_token] * (seq_len - len(tokens) - 2))
            else:
                # too long: truncate so <sos>/<eos> still fit
                tokens = [self.sos_token] + tokens[:seq_len - 2] + [self.eos_token]
        else:
            if len(tokens) > seq_len:
                tokens = tokens[:seq_len]
            elif len(tokens) < seq_len:
                tokens = tokens + [self.pad_token] * (seq_len - len(tokens))
        return [self.word2index.get(word, self.unk_token_id) for word in tokens]

    @abstractmethod
    def decode(self, word_ids):
        """Convert a list of word ids back into text (subclass responsibility)."""
        pass

    # Build and persist a vocabulary file (one word per line).
    @classmethod
    def build_vocab(cls, sentences, vocab_file):
        """Collect the vocabulary of ``sentences`` and write it to ``vocab_file``.

        The four special tokens occupy the first slots; the remaining words
        are sorted so the generated file is deterministic across runs
        (plain ``list(set)`` order is hash-randomized).
        """
        vocab_set = set()
        for sentence in tqdm(sentences, desc="构建词表"):
            for word in cls.tokenize(sentence):
                if word.strip() != '':
                    vocab_set.add(word)
        word_list = [cls.pad_token, cls.unk_token, cls.sos_token, cls.eos_token] + sorted(vocab_set)

        with open(vocab_file, "w", encoding="utf-8") as f:
            for word in word_list:
                f.write(word + "\n")
        print("词表保存完成")

    @classmethod
    def from_vocab(cls, vocab_file):
        """Load a tokenizer from a vocabulary file written by ``build_vocab``.

        :return: a new instance of ``cls`` built from the file's word list.
        """
        with open(vocab_file, "r", encoding="utf-8") as f:
            # rstrip("\n") instead of line[:-1]: the latter eats the last
            # character of the final word when the file has no trailing newline
            vocab_list = [line.rstrip("\n") for line in f]
        print("词表加载完成")
        return cls(vocab_list)


class ChineseTokenizer(BaseTokenizer):
    """Character-level tokenizer for Chinese text."""

    def decode(self, word_ids):
        """Map each id back to its character and join them into a string.

        BUGFIX: the original joined ``word_ids`` (a list of ints) instead of
        the looked-up ``word_list``, raising TypeError on any non-empty input.
        """
        word_list = [self.index2word[word_id] for word_id in word_ids]
        return "".join(word_list)

    @staticmethod
    def tokenize(text):
        """Split text into a list of individual characters."""
        return list(text)


class EnglishTokenizer(BaseTokenizer):
    """Word-level tokenizer for English text backed by NLTK."""

    def decode(self, word_ids):
        """Look up each id's word and detokenize them into a sentence."""
        words = [self.index2word[wid] for wid in word_ids]
        detokenizer = TreebankWordDetokenizer()
        return detokenizer.detokenize(words)

    @staticmethod
    def tokenize(text):
        """Split text into tokens using NLTK's word tokenizer."""
        return word_tokenize(text)

