from abc import abstractmethod
from tqdm import tqdm
from nltk import word_tokenize, TreebankWordDetokenizer
from config import PROCESSED_DATA_DIR, SEQ_LEN
import nltk


# First-run setup: uncomment to download the NLTK 'punkt_tab' tokenizer models
# required by word_tokenize.
# nltk.download('punkt_tab')


class BaseTokenizer:
    """Vocabulary-backed tokenizer base class.

    Maps sentences to fixed-length lists of token indices and back.
    Subclasses must implement ``tokenize`` (sentence -> token list) and
    ``decode`` (index list -> sentence).
    """

    # Special tokens. ``build_vocab`` always places them first, so their
    # indices are stable across vocabularies: pad=0, unk=1, sos=2, eos=3.
    unk_token = '<unk>'
    pad_token = '<pad>'
    sos_token = '<sos>'
    eos_token = '<eos>'

    def __init__(self, vocab_list):
        """Build word<->index lookup tables from an ordered vocab list.

        The list must contain all four special tokens (KeyError otherwise).
        """
        self.vocab_list = vocab_list
        self.vocab_size = len(vocab_list)

        self.word2index = {word: index for index, word in enumerate(vocab_list)}
        self.index2word = {index: word for index, word in enumerate(vocab_list)}

        self.unk_token_index = self.word2index[self.unk_token]
        self.pad_token_index = self.word2index[self.pad_token]
        self.sos_token_index = self.word2index[self.sos_token]
        self.eos_token_index = self.word2index[self.eos_token]

    @staticmethod
    @abstractmethod
    def tokenize(sentence):
        """Split a sentence into a list of tokens (subclass responsibility)."""
        # Fail fast instead of silently returning None (the original ``pass``
        # made ``encode`` crash later with a confusing TypeError).
        raise NotImplementedError

    def encode(self, sentence, seq_len, add_sos_eos=False):
        """Convert a sentence into exactly ``seq_len`` token indices.

        Unknown words map to <unk>; the sequence is truncated to fit and
        right-padded with <pad>. With ``add_sos_eos=True`` the (possibly
        truncated) tokens are wrapped in <sos>/<eos>.
        """
        word_list = self.tokenize(sentence)
        index_list = [self.word2index.get(word, self.unk_token_index) for word in word_list]

        if add_sos_eos:
            # Reserve two slots for the <sos>/<eos> markers.
            index_list = index_list[:seq_len - 2]
            index_list = [self.sos_token_index] + index_list + [self.eos_token_index]
        else:
            index_list = index_list[:seq_len]
        # Right-pad up to the fixed sequence length.
        if len(index_list) < seq_len:
            index_list = index_list + [self.pad_token_index] * (seq_len - len(index_list))

        return index_list

    @abstractmethod
    def decode(self, index_list):
        """Turn a list of token indices back into a sentence (subclass responsibility)."""
        raise NotImplementedError

    @classmethod
    def build_vocab(cls, sentences, vocab_file):
        """Build the vocabulary from (training) sentences and save it to ``vocab_file``.

        The regular tokens are sorted so that repeated runs over the same
        corpus produce byte-identical vocab files — plain ``set`` iteration
        order varies between interpreter runs due to hash randomization.
        """
        vocab_set = set()
        for sentence in tqdm(sentences, desc='vocab_set'):
            vocab_set.update(cls.tokenize(sentence))
        # Special tokens first (fixed indices 0-3), then the sorted corpus tokens.
        vocab_list = [cls.pad_token, cls.unk_token, cls.sos_token, cls.eos_token] + sorted(vocab_set)
        print(len(vocab_list))  # zh:2749  en:7315

        # Persist one token per line.
        with open(vocab_file, 'w', encoding='utf-8') as f:
            for word in vocab_list:
                f.write(word + '\n')

    @classmethod
    def from_vocab(cls, vocab_file):
        """Load a saved vocab file and return a tokenizer instance.

        Only the trailing newline is removed (not a full ``strip``) so that
        whitespace tokens such as ' ' — which the character-level Chinese
        tokenizer produces — survive the save/load round trip.
        """
        with open(vocab_file, 'r', encoding='utf-8') as f:
            vocab_list = [line.rstrip('\n') for line in f]
        return cls(vocab_list)


class ChineseTokenizer(BaseTokenizer):
    """Character-level tokenizer for Chinese text."""

    @staticmethod
    def tokenize(sentence):
        # Chinese is segmented per character: each char is one token.
        return [char for char in sentence]

    def decode(self, index_list):
        # Map every index back to its character and join without separators.
        return ''.join(self.index2word[i] for i in index_list)


class EnglishTokenizer(BaseTokenizer):
    """Word-level tokenizer for English text backed by NLTK."""

    @staticmethod
    def tokenize(sentence):
        # Delegate splitting to NLTK's Treebank-style word tokenizer.
        return word_tokenize(sentence)

    def decode(self, index_list):
        # Rebuild a readable sentence; the detokenizer restores natural
        # spacing around punctuation and contractions.
        words = [self.index2word[i] for i in index_list]
        return TreebankWordDetokenizer().detokenize(words)


if __name__ == '__main__':
    # Smoke test: character-level Chinese tokenization and encoding.
    zh_sample = '我喜欢乘坐地铁。'
    print(ChineseTokenizer.tokenize(zh_sample))
    zh_tokenizer = ChineseTokenizer.from_vocab(PROCESSED_DATA_DIR / 'zh_vocab.txt')
    print(zh_tokenizer.encode(zh_sample, SEQ_LEN, add_sos_eos=False))

    # Smoke test: NLTK word tokenization and Treebank detokenization.
    print(EnglishTokenizer.tokenize("I'm happy."))
    print(TreebankWordDetokenizer().detokenize(['I', "'m", 'happy', '.']))
