import jieba


def tokenizer(sentence_list, ch=False, inference=False):
    """Segment each sentence with jieba and attach sequence markers.

    :param sentence_list: iterable of raw sentence strings.
    :param ch: if True, wrap each token list in both "<SOS>" and "<EOS>".
    :param inference: if True (and ch is False), prepend only "<SOS>".
    :param: default (both flags False): append only "<EOS>".
    :return: list of token lists; [["<SOS>"]] when sentence_list is empty.
    """
    # Empty input yields a single start-of-sequence token list.
    if not sentence_list:
        return [["<SOS>"]]

    token = []
    for sentence in sentence_list:
        # jieba.cut yields segments; drop bare-space tokens it may emit.
        words = [w for w in jieba.cut(sentence) if w != " "]
        if ch:
            token.append(["<SOS>"] + words + ["<EOS>"])
        elif inference:
            token.append(["<SOS>"] + words)
        else:
            token.append(words + ["<EOS>"])
    return token


class Vocab:
    """Bidirectional word <-> index vocabulary.

    Index 0..2 are always the special tags "<PAD>", "<SOS>", "<EOS>"
    (in that order), followed by the sorted corpus vocabulary.
    """

    def __init__(self, data, init=True):
        """Build (or adopt) a vocabulary.

        :param data: when init is True, a 2-D list of tokenized paragraphs;
            when init is False, an already-built vocabulary list.
        :param init: whether to construct the vocabulary from `data`.
        """
        if init:
            tags = ["<EOS>", "<SOS>", "<PAD>"]
            # Collect every distinct word across all paragraphs.
            vocabulary = set()
            for paragraph in data:
                vocabulary.update(paragraph)

            # Sort for a deterministic ordering: set iteration order varies
            # across runs (hash randomization), so indices would not
            # reproduce without this.
            vocabulary = sorted(vocabulary)

            # Move/insert each tag to the front; final order is
            # <PAD>, <SOS>, <EOS>, then the sorted words.
            for t in tags:
                if t in vocabulary:
                    vocabulary.remove(t)
                vocabulary.insert(0, t)
            self.vocab = vocabulary
        else:
            self.vocab = data
        # Precomputed reverse lookup: list.index is O(n) per call.
        self._word_to_index = {w: i for i, w in enumerate(self.vocab)}

    def __len__(self):
        """Return the vocabulary size."""
        return len(self.vocab)

    def word2idx(self, word):
        """Return the index of `word`; raise ValueError if out of vocabulary."""
        try:
            return self._word_to_index[word]
        except KeyError:
            # Keep the exception type of the original list.index lookup.
            raise ValueError(f"{word!r} is not in list") from None

    def idx2word(self, index):
        """Return the word at `index`.

        Accepts a plain int or any object convertible via int() — e.g. a
        one-element torch tensor, which the original `.int()` call assumed.
        """
        return self.vocab[int(index)]


def mapping(sentence_o, vocab, reverse=False):
    """Convert a sentence between word and index representations.

    :param sentence_o: sequence of words (or of indices when reverse=True).
    :param vocab: Vocab instance providing word2idx / idx2word.
    :param reverse: False => word2idx; True => idx2word.
    :return: new list with every element converted.
    """
    # Pick the converter once, then map it over the whole sentence.
    convert = vocab.idx2word if reverse else vocab.word2idx
    return [convert(item) for item in sentence_o]
