from tqdm import tqdm


class Word2Sequence:
    """Maps words to integer ids and back, building the vocabulary from
    word frequencies accumulated via fit().

    Two reserved tokens: UNK_TAG stands in for words missing from the
    vocabulary at transform time; PAD_TAG pads short sentences up to a
    fixed length.
    """
    UNK_TAG = "UNK"
    PAD_TAG = "PAD"

    UNK = 0
    PAD = 1

    def __init__(self):
        # id -> word reverse mapping, filled in by build_vocab()
        self.inverse_dict = {}
        # word -> frequency, accumulated by fit()
        self.count = {}
        # word -> id; the reserved tokens get the fixed ids 0 and 1
        self.dict = {
            self.UNK_TAG: self.UNK,
            self.PAD_TAG: self.PAD
        }

    def fit(self, sentence):
        """Accumulate word frequencies from one tokenized sentence
        (an iterable of word strings). Call repeatedly, once per sentence."""
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1

    def build_vocab(self, min=5, max=None, max_features=None):
        """Build the word->id dictionary from the accumulated counts.

        :param min: keep words with frequency >= min (None keeps all)
        :param max: keep words with frequency <= max (None: no upper bound)
        :param max_features: keep at most this many of the most frequent words
        """
        # Drop words rarer than min. Bug fix: the original used
        # `value > min`, which also dropped words whose frequency is
        # exactly min, contradicting the stated "minimum frequency".
        if min is not None:
            self.count = {word: value for word, value in self.count.items() if value >= min}
        # Drop words more frequent than max (inclusive bound, matching min).
        if max is not None:
            self.count = {word: value for word, value in self.count.items() if value <= max}
        # Keep only the max_features most frequent words.
        if max_features is not None:
            self.count = dict(sorted(self.count.items(), key=lambda x: x[-1], reverse=True)[:max_features])
        for word in self.count:
            self.dict[word] = len(self.dict)  # id = current dict size, i.e. insertion order after UNK/PAD
        # Reverse mapping (id -> word) for inverse_transform().
        self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))

    def transform(self, sentence, length=None):
        """Convert a tokenized sentence into a list of ids.

        If `length` is given, the sentence is padded with PAD_TAG or
        truncated so the result has exactly that length. Words not in the
        vocabulary map to UNK.
        """
        if length is not None:
            if length > len(sentence):
                sentence = sentence + [self.PAD_TAG] * (length - len(sentence))
            if length < len(sentence):
                sentence = sentence[:length]
        return [self.dict.get(word, self.UNK) for word in sentence]

    def inverse_transform(self, indices):
        """Convert a list of ids back into words.

        Bug fix: ids absent from the vocabulary now map to UNK_TAG
        instead of the original's None."""
        return [self.inverse_dict.get(idx, self.UNK_TAG) for idx in indices]

    def __len__(self):
        # Vocabulary size, including the UNK/PAD reserved tokens.
        # Bug fix: the original returned len(self.count), which excludes
        # UNK/PAD and, before build_vocab(), measures raw counts rather
        # than the vocabulary.
        return len(self.dict)


def test():
    """Quick demo: build a tiny vocabulary and round-trip a sentence."""
    ws = Word2Sequence()
    sentence = ["how", "are", "you", "my", "great", "motherland"]
    for _ in range(2):
        ws.fit(sentence)
    ws.build_vocab(min=0)
    print(ws.dict)
    # "fatherland" never appeared in fit(), so it encodes to UNK.
    encoded = ws.transform(["how", "are", "you", "my", "great", "fatherland"], length=100)
    print(encoded)
    print(ws.inverse_transform(encoded))


# pickle offers simple persistence: objects are serialized to files on disk.
# pickle is Python-only; nearly every Python data type (lists, dicts, sets,
# classes, ...) can be serialized with it.
def save():
    """Build a Word2Sequence vocabulary from the IMDB training reviews
    (pos + neg) and pickle it to imdb/model/ws.pkl."""
    from rnn.imdb.word_sequence import Word2Sequence
    from rnn.imdb.dataset import tokenize
    import pickle
    import os

    ws = Word2Sequence()
    data_path = r"imdb/aclImdb/train"
    temp_data_path = [os.path.join(data_path, "pos"), os.path.join(data_path, "neg")]
    for path in temp_data_path:
        file_paths = [os.path.join(path, file_name) for file_name in os.listdir(path)]
        for file_path in tqdm(file_paths):
            # Bug fix: context manager closes each review file (the
            # original leaked one handle per file); explicit encoding
            # avoids platform-dependent decoding of the review text.
            with open(file_path, encoding="utf-8") as f:
                ws.fit(tokenize(f.read()))
    # Keep only words that appear at least 10 times.
    ws.build_vocab(min=10)
    model_dir = "imdb/model/"
    # Bug fix: makedirs creates missing parent directories; the original
    # os.mkdir would fail if "imdb/" itself did not exist yet.
    os.makedirs(model_dir, exist_ok=True)
    with open(os.path.join(model_dir, "ws.pkl"), "wb") as f:
        pickle.dump(ws, f)
    print(len(ws))


# Script entry point: build and persist the vocabulary.
if __name__ == '__main__':
    save()
