'''
Text serialization.

Build a vocabulary, then convert sentences (lists of tokens) into
integer sequences and invert the mapping back to tokens.
'''

class Word2Sequence:
    """Build a word vocabulary and map tokenized sentences to integer
    sequences and back.

    Usage: call :meth:`fit` once per sentence to accumulate word counts,
    then :meth:`build_vocab` to freeze the vocabulary, then
    :meth:`transform` / :meth:`inverse_transform`.
    """

    UNK_TAG = "UNK"  # tag for out-of-vocabulary words
    PAD_TAG = "PAD"  # tag used to pad short sentences

    UNK = 0  # index of UNK_TAG
    PAD = 1  # index of PAD_TAG

    def __init__(self):
        # token -> index; the two special tokens are always present
        self.dict = {
            self.UNK_TAG: self.UNK,
            self.PAD_TAG: self.PAD,
        }
        # token -> raw frequency, accumulated by fit()
        self.count = {}

    def fit(self, sentence):
        """Accumulate word frequencies from one tokenized sentence (a list of tokens)."""
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1

    def build_vocab(self, min=5, max=None, max_features=None):
        """Freeze the vocabulary from the accumulated counts.

        :param min: drop words whose frequency is below ``min`` (None = no lower bound)
        :param max: drop words whose frequency is above ``max`` (None = no upper bound)
        :param max_features: keep only the ``max_features`` most frequent words
        """
        # NOTE: ``min``/``max`` shadow builtins, but the parameter names are
        # kept for backward compatibility with existing callers.
        if min is not None:
            # Fix: ``>= min`` keeps words whose frequency equals ``min``;
            # the old ``> min`` silently dropped them, contradicting the
            # documented "remove words with frequency LESS THAN min".
            self.count = {word: freq for word, freq in self.count.items() if freq >= min}

        if max is not None:
            # Symmetric fix: a frequency exactly equal to ``max`` is kept.
            self.count = {word: freq for word, freq in self.count.items() if freq <= max}

        # Limit the number of retained words to the most frequent ones.
        if max_features is not None:
            top = sorted(self.count.items(), key=lambda kv: kv[1], reverse=True)[:max_features]
            self.count = dict(top)

        # Assign consecutive indices after the two special tokens.
        for word in self.count:
            self.dict[word] = len(self.dict)

        # index -> token, used by inverse_transform()
        self.inverse_dict = {idx: word for word, idx in self.dict.items()}

    def transform(self, sentence, max_len=None):
        """Convert a tokenized sentence to a list of indices.

        When ``max_len`` is given, the sentence is padded with PAD (when
        shorter) or truncated (when longer) to exactly ``max_len`` tokens.
        Unknown words map to UNK.
        """
        if max_len is not None:
            if max_len > len(sentence):
                sentence = sentence + [self.PAD_TAG] * (max_len - len(sentence))
            elif max_len < len(sentence):  # elif: the two branches are mutually exclusive
                sentence = sentence[:max_len]

        return [self.dict.get(word, self.UNK) for word in sentence]

    def inverse_transform(self, indices):
        """Convert a list of indices back to tokens.

        Fix: indices outside the vocabulary map to the UNK tag instead of
        silently producing ``None`` entries.
        """
        return [self.inverse_dict.get(idx, self.UNK_TAG) for idx in indices]

    def __len__(self):
        """Vocabulary size, including the UNK and PAD entries."""
        return len(self.dict)

import re
# Pre-compiled once: <...> HTML tags, and the punctuation/control characters
# to replace with spaces. A character class avoids the old "|".join() bug
# where the lone-backslash filter element produced the fragment "\|",
# escaping the alternation pipe so backslashes were never stripped (and an
# unescaped "^" alternative matched a zero-width anchor).
_TAG_RE = re.compile(r"<.*?>", flags=re.S)
_FILTER_RE = re.compile(r'["#$%&()*+,\-./:;<=>@\[\\\]^_`{|}~\t\n\x97\x96”“]')


def tokenizen(text):
    """Lowercase and tokenize *text*: strip HTML tags, replace filtered
    punctuation with spaces, and split on whitespace.

    :param text: raw text (e.g. one IMDB review)
    :return: list of lowercase tokens
    """
    text = _TAG_RE.sub(" ", text)
    text = _FILTER_RE.sub(" ", text)
    return [token.strip().lower() for token in text.split()]

if __name__ == '__main__':
    # Re-import the class from its module path so the pickled object
    # references word_sequence.Word2Sequence rather than the __main__
    # copy, keeping the pickle loadable from other scripts.
    from word_sequence import Word2Sequence
    import pickle
    import os
    from tqdm import tqdm

    ws = Word2Sequence()
    data_path = 'data/aclImdb/train'
    temp_data_path = [os.path.join(data_path, 'pos'), os.path.join(data_path, 'neg')]
    for path in temp_data_path:
        filenames = os.listdir(path)
        filepaths = [os.path.join(path, name) for name in filenames if name.endswith('.txt')]
        for filepath in tqdm(filepaths):
            # Context manager guarantees each review file is closed;
            # the old bare open() leaked one handle per file.
            with open(filepath, encoding='utf-8') as f:
                ws.fit(tokenizen(f.read()))

    ws.build_vocab(min=10, max_features=10000)
    print(ws.dict)

    # Ensure the output directory exists and close the pickle file properly.
    os.makedirs('./model', exist_ok=True)
    with open('./model/ws.pkl', 'wb') as f:
        pickle.dump(ws, f)
    print(len(ws))




