#encoding=utf8
'''
Created on 2020年6月22日
@author: sida
'''
import ast
import os
import pickle

import jieba

import util
def stopwordslist(filepath='stopWords/1893（utf8）.txt'):
    """Load the stopword list, one word per line.

    :param filepath: path to a UTF-8 text file with one stopword per line.
    :return: list of stopwords with surrounding whitespace stripped.
    """
    # Use a context manager so the file handle is always closed; iterating
    # the file directly avoids materialising the raw lines twice.
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]
# Lazily-populated cache for the default stopword set (see seg_sentence).
_default_stopwords = None

def seg_sentence(sentence, stopwords=None, jieba=jieba):
    """Segment a sentence with jieba and filter out noise tokens.

    Drops stopwords, whitespace-only tokens, single-character tokens and
    numeric tokens (per util.is_number).

    :param sentence: raw text to segment.
    :param stopwords: optional stopword collection; when None, the list
        loaded by stopwordslist() is used (read once and cached as a set).
    :param jieba: segmenter module, injectable for testing.
    :return: list of kept tokens.
    """
    global _default_stopwords
    if stopwords is None:
        # The original default `stopwords=stopwordslist()` ran file I/O at
        # import time and made the whole module fail to import when the
        # stopword file was missing.  Load lazily and cache as a set for
        # O(1) membership tests instead.
        if _default_stopwords is None:
            _default_stopwords = set(stopwordslist())
        stopwords = _default_stopwords
    words = jieba.cut(sentence.strip())
    return [word for word in words
            if word not in stopwords
            and word.strip() != ''
            and len(word) > 1
            and not util.is_number(word)]

def computeLines(self):
    """Read the raw wiki_zh corpus and pickle the lines per sub-directory.

    Walks 'input/wiki_zh'; each line of each corpus file is one record
    (a dictionary-style wiki article dump — TODO confirm against the
    extractor output).  All lines of one sub-directory are pickled to
    self.persistence_path + <last two chars of the directory name>.
    """
    for root, dirs, files in os.walk('input/wiki_zh'):
        if not files:
            continue
        lines = []
        for name in files:  # every corpus file in this dir is processed
            # wiki_zh dumps are UTF-8; be explicit so reading also works
            # where the platform default encoding differs (e.g. Windows).
            with open(root + '/' + name, 'r', encoding='utf-8') as f:
                lines += f.readlines()
        # root[-2:] is the two-letter sub-directory name, e.g. 'AA'.
        with open(self.persistence_path + root[-2:], 'wb') as f:
            pickle.dump(lines, f)
def getLines(self):
    """Yield the per-directory line lists pickled by computeLines.

    Lines are persisted on disk, so this node both recomputes (when
    self.forceUpdate is set) and loads its own data.  Only files with
    two-character names (e.g. 'AA') in the persistence directory are
    treated as pickles of one original wiki sub-directory.
    """
    if self.forceUpdate:
        computeLines(self)
        self.forceUpdate = False
    for root, dirs, files in os.walk(self.persistence_path):
        for name in (f for f in files if len(f) == 2):
            try:
                with open(root + '/' + name, 'rb') as f:
                    print('getLines yield is coming')
                    yield pickle.load(f)
            except (OSError, pickle.UnpicklingError) as e:
                # Replaces a bare `except:` around the whole walk, which
                # also swallowed GeneratorExit and aborted the entire scan
                # on the first bad file; report and continue instead.
                print('something is error in getLines', e)

def computeLines_Seg(self):
    """Segment each batch of raw corpus lines and pickle the results.

    Pulls batches of raw lines from the first child node, parses each line
    as a literal dict, segments its 'text' field with seg_sentence, and
    writes a (lines_with_space, token_lists) tuple to
    self.persistence_path/<name>_seg, one file per batch.
    """
    print('Lines 分词Segment')
    lines_iter = self.sunNodes[0].getData()
    # One output name per batch: 'AA'..'AZ'.  The original list stopped at
    # 'AM' (13 names) and raised IndexError for any further batch.
    linesNames = ['A' + c for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
    for i, lines in enumerate(lines_iter):
        train_data = []
        for line in lines:
            # ast.literal_eval instead of eval(): these lines are data, and
            # eval() would execute any code embedded in the corpus.
            dict_data = ast.literal_eval(line)
            sentence_or_doc = dict_data['text']
            train_data.append(seg_sentence(sentence_or_doc))
        linesWithSpace = [' '.join(tokens) for tokens in train_data]
        data = (linesWithSpace, train_data)
        with open(self.persistence_path + '/' + linesNames[i] + '_seg', 'wb') as f:
            pickle.dump(data, f)
            print('pickle ', linesNames[i])
def getLines_Seg(self):
    """Yield the (lines_with_space, token_lists) tuples pickled by computeLines_Seg.

    Segmented batches live on disk, so this node both recomputes (when
    self.forceUpdate is set) and loads its own data.  Only files whose
    names end with '_seg' in the persistence directory are loaded.
    """
    if self.forceUpdate:
        computeLines_Seg(self)
        self.forceUpdate = False
    for root, dirs, files in os.walk(self.persistence_path):
        for name in (f for f in files if f.endswith('_seg')):
            try:
                with open(root + '/' + name, 'rb') as f:
                    print('getLines_Seg yield is coming')
                    yield pickle.load(f)
            except (OSError, pickle.UnpicklingError) as e:
                # Replaces a bare `except:` that also swallowed
                # GeneratorExit and aborted the whole scan on the first
                # bad file; report and continue instead.
                print('something is error in getLines_Seg', e)
    
def compute_train_set(self):
    """Stream the tokenised training batches from the first child node.

    Each item from the child is a (lines_with_space, token_lists) tuple;
    only the token lists are yielded.
    """
    for _joined, tokenised in self.sunNodes[0].getData():
        print('compute_train_set yield is coming')
        yield tokenised
def compute_test_set(self, sample_index=55):
    """Collect one sample document from each tokenised batch as a test set.

    :param sample_index: position of the document to sample from every
        batch (default 55, preserving the original hard-coded choice).
    :return: list with one token list per batch that is long enough.
    """
    test_set = []
    for _joined, train_data in self.sunNodes[0].getData():
        # The original always indexed train_data[55] and raised IndexError
        # on batches with fewer than 56 documents; skip those instead.
        if len(train_data) > sample_index:
            test_set.append(train_data[sample_index])
    return test_set

if __name__ == '__main__':
    # Quick smoke test: segment a sample sentence and show the result.
    segmented = seg_sentence(sentence='嘿嘿 甜甜圈 嘿嘿12 334 ')
    print(segmented)

    
        
        
