# -*-coding: utf-8 -*-
'''
Created on 2018-8-23

@author: xubaifu
'''
import jieba
import codecs
import jieba.posseg as pseg
import fool
jieba.load_userdict("userdict_name.txt")
# Build the stop-word list (one stop word per line, UTF-8 encoded).
stop_words = 'stopWords.txt'
# Use a context manager so the file handle is closed deterministically
# (the original codecs.open(...) was never closed).
with open(stop_words, 'r', encoding='utf8') as _f:
    stopwords = [w.strip() for w in _f]
# jieba POS flags to drop after segmentation
# [punctuation, conjunction, particle, adverb, preposition, temporal morpheme,
#  'de' particle, numeral, locative, pronoun]
# stop_flag = ['x', 'c', 'u','d', 'p', 't', 'uj', 'm', 'f', 'r', 'ul']
stop_flag = ['x']

class Novel_Demo:
    """Extract person-name candidates from a novel text file.

    Two approaches are demonstrated: jieba POS tagging (``jieba_test``)
    and foolnltk named-entity recognition (``fool_test``).
    """

    def load_stopwords(self, path='stopwords.txt'):
        """Load stop words from *path* (file must be UTF-8 encoded).

        Returns a frozenset of the non-empty, stripped lines.
        """
        with open(path, 'r', encoding='UTF-8') as f:
            # filter(None, ...) drops lines that strip to the empty string;
            # the frozenset is built while the file is still open because
            # the generator reads lazily.
            return frozenset(filter(None, (line.strip() for line in f)))

    def cut_words(self, filename):
        """Segment one document, dropping stop flags and stop words.

        Returns the surviving tokens as a list of strings.
        """
        with open(filename, 'r', encoding='UTF-8') as f:
            text = f.read()
        # Build a set once so each membership test is O(1) instead of
        # scanning the module-level stopword list per token.
        stop_set = set(stopwords)
        return [word for word, flag in pseg.cut(text)
                if flag not in stop_flag and word not in stop_set]
        # return ' '.join(...) here instead to get a single string

    def jieba_test(self):
        """Print person-name candidates found via jieba POS tagging."""
        result = []
        with open('novel2.txt', 'r', encoding='UTF-8') as f:
            text = f.read()
        for word, flag in pseg.cut(text):
            # Compare the POS flag directly; the original substring test
            # `'nrt' in str(word)` matched against the "word/flag" repr and
            # could also hit characters inside the word itself.
            # NOTE(review): 'nrt' is jieba's transliterated-name flag;
            # native Chinese names are tagged 'nr' — confirm which is wanted.
            if flag == 'nrt':
                print(word)
                result.append(word)
        result = set(result)
        print(result)

    def fool_test(self):
        """Print person entities found via foolnltk NER."""
        result2 = []
        with open('novel2.txt', 'r', encoding='UTF-8') as f:
            text = f.read()
            # fool.analysis returns (tokens, entities); entities for the
            # first (only) document are in ners[0] as
            # (start, end, type, text) tuples.
            words, ners = fool.analysis(text)
            for ner in ners[0]:
                if str(ner[2]) == 'person':
                    print(ner[2], ner[3].strip())
                    word_type = "%s/%s" % (ner[2], ner[3].strip())
                    result2.append(word_type)
        result2 = set(result2)
        print(result2)

# pseg.POSTokenizer(tokenizer=None)

if __name__ == '__main__':
    demo = Novel_Demo()
    # Person-name extraction via jieba POS tagging
    demo.jieba_test()
    # Person-name extraction via foolnltk named-entity recognition
    demo.fool_test()