from pyhanlp import HanLP
import sys


def stopwordslist():
    """Load the stop-word list, one stripped word per line.

    Returns:
        list[str]: stop words read from the configured UTF-8 file.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on garbage collection to close it.
    with open(r'F:\mypython\final_subject\bbdw\mystopwords.txt', encoding='utf-8') as f:
        return [line.strip() for line in f]


def nonsubjectlist():
    """Load the non-subject word list, one stripped word per line.

    Returns:
        list[str]: non-subject words read from the configured UTF-8 file.
    """
    # Context manager closes the file deterministically instead of
    # leaving the handle open until garbage collection.
    with open(r'F:\mypython\final_subject\bbdw\non_subject.txt', encoding='utf-8') as f:
        return [line.strip() for line in f]


def is_number(s):
    """Return True if *s* represents a number.

    Accepts anything float() can parse, plus single Unicode numeric
    characters (e.g. fractions, circled digits) recognized by
    unicodedata.numeric().
    """
    # First attempt: ordinary numeric string such as "3.14" or "-7".
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True

    # Second attempt: a single Unicode character with a numeric value.
    # TypeError covers non-single-character input.
    import unicodedata
    try:
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True


# HanLP segmentation
def seg_hanlp(sentence):
    """Segment one line of text with HanLP and keep noun-like terms.

    Keeps terms whose part-of-speech tag starts with 'n' (noun family)
    or equals 'vn' (verbal noun), dropping stop words, non-subject
    words, tab tokens, numeric tokens, and any word that immediately
    follows a numeric token.

    Args:
        sentence (str): one raw line of text.

    Returns:
        str: the kept words, each followed by a single space (so the
        result carries a trailing space when non-empty, matching the
        original output format).
    """
    terms = HanLP.segment(sentence.strip())
    # Sets give O(1) membership tests instead of O(n) list scans.
    # NOTE(review): both files are re-read on every call; consider
    # caching at module level if this becomes a hot path.
    stopwords = set(stopwordslist())
    nonsub = set(nonsubjectlist())

    kept = []
    for i, term in enumerate(terms):
        word = term.word
        nature = str(term.nature)
        # Keep only noun-like parts of speech ('n...' or 'vn').
        # startswith() also tolerates an empty nature string, where the
        # original's nature[0] would raise IndexError.
        if not (nature.startswith('n') or nature == 'vn'):
            continue
        # The original checked the stop-word list twice; once suffices.
        if word in stopwords or word in nonsub or word == '\t':
            continue
        # Drop a word directly preceded by a number (e.g. a unit after a count).
        if i > 0 and is_number(terms[i - 1].word):
            continue
        # Drop numeric tokens themselves.
        if not is_number(word):
            kept.append(word)

    # join is O(n); the original's repeated "+=" was quadratic.
    return ''.join(w + ' ' for w in kept)


# Preprocess the file at the given path for topic extraction.
def filter_file(file):
    """Segment every line of *file* and write the results to new_file.txt.

    Each input line is filtered through seg_hanlp(); one output line is
    written per input line.

    Args:
        file (str): path to the UTF-8 text file to preprocess.
    """
    out_path = r'F:\mypython\final_subject\bbdw\new_file.txt'
    with open(file, 'r', encoding='utf-8') as f, \
            open(out_path, 'w', encoding='utf-8') as f1:
        # Stream line by line instead of loading the whole file into
        # memory with readlines().
        for sentence in f:
            f1.write(seg_hanlp(sentence))
            f1.write('\n')


# Script entry point: preprocess 'external.txt' from the working directory
# and write the filtered output to the hard-coded new_file.txt path.
if __name__ == '__main__':
    filter_file('external.txt')
