# Load the stop word list (words that carry no useful meaning for extraction)
def stopword():
    stop_word_path = r"G:\信息抽取\datasets\stop_word.txt"
    with open(stop_word_path, encoding='utf-8') as f:
        stopword_list = [sw.replace('\n', '') for sw in f.readlines()]
    return stopword_list
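# A quick sanity check (hypothetical file contents, assuming stop_word.txt
# stores one stop word per line, e.g. "的", "了", "是"):
#   stopword()  ->  ['的', '了', '是', ...]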
# Segment a sentence into words with part-of-speech tags
import jieba.posseg as psg
def cut_word(sentence):
    seg_list = psg.cut(sentence)
    return seg_list
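# psg.cut yields a generator of pair objects, each exposing .word and .flag
# (the part-of-speech tag). A rough illustration (the exact segmentation
# depends on the jieba version and its dictionary):
#   [(p.word, p.flag) for p in cut_word("南京市长江大桥")]
#   ->  something like [('南京市', 'ns'), ('长江大桥', 'ns')]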
# Filter out non-keyword tokens: keep only nouns (POS flag starting with 'n')
# that are longer than one character and are not stop words
def word_filter(seg_list):
    stopword_list = stopword()
    filter_list = []
    for seg in seg_list:
        word = seg.word
        flag = seg.flag
        if not flag.startswith('n'):
            continue
        if word not in stopword_list and len(word) > 1:
            filter_list.append(word)
    return filter_list
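# Worked example (assuming none of these words are in the stop word list):
# single characters and non-nouns are dropped, so only the place names remain.
#   word_filter(cut_word("我爱北京天安门"))  ->  e.g. ['北京', '天安门']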
# Compute term frequency (tf): count of each word over the total filtered words
def tf_value(filter_list):
    tf_value_dict = {}
    tf_value = {}
    for word in filter_list:
        tf_value_dict[word] = tf_value_dict.get(word, 0.0) + 1.0
    for key, value in tf_value_dict.items():
        tf_value[key] = value / len(filter_list)
    return tf_value
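# Worked tf example: each value is count / total number of filtered words.
#   tf_value(['苹果', '苹果', '香蕉'])
#   ->  {'苹果': 0.666..., '香蕉': 0.333...}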
# Load the background corpus to simulate the language environment
def load_data():
    corpus_path = r'G:\信息抽取\datasets\step1_test_2.txt'
    doc_list = []
    for line in open(corpus_path, 'r', encoding='utf-8'):
        content = line.strip()
        seg_list = cut_word(content)
        filter_word = word_filter(seg_list)
        doc_list.append(filter_word)
    return doc_list
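# Assumption: the corpus file holds one document per line. Each line is
# segmented and filtered, so doc_list is a list of word lists, e.g.
#   [['语料', '文档'], ['关键词', '提取'], ...]   (hypothetical contents)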
# Compute idf values over the background corpus
import math
def train_idf():
    doc_list = load_data()
    idf_dic = {}
    total_doc_num = len(doc_list)  # total number of documents
    # count the number of documents each word appears in
    for doc in doc_list:
        for word in set(doc):
            idf_dic[word] = idf_dic.get(word, 0.0) + 1.0

    # convert the counts with the idf formula
    for key, value in idf_dic.items():
        # the +1 is Laplace smoothing, so that new words absent from the
        # corpus do not lead to a zero denominator
        idf_dic[key] = math.log(total_doc_num / (1.0 + value))
    return idf_dic
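# Worked idf example: with 10 documents in total and a word that appears in 3
# of them, idf = log(10 / (1 + 3)) = log(2.5) ≈ 0.916. A word that appears in
# every document scores log(10 / 11) ≈ -0.095, which pushes very common words
# to the bottom of the ranking.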
# Combine tf and idf into tf-idf scores
def tf_idf(tf):
    tf_value_dict = tf           # tf values: a dict of word -> frequency
    idf_value = train_idf()      # idf values: a dict of word -> idf
    tf_idf_dict = {}
    for key, value in tf_value_dict.items():
        # words missing from the idf dictionary fall back to their raw tf value
        tf_idf_dict[key] = value * idf_value.get(key, 1.0)
    return tf_idf_dict
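# Worked tf-idf example: a word with tf = 0.05 in the target text and
# idf = 0.916 in the corpus scores 0.05 * 0.916 = 0.0458; a word unseen in the
# corpus keeps its tf value because of the .get(key, 1.0) fallback above.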
# Rank the words by tf-idf and print the top n (keyword_num) as keywords
def rank(tf):
    keyword_num = 1000
    tf_idf_dict = tf_idf(tf)
    final_dict = sorted(tf_idf_dict.items(), key=lambda x: x[1], reverse=True)
    for i, (word, score) in enumerate(final_dict):
        if i >= keyword_num:
            break
        print(word + '/', end='')
        if (i + 1) % 30 == 0:  # start a new line every 30 keywords
            print("\n")
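# Usage sketch: with the tf dictionary built in the __main__ block below,
# rank(tf) prints up to 1000 '/'-separated keywords, breaking the line every
# 30 words.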
if __name__ == '__main__':
    # extract keywords from the target document
    text = open(r"G:\信息抽取\datasets\step1_test_2.txt", encoding='utf-8').read()
    seg_list = cut_word(text)
    filter_word = word_filter(seg_list)
    tf = tf_value(filter_word)
    rank(tf)
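# Note: running this script requires jieba to be installed (e.g. `pip install
# jieba`) and the two dataset paths above to exist; the paths are specific to
# the original author's machine.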