from collections import Counter
from os import path  # used to resolve paths relative to this script

import jieba
import jieba.analyse as anls


# Directory containing this script, used to resolve relative paths.
d = path.dirname(__file__)

# Load the Chinese stop-word list, one word per line.
# `with` guarantees the handle is closed (the original leaked it).
with open('cn_stopwords.txt', encoding='UTF-8') as f:
    stopwords = [line.strip() for line in f]

# Path of the corpus to analyse.
# NOTE(review): this is an absolute path, so path.join(d, text_path)
# below simply returns text_path unchanged — confirm whether a
# script-relative path was intended.
text_path = "/Users/tianwei/PycharmProjects/lda/lda_data/pos_7000.txt"
# Read the whole corpus into memory as one string.
with open(path.join(d, text_path), encoding="utf8") as f:
    text = f.read()

# Tokenise the corpus with jieba (lazy generator of tokens).
text_split = jieba.cut(text)

# Drop stop words.  `stopwords` is a list, so a set copy makes each
# membership test O(1) instead of a full list scan per token.
stopword_set = set(stopwords)
text_split_no = [word for word in text_split if word not in stopword_set]

# Space-separated string form of the filtered tokens, reused below for
# TF-IDF extraction.
text_split_no_str = ' '.join(text_split_no)

# Persist the filtered text.  The original wrote pos.txt, re-read it to
# filter near-empty lines, then overwrote it again with the exact same
# joined string — the net effect of all of that is this single write.
with open('pos.txt', 'w', encoding='UTF-8') as fW:
    fW.write(text_split_no_str)

# --- Keyword extraction via TF-IDF ---
print("基于TF-IDF提取关键词结果：")
# extract_tags with withWeight=False (the default) already returns just
# the topK words ranked by TF-IDF; the original asked for (word, weight)
# pairs only to throw the weights away in a manual append loop.
keywords = ' '.join(anls.extract_tags(text_split_no_str, topK=200))
print(keywords)
print("基于词频统计结果")
# Re-read the stop-word-filtered text written above; `with` closes the
# handle the original left open.
with open("pos.txt", "r", encoding="UTF-8") as f:
    txt = f.read()

# Count every multi-character token; single-character tokens are skipped
# (in Chinese text these are mostly particles and punctuation).
counts = Counter(word for word in jieba.cut(txt) if len(word) > 1)

# Print up to the 1000 most frequent words.  The original indexed
# items[0..999] unconditionally, which raises IndexError whenever the
# corpus yields fewer than 1000 distinct words; most_common() caps at
# the number of entries actually present.
for word, count in counts.most_common(1000):
    print(word, count)
