import jieba
import wordcloud
from collections import Counter
import matplotlib.pyplot as plt
from PIL import Image
from jieba import posseg


# Load all texts: each line of weibo.txt is tab-separated; field 1 is the body.
text = []
with open("weibo.txt", "r", encoding="utf-8") as f:  # file sits in the same folder, so no path needed
    for line in f:  # iterate lazily instead of materializing readlines()
        parts = line.split('\t')  # split on tab to drop the noise/metadata fields
        if len(parts) > 1:  # skip malformed lines without a tab (previously an IndexError)
            text.append(parts[1])
#print(text)

# Build the stopword list from Baidu's stopword file, one word per line.
with open('baidu_stopwords.txt', encoding="utf-8") as fs:
    stop_word = [entry.strip('\n') for entry in fs]
#print(stop_word)

# Segment every text with jieba into one flat list of tokens.
text_word = []
for sentence in text:
    text_word.extend(jieba.cut(sentence))
# Remove stopwords. Hoist the list into a set once: membership tests drop
# from O(len(stop_word)) per token to O(1).
stop_set = set(stop_word)
text_final = [token for token in text_word if token not in stop_set]
# Frequency table, sorted ascending by (count, word) — least frequent first.
tfreq = Counter(text_final)
tfreq = sorted(tfreq.items(), key=lambda kv: (kv[1], kv[0]))
#print(text_word)
#for term in tfreq:
	#print(term)

# Word cloud (adapted from online resources) — currently disabled.
# NOTE(review): the block below is a module-level triple-quoted string acting
# as commented-out code; it is evaluated as a string and discarded at import
# time. Kept verbatim as a reference for re-enabling the word cloud.
'''
s = []
for k_word, times in tfreq:
    #print(k_word,times)
    s.append(str(k_word)+'')
print(s)
txt = ''
for j in range(0, 900):
    txt = txt+' '+s[j]
#print(txt)
w = wordcloud.WordCloud(background_color="white",
                        width=800,
                        height=800,
                        font_path="C:\\Users\\User\\Desktop\\STXINGKA.TTF")
w.generate(txt)
w.to_file("weibo.png")
ima=Image.open('weibo.png')
ima.show()
'''

# Part-of-speech analysis.
# NOTE(review): joining with '' glues the filtered tokens back together with
# no separator, so the word boundaries from the first segmentation pass are
# lost before re-segmenting below — confirm this is intended.
text_new=''.join(text_final)
# 1. Count how many words fall under each POS tag — disabled reference code
# (module-level string literal, evaluated and discarded at import time).
'''sentence_seged = jieba.posseg.cut(text_new)#形成元组
result={}
for i in sentence_seged:
    count=result.get(i.flag, 0)
    result[i.flag]=count+1
print(result)
'''
# 2. Count locative words (jieba POS tag 'f') and write two report files:
#    the full word/tag stream, and the locative-word frequency table.
sentence_seged = jieba.posseg.cut(text_new)
pieces = []   # word/tag fragments; joined once instead of quadratic += concat
fstr = []     # locative words only, in order of appearance
for pair in sentence_seged:
    pieces.append("{}/{} ".format(pair.word, pair.flag))
    if pair.flag == 'f':  # 'f' marks locative/direction words in jieba's tag set
        fstr.append(pair.word)
outstr = ''.join(pieces)
with open("微博词频统计.txt",'w',encoding='utf-8') as f:
    print(outstr, file=f)
# Counter replaces the hand-rolled count dict that shadowed the builtin `dict`;
# explicit sorted(...) keeps the original ordering semantics exactly.
freq = sorted(Counter(fstr).items(), key=lambda x: x[1], reverse=True)
with open("微博词频统计方位词.txt", 'w', encoding='utf-8') as f1:
    print(freq, file=f1)
# Render the locative-word cloud and save it to disk.
cloud_options = dict(
    width=800,
    height=800,
    min_font_size=10,
    max_font_size=100,
    font_step=2,
    max_words=100,
    font_path="C:\\Users\\User\\Desktop\\STXINGKA.TTF",
    scale=2,
    stopwords={'python'},
    background_color='white',
)
c1 = wordcloud.WordCloud(**cloud_options)
c1.generate(" ".join(fstr))
c1.to_file("微博词频统计方位词.png")

    

