import random
from collections import Counter

import jieba
import matplotlib.pyplot as plt
import numpy as np
import wordcloud as wc
import xlwt
from PIL import Image
from wordcloud import WordCloud
from xlwt import *


# --- Word-cloud generation ---
# Tokenize the Chinese text in 1.txt with jieba, join the tokens with
# spaces (WordCloud expects space-separated words), and render a word
# cloud shaped by the rio.png mask.
with open("1.txt", mode="r", encoding="utf-8") as fp:
    content = fp.read()
res = jieba.lcut(content)   # token list; also reused by the Excel section below
text = " ".join(res)

# NOTE: a first WordCloud(font_path="C:\\Windows\\Fonts\\simkai.ttf") was
# created here and immediately overwritten below — removed as dead code
# (its non-raw path string also triggered invalid-escape warnings).
img = Image.open('rio.png')
font = r'C:\Windows\Fonts\FZSTK.TTF'   # raw string: backslashes stay literal
img_array = np.array(img)              # mask array: defines the drawable shape
word_cloud = WordCloud(
    background_color='pink',
    width=1500,
    height=1000,
    collocations=False,                # count single words, not bigrams
    mask=img_array,
    font_path=font
)
word_cloud.generate(text)
plt.imshow(word_cloud)
plt.axis("off")
plt.show()



# --- Excel word-frequency table ---
# Count how often each token occurs in `res` and write one
# (word, count) row per unique token to chart.xls, sorted by
# descending frequency.
#
# Replaces the original hand-rolled counting, which shadowed the
# builtins `dict` and `sum`, kept a redundant duplicate-tracking dict,
# and manually incremented the `for` loop index.
word_counts = Counter(res)
# most_common() yields (word, count) pairs sorted by count descending;
# ties keep insertion order, matching the original stable sorted(...).
ranked = word_counts.most_common()

wb = Workbook(encoding='utf-8')
table = wb.add_sheet('词频表')
for row, (word, count) in enumerate(ranked):
    # NOTE(review): .xls sheets cap at 65536 rows — very large inputs
    # would need truncation or the .xlsx format. Unchanged from original.
    table.write(row, 0, word)
    table.write(row, 1, count)
wb.save('chart.xls')