import jieba
import os
from wordcloud import WordCloud
import imageio
import matplotlib.pyplot as plt
import multidict as multidict

# Pipeline containers: raw titles, filtered tokens, and the stop lists.
data = []
words = []
# Generic travel-blog filler words and years to exclude from the cloud.
eliminate = ['之旅', '游记', '旅行', '攻略', '一个', '一场', '2022', '2021', '2019', '2020', '中国']
# One CSV per city; the file stem is the city name.
cities = os.listdir("../csv文件/各城市游记浏览量")
city_name = [filename.replace(".csv", "") for filename in cities]

# Collect the travel-blog titles (second CSV column) from the summary file.
# Fix: the original read the whole file and split on "\n", which produces a
# trailing empty string (and keeps any malformed single-field row); indexing
# i[1] on such a row raises IndexError. Stream line by line and only keep
# rows that actually have a second field.
with open("../csv文件/各城市游记浏览量/汇总.csv", "r", encoding="utf-8") as f:
    for raw_line in f:
        fields = raw_line.rstrip("\n").split(",")
        if len(fields) > 1:  # skip blank/short rows instead of crashing
            data.append(fields[1])

# Tokenize every title with jieba, keeping only tokens that are not city
# names, not on the stop list, and at least two characters long.
jieba.load_userdict("./词典.txt")  # extend jieba with a custom dictionary
blocked = set(city_name) | set(eliminate)
for title in data:
    for token in jieba.cut(title):
        if token not in blocked and len(token.strip()) >= 2:
            words.append(token)

# print(words)


# 获取词频
def getFrequencyDictForText(sentence):
    """Count case-insensitive word frequencies in a space-separated string.

    Parameters:
        sentence: tokens joined by single spaces (as produced by " ".join).

    Returns:
        A multidict.MultiDict mapping each lowercased token to its count,
        in the format expected by WordCloud.generate_from_frequencies.
    """
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}

    # Fix: the original looked up tmpDict.get(text, 0) but stored under
    # text.lower(), so "apple" followed by "Apple" reset the count to 1.
    # Read and write the same lowercased key so all casings accumulate.
    for text in sentence.split(" "):
        key = text.lower()
        tmpDict[key] = tmpDict.get(key, 0) + 1
    for key, count in tmpDict.items():
        fullTermsDict.add(key, count)
    return fullTermsDict


# Join the filtered tokens into the space-separated form the frequency
# helper expects.
cloud_input = " ".join(words)
# Non-white (#FFF) pixels of the template image define the cloud's shape.
template = imageio.v2.imread('../image/1.png')
word_cloud = WordCloud(
    font_path="simkai.ttf",    # font used to render the terms
    background_color="white",
    max_words=200,             # cap on how many terms are drawn
    max_font_size=255,
    mask=template,
)

# Render from explicit frequencies rather than raw text.
word_cloud = word_cloud.generate_from_frequencies(getFrequencyDictForText(cloud_input))
plt.imshow(word_cloud)
plt.axis("off")
plt.show()
# NOTE(review): the template is read from "../image" but the result is
# written to "./image" — confirm the output directory is intentional.
word_cloud.to_file("./image/词云.png")
