# -*- coding:utf-8 -*-
from matplotlib.font_manager import FontProperties
import requests
import pandas as pd
import os
import json
from pyecharts.charts import WordCloud
from pyecharts import options as opts
from datetime import date
font = FontProperties(fname='/System/Library/Fonts/STHeiti Light.ttc', size=16)
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer

def ci_cloud(data1, name, out_dir='C:\\Users\\11514\\Desktop'):
    """Render a TF-IDF weighted word cloud for a corpus of documents.

    Parameters
    ----------
    data1 : pandas.DataFrame
        Must contain a "content" column holding one document per row.
    name : str
        Prefix for the output file; the rendered HTML is named
        ``<name>-<today>-wd.html``.
    out_dir : str, optional
        Directory the HTML file is written to (defaults to the original
        hard-coded desktop path for backward compatibility).
    """
    corpus = data1["content"].tolist()
    # Term-frequency matrix: one row per document, one column per token.
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # `get_feature_names` was deprecated in scikit-learn 1.0 and removed
    # in 1.2; prefer the new API but stay compatible with old versions.
    try:
        word = vectorizer.get_feature_names_out()
    except AttributeError:
        word = vectorizer.get_feature_names()

    # Re-weight raw counts as TF-IDF; tfidf[i][j] is the weight of
    # term j in document i.
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(X)
    df2 = pd.DataFrame(tfidf.toarray(), columns=word)
    # Total weight per term across the corpus, most important first;
    # terms whose summed weight reaches 10 are dropped (presumably to
    # exclude boilerplate terms present in nearly every document).
    s1 = df2.sum().sort_values(ascending=False)
    s1 = s1[s1 < 10]

    # (word, weight) pairs — the data format pyecharts WordCloud expects.
    data = list(zip(s1.index.tolist(), s1.values.tolist()))

    # Draw the word cloud.
    word_cloud = WordCloud()
    word_cloud.add('', data, word_size_range=[20, 100], shape='cardioid')
    word_cloud.set_global_opts(title_opts=opts.TitleOpts(title='词云示例'))

    # Date-stamped output file name.
    today = str(date.today())
    file_name = name + "-" + today + '-wd.html'
    word_cloud.render(os.path.join(out_dir, file_name))
if __name__ == '__main__':
    # (csv path, class label) for every news source to process.
    sources = [
        ("./data/cbc_1.csv", 'cbc'),
        ("./data/ctv_1.csv", 'ctv'),
        ("./data/post_1.csv", 'post'),
        ("./data/toronto_1.csv", 'toro'),
        ("./data/globalnews_1.csv", 'glob'),
    ]
    # Load each source and tag its rows with the source label.
    frames = []
    for csv_path, label in sources:
        frame = pd.read_csv(csv_path)
        frame["class"] = label
        frames.append(frame)
    # Combined corpus, newest articles first, with a clean 0..n-1 index.
    datas = pd.concat(frames)
    datas.sort_values("publish_date", ascending=False, inplace=True)
    datas.index = range(len(datas))
    # One word cloud per source, plus one for the combined corpus.
    for pos, frame in enumerate(frames, start=1):
        ci_cloud(frame, "data%d" % pos)
    ci_cloud(datas, "datas")



