# -*- coding:utf-8 -*-
import requests
import pandas as pd
import os
import json
from pyecharts.charts import WordCloud
from pyecharts import options as opts
from datetime import date
pd.set_option('display.max_columns', None)  # show all columns when printing DataFrames
# show all rows
pd.set_option('display.max_rows', None)

def creat_wd_n(txt):
    """Run Baidu's NLP lexer on *txt* and return noun/person-name frequencies.

    Parameters
    ----------
    txt : str
        One article's text. Callers should keep it under the lexer's size
        limit (the main script filters at 15000 chars).

    Returns
    -------
    pandas.Series
        ``value_counts`` of tokens whose ``pos`` tag is ``'xc'`` or whose
        named-entity tag is ``'PER'``, indexed by token, sorted descending.

    Raises
    ------
    requests.HTTPError
        If the OAuth or lexer HTTP request fails.
    KeyError
        If the API response lacks the expected fields.
    """
    # OAuth2 client-credentials grant — requires your own API key/secret.
    host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=tNOwsBxxGLDdEQMSfEzGMG2d&client_secret=D86atOA7FcBnQ8EqoCNAuHSBNl6i3suy'
    response = requests.get(host)
    response.raise_for_status()  # fail loudly instead of continuing silently
    # BUG FIX: the original printed the freshly issued token but then used a
    # stale hard-coded one (long expired). Use the token actually returned.
    access_token = response.json()['access_token']

    url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/lexer?charset=UTF-8&access_token=' + access_token
    # Lexer endpoint expects a JSON body.
    headers = {'content-type': 'application/json'}
    response_2 = requests.post(url,
                               data=json.dumps({'text': txt}),
                               headers=headers)
    # BUG FIX: originally `result` stayed unbound (NameError below) when the
    # request failed; raise the HTTP error instead.
    response_2.raise_for_status()
    result = response_2.json()

    # Keep token text, part-of-speech tag, and named-entity tag.
    df_news_pos = pd.DataFrame(result['items'])[['item', 'pos', 'ne']]
    # Select tokens tagged 'xc' or recognized as person names ('PER').
    df_nper = df_news_pos[(df_news_pos['pos'] == 'xc') | (df_news_pos['ne'] == 'PER')]

    # Frequency of each remaining token.
    # NOTE: the original function had unreachable word-cloud rendering code
    # after this return; that dead code has been removed.
    return df_nper['item'].value_counts()


if __name__ == '__main__':
    # (CSV path, outlet tag) pairs — replaces five copy-pasted read/tag blocks.
    sources = [
        ("./data/cbc_1.csv", 'cbc'),
        ("./data/ctv_1.csv", 'ctv'),
        ("./data/post_1.csv", 'post'),
        ("./data/toronto_1.csv", 'toro'),
        ("./data/globalnews_1.csv", 'glob'),
    ]
    frames = []
    for csv_path, tag in sources:
        frame = pd.read_csv(csv_path)
        frame['class'] = tag  # tag each row with its news outlet
        frames.append(frame)

    # Combined, newest-first view of every outlet (built for inspection;
    # the word-count aggregation below only uses the CBC frame).
    datas = pd.concat(frames)
    datas.sort_values("publish_date", ascending=False, inplace=True)
    datas.index = range(len(datas))

    # Output directory (used by the word-cloud rendering step; kept for compatibility).
    path = 'C:\\Users\\11514\\Desktop'

    # Aggregate noun/person-name counts across all CBC articles.
    # BUG FIX: the original sent the first article to the API without the
    # <15000-char guard it applied to every other article, and crashed on an
    # empty CSV. Apply the limit uniformly and start from an empty Series.
    li = frames[0]["content"].tolist()
    s1_ci = pd.Series(dtype=float)
    for article in li:
        if len(article) < 15000:
            s1_ci = s1_ci.add(creat_wd_n(article), fill_value=0)
    print(s1_ci)