#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:Pxz
# @Time :2019/4/16 0016下午 3:40
import json

from utils.kaida2.conf import settings
from utils.kaida2.module.count_data.zhi_wang_analysis import (
    college_count, cloud_count, content_count,
    annual_paper_count, key_count, down_count, zw_analyse, get_begin_end_count_info, get_region_top)
from utils.kaida2.module.mongo_conn.conn_mongo import mongo_coll

from utils.kaida2.module.count_data.zhi_wang_analysis import source_type


def start_make_data(word="人工智能"):
    """Aggregate CNKI (知网) search statistics for *word* into a single dict.

    Queries the MongoDB collection configured in ``settings`` and builds the
    payload used by the front-end report: publication date range, per-year
    counts, top years, keyword/download totals, pie-chart data, word cloud,
    and the five most relevant articles.

    :param word: search keyword (defaults to "人工智能" / "artificial intelligence")
    :return: dict of chart-ready statistics (see the return literal for keys)
    """
    host = settings.HOST
    port = settings.PORT
    db_name = settings.DB_NAME
    coll_zw = settings.COLLECTION_ZW

    coll = mongo_coll(host, port, db_name, coll_zw)

    # NOTE(review): seed semantics come from zw_analyse/cloud_count; 1 appears
    # to be the default sampling seed — confirm against those helpers.
    seed = 1

    res = zw_analyse(coll, word, seed, flag=False)

    # First publication date, last publication date, and total record count.
    start_date, end_date, all_count = get_begin_end_count_info(coll, word)

    word_cloud = cloud_count(coll, word, seed, seed_key=50)

    res_count = content_count(res)

    # Map "title##html" -> hit count so articles can be ranked by relevance.
    # isinstance (not type(...) ==) correctly handles dict subclasses too.
    d_id = {}
    for v in res_count.values():
        if isinstance(v, dict):
            combined = v.get('_title') + "##" + v.get('_html')
            d_id[combined] = v.get('_count')

    # Five most relevant articles, split back into title / html parts.
    res_info = sorted(d_id.items(), key=lambda kv: kv[1], reverse=True)
    res_content_text = []
    for combined, count in res_info[:5]:
        parts = combined.split("##")  # split once, reuse both pieces
        res_content_text.append({
            'count': count,
            'title': parts[0],
            'html': parts[1],
        })

    # Papers published per year (dict: year -> count).
    res_annual = annual_paper_count(res)
    res_annual_x = list(res_annual.keys())
    res_annual_y = list(res_annual.values())

    # Years with the most publications: top 3, plus the single best year.
    # Guard against an empty result set so we return None instead of raising
    # IndexError when the keyword has no matching papers.
    peer_year_sorted = sorted(res_annual.items(), key=lambda kv: kv[1], reverse=True)
    top_3_years = [year for year, _ in peer_year_sorted[:3]]
    top_year = peer_year_sorted[0][0] if peer_year_sorted else None

    # Total keyword occurrences across matched articles.
    res_key = key_count(res)

    # Total article downloads.
    res_down = down_count(res)

    # Pie-chart data: distribution of paper sources.
    pie_data = source_type(res)

    # Distribution of originating colleges/universities.
    res_college = college_count(res)

    # Largest source channel for this keyword.
    top_region = get_region_top(coll, word)

    # Top-20 cloud terms feed the bar chart; top-30 feed the word cloud.
    word_cloud_sorted = sorted(word_cloud, key=lambda w: w['value'], reverse=True)
    x_data = [w['name'] for w in word_cloud_sorted[:20]]
    y_data = [w['value'] for w in word_cloud_sorted[:20]]
    word_cloud = word_cloud_sorted[:30]

    line_bar_data = {
        "xAxis": x_data,
        "yAxis": y_data
    }

    return {
        # Summary statistics
        "keyword": word,
        "search_begin": start_date,
        "search_end": end_date,
        "journal_type": "期刊、学位论文、会议论文以及重要报纸报道",
        "index_content_count": all_count,
        "table_data": res_annual,
        "public_line_chart": {
            "x_data": res_annual_x,
            "y_data": res_annual_y
        },
        "keyword_count": res_key,
        "content_down": res_down,
        "top_3_years": top_3_years,
        "top_year": top_year,

        # Source information
        "word_cloud": word_cloud,
        "region_pie_chart": pie_data,
        "college_pie_chart": res_college,
        "top_region": top_region,
        # Keyword information
        "line_bar_data": line_bar_data,
        # Most relevant articles
        "relate_content": res_content_text
    }
