#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:Pxz
# @Time :2019/4/15 0015下午 5:32
import collections
from interval import Interval
from utils.kaida2.conf import settings
from utils.kaida2.module.count_data.my_tokenizer import tokenizer
from utils.kaida2.module.mongo_conn.conn_mongo import mongo_coll


def zw_analyse(coll_zw, word, seed, flag):
    """
    Scan articles whose html matches *word* (case-insensitive, sorted by
    publication year) and collect either token lists or per-article stats.

    :param coll_zw: mongo collection of articles
    :param word: keyword to look for
    :param seed: minimum number of keyword occurrences for an article to be kept
    :param flag: True  -> return the list of token lists of every matched article;
                 False -> return {'html_count': n, 'content': {_id: article dict}}
    """
    html_count = 0
    content_dict = dict()
    contents_list = list()
    query = {"html": {"$regex": word, "$options": "i"}}
    for content in coll_zw.find(query).sort("public_year", 1):
        html = content.get('html')
        # guard: a missing 'tags' field used to crash the '、'.join below
        tags = content.get('tags') or []

        # tokenize the body and count only the keyword we care about
        # (was Counter(contents)[word]: a full histogram for a single key)
        contents = tokenizer(html)
        key_count = contents.count(word)

        html_count += 1
        # keep every article's token list for flag=True callers
        contents_list.append(contents)

        if key_count >= seed:
            content_dict[content.get('_id')] = {
                'url': content.get('url'),
                'title': content.get('title'),
                'author': content.get('author'),
                'tags': tags,
                # keyword header prepended so tags stay visible with the body
                'html': '关键词：' + '、'.join(tags) + html,
                'link_num': content.get('link_num'),
                'download_num': content.get('download_num'),
                'source_type': content.get('source_type'),
                'source_type_level': content.get('source_type_level'),
                'college': content.get('college'),
                'public_year': content.get('public_year'),
                'key_count': key_count,
            }
    if flag:
        # True: token lists of all matched articles
        return contents_list
    # False: articles mentioning the keyword at least *seed* times
    return {'html_count': html_count, 'content': content_dict}


def get_begin_end_count_info(coll, word):
    """
    Earliest and latest publication dates of articles containing the keyword.

    :param coll: mongo collection of articles
    :param word: keyword matched case-insensitively against the html field
    :return: (begin_date, end_date, total_count); placeholder strings and 0
             when no article matches
    """
    cursor = coll.find(
        {"html": {"$regex": word, "$options": "i"}, "public_year": {"$exists": True}}
    ).sort("public_year", 1)
    total_count = cursor.count()
    if not total_count:
        return '数据建立开始', '至今', 0
    infos = list(cursor)
    # The query requires public_year to exist and the cursor is sorted by it,
    # so the first/last documents carry the earliest/latest dates; the old
    # fallback to index 1 / -2 was dead code and could itself raise IndexError.
    return infos[0]['public_year'], infos[-1]['public_year'], total_count


def get_region_top(coll, word):
    """
    Return the source-type category with the most keyword-matching articles.

    :param coll: mongo collection of articles
    :param word: keyword matched case-insensitively against the html field
    :return: category name ('期刊', '学位论文', ... or '其他') with the highest count
    """
    query = {"html": {"$regex": word, "$options": "i"}, "public_year": {"$exists": True}}
    counts = {
        "期刊": 0,
        "学位论文": 0,
        "博士论文": 0,
        "硕士论文": 0,
        "会议论文": 0,
        "重要报纸": 0,
        "其他": 0,
    }
    for doc in coll.find(query):
        stype = doc['source_type']
        if stype == '学位论文':
            counts['学位论文'] += 1
            # degree theses are additionally tallied by their level
            level = doc['source_type_level']
            if level in ('博士论文', '硕士论文'):
                counts[level] += 1
        elif stype in ('期刊', '会议论文', '重要报纸'):
            counts[stype] += 1
        else:
            counts['其他'] += 1
    # ties resolve to the first key in insertion order, as before
    return max(counts, key=counts.get)


def zw_content(coll_zw, word, seed, flag):
    """
    Tokenize every article in the collection (body plus tags) and collect
    either the token lists or the data of articles mentioning *word* enough.

    :param coll_zw: mongo collection of articles
    :param word: keyword
    :param seed: minimum number of keyword occurrences for an article to be kept
    :param flag: True  -> return the list of token lists of every article;
                 False -> return {_id: per-article dict}
    """
    content_dict = dict()
    contents_list = list()

    for content in coll_zw.find():
        _id = content.get('_id')
        # guards: missing 'html'/'tags' used to raise TypeError on concatenation
        tags = content.get('tags') or []
        # append the tags to the body so they take part in the word count
        html = (content.get('html') or '') + ''.join(tags)

        # tokenize and count only the keyword
        # (was Counter(contents)[word]: a full histogram for a single key)
        contents = tokenizer(html)
        word_count = contents.count(word)

        # keep every article's token list for flag=True callers
        contents_list.append(contents)

        if word_count >= seed:
            content_dict[_id] = {
                'url': content.get('url'),
                'title': content.get('title'),
                'author': content.get('author'),
                'tags': tags,
                'link_num': content.get('link_num'),
                'html': html,
                'download_num': content.get('download_num'),
                'source_type': content.get('source_type'),
                'college': content.get('college'),
                'public_year': content.get('public_year'),
                'count': word_count,
            }
    if flag:
        # token lists of all articles
        return contents_list
    # articles meeting the occurrence threshold
    return content_dict


def content_count(contents_dict):
    """
    Aggregate per-article statistics for a keyword result set.

    :param contents_dict: dict with 'html_count' and a 'content' sub-dict
                          mapping article id -> per-article data
    :return: dict with the article count, one summary per article id, the
             min/max publication period and the largest per-article mention count
    """
    stats = {'html_count': contents_dict['html_count']}
    mention_counts = []

    # Baseline period; widened by any article dated outside this range.
    latest = earliest = '2005年09'

    for doc_id, info in contents_dict['content'].items():
        mention_counts.append(info.get('key_count'))
        stats[doc_id] = {
            '_count': info.get('key_count'),
            '_title': info.get('title'),
            '_html': info.get('html'),
        }
        period = info.get('public_year')
        if period and len(period) > 2:
            # strip the trailing '期' marker so periods compare lexically
            period = period.replace('期', '')
            latest = max(latest, period)
            earliest = min(earliest, period)

    stats['t_max'] = latest
    stats['t_min'] = earliest
    # largest number of mentions in any single article
    if mention_counts:
        stats['key_count_max'] = max(mention_counts)
    return stats


def annual_paper_count(contents_dict):
    """
    Count matched articles per publication year.

    :param contents_dict: keyword result dict with a 'content' sub-dict
    :return: {year_string: article_count}, keys sorted ascending
    """
    year_dict = dict()
    # single pass over the articles; the old code copied public_year into an
    # intermediate dict and then looped over that copy a second time
    for info in contents_dict['content'].values():
        public_year = info.get('public_year')
        # skip missing or too-short values that cannot hold a '2010年…' date
        if public_year and len(public_year) > 2:
            year = public_year.split("年")[0]
            year_dict[year] = year_dict.get(year, 0) + 1

    # ascending by year string
    return dict(sorted(year_dict.items(), key=lambda item: item[0]))


def key_count(contents_dict):
    """
    Sum the keyword occurrence counts over every matched article.

    :param contents_dict: keyword result dict with a 'content' sub-dict
    :return: total number of keyword occurrences
    """
    return sum(info.get('key_count') for info in contents_dict['content'].values())


def source_type(contents_dict):
    """
    Build echarts pie-chart data for the source-type-level distribution.

    :param contents_dict: keyword result dict with a 'content' sub-dict
    :return: dict with 'legendData', 'selected' and 'seriesData' entries
    """
    # missing/empty source_type_level falls into the '其他' bucket
    levels = [entry.get('source_type_level') or '其他'
              for entry in contents_dict['content'].values()]
    legend = ['博士论文', '硕士论文', '学位论文', '期刊', '重要报纸', '会议论文', '其他']
    # seriesData keeps the original slice ordering, which differs from legend
    series_order = ['硕士论文', '博士论文', '学位论文', '期刊', '其他', '重要报纸', '会议论文']
    return {
        "legendData": legend,
        "selected": {name: True for name in legend},
        "seriesData": [{"name": name, "value": levels.count(name)}
                       for name in series_order],
    }


def college_count(contents_dict):
    """
    Count matched articles per author institution and build pie-chart data
    for the ten most frequent institutions.

    :param contents_dict: keyword result dict with a 'content' sub-dict
    :return: dict with 'legendData', 'selected' and 'seriesData' entries
    """
    tally = collections.Counter(
        entry.get('college')
        for entry in contents_dict['content'].values()
        if entry.get('college')
    )
    # most_common(10) == sorted(items, key=count, reverse=True)[:10], a stable
    # descending order identical to the original manual sort
    top_ten = [{"name": name, "value": num} for name, num in tally.most_common(10)]

    return {
        "legendData": [item['name'] for item in top_ten],
        "selected": {item['name']: True for item in top_ten},
        "seriesData": top_ten,
    }


def down_count(contents_dict):
    """
    Bucket articles by download count into 100-wide closed intervals up to 800.

    Each article adds its download number to its bucket's total, reproducing
    the original Interval-based behaviour: intervals are closed on both ends
    and tested in order, so a boundary value such as 100 lands in the lower
    bucket; values above 800 are silently ignored.

    NOTE(review): summing download_num per bucket looks suspicious — if the
    intent was "how many articles per bucket", this should add 1 instead.

    :param contents_dict: keyword result dict with a 'content' sub-dict
    :return: {'0-100': total, ..., '700-800': total}
    """
    # (label, low, high) table replaces eight per-article Interval objects
    # from the third-party `interval` package and eight duplicated branches
    buckets = (
        ('0-100', 0, 100),
        ('100-200', 100, 200),
        ('200-300', 200, 300),
        ('300-400', 300, 400),
        ('400-500', 400, 500),
        ('500-600', 500, 600),
        ('600-700', 600, 700),
        ('700-800', 700, 800),
    )
    count_dict = {label: 0 for label, _, _ in buckets}
    for info in contents_dict['content'].values():
        down_num = int(info.get('download_num'))
        for label, low, high in buckets:
            # closed on both ends; first matching bucket wins
            if low <= down_num <= high:
                count_dict[label] += down_num
                break
    return count_dict


def cloud_count(coll_zw, word, seed, seed_key):
    """
    Build word-cloud data from the token lists of every matched article.

    :param coll_zw: mongo collection of articles
    :param word: keyword used to select articles
    :param seed: minimum keyword occurrences (forwarded to zw_analyse)
    :param seed_key: minimum frequency for a token to appear in the cloud
    :return: [{'name': token, 'value': frequency}, ...] in first-seen order
    """
    contents_list = zw_analyse(coll_zw, word, seed, flag=True)
    # Counter replaces the manual "if key in dict" bookkeeping while keeping
    # first-seen insertion order of the tokens
    word_freq = collections.Counter()
    for contents in contents_list:
        word_freq.update(contents)
    return [{'name': token, 'value': freq}
            for token, freq in word_freq.items() if freq >= seed_key]

# if __name__ == "__main__":
#     host = settings.HOST
#     port = settings.PORT
#     db_name = settings.DB_NAME
#     coll_zw = settings.COLLECTION_ZW
#
#     coll = mongo_coll(host, port, db_name, coll_zw)
#
#     word = "人工智能"
#     seed = 1
#
#     res = zw_analyse(coll, word, seed, flag=False)
#     # print(res)
#     # res = zw_content(coll, word, seed, flag=False)
#     # print(res)
#
#     # word_cloud = cloud_count(coll, word, seed, seed_key=50)
#     # print("词云：%s" % word_cloud)
#
#     res_count = content_count(res)
#     d = dict()
#     d_id = dict()
#     for k, v in res_count.items():
#         if type(v) == dict:
#             d['_count'] = v.get('_count')
#             d['_title'] = v.get('_title')
#             d_id[k] = d
#     dd = d_id.values()
#
#     # print(sorted(dd.items(), key=lambda item: item[0]))
#     print("时间区间:%s" % dd)
#
#     # res_annual = annual_paper_count(res)
#     # print("各个年份发表论文数量：%s" % res_annual)
#
#     # res_key = key_count(res)
#     # print("关键词出现的总次数:%s" % res_key)
#
#     # res_down = down_count(res)
#     # print("文献下载数量:%s" % res_down)
#
#     # source_type(res)
#
#     # res_college = college_count(res)
#     # print("作者所在机构:%s" % res_college)
