'''
    @author : walker
    @time : 2019/10/29
    @description : collect statistics on the paper data, analyse it, and draw plots
'''

#!/usr/bin/env python3

from config import *
import sys
sys.path.append(r'../match_fieldname_to_oriname')
from get_micro_search_data import *
from statistic_and_pic import *
import random

'''
    Process-pool worker for statistic_paper_top_field: assign one paper to its
    top-level research field(s) and persist the result.
'''
def pool_statistic_paper_top_field(paper_item):
    """Classify one paper document into top-level fields.

    Parameters:
        paper_item: a paper document from dumplication_paper_info; its "F"
            entry (if present) is a list of {"FN": field_name, ...} dicts.

    Returns:
        None on success, or paper_item['Id'] when the paper has no "F" list
        so the caller can log the unclassifiable paper.
    """
    # Papers without a field list cannot be classified; report their Id back.
    if not paper_item.get("F"):
        return paper_item['Id']

    F_list = []          # top-level fields from one-to-many mappings (resolved later)
    top_field_dict = {}  # top-level fields from one-to-one mappings
    for field_item in paper_item["F"]:
        field_info = list(
            db_data_info["matched_field_name_info_two"].find({"name": field_item["FN"]}))

        if field_info:
            # Cached mapping found locally: deduplicate the top-level names.
            f_list = list(set(item["ori_name"] for item in field_info))

            # A field mapping to exactly one top-level field is accepted
            # directly; one-to-many mappings are collected in F_list and only
            # accepted later when several of the paper's fields agree.
            if len(f_list) == 1:
                top_field_dict[f_list[0]] = 1
            else:
                F_list.extend(f_list)
        else:
            # No local mapping: query the micro search interface, retrying
            # with a random back-off until it answers with HTTP 200.
            data = get_micro_search_data(field_item["FN"])
            ori_field_list, result_status = data.use_micro_search_interface()
            while result_status != 200:
                time.sleep(random.randint(10, 20))
                print("重新调用2333333333333333333333333333333")
                ori_field_list, result_status = data.use_micro_search_interface()

            print('ori_field_list--------------', ori_field_list)

            # Same one-to-one / one-to-many split as the cached branch.
            # BUG FIX: the original iterated f_list here, which is always
            # empty in this branch, so one-to-many results obtained from the
            # micro interface were silently dropped.
            if len(ori_field_list) == 1:
                top_field_dict[ori_field_list[0]] = 1
            else:
                F_list.extend(ori_field_list)

            # Cache every mapping so future lookups hit the local collection.
            for ori_field_item in ori_field_list:
                db_data_info["matched_field_name_info_two"].save(
                    {"name": field_item["FN"], "ori_name": ori_field_item})

    # A top-level field backed by more than one of the paper's research
    # fields (count > 1) is considered a genuine match; merge those counts
    # into top_field_dict.
    F_dict = dict((a, F_list.count(a)) for a in F_list)
    F_top_field_dict = {k: v for k, v in F_dict.items() if v > 1}
    for k, v in F_top_field_dict.items():
        top_field_dict[k] = top_field_dict.get(k, 0) + v

    top_field_dict_list = [{k: v} for k, v in top_field_dict.items()]
    print('top_field_dict_list-------------', top_field_dict_list)
    db_data_info['dumplication_paper_info'].update(
        {"_id": paper_item["_id"]},
        {"$set": {"top_field_name_list": top_field_dict_list}})

    return None

'''
    Determine each paper's top-level field(s) and count the papers that fall
    into each top-level field, fanning the work out over a process pool.
'''
def statistic_paper_top_field():
    # NOTE(review): limit(0) means "no limit" in MongoDB — the 0000 literal
    # looks like a leftover from manual batch runs; confirm the intended size.
    cursor = db_data_info["dumplication_paper_info"].find({}).skip(210000).limit(0000)
    papers = list(cursor)
    worker_pool = Pool(processes = 25)
    # Workers return None on success or the paper Id when no "F" list exists.
    unclassified_ids = list(filter(None, worker_pool.map(pool_statistic_paper_top_field, papers)))
    for paper_id in unclassified_ids:
        print('not_have_f_list-----------', paper_id)

'''
    Attach the stored gender of every author to one paper document
    (process-pool worker for save_researcher_info).
'''
def do_save_researcher_info(paper_item):
    # Papers without an author list cannot be enriched; hand the Id back.
    if not paper_item.get("AA"):
        print("23333333333333333333333333333333333333333")
        return paper_item['Id']

    enriched_authors = []
    for author_item in paper_item.get("AA"):
        matches = list(
            db_data_info["dumplication_researcher_info"].find({"AuId": author_item["AuId"]}))
        # Abort the whole paper as soon as one author has no stored record,
        # returning that author's Id so the caller can log it.
        if not matches:
            print("没找到该作者信息" + "~" * 40, author_item["AuN"])
            return author_item["AuId"]
        print(matches[0].get("Sex"))
        author_item["Sex"] = matches[0].get("Sex")
        enriched_authors.append(author_item)

    db_data_info['dumplication_paper_info'].update(
        {"_id": paper_item["_id"]}, {"$set": {"AA": enriched_authors}})
    return None


'''
    Save the gender information of every author of every paper, using a
    process pool over a slice of the paper collection.
'''
def save_researcher_info():
    papers = list(
        db_data_info["dumplication_paper_info"].find({}).skip(200000).limit(90000))

    worker_pool = Pool(processes = 20)
    # Workers return None on success, otherwise the Id that failed.
    failures = [item for item in worker_pool.map(do_save_researcher_info, papers) if item]
    for failed_id in failures:
        print('not_have_f_list-----------', failed_id)

'''
    Randomly sample 0.2% of the papers to estimate average author and field
    counts (used for a Pearson-correlation check of gender-data accuracy).
'''
def random_choice_paper_data():
    """Print author/field totals and per-paper averages over a random sample.

    Side effects: reads dumplication_paper_info and prints four numbers
    (author total, author average, field total, field average).
    """
    # Sample 280 random offsets into the paper collection (about 0.2% of
    # the ~280k papers). Renamed from `list`, which shadowed the builtin.
    sample_size = 280
    offsets = random.sample(range(1, 280114), sample_size)
    sampled_papers = []
    author_number = 0
    field_number = 0
    for offset in offsets:
        paper_data = db_data_info["dumplication_paper_info"].find({}).skip(offset).limit(1)

        for paper_data_item in paper_data:
            # BUG FIX: the original gated the author count on the presence of
            # "F" (fields) — a copy-paste of the field check below — which
            # both skipped authors of field-less papers and risked a KeyError
            # on papers that have fields but no "AA" list.
            if paper_data_item.get("AA"):
                author_number += len(paper_data_item["AA"])
            if paper_data_item.get("F"):
                field_number += len(paper_data_item["F"])
            sampled_papers.append(paper_data_item)

    print(author_number)
    print(author_number / sample_size)
    # previously observed total: 3883
    print(field_number)
    # previously observed total: 5029
    print(field_number / sample_size)

'''
    Randomly sample 0.05% of the researchers and report the gender split.
'''
def random_choice_researcher_data():
    """Print male/female counts and ratios over a random researcher sample.

    Side effects: reads dumplication_researcher_info and prints four numbers
    (male count, male ratio, female count, female ratio).
    """
    # Renamed from `list`, which shadowed the builtin; random.sample accepts
    # a range directly, so no intermediate list is needed.
    sample_size = 464
    offsets = random.sample(range(1, 927571), sample_size)
    male_number = 0
    female_number = 0
    for offset in offsets:
        researcher_data = db_data_info['dumplication_researcher_info'].find({}).skip(offset).limit(1)
        for researcher_item in researcher_data:
            # Sex == 0 counts as female; anything else — including a missing
            # Sex field — counts as male. NOTE(review): confirm that missing
            # values should really default to male.
            if researcher_item.get("Sex") == 0:
                female_number += 1
            else:
                male_number += 1
    print(male_number)
    print(male_number / sample_size)
    print(female_number)
    print(female_number / sample_size)

def main():
    """Driver: each analysis step below is toggled by (un)commenting its call.

    Currently active: first-author statistics (co-author gender ratio,
    citations, cross-discipline / cross-school collaboration counts).
    """
    # Determine each paper's top-level field(s) and count papers per field.
    # statistic_paper_top_field()
    # demo = Statistic_field_and_people()
    # demo.get_dumplication_researcher_data()

    # Count male/female researchers at each author position.
    # demo = Statistic_researcher_rank_info()
    # Save the male/female rank data for every author position.
    # demo.save_researcher_rank_data()
    # Read back the saved rank data.
    # demo.get_researcher_rank_data()

    # demo = Statistic_multi_author_number_and_cooperation_model()
    # Count multi-author vs single-author papers.
    # demo.get_multi_author_number()
    # Count single-field vs cross-field papers.
    # demo.get_multi_field_paper_data()
    # Paper counts per top-level field plus male/female ratios in each.
    # demo.get_all_field_paper_and_sex_info()

    # Compare papers with a male vs a female first author: remaining members'
    # gender ratio, citation counts, cross-discipline and cross-school
    # collaboration numbers.
    first_author_stats = Statistic_first_author_data()
    first_author_stats.get_all_data()
    # first_author_stats.male_female_person()

    # Attach gender information to each paper's author list.
    # save_researcher_info()

    # Randomly sample 0.2% of papers (Pearson check of gender-data accuracy).
    # random_choice_paper_data()

    # Randomly sample 0.05% of researchers and inspect the gender split.
    # random_choice_researcher_data()

    # 1. Count fields and authors across all papers and plot the results.
    # 2. Count occurrences of each top-level field.
    # statistic_field_and_people = Statistic_field_and_people()
    # Plot the collected paper_number / field_number statistics.
    # statistic_field_and_people.pic_field_and_people_number()
    # statistic_field_and_people.statistic_top_field()

# Run the analysis driver only when this file is executed as a script.
if __name__ == '__main__':
    main()
