#!/usr/bin/env python3

'''
1. Collect every university's papers into a single collection (using the
   paper Id as the unique identifier, stored as Id in the collection).
2. Sample a fraction of that collection per publication year as the
   experimental data set.
3. Error check: verify the per-year sample is even (uneven sampling may
   be caused by years with few published papers).
'''

from config import *
from multiprocessing import Pool
import pymongo
import random


# MongoDB connection configuration (MONGO_* constants come from config).
client = pymongo.MongoClient(MONGO_URL)
# paper_info database: one collection per university with raw paper docs.
db_paper_info = client[MONGO_PAPER_INFO_DB]
# collaboration database: aggregated / derived collections built below.
db_collaboration_info = client[MONGO_COLLABORATION_DB]

'''
Return a cursor over all paper documents of one university.
'''
def get_db_data(university_name):
    # batch_size(1) fetches one document per round trip — slow, but
    # presumably chosen to avoid cursor timeouts on long runs; confirm.
    return db_paper_info[university_name].find({}).batch_size(1)

'''
Copy every paper document of one university into the shared
all_paper_info collection.
'''
def get_all_university_paper_info(university_name):
    for paper in get_db_data(university_name):
        # NOTE(review): Collection.save() was removed in pymongo 4 —
        # this requires pymongo < 4; TODO confirm installed version.
        db_collaboration_info['all_paper_info'].save(paper)
    print(university_name + '已完成存储')


'''
Copy papers published in 2008-2017 from all_paper_info into the
2008_to_2017_paper_info collection.
'''
def save_into_2008_to_2017_paper_info():
    for year in range(2008, 2018):
        yearly_papers = db_collaboration_info['all_paper_info'].find({"Y": year})
        for paper in yearly_papers:
            db_collaboration_info['2008_to_2017_paper_info'].save(paper)

'''
Store every field name (F) into exp_2008_to_2017_field_name_all_info and
every author record (AA) into exp_2008_to_2017_researcher_all_info,
extracted from the sampled paper set.

(The commented-out non-"exp_" collection names in the original indicated
an alternative run over the full 2008_to_2017_paper_info data.)
'''
def save_into_2008_to_2017_field_name_info():
    # Source: the sampled experimental paper set.
    cursor = db_collaboration_info['exp_2008_to_2017_paper_info'].find({})

    for paper in cursor:
        # "F" (fields) and "AA" (authors) may be absent; `or []` skips
        # missing/empty values exactly like the original truthiness check.
        for field_item in paper.get("F") or []:
            print(field_item)
            db_collaboration_info['exp_2008_to_2017_field_name_all_info'].save(field_item)
        for researcher_item in paper.get("AA") or []:
            print(researcher_item)
            db_collaboration_info["exp_2008_to_2017_researcher_all_info"].save(researcher_item)

'''
Add a gender attribute to every researcher document. Sample input doc:
"AuN" : "g srinivasan",
    "AuId" : NumberLong(2932430211),
    "AfN" : "university of rochester",
    "AfId" : 5388228,
    "S" : 5
}
'''
def add_researcher_sex_info():
    cursor = db_collaboration_info['exp_2008_to_2017_researcher_info'].find({})

    for researcher in cursor:
        # Missing or empty affiliation (AfN) is normalized to "".
        # NOTE(review): Sex is hard-coded to 1 regardless of branch in
        # the original — looks like a placeholder; confirm the intended
        # gender-assignment logic before relying on this field.
        db_collaboration_info['exp_2008_to_2017_researcher_sex_info'].save({
            "AuN": researcher["AuN"],
            "AuId": researcher["AuId"],
            "AfN": researcher.get("AfN") or "",
            "Sex": 1,
        })


'''
Randomly sample 5% of each year's papers (2008-2017) into the
exp_2008_to_2017_paper_info collection as the experimental data set.
'''
def get_some_paper_info():
    for year in range(2008, 2018):
        # Materialize the cursor once so we can count and sample it.
        papers = list(db_collaboration_info['2008_to_2017_paper_info'].find({"Y": year}))
        paper_count = len(papers)
        print(str(year) + "年共有" + str(paper_count) + "篇paper")

        # Integer division: sample size is floor(count / 20), i.e. 5%.
        sampled_papers = random.sample(papers, paper_count // 20)
        print("after 随机选取后的长度为：", len(sampled_papers))
        for paper in sampled_papers:
            db_collaboration_info['exp_2008_to_2017_paper_info'].save(paper)

'''
Print, for each author-position value S in 0..99, how many researcher
documents hold that position (author-position distribution).
'''
def get_all_researcher_local_info():
    # Renamed from `list` in the original — never shadow the builtin.
    # NOTE(review): Cursor.count() was removed in pymongo 4; newer
    # drivers need count_documents({"S": position}) instead — confirm
    # the installed pymongo version.
    position_counts = [
        db_collaboration_info['exp_2008_to_2017_researcher_all_info'].find({"S": position}).count()
        for position in range(100)
    ]
    print(position_counts)
    print(len(position_counts))

def main():
    '''Run the currently active pipeline stage.'''
    # Earlier stages, run manually when (re)building the data set:
    #   pool = Pool()
    #   pool.map(get_all_university_paper_info, UNIVERSITY_LIST1)
    #   add_researcher_sex_info()
    get_all_researcher_local_info()


# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()
