from multiprocessing import Pool

from Get_data_old.config import *
from Get_data_old.save_to_db import *


def start_get_data(university_name, offset, Y):
    """Persist one page of results for (university_name, Y) at the given offset.

    Delegates to SaveData.save and returns the number of records it stored,
    which the caller uses to detect the last (partial) page.
    """
    saver = SaveData()
    return saver.save(university_name, offset, Y)


def main(name, year):
    """Fetch and save every result page for one (university, year) pair.

    Pages are requested by increasing offset until a partial page arrives.
    The original outer ``while y < 1`` loop executed exactly once and has
    been removed; behavior is unchanged.
    """
    print(name)
    off = 0
    while True:
        data_len = start_get_data(name, off, year)
        off += 1
        # A full page holds exactly 1000 records; a shorter page means
        # this topic's papers are exhausted, so stop paginating.
        if data_len != 1000:
            break

def main_job(name_year):
    """Adapter for Pool.map: unpack a (name, year) tuple and run main()."""
    name, year = name_year
    return main(name, year)

if __name__ == '__main__':
    # Build one task per (university, year) combination for 2000-2019.
    name_year = [
        (name, year)
        for name in UNIVERSITY_LIST1
        for year in range(2000, 2020)
    ]
    pool = Pool(processes=8)
    try:
        # map blocks until every worker task has completed.
        pool.map(main_job, name_year)
    finally:
        # close() must precede join(): it stops new tasks from being
        # submitted, then join() waits for all child processes to exit.
        # The try/finally guarantees cleanup even if map() raises.
        pool.close()
        pool.join()