from multiprocessing import Pool  # process pool for parallel scraping
from page_parsing import get_url_list, get_item_info, url_list, item_info, channel_list


def get_all_urls(channel, last_page=20):
    """Collect listing-page URLs for one channel.

    Calls ``get_url_list(channel, page)`` for every page in
    ``range(1, last_page)``, i.e. pages 1 through ``last_page - 1``.

    NOTE(review): the original comment claimed "only 10 pages per
    category", but ``range(1, 20)`` actually fetches 19 pages — the
    bound is kept at 20 to preserve existing behavior; pass
    ``last_page=11`` if 10 pages was the real intent.

    Args:
        channel: base channel URL forwarded to ``get_url_list``.
        last_page: exclusive upper page bound (default 20, matching
            the original hard-coded value).
    """
    for page in range(1, last_page):
        get_url_list(channel, page)

if __name__ == "__main__":
    # Use the pool as a context manager so workers are terminated and
    # the pool is cleaned up on exit — the original never called
    # pool.close()/pool.join(), leaking the worker processes' handles.
    with Pool() as pool:
        # Stage 1 (currently disabled): harvest listing URLs for every
        # channel into the `url_list` collection.
        # pool.map(get_all_urls, channel_list)

        # Stage 2: read back every stored listing URL from MongoDB and
        # scrape the item detail pages in parallel.
        urls_list = [item["url"] for item in url_list.find()]
        # print(urls_list)
        pool.map(get_item_info, urls_list)