
# 获得时间列表
import datetime
from multiprocessing import Pool
from config import *
from spider import *


def f(date):
    """Crawl all news index pages for one date string ('YYYY-MM-DD') and persist results.

    Workflow: discover index pages per news category, collect article URLs,
    save the pending URLs to MongoDB (so an interrupted crawl can resume),
    then fetch and parse each article, saving parsed results and recording
    finished URLs.

    All helpers (get_page_index*, parse_*, save_to_mongo, db, MONGO_*) come
    from the star-imports of `config` and `spider` at the top of the file.
    """
    # News categories to crawl; only 'news' is currently enabled.
    news_type_lists = ['news']
    # ,'ent', 'sports', 'finance', 'tech', 'games', 'auto', 'luxury', 'edu', 'house', 'ru','foxue dao']

    news_urls = []
    responses = []

    for site in news_type_lists:
        temp = get_page_index_temp(date, site)
        # Fall back to a single page when the probe request fails.
        max_page_num = parse_maxNum(temp) if temp else 1
        for page_num in range(1, max_page_num + 1):
            # BUGFIX: fetch each index page exactly once. The original called
            # get_page_index() in the `if` condition and again inside the
            # append, doubling every network request.
            response = get_page_index(date, site, page_num)
            if response:
                responses.append(response)

    print('The number scrapy pages:', len(responses))
    for response in responses:
        # parse_page_index() returns URL+label strings, e.g. 'www.baidu.com[国内]'
        news_urls += parse_page_index(response)

    # news_urls = set(news_urls)
    print('the number news :', len(news_urls))
    i = 1

    # Save every URL still to be crawled to MongoDB, enabling resume after
    # an interruption.
    for url in news_urls:
        save_to_mongo({'url': url}, MONGO_URL)

    # NOTE(review): debug dump of the whole `posts` collection — presumably
    # left in for troubleshooting; consider removing for production runs.
    for data in db.posts.find():
        print('post:', data)

    for url in news_urls:
        web_html = get_page_detail(url)
        if web_html:
            result = parse_page_detail(web_html.text, url)
            if result:
                save_to_mongo(result, MONGO_NEWS)
                print('saved ', i, ' items!!')
                i = i + 1

                # Record the finished URL in MongoDB (resume support).
                data1 = {'url': url}
                save_to_mongo(data1, MONGO_FINISH_URLS)


if __name__ == '__main__':

    # Build the inclusive list of dates to crawl, formatted 'YYYY-MM-DD'.
    begin = datetime.date(2018, 4, 1)
    # end = datetime.date.today()
    end = datetime.date(2018, 4, 2)
    date_lists = [
        (begin + datetime.timedelta(days=offset)).strftime("%Y-%m-%d")
        for offset in range((end - begin).days + 1)
    ]

    # Process pool of 8 workers (roughly one per CPU core); each worker
    # crawls a single date.
    pool = Pool(8)
    # pool.map accepts the list directly — no generator wrapper needed.
    pool.map(f, date_lists)
    pool.close()
    pool.join()  # block until all workers have exited cleanly


