# Debugging a scrapy spider in PyCharm: https://www.jianshu.com/p/6f7cf38d5792
import datetime
import os
import subprocess
import sys
import time

import schedule  # pip install schedule
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

#  pip install apscheduler
# NOTE: scrapy is built on twisted's asynchronous machinery and does not mix
# well with multi-threading; for scheduling, a plain shell script is often simpler.
def job1():
    """Run the 'books' spider in-process via scrapy's CrawlerProcess.

    NOTE(review): the twisted reactor cannot be restarted, so this style of
    launch works only once per Python process — presumably why job2 exists.
    """
    crawler = CrawlerProcess(get_project_settings())
    # Replace 'books' with your own spider name.
    crawler.crawl('books')
    # stop_after_crawl=True (the default) stops the reactor once all crawlers finish.
    crawler.start()
    print('job1 finish', datetime.datetime.now())

def job2():
    """Run the 'books' spider in a child process (``scrapy crawl books``).

    Spawning a fresh process sidesteps the "twisted reactor cannot be
    restarted" limitation of job1, so this job is safe to schedule repeatedly.
    """
    # subprocess.run with an argument list avoids invoking a shell (safer and
    # more portable than os.system) and exposes the exit status if ever needed.
    subprocess.run(["scrapy", "crawl", "books"], check=False)
    print('job2 finish', datetime.datetime.now())

# if __name__ == '__main__':
#     process = CrawlerProcess(get_project_settings())
#     process.crawl('books')    #  你需要将此处的spider_name替换为你自己的爬虫名
#     process.start()

if __name__ == '__main__':
    # schedule.every().day.at("9:30").do(job1)
    print('main start', datetime.datetime.now())

    # Kick off one crawl immediately, then repeat every minute.
    job2()
    schedule.every(1).minutes.do(job2)

    while True:
        # Exit condition: stop once the hour is past 15 (i.e. from 16:00 onward).
        now = datetime.datetime.now()
        if now.hour > 15:
            print("main exit")
            sys.exit(0)

        schedule.run_pending()
        # Poll every 10 seconds; checking more often would just waste CPU.
        time.sleep(10)



