import os
import subprocess
import sys
import time

from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings

# from weibo_spiders.spiders.findSonSpider import FindSonSpider
# from weibo_spiders.spiders.searchSpider import advSearchSpider
# from weibo_spiders import settings as my_settings


def run(kw, stime, etime):
    """Run the advSearch spider in-process for keyword *kw*.

    Args:
        kw: search keyword passed to the spider.
        stime: search-window start time (format defined by the spider).
        etime: search-window end time.

    Blocks until the crawl finishes.
    """
    # The module-level imports for these names are commented out at the top
    # of the file, which made this function raise NameError when called.
    # Import locally so the dependency is only required when run() is used.
    # TODO(review): confirm these package paths match the project layout.
    from weibo_spiders import settings as my_settings
    from weibo_spiders.spiders.searchSpider import advSearchSpider

    crawler_settings = Settings()
    crawler_settings.setmodule(my_settings)
    process = CrawlerProcess(settings=crawler_settings)

    process.crawl(advSearchSpider, kw, stime, etime, 'key')
    process.start()  # the script will block here until all crawling jobs are finished


def run2(kw):
    """Run the findSon spider in-process for keyword *kw*.

    Blocks until the crawl finishes.
    """
    # The module-level imports for these names are commented out at the top
    # of the file, which made this function raise NameError when called.
    # Import locally so the dependency is only required when run2() is used.
    # TODO(review): confirm these package paths match the project layout.
    from weibo_spiders import settings as my_settings
    from weibo_spiders.spiders.findSonSpider import FindSonSpider

    crawler_settings = Settings()
    crawler_settings.setmodule(my_settings)
    process = CrawlerProcess(settings=crawler_settings)

    process.crawl(FindSonSpider, kw, 'key')
    # BUG FIX: the original never called start(), so the scheduled crawl
    # was silently dropped and the spider never actually ran.
    process.start()  # blocks until the crawling job is finished



def cmdrun(kw, stime, etime):
    """Launch the three spiders as detached ``scrapy crawl`` subprocesses.

    Args:
        kw: search keyword forwarded to each spider via ``-a kw=...``.
        stime: search-window start time for the advSearch spider.
        etime: search-window end time for the advSearch spider.

    Returns immediately; the spiders keep running in the background.
    """
    # Change into this file's directory so `scrapy crawl` can find the
    # project's scrapy.cfg.  os.path.dirname(__file__) is '' when the
    # script is launched as `python run.py`, and os.chdir('') raises —
    # abspath() makes the chdir robust regardless of invocation.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    # subprocess.Popen with argument lists (shell=False) replaces the
    # original os.popen shell strings: no shell-injection risk if kw
    # contains quotes/`;`/spaces, and no leaked pipe handles.
    subprocess.Popen(['scrapy', 'crawl', 'advSearch',
                      '-a', 'kw={}'.format(kw),
                      '-a', 'stime={}'.format(stime),
                      '-a', 'etime={}'.format(etime)])

    subprocess.Popen(['scrapy', 'crawl', 'findSon',
                      '-a', 'kw={}'.format(kw)])

    subprocess.Popen(['scrapy', 'crawl', 'userSpider',
                      '-a', 'kw={}'.format(kw)])

    print('启动成功')


if __name__ == "__main__":
    try:
        kw = sys.argv[1]
        stime = sys.argv[2]
        etime = sys.argv[3]
        print(kw)
        # run(kw, stime, etime)
        # run2(kw)
        # run2(kw)
        cmdrun(kw, stime, etime)
    except Exception:
        print('[Input ERROR]')
