import os
import subprocess
import sys

from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings

from news_spiders import settings as my_settings
from news_spiders.spiders.baiduSearch import BaidusearchSpider
from news_spiders.spiders.neteaseNews import NeteaseNewsSpider
from news_spiders.spiders.sinaNews import SinaNewsSpider
# Resolve relative paths against this script's directory. abspath() is needed
# because dirname(__file__) is '' when invoked as `python main.py`, and
# os.chdir('') raises FileNotFoundError.
os.chdir(os.path.dirname(os.path.abspath(__file__)))


def run(kw):
    """Crawl all news sources in-process for the given search keyword.

    Seeds article URLs via two Baidu site-search spiders (one per portal),
    then runs the Netease and Sina news spiders. Blocks until every crawl
    finishes.

    :param kw: search keyword forwarded to the Baidu-search spiders.
    """
    # Make the project package importable and anchor relative paths at the
    # script directory; abspath() guards against dirname(__file__) == ''.
    here = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(here)
    os.chdir(here)

    crawler_settings = Settings()
    crawler_settings.setmodule(my_settings)
    process = CrawlerProcess(settings=crawler_settings)

    # Spider args are positional here; cmdrun() passes the same values as
    # kw=/site= — presumably the spider __init__ accepts (kw, site). TODO confirm.
    process.crawl(BaidusearchSpider, kw, "news.163.com")
    process.crawl(BaidusearchSpider, kw, "news.sina.com.cn")
    # Each portal spider is scheduled once (the original queued each twice,
    # duplicating every crawl).
    process.crawl(NeteaseNewsSpider)
    process.crawl(SinaNewsSpider)
    process.start()  # the script will block here until all crawling jobs are finished

def cmdrun(kw):
    """Launch the spiders as detached `scrapy crawl` child processes.

    Unlike run(), this does not block: each crawl runs in its own process
    and this function returns immediately after spawning them.

    :param kw: search keyword passed to the Baidu-search spiders.
    """
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Pass arguments as a list with shell=False so a malicious kw (it comes
    # straight from sys.argv) cannot inject shell syntax — the original
    # interpolated kw into an os.popen() shell string.
    for site in ('news.sina.com.cn', 'news.163.com'):
        subprocess.Popen(
            ['scrapy', 'crawl', 'baiduSearch',
             '-a', 'kw={}'.format(kw), '-a', 'site={}'.format(site)])
    subprocess.Popen(['scrapy', 'crawl', 'neteaseNews'])
    subprocess.Popen(['scrapy', 'crawl', 'sinaNews'])

    print('启动成功')

if __name__ == "__main__":
    try:
        kw = sys.argv[1]
        print(kw)
        # run(kw, stime, etime)
        # run2(kw)
        # run2(kw)
        run(kw)
    except Exception:
        print('[Input ERROR]')
