# coding:utf-8



import scrapy
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from MultipleSpider.spiders.bili import BiliSpider
from MultipleSpider.spiders.proxy import ProxySpider


def main():
    """Run BiliSpider and ProxySpider concurrently in a single process.

    Unlike ``CrawlerProcess``, ``CrawlerRunner`` does not start or stop
    the Twisted reactor itself, so we chain the crawl deferreds with
    ``join()`` and stop the reactor once every spider has finished.
    """
    configure_logging()
    # NOTE(review): CrawlerRunner() without arguments uses default settings;
    # if the spiders rely on the project's settings.py (pipelines,
    # middlewares), pass scrapy.utils.project.get_project_settings() here
    # — confirm against the project configuration.
    runner = CrawlerRunner()
    runner.crawl(BiliSpider)
    runner.crawl(ProxySpider)
    # join() returns a Deferred that fires when all crawl jobs are done;
    # addBoth stops the reactor on success or failure alike.
    d = runner.join()
    d.addBoth(lambda _: reactor.stop())
    # Blocks here until all crawling jobs are finished.
    reactor.run()


# Guard so importing this module does not start the reactor/crawlers
# as a side effect.
if __name__ == "__main__":
    main()


# from MultipleSpider.spiders.bili import BiliSpider
# from MultipleSpider.spiders.proxy import ProxySpider
# from scrapy.crawler import CrawlerProcess
#
# # Alternative: run the spiders in parallel using CrawlerProcess
# process = CrawlerProcess()
# process.crawl(ProxySpider)
# process.crawl(BiliSpider)
# process.start()


# from MultipleSpider.spiders.bili import BiliSpider
# from MultipleSpider.spiders.proxy import ProxySpider
#
# from twisted.internet import reactor
# from scrapy.crawler import CrawlerRunner
# from scrapy.utils.log import configure_logging
#
# configure_logging()
# runner = CrawlerRunner()
# runner.crawl(BiliSpider)
# runner.crawl(ProxySpider)
#
# d = runner.join()
# d.addBoth(lambda _: reactor.stop())
# reactor.run()