import scrapy
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.http import Request
from twisted.internet import reactor, defer
import json
from items import *
from spiders.movie_spider import *
from spiders.TV_spider import *
from scrapy.settings import Settings

# Scrapy settings shared by both run modes: route every scraped item
# through the project's JSON-lines pipeline (priority 300).
settings = Settings(
    {
        'ITEM_PIPELINES': {'tutorial.pipelines.JsonLinesPipeline': 300},
    }
)


def run_parallel():
    """Crawl TVSpider and MovieSpider concurrently in one Twisted reactor.

    Both crawls are scheduled up front; the reactor is stopped once every
    crawl has finished (successfully or not), which unblocks reactor.run().
    """
    configure_logging()
    crawler_runner = CrawlerRunner(settings)
    for spider_cls in (TVSpider, MovieSpider):
        crawler_runner.crawl(spider_cls)
    # join() fires when all scheduled crawls are done; addBoth ensures the
    # reactor stops on failure as well as on success.
    crawler_runner.join().addBoth(lambda _result: reactor.stop())
    reactor.run()  # blocks until reactor.stop() is called


def run_sequential():
    """Crawl TVSpider then MovieSpider one after the other in a single
    Twisted reactor, stopping the reactor once both are done.

    The try/finally guarantees reactor.stop() runs even if one of the
    crawls raises; without it the inlineCallbacks deferred would errback
    silently and reactor.run() below would hang forever.
    """
    configure_logging()
    runner = CrawlerRunner(settings)

    @defer.inlineCallbacks
    def crawl():
        try:
            # Each yield suspends until that spider's crawl completes.
            yield runner.crawl(TVSpider)
            yield runner.crawl(MovieSpider)
        finally:
            reactor.stop()

    crawl()
    reactor.run()  # blocks until reactor.stop() fires

# Script entry point: by default the spiders run concurrently;
# switch to run_sequential() to crawl one spider at a time.
if __name__ == '__main__':
    run_parallel()