
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from spiders.house import *
from spiders.video import *
from scrapy.settings import Settings
# import sys
# sys.path.append("..")
# sys.path.append("../..")
# from tutorial.items import HouseItem, VideoItem
# from items import HouseItem, VideoItem
# Shared Scrapy settings for both runners below: route all scraped items
# through the project's JSON export pipeline (priority 300).
settings = Settings({
    'ITEM_PIPELINES': {
        'tutorial.pipelines.JsonExportPipeline': 300,
    },
})


def syn():
    """Run HouseSpider and VideoSpider concurrently in one process.

    Both crawls are scheduled on the same Twisted reactor; the call
    blocks until every crawl has finished and the reactor stops.
    """
    configure_logging()
    crawler_runner = CrawlerRunner(settings)
    for spider_cls in (HouseSpider, VideoSpider):
        crawler_runner.crawl(spider_cls)
    # join() yields a Deferred that fires once all scheduled crawls are
    # done; stop the reactor on success or failure alike.
    finished = crawler_runner.join()
    finished.addBoth(lambda _: reactor.stop())
    reactor.run()  # blocks here until reactor.stop() is called


def sequence():
    """Run the spiders sequentially in one process.

    VideoSpider starts only after HouseSpider has completely finished.
    The call blocks until both crawls are done and the reactor stops.
    """
    configure_logging()
    runner = CrawlerRunner(settings)

    @defer.inlineCallbacks
    def crawl():
        # Each ``yield`` waits for the previous crawl's Deferred to
        # fire, which is what serializes the two spiders.
        yield runner.crawl(HouseSpider)
        yield runner.crawl(VideoSpider)
        reactor.stop()

    crawl()
    reactor.run()
    # the script will block here until the last crawl call is finished


# Backward-compatible alias preserving the original (misspelled) name.
seqence = sequence


if __name__ == "__main__":
    syn()
