# from scrapy import cmdline
# cmdline.execute('scrapy crawl df1 -s FEED_URI=file:///H:/scrapyresult/df1.xml'.split())
# cmdline.execute('scrapy crawl df2 -s FEED_URI=file:///H:/scrapyresult/df2.xml'.split())
# cmdline.execute('scrapy crawl dflw -s FEED_URI=file:///H:/scrapyresult/dflw.xml'.split())
# cmdline.execute('scrapy crawl dftfd -s FEED_URI=file:///H:/scrapyresult/dftfd.xml'.split())
# cmdline.execute('scrapy crawl dfbhd -s FEED_URI=file:///H:/scrapyresult/dfbhd.xml'.split())
# cmdline.execute('scrapy crawl dfbhdts -s FEED_URI=file:///H:/scrapyresult/dfbhdts.xml'.split())
# cmdline.execute('scrapy crawl dfx -s FEED_URI=file:///H:/scrapyresult/dfx.xml'.split())
# cmdline.execute('scrapy crawl dfx2 -s FEED_URI=file:///H:/scrapyresult/dfx2.xml'.split())
# cmdline.execute('scrapy crawl jotr -s FEED_URI=file:///H:/scrapyresult/jotr.xml'.split())
# cmdline.execute('scrapy crawl joe -s FEED_URI=file:///H:/scrapyresult/joe.xml'.split())
import scrapy
from scrapy.crawler import CrawlerProcess
from novahq_maps.spiders.df1 import Df1Spider
from novahq_maps.spiders.df2 import Df2Spider
from novahq_maps.spiders.dflw import DflwSpider
from novahq_maps.spiders.dftfd import DftfdSpider
from novahq_maps.spiders.dfbhd import DfbhdSpider
from novahq_maps.spiders.dfbhdts import DfbhdtsSpider
from novahq_maps.spiders.dfx import DfxSpider
from novahq_maps.spiders.dfx2 import Dfx2Spider
from novahq_maps.spiders.jotr import JotrSpider
from novahq_maps.spiders.joe import JoeSpider
from scrapy.utils.project import get_project_settings

# Run every spider inside a single CrawlerProcess so they all share one
# Twisted reactor and the project-wide settings (FEED exports etc. come
# from the project's settings.py via get_project_settings()).
process = CrawlerProcess(get_project_settings())

# All spiders to schedule. crawl() only *queues* a spider; nothing runs
# until process.start() is called below. Add new spiders here.
SPIDERS = (
    Df1Spider,
    Df2Spider,
    DflwSpider,
    DftfdSpider,
    DfbhdSpider,
    DfbhdtsSpider,
    DfxSpider,
    Dfx2Spider,
    JotrSpider,
    JoeSpider,
)

for spider_cls in SPIDERS:
    process.crawl(spider_cls)

process.start()  # the script will block here until the crawling is finished