from scrapy.crawler import CrawlerProcess

from biquyun.spiders.index import IndexSpider

# Custom Scrapy settings for this run.
CUSTOM_SETTINGS = {
    'CONCURRENT_REQUESTS': 16,  # number of parallel requests; set to 1 for serial crawling
}


def main():
    """Run the IndexSpider with the custom settings and fixed start arguments."""
    # CrawlerProcess accepts a settings dict (same keys as project settings.py).
    process = CrawlerProcess(CUSTOM_SETTINGS)

    # idx: book category/id path segment; start_idx: first chapter id.
    # NOTE(review): both values are hard-coded for a specific book — confirm
    # against the spider's expected arguments before reuse.
    process.crawl(IndexSpider, idx="14_14055", start_idx="9607883")

    # Blocks until the crawl finishes.
    process.start()


# Guard so importing this module does not start a crawl as a side effect.
if __name__ == "__main__":
    main()
