# Run several spiders at once via CrawlerProcess
from scrapy.crawler import CrawlerProcess
# Import the helper that loads the project's Scrapy settings
from scrapy.utils.project import get_project_settings
# Import the spider module(s) (i.e. the spiders defined in this project)



# get_project_settings() is required; without it the crawl fails with
# "HTTP status code is not handled or not allowed"
from XiaChuFang.spiders.xiachufang import CategorySpider

def main() -> None:
    """Run the CategorySpider using the project's Scrapy settings.

    Passing get_project_settings() is required: without the project
    settings the crawl fails with "HTTP status code is not handled or
    not allowed".
    """
    process = CrawlerProcess(get_project_settings())
    # Register the spider to run; additional process.crawl(...) calls here
    # would schedule more spiders in the same reactor before starting.
    process.crawl(CategorySpider)
    # Blocks until every scheduled crawl has finished.
    process.start()


# Guard the entry point so importing this module does not start a crawl.
if __name__ == "__main__":
    main()