from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import time

# NOTE: run.py is not needed at all -- a single main.py is enough to debug the spiders.
# The launcher below is intentionally disabled; kept for reference.
# process = CrawlerProcess(get_project_settings())
# process.crawl('douban')   # spider name
# process.crawl('myauto1')  # spider name
# process.start()  # start running the spiders
# After adding join() and sleep, starting via the main() entry point no longer raised errors.
# process.join()
# time.sleep(60)