from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from baidu_project.spiders.baidu import BaiduSpider

def main() -> None:
    """Run the BaiduSpider inside this Scrapy project and block until done."""
    # Load the Scrapy project's settings (reads scrapy.cfg / settings module).
    settings = get_project_settings()

    # CrawlerProcess manages the Twisted reactor for us; suitable for
    # standalone scripts (use CrawlerRunner when embedding in an app).
    process = CrawlerProcess(settings)
    process.crawl(BaiduSpider)
    process.start()  # blocks until the crawl finishes; reactor cannot be restarted


# Guard so importing this module (e.g. by tooling or multiprocessing)
# does not immediately launch a blocking crawl.
if __name__ == "__main__":
    main()
