import scrapy


class BaiduSpider(scrapy.Spider):
    """Demo spider that fires three probe requests at httpbin.org.

    NOTE(review): Scrapy >= 2.13 expects the entry point to be
    ``async def start``; older versions call ``start_requests``.
    A plain ``def start`` may never be invoked by the engine —
    confirm the target Scrapy version.
    """

    name = "baidu"
    # NOTE(review): the requests below target httpbin.org, which is not in
    # allowed_domains — the offsite middleware will drop them unless this
    # list is extended or the requests set dont_filter. Confirm intent.
    allowed_domains = ["www.baidu.com"]
    start_urls = ["https://www.baidu.com"]

    def start(self):
        """Yield the initial probe requests.

        Bug fix: the original body constructed ``scrapy.Request`` objects
        and discarded them without yielding, so no request was ever
        scheduled and the spider did nothing. This version is a generator
        that yields each request to the engine.
        """
        print("=== start_requests 被调用 ===")
        self.logger.info("start_requests 开始执行")

        for i in range(3):
            url = f'https://httpbin.org/status/200?req={i}'
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                meta={'index': i},
            )
            # "Sent" here means handed to the scheduler, not completed.
            self.logger.info(f"请求 {i} 已发送")

    def parse(self, response):
        """Intentional no-op; the requests exist only to exercise start()."""
        pass
