import scrapy


class QuotesSpider(scrapy.Spider):
    """Spider that fetches JSON payloads from httpbin.org and yields each body.

    NOTE(review): despite the name "quotes", the spider currently targets
    https://httpbin.org/get — presumably leftover from tutorial experiments
    with quotes.toscrape.com; confirm the intended target before deploying.
    """

    # Unique spider name, used on the command line: `scrapy crawl quotes`.
    name = "quotes"

    async def start(self):
        """Generate the initial requests (async entry point, Scrapy 2.13+).

        Yields one ``scrapy.Request`` per URL; each URL carries a distinct
        ``age`` query parameter so the ten responses are distinguishable.
        """
        urls = [f'https://httpbin.org/get?age={i}' for i in range(10)]
        for url in urls:
            yield scrapy.Request(url=url)

    def parse(self, response):
        """Default callback: decode the JSON body and hand it to Scrapy.

        Yielding the decoded dict (instead of only printing it) lets item
        pipelines and feed exports receive the scraped data; the debug log
        preserves visibility during development runs.
        """
        data = response.json()
        self.logger.debug("scraped %s", data)
        yield data