import scrapy


class QuotesPageallSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com, following the pagination link until the
    last page, yielding one item dict per quote.

    Each yielded item has the keys:
        text   -- the quote text (str or None if the node is missing)
        author -- the author name (str or None)
        tags   -- list of tag strings (possibly empty)
    """

    name = "quotes_pageall"

    async def start(self):
        """Yield the initial request (Scrapy >= 2.13 async start API)."""
        yield scrapy.Request(url='https://quotes.toscrape.com/')

    def parse(self, response):
        """Extract every quote on the page, then follow the "next" link.

        Yields item dicts for each quote block, followed by a Request for
        the next page when a pagination link is present.
        """
        self.log(f"正在爬取{response.url}页面")
        for quote in response.xpath('//div[@class = "quote"]'):
            yield {
                'text': quote.xpath('./span[@class="text"]/text()').get(),
                'author': quote.xpath('.//small[@class="author"]/text()').get(),
                'tags': quote.xpath('.//div[@class="tags"]/a/text()').getall(),
            }
        # Relative href of the next page (e.g. "/page/2/"); None on the last page.
        next_url = response.xpath('//li[@class="next"]/a/@href').get()
        if next_url:
            # response.follow resolves the relative URL against response.url.
            yield response.follow(next_url)
        else:
            # Was print(): route through Scrapy's logging instead of raw stdout
            # so the message respects LOG_LEVEL / LOG_FILE settings.
            self.log('没有下一页了')