from typing import AsyncIterator, Any


import scrapy
from scrapy.http import Response


class QuotesAllPageSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com and yield one dict per quote.

    Starts at the site root and follows the "next" pagination link until
    the last page, yielding ``{"title", "autor", "tag"}`` dicts.
    """

    name = "quotes_all_page"
    # NOTE(review): `start_urls` is effectively dead code — overriding
    # `start()` below replaces the default behavior of requesting these
    # URLs. Kept for reference; pagination is followed from `parse()`.
    start_urls = [f"https://quotes.toscrape.com/page/{i}/" for i in range(1, 11)]

    async def start(self) -> AsyncIterator[Any]:
        """Entry point (Scrapy >= 2.13 async style): begin at the site root."""
        yield scrapy.Request('https://quotes.toscrape.com')

    def parse(self, response: Response):
        """Extract every quote on the page, then follow the next-page link.

        Args:
            response: the downloaded page.

        Yields:
            One dict per quote, plus a Request for the next page when present.
        """
        self.log(f'正在爬取{response.url}')

        for item in response.xpath('//div[@class="quote"]'):
            # BUG FIX: the original built this dict but never yielded it,
            # so the spider produced no items at all.
            # NOTE(review): keys "autor"/"tag" look like typos for
            # "author"/"tags" — kept as-is in case downstream pipelines
            # depend on them; confirm before renaming.
            yield {
                "title": item.xpath('./span[@class="text"]/text()').get(),
                "autor": item.xpath('.//small/text()').get(),
                "tag": item.xpath('./div[@class="tags"]/a/text()').getall(),
            }

        # BUG FIX: pagination was commented out, so only one page was ever
        # crawled despite the spider's name. Follow the relative "next" link;
        # response.follow resolves it against the current URL.
        next_url = response.xpath('//li[@class="next"]/a/@href').get()
        if next_url:
            yield response.follow(next_url)