import aiohttp
import scrapy


class QuotesSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com and yield one item per quote on the page.

    Each yielded item is a plain dict:
        author: the quote's author name (str, or None if missing)
        text:   the quote text including surrounding quotation marks
                (str, or None if missing)
        tags:   list of tag strings (possibly empty)

    Fixed defect: the class previously defined ``parse`` twice; the first
    definition was dead code (silently shadowed by the second rebinding of
    the name) and also dropped its extracted ``text`` without yielding it.
    It has been removed so only the live implementation remains.
    """

    name = "quotes"

    # NOTE(review): inert while start() below is defined — Scrapy uses the
    # spider's own start method instead of start_urls. Kept only so any
    # external code reading this attribute keeps working.
    start_urls = [f"https://quotes.toscrape.com/page/{i}" for i in range(1, 2)]

    # Deep-crawl entry point (Scrapy 2.13+ async ``start`` API): issue a
    # single request to the landing page. Follow-up pagination, if wanted,
    # belongs in parse() via ``response.follow(next_href)``.
    async def start(self):
        yield scrapy.Request("https://quotes.toscrape.com/")

    def parse(self, response):
        """Extract every quote block from one listing page.

        Args:
            response: the downloaded page (HtmlResponse) to scrape.

        Yields:
            dict: one ``{author, text, tags}`` record per quote on the page.
        """
        # Log which URL is being processed (message text is user-facing
        # runtime output; preserved as-is).
        self.log(f"正在爬取地址{response.url}")
        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                'author': quote.xpath('.//small[@class="author"]/text()').get(),
                'text': quote.xpath('./span[@class="text"]/text()').get(),
                'tags': quote.xpath('.//a[@class="tag"]/text()').getall(),
            }


