import scrapy


class QutotesAllpageSpider(scrapy.Spider):
    """Crawl http://quotes.toscrape.com/, following pagination to the end.

    Yields one dict per quote with keys:
        author  -- author name text, or None if the node is missing
        content -- quote text with the surrounding curly quotes stripped
        tags    -- list of tag strings (possibly empty)
    """

    name = "quotes_allpage"

    async def start(self):
        # Seed the crawl with the first page only; subsequent pages are
        # discovered via the "next" link in parse().
        yield scrapy.Request('http://quotes.toscrape.com/')

    def parse(self, response):
        """Extract every quote on the page, then follow the next-page link.

        :param response: the downloaded page (scrapy Response)
        :returns: generator of item dicts and, when present, one follow Request
        """
        for quote in response.xpath('//div[@class="quote"]'):
            author = quote.xpath('.//small[@class="author"]/text()').get()
            content = quote.xpath('.//span[@class="text"]/text()').get()
            tags = quote.xpath('.//a[@class="tag"]/text()').getall()
            yield {
                'author': author,
                # Strip the decorative curly quotes wrapping the text.
                # Guard against a missing node: get() returns None and
                # slicing None would raise TypeError.
                'content': content[1:-1] if content else content,
                'tags': tags,
            }

        next_url = response.xpath('//li[@class="next"]/a/@href').get()
        if next_url:
            # response.follow resolves the relative href against the page
            # URL, so no manual urljoin / string concatenation is needed.
            yield response.follow(next_url)
        else:
            # Last page reached ('爬完了' = "crawl finished"); report via the
            # spider's logger instead of print so it lands in Scrapy's log.
            self.logger.info('爬完了')
