import scrapy


class QuotesSpider(scrapy.Spider):
    """Scrape quote text, author, and tags from quotes.toscrape.com.

    Crawls listing pages 1-10 and yields one dict per quote with keys
    ``content``, ``author``, and ``tags``.
    """

    name = "quotes"
    allowed_domains = ["toscrape.com"]

    def start_requests(self):
        """Yield a request for each of the first ten listing pages."""
        for page in range(1, 11):
            yield scrapy.Request(
                url=f"https://quotes.toscrape.com/page/{page}",
                callback=self.parse1,
            )

    def parse1(self, response):
        """Extract every quote block from *response*.

        Yields:
            dict: ``{"content": str | None, "author": str | None,
            "tags": list[str]}`` for each ``div.quote`` on the page.
        """
        for quote in response.xpath('//div[@class="quote"]'):
            # Build the item exactly once so the yielded and printed
            # dicts cannot drift apart (the original duplicated the literal).
            item = {
                "content": quote.xpath('.//span[@class="text"]/text()').get(),
                "author": quote.xpath('.//span/small[@class="author"]/text()').get(),
                "tags": quote.xpath('.//div[@class="tags"]/a/text()').getall(),
            }
            yield item
            # NOTE(review): Scrapy already logs scraped items; consider
            # self.logger.debug(...) instead of print. Kept to preserve
            # the existing stdout behavior.
            print(item)
