import scrapy


class ProducterSpider(scrapy.Spider):
    """Scrape quotes (text, author, tags) from quotes.toscrape.com,
    following the "next" pagination link until the last page.
    """

    name = "producter"
    # allowed_domains = ["a.com"]
    # start_urls = ["https://a.com"]

    async def start(self):
        # Scrapy 2.13+ async entry point (replacement for start_requests).
        yield scrapy.Request('https://quotes.toscrape.com/')

    def parse(self, response):
        """Extract every quote on the page and follow the next-page link.

        Yields one dict per quote with keys:
            "content" — the quote text,
            "author"  — the author name,
            "tag"     — list of tag strings (may be empty).
        """
        items = response.xpath("//div[@class='container'][1]/div[@class='row'][1]/div[@class='col-md-8'][1]/div[@class='quote']")
        for item in items:
            content = item.xpath("./span[@class='text']/text()").get()
            # BUG FIX: the original used the absolute path
            # "//small[@class='author']", which searches from the document
            # root, so every item was attributed to the FIRST author on the
            # page. ".//" keeps the query relative to the current quote node,
            # consistent with the content/tag queries.
            author = item.xpath(".//small[@class='author']/text()").get()
            tag = item.xpath(".//a[@class='tag']/text()").getall()
            yield {
                "content": content,
                "author": author,
                "tag": tag
            }
        next_url = response.xpath("//li[@class='next']/a/@href").get()
        if next_url:
            # response.follow resolves the relative href; callback defaults
            # to self.parse, so pagination re-enters this method.
            yield response.follow(next_url)
        else:
            # Last page reached ("爬取结束" = "crawl finished"). Use the
            # spider's logger instead of print(), per Scrapy convention.
            self.logger.info("爬取结束")