# import scrapy


# class ExampleSpider(scrapy.Spider):
#     name = "example"
#     allowed_domains = ["example.com"]
#     start_urls = ["https://example.com"]

#     def parse(self, response):
#         pass

# import scrapy
 
# class ExampleSpider(scrapy.Spider):
#     name = "example"
#     allowed_domains = ["example.com"]
#     start_urls = ["http://example.com/"]
 
#     def parse(self, response):
#         # Extract the page title
#         title = response.xpath("//title/text()").get()
#         yield {"title": title}


import scrapy
 
class QuotesSpider(scrapy.Spider):
    """Scrape quotes from quotes.toscrape.com.

    Yields one dict per quote containing its text, author, and tag list,
    then follows the "next page" link until pagination is exhausted.
    """

    name = "quotes"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["http://quotes.toscrape.com/"]

    def parse(self, response):
        """Emit an item for each quote block on the page, then paginate."""
        for node in response.xpath("//div[@class='quote']"):
            yield {
                "text": node.xpath("span[@class='text']/text()").get(),
                "author": node.xpath("span/small[@class='author']/text()").get(),
                "tags": node.xpath("div[@class='tags']/a[@class='tag']/text()").getall(),
            }

        # Follow the next-page link, if one exists; response.follow resolves
        # the relative href and re-enters this callback.
        href = response.xpath("//li[@class='next']/a/@href").get()
        if href:
            yield response.follow(href, self.parse)
