from typing import Any

import scrapy
from scrapy.http import Response


class QuotesSpider(scrapy.Spider):
    """Crawl https://quotes.toscrape.com, yielding one dict per quote.

    Follows the "Next" pagination link until the last page is reached.
    Each yielded item has the keys:
        content: quote text (as displayed, including curly quotes)
        author:  the quote's author name
        tag:     list of tag strings attached to the quote
    """

    name = "quotes"

    async def start(self):
        """Seed the crawl (Scrapy 2.13+ async replacement for start_requests)."""
        yield scrapy.Request('https://quotes.toscrape.com')

    def parse(self, response: Response) -> Any:
        """Extract all quotes on the current page, then follow pagination.

        Args:
            response: the downloaded listing page.

        Yields:
            One dict per quote, then (if present) a follow-up Request for
            the next page, handled by this same callback.
        """
        # Scrapy spiders should log via self.logger rather than print().
        self.logger.info(f"开始爬取{response.url}")
        items = response.xpath(
            "//div[@class='container'][1]/div[@class='row'][1]"
            "/div[@class='col-md-8'][1]/div[@class='quote']"
        )
        for item in items:
            content = item.xpath("./span[@class='text']/text()").get()
            # BUG FIX: the original used an absolute path ("//small[...]"),
            # which searches the whole document and therefore returned the
            # FIRST author on the page for every item. The relative ".//"
            # scopes the search to the current quote node.
            author = item.xpath(".//small[@class='author']/text()").get()
            tag = item.xpath(".//a[@class='tag']/text()").getall()
            yield {
                "content": content,
                "author": author,
                "tag": tag,
            }

        # Follow pagination: response.follow resolves the relative href
        # against the current page URL and defaults to this parse callback.
        next_url = response.xpath("//li[@class='next']/a/@href").get()
        if next_url:
            yield response.follow(next_url)
        else:
            self.logger.info("爬取结束")









