from typing import AsyncIterator, Any

import scrapy


class QuotesSpider(scrapy.Spider):
    """Demo spider for https://quotes.toscrape.com/.

    Fetches the start page and prints the site header link's text and
    href, extracted two equivalent ways (absolute XPath vs. a node
    selector queried with relative XPath). No items are yielded; output
    goes to stdout only.
    """

    name = "quotes"  # unique spider name, used by `scrapy crawl quotes`
    # allowed_domains = ["quotes.toscrape.com"]  # optional off-site request filter
    start_urls = ["https://quotes.toscrape.com/"]  # initial URL(s) to crawl

    async def start(self):
        """Issue the initial requests (async replacement for start_requests,
        Scrapy >= 2.13)."""
        for url in self.start_urls:
            yield scrapy.Request(url=url)

    def parse(self, response):
        """Default callback: print the header anchor's text and href.

        Demonstrates that a single absolute XPath and a relative XPath on a
        pre-selected node produce the same result.
        """
        # Approach 1: one absolute XPath expression per value.
        content1 = response.xpath('//*[contains(@class,"header-box")]//h1/a/text()').get()
        href1 = response.xpath('//*[contains(@class, "header-box")]//h1/a/@href').get()
        print(content1, href1)

        # Approach 2: select the <a> node once, then query relative to it.
        anchor = response.xpath('//*[contains(@class, "header-box")]//h1/a')
        content2 = anchor.xpath('text()').get()
        href2 = anchor.xpath('@href').get()
        print(content2, href2)
        print("=========================================")