from pathlib import Path

import scrapy


class QuotesSpider(scrapy.Spider):
    """Spider that fetches the first two quotes.toscrape.com listing pages
    and prints the header anchor (site title link) found on each response.
    """

    name = "quotes"

    # Pages requested by start() below, one Request per URL.
    start_urls = [
        "https://quotes.toscrape.com/page/1/",
        "https://quotes.toscrape.com/page/2/",
    ]

    async def start(self):
        """Async request-entry point (Scrapy 2.13+): yield one Request per
        start URL. Responses are routed to parse() by default.
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url)

    def parse(self, response):
        """Extract the header link's text and href and print them.

        Parameters
        ----------
        response : scrapy.http.Response
            The downloaded page for one of the start URLs.
        """
        # Select the <a> inside the page header once, then read text/@href
        # relative to it. .get() is the modern name for the deprecated
        # .extract_first() and behaves identically (returns None on no match).
        anchor = response.xpath('//*[contains(@class,"header-box")]//h1/a')
        content = anchor.xpath('./text()').get()
        href = anchor.xpath('./@href').get()
        print(content, href)





