import scrapy
from my_scrapy_projects.scrapy_quotes.scrapy_quotes.items import QuoteItem


class QuotesSpider(scrapy.Spider):
    """Scrape quotes, authors, and tags from quotes.toscrape.com (pages 1-10).

    Each crawled listing page yields one :class:`QuoteItem` per quote block.
    """

    name = "quotes"
    # allowed_domains = ["quotes.toscrape.com"]  # enable to restrict off-site requests

    def start_requests(self):
        """Yield the initial requests for listing pages 1 through 10.

        Bug fixed: the original comprehension never interpolated ``page``
        into the URL, so it produced the page-1 URL eleven times and needed
        ``dont_filter=True`` to sneak past Scrapy's duplicate filter. With
        distinct per-page URLs the duplicate filter can stay on.
        """
        self.logger.debug("start_requests called")
        urls = [f"https://quotes.toscrape.com/page/{page}/" for page in range(1, 11)]
        for url in urls:
            yield scrapy.http.Request(url, callback=self.parse)

    def parse(self, response):
        """Parse one listing page; bound to each request via its ``callback``.

        :param response: the page response; each ``div.quote`` node holds
            one quote's text, author, and tag links.
        :yields: a populated ``QuoteItem`` per quote on the page.
        """
        self.logger.debug("parsing response: %s", response)
        # Iterate the per-quote Selector nodes; use a distinct name so the
        # Selector is not shadowed by the item being built (original bug).
        for quote in response.xpath('//div[@class="quote"]'):
            item = QuoteItem()
            item["content"] = quote.xpath('./span[@class="text"]/text()').extract_first()
            item["author"] = quote.xpath('.//small[@class="author"]/text()').extract_first()
            item["tags"] = quote.xpath('.//div[@class="tags"]/a[@class="tag"]/text()').extract()
            yield item
