import scrapy


class QuotesAllSpider(scrapy.Spider):
    """Scrape every quote (author, tags, text) from quotes.toscrape.com.

    Starts at the site root and follows the "next" pagination link until
    no further page exists, yielding one dict per quote.
    """

    name = "quotes_all"
    # Plain string literal: the URL contains no placeholders, so an
    # f-string (as originally written) was unnecessary.
    start_urls = ["https://quotes.toscrape.com/"]

    def parse(self, response):
        """Extract all quotes on the current page, then follow pagination.

        Args:
            response: the downloaded page (scrapy ``Response``).

        Yields:
            dict: one item per quote with keys ``'作者'`` (author),
            ``'标签'`` (tag list) and ``'内容'`` (quote text), plus a
            ``Request`` for the next page when one exists.
        """
        # Prefer self.logger with lazy %-style args over the legacy
        # self.log() wrapper (kept by Scrapy only for backward compat).
        self.logger.info("正在爬取: %s", response.url)

        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                '作者': quote.xpath('span/small[@class="author"]/text()').get(),
                '标签': quote.xpath('div[@class="tags"]/a/text()').getall(),
                '内容': quote.xpath('span[@class="text"]/text()').get(),
            }

        # Follow the "next" link so the spider actually crawls *all* pages,
        # matching its name. response.follow resolves the relative href
        # against the current URL; when the link is absent we are on the
        # last page and the crawl ends naturally.
        next_url = response.xpath('//li[@class="next"]/a/@href').get()
        if next_url:
            yield response.follow(next_url, callback=self.parse)



