import scrapy
from scrapy_projects.tutorial.tutorial.items import DataItem

class QuotesAllPageSpider(scrapy.Spider):
    """Scrape every quote on quotes.toscrape.com, following pagination.

    Starts from the first page and keeps requesting the "next" page link
    until none exists, yielding one dict per quote with keys:
    ``author`` (str), ``text`` (str), ``tags`` (list[str]).
    """

    name = "quotes_all_page"

    async def start(self):
        # Single entry point: pagination is discovered dynamically in
        # parse() instead of hard-coding the number of pages.
        yield scrapy.Request("https://quotes.toscrape.com/")

    async def parse(self, response):
        self.log(f"Crawling {response.url}")

        # Each quote lives in a <div class="quote"> element.
        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                "author": quote.xpath('.//small[@class="author"]/text()').get(),
                "text": quote.xpath('./span[@class="text"]/text()').get(),
                "tags": quote.xpath('.//a[@class="tag"]/text()').getall(),
            }

        # Follow the "Next" pagination link if present; response.follow
        # resolves the relative href against the current page URL.
        next_url = response.xpath('//li[@class="next"]/a/@href').get()
        if next_url:
            yield response.follow(next_url)
        else:
            self.log("Crawl finished: no next-page link found")

