# -*- coding: utf-8 -*-

# our first Spider
#
# See documentation in:
# https://docs.scrapy.org/en/latest/intro/tutorial.html#our-first-spider
#
# For a brief introduction to how `yield` works, see:
# https://www.ibm.com/developerworks/cn/opensource/os-cn-python-yield/

import scrapy

class QuotesSpider(scrapy.Spider):
    """Crawl http://quotes.toscrape.com, following pagination links.

    Scrapy's default ``start_requests()`` implementation turns each entry of
    ``start_urls`` into a ``scrapy.Request`` whose callback is ``parse()``
    (``parse()`` is Scrapy's default callback for requests without an
    explicitly assigned one).  ``parse()`` extracts the quote data from each
    page and then yields a new request for the next page, registering itself
    as the callback -- forming a loop that keeps the crawl going until no
    "next" link is found, which is Scrapy's standard mechanism for crawling
    paginated sites such as blogs and forums.
    """

    # Spider identifier; must be unique within the project.
    name = 'quotes'

    # Seed URL for the crawl; subsequent pages are discovered by parse().
    start_urls = [
        'http://quotes.toscrape.com/page/1/'
    ]

    def parse(self, response):
        """Extract quotes from *response*, then follow the next-page link.

        Yields one dict per quote on the page (keys: ``text``, ``author``,
        ``tags``) followed by a ``Request`` for the next page, if any.

        Typical runs:
            1. ``scrapy crawl quotes`` -- items only appear in the log.
            2. ``scrapy crawl quotes -o quotes.json`` -- items stored as
               JSON; note ``-o`` appends rather than overwrites, so running
               it twice leaves the file as invalid JSON.
            3. ``scrapy crawl quotes -o quotes.jl`` -- JSON Lines format
               (one independent record per line), which avoids the append
               problem of (2) and is easy to inspect with tools like ``jq``
               (https://stedolan.github.io/jq/).

        See:
            https://docs.scrapy.org/en/latest/intro/tutorial.html#extracting-data-in-our-spider
        """
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').extract_first(),
                'author': quote.css('small.author::text').extract_first(),
                'tags': quote.css('div.tags a.tag::text').extract(),
            }
        # Look for the pagination link to the next page.
        next_page = response.css('li.next a::attr(href)').extract_first()
        if next_page is not None:
            # response.follow() accepts relative URLs directly, so no
            # explicit response.urljoin() call is needed.  Registering
            # parse() as the callback makes the crawl recursive, ending
            # when the last page has no "next" link.
            yield response.follow(next_page, callback=self.parse)