import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

class Quotes(CrawlSpider):
    """Crawl quotes.toscrape.com, yielding quote items from listing pages
    and author-detail items from author pages."""

    # Spider name used by `scrapy crawl quotes`.
    name = "quotes"
    # Fix: Scrapy recognises `allowed_domains`; the original `allow_domain`
    # was silently ignored, disabling the off-site request filter.
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['https://quotes.toscrape.com/']

    # Crawling rules.
    rules = (
        # Paginated listing pages: extract quotes with parse_quotes and
        # keep following pagination links. Raw strings avoid the
        # invalid-escape warning for `\d` on modern Python.
        Rule(LinkExtractor(allow=r'/page/\d+'), callback='parse_quotes',
             follow=True),
        # Author detail pages: callback must name the method defined below
        # (the original 'parse_authors' did not exist, so author pages
        # were fetched but never parsed).
        Rule(LinkExtractor(allow=r'/author/\w+'), callback='parse_author'),
    )

    def parse_quotes(self, response):
        """Yield one dict per quote (text, author name, tag list) on a
        listing page."""
        for quote in response.css(".quote"):
            yield {
                # .get()/.getall() are the modern spellings of the
                # deprecated extract_first()/extract().
                'content': quote.css('.text::text').get(),
                'author': quote.css('.author::text').get(),
                'tags': quote.css('.tag::text').getall(),
            }

    def parse_author(self, response):
        """Return a dict of author details from an author page."""
        # The site's markup uses hyphenated class names
        # (author-born-date, ...); the original underscore selectors
        # matched nothing and always yielded None.
        return {
            'name': response.css('.author-title::text').get(),
            'author_born_date': response.css('.author-born-date::text').get(),
            'author_born_location': response.css('.author-born-location::text').get(),
            'author_description': response.css('.author-description::text').get(),
        }
