import scrapy


class QutoesSpider(scrapy.Spider):
    """Scrape quotes, authors, and author birthdays from quotes.toscrape.com.

    Run with: ``scrapy crawl qutoes -o qutoes.json`` to export JSON output.
    """
    # NOTE: 'qutoes' is a typo for 'quotes', but it is the spider's public
    # name used on the command line, so it is kept for compatibility.
    name = 'qutoes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        """Parse one quote-listing page.

        Yields a Request to each quote's author page (the partially filled
        item rides along in ``request.meta``) and a Request for the next
        listing page when one exists.
        """
        for quote in response.css("div.quote"):
            scraped_quote = {
                "text": quote.css("span.text::text").get(),
                "author": quote.xpath(".//small/text()").get(),
                "birthday": None,  # filled in later by parse_author
            }
            author_href = quote.css(".author+a::attr(href)").get()
            # urljoin turns the relative href into an absolute URL.
            author_page = response.urljoin(author_href)
            # dont_filter=True: many quotes share an author, and Scrapy's
            # duplicate-request filter would otherwise drop every request
            # after the first one to a given author page -- silently losing
            # those quotes' items (each item travels only in its own request).
            yield scrapy.Request(
                author_page,
                meta={'item': scraped_quote},
                callback=self.parse_author,
                dont_filter=True,
            )

        # Follow pagination; response.follow accepts the relative URL
        # directly (no manual urljoin needed) and re-enters parse().
        next_page = response.xpath("//li[@class='next']/a/@href").get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)

    def parse_author(self, response):
        """Parse an author detail page and complete the item from meta."""
        item = response.meta["item"]
        # get(default='') prevents an AttributeError on .strip() when the
        # born-date element is missing from the page.
        born = response.css(".author-born-date::text").get(default='').strip()
        item['birthday'] = born
        return item


