# -*- coding:utf-8 -*-
import scrapy
from example.items import BookItem


class BooksSpider(scrapy.Spider):
    """Spider that crawls http://books.toscrape.com/ and yields a
    BookItem (name, price) for every book, following the "next" link
    through all result pages."""

    name = "books"
    start_urls = ['http://books.toscrape.com/']

    def parse(self, response):
        """Parse one listing page.

        Yields one BookItem per <article class="product_pod"> on the
        page, then a Request for the next page if a "next" link exists.
        """
        # Each book lives in an <article class="product_pod"> element;
        # iterate over all of them.
        for sel in response.css('article.product_pod'):
            book = BookItem()
            # The title is in the `title` attribute of article > h3 > a,
            # e.g. <a title="A Light in the Attic">A Light in the ...</a>
            name = sel.xpath('./h3/a/@title').extract_first()

            # The price is the text of <p class="price_color">,
            # e.g. <p class="price_color">51.77</p>
            price = sel.css('p.price_color::text').extract_first()
            book['name'] = name
            book['price'] = price

            yield book

        # Pagination: the next page's URL is in ul.pager > li.next > a.
        # BUG FIX: the extraction below was commented out and replaced
        # with `next_url = None`, which made the spider stop after the
        # first page; restore it so all pages are crawled.
        next_url = response.xpath("//li[@class='next']/a/@href").extract_first()
        if next_url:
            # The href is relative; build an absolute URL and schedule
            # the next page with this same callback.
            next_url = response.urljoin(next_url)
            yield scrapy.Request(next_url, callback=self.parse)
