# -*- coding: utf-8 -*-


import scrapy
from scrapy.linkextractors import LinkExtractor
from ..items import BookItem

class BooksSpider(scrapy.Spider):
    """Crawl http://books.toscrape.com/, following every book link and the
    pagination chain, yielding one populated ``BookItem`` per book page."""

    # Spider name used by `scrapy crawl books`.
    name = "books"
    # Keep the crawl confined to the demo site.
    allowed_domains = ["books.toscrape.com"]
    # First listing page to fetch.
    start_urls = ['http://books.toscrape.com/']

    def parse(self, response):
        """Parse a book-listing page.

        Yields a Request (callback ``parse_book``) for each book detail link
        on the page, then a Request for the next listing page, if any.
        """
        # Book detail links live inside <article class="product_pod"><h3>.
        le = LinkExtractor(restrict_css='article.product_pod h3')
        for link in le.extract_links(response):
            yield scrapy.Request(link.url, callback=self.parse_book)

        # Follow the "next" pagination link when present.
        le = LinkExtractor(restrict_css='ul.pager li.next')
        links = le.extract_links(response)
        if links:
            yield scrapy.Request(links[0].url, callback=self.parse)

    def parse_book(self, response):
        """Parse a single book detail page and yield a BookItem."""
        book = BookItem()

        sel = response.css('div.product_main')
        book['name'] = sel.xpath('./h1/text()').extract_first()
        book['price'] = sel.css('p.price_color::text').extract_first()
        # Rating is encoded as a class name, e.g. "star-rating Three".
        # BUGFIX: the class value has a space before the rating word; the old
        # pattern 'star-rating([A-Za-z]+)' could never match, so the field
        # was always None.
        book['review_rating'] = sel.css('p.star-rating::attr(class)') \
                                   .re_first(r'star-rating ([A-Za-z]+)')

        sel = response.css('table.table.table-striped')
        book['upc'] = sel.xpath('(.//tr)[1]/td/text()').extract_first()
        # Availability text looks like "In stock (22 available)".
        # BUGFIX: there is a space before "available" which the old pattern
        # omitted (always None); also made the pattern a raw string to avoid
        # invalid-escape-sequence warnings for '\(' and '\d'.
        book['stock'] = sel.xpath('(.//tr)[last()-1]/td/text()') \
                           .re_first(r'\((\d+) available\)')
        book['review_num'] = sel.xpath('(.//tr)[last()]/td/text()').extract_first()
        yield book
