import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
import pprint


class AmazonSpider(RedisCrawlSpider):
    """Distributed crawler for amazon.cn book pages.

    Seed URLs are not hard-coded in ``start_urls``; they are pushed to the
    Redis list named by ``redis_key`` (e.g. ``LPUSH amazon <books-url>``),
    which lets several spider processes share one crawl frontier.
    """

    name = 'amazon'
    allowed_domains = ['amazon.cn']
    # Redis list the spider pops its start URLs from (scrapy-redis).
    redis_key = "amazon"

    rules = (
        # Category links in the left-hand refinements sidebar. The link
        # extractor auto-completes relative hrefs, so no /a/@href needed.
        # follow=True so sub-categories discovered here keep being crawled.
        Rule(LinkExtractor(restrict_xpaths=("//div[@id='s-refinements']/div[1]/ul/li[@class='a-spacing-micro apb-browse-refinements-indent-2']")), follow=True),

        # "See all results" link inside a category box; a single hop, so
        # neither a callback nor follow is required here.
        Rule(LinkExtractor(restrict_xpaths=("//div[@class='a-box-inner']/a"))),

        # Per-book title links on result pages: parse each book's detail
        # page, and follow so pagination/related links stay in the crawl.
        Rule(LinkExtractor(restrict_xpaths=("//h2[@class='a-size-mini a-spacing-none a-color-base s-line-clamp-2']/a")), callback="parse_book_details", follow=True),
    )

    @staticmethod
    def _stripped(value):
        """Return ``value.strip()`` for a non-empty string, else the value unchanged.

        Consolidates the repeated "strip only if extract_first found text"
        pattern; ``None`` (selector miss) passes through untouched.
        """
        return value.strip() if value else value

    def parse_book_details(self, response):
        """Extract one book's details from its product page and yield the item.

        :param response: Scrapy response for a single book detail page.
        :yields: dict with name, price, author, press, publish date and URL.
                 Missing fields are ``None`` (``extract_first`` semantics).
        """
        item = {
            "book_name": self._stripped(
                response.xpath("//span[@id='productTitle']/text()").extract_first()),
            "book_price": self._stripped(
                response.xpath("//span[@class='a-color-base']/span/text()").extract_first()),
            "book_author": response.xpath("//span[@class='author notFaded']/a/text()").extract_first(),
            "book_press": response.xpath("//span[contains(text(),'出版社') and contains(@class, 'a-text-bold')]/following-sibling::span/text()").extract_first(),
            "book_publish_date": response.xpath("//span[contains(text(),'出版日期') and contains(@class, 'a-text-bold')]/following-sibling::span/text()").extract_first(),
            "book_url": response.url,
        }

        pprint.pprint(item)  # debug aid only; the real output is the yield below
        # Fix: the original only printed the item, so Item Pipelines and
        # feed exports never received the scraped data. Yield it to Scrapy.
        yield item