# -*- coding: utf-8 -*-
import scrapy
from JD_Book.items import JdBookItem
from copy import deepcopy
import json
from scrapy_redis.spiders import RedisSpider
class BookSpider(RedisSpider):
    """Distributed (redis-driven) spider for JD book data.

    Crawl flow:
        parse        -- category page: big category -> sub-category links
        parse_book   -- listing page: book name/author, then price request
        parse_price  -- price JSON API (p.3.cn): fills book_price, yields item
    """
    name = 'book2'
    allowed_domains = ['jd.com', 'p.3.cn']

    # Start URL is pushed to redis instead of being hard-coded:
    # start_urls = ['https://book.jd.com/booksort.html']
    redis_key = 'book_redis'
    page = 0

    def parse(self, response):
        """Parse the category page: big categories and their sub-categories."""
        # Big-category <dt> nodes (~52 on the page)
        da_list = response.xpath('//*[@id="booksort"]/div[2]/dl/dt[1]')
        for dt in da_list:
            item = JdBookItem()
            item['big_name'] = dt.xpath('a/text()').extract_first()
            # Sub-categories (~880 total) live in the <dd> sibling after each <dt>
            xiao_list = dt.xpath('./following-sibling::*[1]/em')
            for em in xiao_list:
                item['small_name'] = em.xpath('a/text()').extract_first()
                href = em.xpath('a/@href').extract_first()
                if not href:
                    # Missing link: skip instead of crashing on 'https:' + None
                    continue
                # deepcopy: requests are scheduled asynchronously, so each
                # request must carry its own snapshot of the item.
                yield scrapy.Request(
                    'https:' + href,
                    callback=self.parse_book,
                    meta={'book': deepcopy(item)},
                )

    def parse_book(self, response):
        """Parse a listing page: book name/author, then request the price."""
        # Item passed from the previous layer
        item = response.meta['book']
        # All book entries on this listing page
        book_list = response.xpath('//div[@id="plist"]/ul/li')

        for book in book_list:
            name = book.xpath('.//div[@class="p-name"]/a/em/text()').extract_first()
            # Bug fix: extract_first() returns None when the node is absent,
            # so calling .strip() unconditionally raised AttributeError.
            item['book_name'] = name.strip() if name else None
            item['book_author'] = book.xpath(
                './/span[@class="author_type_1"]/a/text()').extract_first()

            book_id = book.xpath('./div/@data-sku').extract_first()
            if not book_id:
                # Cannot build a price URL without the SKU id
                continue
            price_url = 'https://p.3.cn/prices/mgets?skuIds=J_{}'.format(book_id)
            # Request the price endpoint (third layer)
            yield scrapy.Request(
                price_url,
                callback=self.parse_price,
                meta={'book': deepcopy(item)},
            )

        # Next-page link is relative; response.follow resolves it against
        # the current URL.
        next_url = response.xpath('//a[@class="pn-next"]/@href').extract_first()
        if next_url:
            # Bug fix: on the last page next_url is None and
            # response.follow(None) raises ValueError.
            yield response.follow(
                next_url,
                callback=self.parse_book,
                meta={'book': deepcopy(item)},
            )

    def parse_price(self, response):
        """Parse the price JSON API response and yield the completed item."""
        item = response.meta['book']
        # Body shape: [{"p": "<price>", ...}] -- take the first entry's price
        item['book_price'] = json.loads(response.body.decode())[0]['p']

        yield item














