# -*- coding: utf-8 -*-
import scrapy
from copy import deepcopy
import json


class BkSpiderSpider(scrapy.Spider):
    """Crawl JD (jd.com) book categories and book listing pages.

    Flow:
        parse            -- category index page: big category (dt) -> sub category (em)
        parse_book_list  -- one listing page per sub category, follows pagination
        parse_book_price -- price API callback (request currently disabled)

    Yields dict items with keys: top, sub, url, img, name, author, pub,
    pub_date, sku, shop (and book_price when the price request is enabled).
    """
    name = 'jd_book'
    allowed_domains = ["jd.com", "p.3.cn"]
    start_urls = ['https://book.jd.com/booksort.html']

    # Only these top-level categories are crawled; everything else is skipped.
    TOP_CATEGORIES = ('文学', '童书', '科普读物', '科学与自然')

    @staticmethod
    def _https(url):
        """Prefix a protocol-relative URL with 'https:'; pass None through."""
        return None if url is None else "https:" + url

    @staticmethod
    def _clean(text):
        """Strip surrounding whitespace and newlines; pass None through.

        Guards against AttributeError when an xpath extract_first() returns None.
        """
        return None if text is None else text.strip().replace("\n", "")

    def parse(self, response):
        """Parse the category index page and schedule one request per sub category."""
        # Big categories (dt elements)
        dt_list = response.xpath("//div[@class='mc']/dl/dt")
        for dt in dt_list:
            item = {}
            top = dt.xpath("./a/text()").extract_first()
            if top in self.TOP_CATEGORIES:
                item["top"] = top
                # Sub categories live in the dd immediately following this dt.
                em_list = dt.xpath("./following-sibling::*[1]/em")
                for em in em_list:
                    item["sub"] = em.xpath("./a/text()").extract_first()
                    sub_url = em.xpath("./a/@href").extract_first()
                    if sub_url is not None:
                        yield scrapy.Request(
                            self._https(sub_url),
                            callback=self.parse_book_list,
                            # deepcopy: requests are processed asynchronously and
                            # must not share the mutable item dict.
                            meta={"item": deepcopy(item)}
                        )

    def parse_book_list(self, response):
        """Parse one listing page: extract per-book fields, then follow pagination."""
        item = deepcopy(response.meta["item"])
        li_list = response.xpath("//div[@id='plist']/ul/li")
        for li in li_list:
            item["url"] = self._https(
                li.xpath(".//div[@class='p-img']/a/@href").extract_first())
            # Lazily loaded images leave @src empty; fall back to @data-lazy-img.
            img = li.xpath(".//div[@class='p-img']//img/@src").extract_first()
            if not img:
                img = li.xpath(
                    ".//div[@class='p-img']//img/@data-lazy-img").extract_first()
            item["img"] = self._https(img)
            item["name"] = self._clean(
                li.xpath(".//div[@class='p-name']/a/em/text()").extract_first())
            item["author"] = "/".join(li.xpath(
                ".//div[@class='p-bookdetails']/span[@class='p-bi-name']//a/text()"
            ).extract())
            item["pub"] = li.xpath(
                ".//div[@class='p-bookdetails']/span[@class='p-bi-store']/a/text()"
            ).extract_first()
            item["pub_date"] = self._clean(
                li.xpath(".//span[@class='p-bi-date']/text()").extract_first())
            item["sku"] = li.xpath("./div/@data-sku").extract_first()
            # Self-operated listings put the shop name in <a>; third-party shops
            # use a plain <span>.
            shop = li.xpath(".//div[@class='p-shopnum']/a/text()").extract_first()
            if shop is None:
                shop = li.xpath(
                    ".//div[@class='p-shopnum']/span/text()").extract_first()
            item["shop"] = self._clean(shop)

            # Yield a copy: the same dict is mutated on the next iteration and
            # downstream consumers must not see those later mutations.
            yield deepcopy(item)
            # price_url = "https://p.3.cn/prices/mgets?skuIds=J_{}".format(item["sku"])
            # yield scrapy.Request(
            #     price_url,
            #     callback=self.parse_book_price,
            #     meta={"item": deepcopy(item)}
            # )

        # Listing pagination: follow the "next page" link when present.
        next_url_temp = response.xpath("//a[@class='pn-next']/@href").extract_first()
        if next_url_temp is not None:
            yield scrapy.Request(
                "https://list.jd.com" + next_url_temp,
                callback=self.parse_book_list,
                meta={"item": response.meta["item"]}
            )

    def parse_book_price(self, response):
        """Parse the p.3.cn price API response and attach the price to the item.

        The API returns a JSON list; "op" is JD's original-price field.  An
        empty list (unknown sku) yields book_price=None rather than crashing.
        """
        item = response.meta["item"]
        price_data = json.loads(response.body.decode())
        item["book_price"] = price_data[0]["op"] if price_data else None
        yield item
