# -*- coding: utf-8 -*-
import scrapy
from copy import deepcopy
import json #爬取json数据
import urllib.request
# import requests


class JdSpider(scrapy.Spider):
    """Crawl JD.com book data: categories -> paginated listings -> prices.

    Flow: the category page yields one request per sub-category; each
    listing page yields one price-API request per book plus a request
    for the next page; the price callback attaches the price and emits
    the finished item.
    """
    name = 'jd'
    # p.3.cn hosts the price API — it must be whitelisted here or those
    # requests are filtered out by the offsite middleware.
    allowed_domains = ['jd.com', 'p.3.cn']
    start_urls = ['https://book.jd.com/booksort.html']

    def parse(self, response):
        """Parse the category page: major categories (dt) and sub-categories (dd/em)."""
        dt_list = response.xpath('//div[@class="mc"]/dl/dt')
        for dt in dt_list:  # major-category entries
            item = {}
            item["superclass"] = dt.xpath('./a/text()').extract_first()
            em_list = dt.xpath('./following-sibling::dd[1]/em')
            for em in em_list:  # sub-category entries
                item["subclass"] = em.xpath('./a/text()').extract_first()
                item["sub_href"] = em.xpath('./a/@href').extract_first()
                if item["sub_href"] is not None:
                    # hrefs are protocol-relative ("//list.jd.com/...")
                    item["sub_href"] = "https:" + item["sub_href"]
                    yield scrapy.Request(
                        item["sub_href"],
                        callback=self.parse_book_list,
                        # deepcopy: the same dict is mutated on every loop turn,
                        # so each request needs its own snapshot
                        meta={"item": deepcopy(item)},
                    )

    def parse_book_list(self, response):
        """Parse a listing page: one item per book, then follow price API and pagination."""
        item = deepcopy(response.meta["item"])
        li_list = response.xpath('//div[@id="plist"]/ul/li')
        for li in li_list:
            # strip the long surrounding whitespace; extract_first() returns
            # None when the node is missing, so guard before .strip()
            name = li.xpath('.//div[@class="p-name"]/a/em/text()').extract_first()
            item["name"] = name.strip() if name is not None else None
            # the image URL may live in any of three attributes (lazy loading)
            item["img"] = (
                li.xpath('.//div[@class="p-img"]//img/@src').extract_first()
                or li.xpath('.//div[@class="p-img"]//img/@data-lazy-img').extract_first()
                or li.xpath('.//div[@class="p-img"]//img/@source-data-lazy-img').extract_first()
            )
            if item["img"] is not None:
                # resolve relative / protocol-relative URLs against the page URL
                item["img"] = response.urljoin(item["img"])
            # a book may have several authors, or none at all
            authors = li.xpath(
                './/div[@class="p-bookdetails"]//span[@class="author_type_1"]/a/text()'
            ).extract()
            item["author"] = [a.strip() for a in authors] or ["无作者"]
            item["press"] = li.xpath(
                './/div[@class="p-bookdetails"]/span[@class="p-bi-store"]/a/text()'
            ).extract_first()
            date = li.xpath(
                './/div[@class="p-bookdetails"]/span[@class="p-bi-date"]/text()'
            ).extract_first()
            item["publish_date"] = date.strip() if date is not None else None
            item["skuId"] = li.xpath('./div/@data-sku').extract_first()
            if item["skuId"] is not None:  # no SKU -> no price lookup possible
                yield scrapy.Request(
                    "https://p.3.cn/prices/mgets?skuIds=J_{}".format(item["skuId"]),
                    callback=self.parse_book_price,
                    meta={"item": deepcopy(item)},
                )

        # pagination: follow the "next page" link, keeping the category info
        next_url = response.xpath('//a[@class="pn-next"]/@href').extract_first()
        if next_url is not None:
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse_book_list,
                meta={"item": deepcopy(response.meta["item"])},
            )

    def parse_book_price(self, response):
        """Attach the price from the p.3.cn JSON API and emit the finished item."""
        item = response.meta["item"]
        # API returns a JSON list; take the first entry's "op" field as the price
        item["price"] = json.loads(response.body.decode())[0]["op"]
        # yield (not print) so pipelines / feed exporters receive the item
        yield item

"""
注意1：实现持久化、增量式的爬虫效果，要在settings中添加4行代码
注意2：调式代码的时候，每调试完成开始运行前，清空redis数据库表单
注意3：使用urljoin方法补充网址,如果被补充的网址是空字符串/None，补充完毕的结果是参照网址
注意4：search的关键词只能是英文+数字，找到js文件，会在source中显示，然后取network中寻找源文件的，preview可以查看内容，response一般都会很长
注意5：爬取京东商品评论的时候，突然发现爬不下来了，爬取后获得的HTML文本长度为0，状态码200——需要加上refer
"""

