import json
import re

import jsonpath
import scrapy


class JdSpider(scrapy.Spider):
    """Crawl a JD search-result page for one product, then paginate its comments.

    Yields two kinds of dict items, distinguished by the ``type`` field:

    * ``"shangping"`` -- product summary scraped from the search page
      (price, title, evaluation count, shop name).
    * ``"pnglun"``    -- one user comment from the comment JSON API
      (nickname, referenced product, content, creation time).
    """

    name = "jd"
    allowed_domains = ["jd.com"]
    start_urls = ["https://search.jd.com/Search?keyword=手机&page=1"]

    # Last comment page to request; the comment API is paginated via the
    # ``page`` query parameter and pagination stops once this index is reached.
    MAX_COMMENT_PAGE = 50

    def parse(self, response):
        """Parse the search page: yield the first product's summary and
        schedule a request for its comment feed.

        :param response: Scrapy response for a ``search.jd.com`` results page.
        """
        # NOTE(review): only the first <li> is scraped (li[1]). The loop that
        # was commented out in the original suggests iterating over every
        # result was intended -- confirm before widening the selector.
        item = response.xpath('//div[@class="ml-wrap"]//ul[@class="gl-warp clearfix"]/li[1]')

        # Renamed from ``id``: that name shadows the builtin.
        sku = item.xpath("./@data-sku").extract_first()  # product SKU / id
        price = item.xpath('.//div[@class="p-price"]//i/text()').extract_first()  # price
        title = item.xpath('.//div[@class="p-name p-name-type-2"]//em/text()').extract_first()  # title
        evaluate = item.xpath('.//div[@class="p-commit"]/strong/a/text()').extract_first()  # evaluation count
        shop = item.xpath('.//div[@class="p-shop"]//a/text()').extract_first()  # shop name
        print("商品ID----------->", sku)
        print("价格----------->", price)
        print("标题----------->", title)
        print("评价数量----------->", evaluate)
        print("店铺名----------->", shop)

        yield {
            "type": "shangping",
            "price": price,
            "title": title,
            "evaluate": evaluate,
            "name": shop,
        }

        # Without a SKU the comment URL would read "...productId=None..." and
        # fetch garbage -- skip the follow-up request instead.
        if sku:
            url = (
                "https://club.jd.com/comment/productPageComments.action"
                "?productId=%s&score=0&sortType=5&page=0&pageSize=10" % sku
            )
            print("详情评论url--------->>>>", url)
            yield scrapy.Request(url=url, callback=self.pinglun)

    def pinglun(self, response):
        """Parse one page of the comment API and follow to the next page.

        :param response: JSON response from ``club.jd.com``'s
            ``productPageComments.action`` endpoint.
        """
        send_url = response.request.url
        # Anchor on the query separator and require at least one digit, so
        # ``pageSize`` can never be matched and int() never sees "".
        match = re.search(r"[?&]page=(\d+)", send_url)
        if match is None:
            return  # URL shape changed upstream; nothing to paginate
        page = int(match.group(1))

        # "comments" may be absent or null when JD throttles the endpoint;
        # treat either case as an empty page instead of raising KeyError.
        comments = json.loads(response.text).get("comments") or []
        for comment in comments:
            nickname = comment["nickname"]  # commenter's user name
            reference_name = comment["referenceName"]  # product the comment refers to
            content = comment["content"]  # comment body
            created_at = comment["creationTime"]  # comment timestamp
            print("nickname-------->>>>>", nickname)
            print("referenceName-------->>>>>", reference_name)
            print("content-------->>>>>", content)
            print("time-------->>>>>", created_at)

            yield {
                "type": "pnglun",
                "nickname": nickname,
                "referenceName": reference_name,
                "content": content,
                "time": created_at,
            }

        if page < self.MAX_COMMENT_PAGE:
            # Replace only the first occurrence so "pageSize=10" stays intact.
            url = send_url.replace("page=%d" % page, "page=%d" % (page + 1), 1)
            print("发送请求的url--------->>>>", url)
            yield scrapy.Request(url=url, callback=self.pinglun, dont_filter=True)
            print("爬取下一页评论")
