import scrapy
import re
import json
import time


class JingDongScrapy(scrapy.Spider):
    """Spider that scrapes product comments from JD.com's JSONP comment API.

    The endpoint returns JSONP (``fetchJSON_comment98({...});``); ``parse``
    strips the wrapper, decodes the JSON payload and yields one item dict
    per comment so Scrapy pipelines/exporters can consume the data.
    """

    # Spider name used by `scrapy crawl jingDongScrapy`
    name = "jingDongScrapy"

    # Throttle politely between requests. A blocking time.sleep() in the
    # callback would freeze the whole Twisted reactor; DOWNLOAD_DELAY is
    # the Scrapy-native way to pace requests.
    custom_settings = {
        "DOWNLOAD_DELAY": 2,
    }

    # Custom request headers so the crawler looks more like a real browser.
    # NOTE: HTTP/2 pseudo-headers (":authority", ":method", ":path",
    # ":scheme") must NOT be sent as ordinary headers — they are produced
    # by the protocol layer, and sending them explicitly makes servers
    # reject the request. They are therefore omitted here.
    heads = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        # Referer must match the product whose comments we request below.
        "referer": "https://item.jd.com/100014220574.html",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
    }

    # URL template for the comment API; only the page number (%d) varies.
    url = "https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100014220574&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&fold=1"

    # Number of comment pages to fetch (generalized from the previous
    # hard-coded literal 10).
    page_count = 10

    def start_requests(self):
        """Entry point: generate one request per comment page."""
        for page in range(self.page_count):
            yield scrapy.Request(
                url=self.url % page,
                callback=self.parse,
                headers=self.heads,
            )

    def parse(self, response):
        """Strip the JSONP wrapper and yield one dict per comment.

        Yields dicts with keys ``id``, ``nickname``, ``content`` (newlines
        flattened to tabs) and ``score``.
        """
        # The response body looks like `fetchJSON_comment98({...});` —
        # capture everything between the outermost parentheses (re.S so
        # the payload may span multiple lines).
        match = re.search(r"\((.*)\)", response.text, re.S)
        if match is None:
            # Best-effort: log and skip pages without a JSONP payload
            # instead of crashing on an IndexError.
            self.logger.warning("No JSONP payload found in %s", response.url)
            return
        data = json.loads(match.group(1))
        for comment in data.get("comments", []):
            yield {
                # Note: "comment_id" local naming avoids shadowing builtins,
                # but the item key stays "id" as the API provides it.
                "id": comment["id"],
                "nickname": comment["nickname"],
                "content": comment["content"].replace("\n", "\t"),
                "score": comment["score"],
            }
