import json

import scrapy
import re


class Test1Spider(scrapy.Spider):
    """Scrape product comments from JD.com's JSONP comment endpoint.

    The endpoint returns ``fetchJSON_comment98({...});`` — a JSON object
    wrapped in a JSONP callback — so ``parse`` strips the wrapper before
    decoding.  One item is yielded per comment.
    """

    name = 'test1'
    allowed_domains = ['jd.com']
    # start_urls = ['https://item.jd.com/25116144965.html']

    # Product and paging parameters (class attributes so subclasses or
    # `-a`-style overrides can retarget the spider without code changes).
    product_id = '100015470344'
    max_pages = 10  # pages 0..9, 10 comments per page

    # NOTE: the previous version also set HTTP/2 pseudo-headers
    # (":authority", ":method", ":path", ":scheme").  Those are managed by
    # the HTTP stack and must never appear in a request-header dict; the
    # hard-coded ":path" even pointed at a different product id.  They have
    # been removed.
    header = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "referer": "https://item.jd.com/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36"
    }

    def start_requests(self):
        """Yield one request per comment page for ``product_id``."""
        url_format = (
            'https://club.jd.com/comment/productPageComments.action'
            '?callback=fetchJSON_comment98&productId={pid}&score=0'
            '&sortType=5&page={page}&pageSize=10&isShadowSku=0&fold=1'
        )
        for page in range(self.max_pages):
            new_url = url_format.format(pid=self.product_id, page=page)
            yield scrapy.Request(url=new_url, headers=self.header, callback=self.parse)

    def parse(self, response):
        """Strip the JSONP wrapper, decode the payload, and yield comments.

        Yields one dict per comment with keys ``id``, ``nickname``,
        ``score`` and ``content`` (newlines removed from content).
        Logs and returns early if the response is not the expected
        ``callback({...});`` shape, instead of crashing on ``None.group``.
        """
        text = response.text
        # The payload is everything between the first '{' and the last '}'.
        # This is more robust than a greedy regex against nested braces or
        # stray parentheses inside comment text.
        start = text.find('{')
        end = text.rfind('}')
        if start == -1 or end == -1 or end < start:
            self.logger.warning("Unexpected response shape from %s", response.url)
            return

        payload = json.loads(text[start:end + 1])
        # .get with a default keeps an error page (no "comments" key) from
        # raising KeyError mid-crawl.
        for comment in payload.get("comments", []):
            comment_id = comment["id"]  # avoid shadowing builtin `id`
            nickname = comment["nickname"]
            score = comment["score"]
            content = comment["content"].replace("\n", "")
            print(comment_id, nickname, score, content)
            # Yield a structured item so Scrapy pipelines/feed exports work.
            yield {
                "id": comment_id,
                "nickname": nickname,
                "score": score,
                "content": content,
            }