import scrapy
import re
import json
from ..items import CommentItem


class JDCommentsSpider(scrapy.Spider):
    """Spider that crawls product comment pages from JD.com's review API.

    The endpoint returns JSONP of the form ``fetchJSON_comment98({...});``;
    the JSON payload is unwrapped with a regex and parsed, then one
    ``CommentItem`` is yielded per comment for pipeline processing.
    """

    name = 'JDCommentsSpider'
    allowed_domains = ['jd.com']

    # Desktop-Chrome User-Agent: JD rejects requests without a browser UA.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
    }

    # Pre-compiled JSONP unwrapper, hoisted out of the per-response path.
    # re.DOTALL lets the captured JSON span newlines; the original pattern
    # (no DOTALL) would fail to match a multi-line payload.
    _jsonp_re = re.compile(r"fetchJSON_comment98\((.*)\);", re.DOTALL)

    def start_requests(self):
        """Yield requests for the first 200 comment pages of one product.

        ``headers`` carries the browser UA; ``callback`` routes each
        response to :meth:`parseComment`.
        """
        url_format = (
            "https://club.jd.com/comment/productPageComments.action"
            "?callback=fetchJSON_comment98&productId=100014352539"
            "&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&fold=1"
        )
        for page in range(200):
            yield scrapy.Request(url=url_format.format(page),
                                 headers=self.headers,
                                 callback=self.parseComment)

    def parseComment(self, response):
        """Unwrap the JSONP response and yield one CommentItem per comment.

        Logs a warning and skips the page when the response is blocked or
        malformed, instead of crashing. The original
        ``re.match(...).group(2)`` raised ``AttributeError`` whenever JD
        served an anti-bot/empty page, killing the callback.
        """
        match = self._jsonp_re.search(response.text)
        if match is None:
            # Likely an anti-bot page or empty body — nothing to parse.
            self.logger.warning("JSONP wrapper not found in %s", response.url)
            return

        try:
            comment_dict = json.loads(match.group(1))
        except json.JSONDecodeError:
            self.logger.warning("Malformed JSON payload from %s", response.url)
            return

        # 'comments' may be absent or null on the last/blocked pages.
        for comment in comment_dict.get('comments') or []:
            # Build an Item from the parsed response data.
            commentItem = CommentItem()
            commentItem['id'] = comment['id']
            commentItem['content'] = comment['content']
            commentItem['creationTime'] = comment['creationTime']
            commentItem['score'] = comment['score']
            commentItem['nickname'] = comment['nickname']

            # Hand the built Item to the pipeline for further processing.
            yield commentItem
