import scrapy
from fake_headers import Headers
import re
import json
from ..items import JDCommentItem


class JDSpider(scrapy.Spider):
    """Spider that scrapes product comments from JD.com's JSONP comment API.

    The endpoint returns ``fetchJSON_comment98({...});``; :meth:`parse`
    strips the callback wrapper, decodes the JSON payload, and yields one
    :class:`JDCommentItem` per comment.
    """
    name = 'JDSpider'
    allowed_domains = ['jd.com']
    # start_urls = [
    #     'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100026667858&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1']

    # Disguise the request as a real browser by generating realistic headers.
    # pip install fake-headers
    header = Headers(
        browser="chrome",  # Generate only Chrome UA
        os="win",  # Generate only Windows platform
        headers=True  # generate misc headers
    )

    # Issue the initial request manually so fresh fake headers are attached.
    def start_requests(self):
        """Request page 0 of the comment API for a fixed product id."""
        url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100026667858&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1'
        yield scrapy.Request(url=url, headers=self.header.generate())

    def parse(self, response):
        """Extract comment records from the JSONP response.

        Yields:
            JDCommentItem: one item per comment with ``id``, ``nickname``,
            ``content`` and ``creationTime`` copied from the API payload.
        """
        # Pull the JSON object out of the fetchJSON_comment98({...}); wrapper.
        # re.S lets '.*' span newlines in case the body is pretty-printed;
        # guard against blocked/error responses that don't match the pattern
        # (the previous .group() call would raise AttributeError on None).
        match = re.match(r'fetchJSON_comment98\((.*)\);', response.text, re.S)
        if match is None:
            self.logger.warning('Unexpected comment response format: %s', response.url)
            return
        json_obj: dict = json.loads(match.group(1))
        # 'comments' may be absent or null on the last page; iterate safely.
        for comment in json_obj.get('comments') or []:
            # Build the Item object (avoid shadowing builtins like `id`).
            jd_comment_item = JDCommentItem()
            jd_comment_item['id'] = comment['id']
            jd_comment_item['nickname'] = comment['nickname']
            jd_comment_item['content'] = comment['content']
            jd_comment_item['creationTime'] = comment['creationTime']

            # Hand the populated item to the pipeline for further processing.
            yield jd_comment_item
