import scrapy
import re, json
from JDScrapy.items import JDCommentItem


# 定义一个爬虫类，继承自scrapy框架的Spider类
class JDSpider(scrapy.Spider):
    """Spider that crawls product comments from JD.com's JSONP comment endpoint.

    Requests the first 10 pages of comments for a single hard-coded product
    id, strips the ``fetchJSON_comment98(...)`` JSONP wrapper from each
    response, and yields one ``JDCommentItem`` per comment.
    """

    # Spider name; must be unique within the Scrapy project.
    name = "JDSpider"

    # Browser-like headers so the comment endpoint serves the JSONP payload.
    header = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Host": "club.jd.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"
    }

    # Compiled once instead of re-parsed per response. Raw string avoids the
    # invalid escape sequences (\( and \)) of the previous non-raw pattern;
    # re.S lets the JSON body span multiple lines if the server ever wraps it.
    _JSONP_RE = re.compile(r"fetchJSON_comment98\((.*)\);", re.S)

    def start_requests(self):
        """Issue requests for the first 10 pages of comments for one product.

        Yields:
            scrapy.Request: one request per page, routed to :meth:`parse`.
        """
        url_format = "https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100016034388&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&fold=1"
        # Crawl 10 pages of comments (page parameter is 0-based).
        for page in range(10):
            # headers: custom request headers; callback: parse handler.
            yield scrapy.Request(url=url_format.format(page),
                                 headers=self.header,
                                 callback=self.parse)

    def parse(self, response):
        """Parse one JSONP comment page and yield an item per comment.

        Args:
            response: the Scrapy response whose text body is the JSONP payload.

        Yields:
            JDCommentItem: populated with nickname, score, content, productColor.
        """
        match = self._JSONP_RE.search(response.text)
        if match is None:
            # The endpoint sometimes returns an empty or anti-bot body;
            # previously this crashed with AttributeError on .group().
            self.logger.warning("Unexpected comment response at %s", response.url)
            return
        payload = json.loads(match.group(1))
        for comment in payload.get("comments", []):
            jd_comment_item = JDCommentItem()
            jd_comment_item["nickname"] = comment["nickname"]
            jd_comment_item["score"] = comment["score"]
            jd_comment_item["content"] = comment["content"]
            # productColor is absent for some SKUs; default to "" instead of
            # raising KeyError and losing the whole page.
            jd_comment_item["productColor"] = comment.get("productColor", "")
            # Hand the populated item to the pipeline for further processing.
            yield jd_comment_item
