import scrapy, re, json
from ..items import CommentItem


# Spider that scrapes product review comments from JD.com's JSONP comment API.
class JDSpider(scrapy.Spider):
    # Name used by `scrapy crawl JDSpider`.
    name = "JDSpider"
    # Spoof a desktop-browser User-Agent so JD does not reject the request.
    header = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36"
    }

    def start_requests(self):
        """Yield requests for the first 50 comment pages of one product.

        Overrides scrapy.Spider.start_requests; the product id (100016034388)
        is baked into the URL template.
        """
        url_format = (
            "https://club.jd.com/comment/productPageComments.action"
            "?callback=fetchJSON_comment98&productId=100016034388&score=0"
            "&sortType=5&page={}&pageSize=10&isShadowSku=0&rid=0&fold=1"
        )
        # JD's comment pages are 0-indexed: pages 0..49.
        for page in range(50):
            yield scrapy.Request(
                url=url_format.format(page),
                headers=self.header,
                callback=self.parse,
            )

    def parse(self, response):
        """Unwrap the JSONP response and yield one CommentItem per comment.

        The body looks like ``fetchJSON_comment98({...});`` — strip the
        callback wrapper, parse the JSON payload, and emit items for the
        pipeline.
        """
        # re.S so `.` matches newlines inside the JSON payload; raw string
        # avoids the invalid-escape warning the original pattern produced.
        res = re.match(r"fetchJSON_comment98\((.*)\);", response.text, re.S)
        if res is None:
            # Anti-crawler page or unexpected body — nothing to parse.
            self.logger.warning("Unexpected response format from %s", response.url)
            return
        comment_dict = json.loads(res.group(1))
        # .get with a default keeps an empty/absent "comments" key from crashing.
        for comment in comment_dict.get("comments", []):
            commentItem = CommentItem()
            commentItem["id"] = comment["id"]
            commentItem["nickname"] = comment["nickname"]
            # BUG FIX: the original assigned the comment text into
            # commentItem["id"], clobbering the id set above and leaving
            # the "content" field unset.
            commentItem["content"] = comment["content"]
            commentItem["productColor"] = comment["productColor"]
            commentItem["creationTime"] = comment["creationTime"]
            commentItem["score"] = comment["score"]
            # Hand the item to the pipeline for further processing.
            yield commentItem
