import scrapy
import re
import json
from ..items import JdscrapyItem


class JDCommentSpider(scrapy.Spider):
    """Crawl JD.com product comments via the JSONP comment endpoint.

    The endpoint wraps its JSON payload in a ``fetchJSON_comment98(...)``
    callback; :meth:`parse` strips that wrapper and yields one
    ``JdscrapyItem`` per comment for the pipeline to process.
    """

    name = 'JDSpider'

    # Custom request header: present ourselves as a desktop Chrome browser,
    # otherwise JD may reject or throttle the request.
    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'
    }

    # Raw string: '\(' in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning on modern Python). Compiled once at class level since
    # it is applied to every response. Group 2 is the JSON payload.
    _JSONP_RE = re.compile(r"(fetchJSON_comment98\()(.*)(\);)", re.DOTALL)

    def start_requests(self):
        """Issue requests for the first 10 pages of comments.

        Called once by Scrapy at startup; yields one :class:`scrapy.Request`
        per page, all handled by :meth:`parse`.
        """
        url_format = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100023380672&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&fold=1'
        # Crawl the first 10 comment pages (page is 0-based).
        for page in range(10):
            yield scrapy.Request(
                url=url_format.format(page),
                headers=self.header,
                callback=self.parse,
            )

    def parse(self, response):
        """Strip the JSONP wrapper and yield one item per comment.

        Skips the response gracefully (with a log message) when the body is
        not the expected JSONP payload — e.g. an error page, an empty body,
        or a rate-limit response — instead of raising ``AttributeError`` on
        a failed match.
        """
        match = self._JSONP_RE.match(response.text)
        if match is None:
            # Body did not look like fetchJSON_comment98(...); — likely
            # throttled or an error page. Skip rather than crash the spider.
            self.logger.warning('Unexpected response body at %s, skipping', response.url)
            return

        try:
            payload = json.loads(match.group(2))
        except json.JSONDecodeError:
            self.logger.warning('Malformed JSON payload at %s, skipping', response.url)
            return

        # 'comments' can be absent on throttled/partial responses.
        for comment in payload.get('comments', []):
            jd_item = JdscrapyItem()
            jd_item['id'] = comment['id']
            jd_item['content'] = comment['content']
            jd_item['nickname'] = comment['nickname']
            jd_item['creationTime'] = comment['creationTime']
            jd_item['score'] = comment['score']
            # Hand the populated item to the pipeline for further processing.
            yield jd_item
