import scrapy
from scrapy.http import Request
import re
import json
from ..items import JDItem


class JDSpider(scrapy.Spider):
    """Spider that crawls product comments from JD.com's JSONP comment API.

    The endpoint returns a JSONP payload wrapped in
    ``fetchJSON_comment98(...);``; the wrapper is stripped, the inner JSON is
    parsed, and one ``JDItem`` is yielded per comment.
    """

    name = 'jd'

    # Plain browser UA so the endpoint does not reject the request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
    }
    # Number of comment pages to request (the API's `page` param is 0-indexed).
    pages = 10

    # Compiled once instead of per-response; raw string avoids the invalid
    # `\(` escape warning. re.S lets `.*` span newlines if the payload has any.
    _JSONP_RE = re.compile(r'^fetchJSON_comment98\((.*)\);', re.S)

    def start_requests(self):
        """Generate one request per comment page for the fixed product id."""
        url_template = (
            'https://club.jd.com/comment/productPageComments.action'
            '?callback=fetchJSON_comment98&productId=100009464821&score=0'
            '&sortType=5&page={}&pageSize=10&isShadowSku=0&rid=0&fold=1'
        )
        for page in range(self.pages):
            yield Request(url_template.format(page), headers=self.headers,
                          callback=self.my_parse)

    def my_parse(self, response: scrapy.http.Response):
        """Parse one JSONP comment page and yield a ``JDItem`` per comment.

        Skips the page (with a warning) when the JSONP wrapper is absent,
        e.g. when the request was blocked or the body is empty.
        """
        match_res = self._JSONP_RE.match(response.text)
        if match_res is None:
            # Unexpected payload: don't crash the crawl with AttributeError.
            self.logger.warning('JSONP wrapper not found on %s', response.url)
            return
        jd_json = json.loads(match_res.group(1))

        # NOTE: writing items to a file here is intentionally avoided;
        # persistence belongs in an item pipeline, not the spider.

        for comment in jd_json.get('comments', []):
            # Build a fresh item per comment: reusing one mutable item across
            # yields would let later iterations clobber earlier items before
            # the pipeline consumes them.
            jd_item = JDItem()
            # The original assignments had trailing commas, which stored every
            # value as a 1-tuple; they are plain scalars now.
            jd_item['id'] = comment['id']
            jd_item['content'] = comment['content']
            jd_item['creationTime'] = comment['creationTime']
            jd_item['score'] = comment['score']
            # Some comments omit color/size; default to '' instead of raising
            # KeyError and losing the rest of the page.
            jd_item['productColor'] = comment.get('productColor', '')
            jd_item['productSize'] = comment.get('productSize', '')
            yield jd_item
