import scrapy
import re
import json
from ..items import JDCommentItem
import redis  # pip install redis
from fake_headers import Headers  # pip install fake-headers


class JDCommentsSpider(scrapy.Spider):
    """Crawl JD.com product comments from the paginated JSONP endpoint
    ``productPageComments.action``, de-duplicating comments across runs
    via a Redis set keyed by comment id."""

    name = 'JDCommentsSpider'
    allowed_domains = ['jd.com']

    # Per-spider settings; they apply only to this spider.
    custom_settings = {
        "DOWNLOAD_DELAY": 3,
        "ITEM_PIPELINES": {
            'ScrapyDemo.pipelines.JDCommentsPipeline': 300,
        }
    }

    def __init__(self):
        super(JDCommentsSpider, self).__init__()
        # URL template for one page of comments (JSON wrapped in a JSONP callback).
        self.url_format = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100034710036&score=0&sortType=5&page={page}&pageSize=10&isShadowSku=0&rid=0&fold=1'
        self.page = 1
        # Redis set used to de-duplicate comment ids across runs.
        self.redis_key = "JDCommentsSpider"
        self.redis_conn = redis.Redis(host='master', port=6379, db=0)
        # Fake request-header generator (Windows + Chrome profile).
        self.header = Headers(
            os='win',
            browser='chrome',
            headers=True
        )

    def start_requests(self):
        """Manually kick off the crawl with the first comment page."""
        url = self.url_format.format(page=self.page)
        yield scrapy.Request(url=url, callback=self.my_parse, headers=self.header.generate())

    def my_parse(self, response):
        """Strip the JSONP wrapper, yield one ``JDCommentItem`` per unseen
        comment, and — on the first page — schedule requests for every
        remaining page reported by ``maxPage``."""
        self.logger.debug(response.text)
        # Raw string: '\(' inside a normal string literal is a
        # DeprecationWarning (invalid escape) in Python 3.
        regex = r'(fetchJSON_comment98\()(.*)(\);)'
        grps = re.match(regex, response.text)
        # Guard: re.match returns None when the body is not the expected
        # JSONP wrapper (e.g. an anti-bot page) — previously this crashed
        # with AttributeError on grps.lastindex.
        if grps is None or grps.lastindex != 3:
            return

        # Group 2 is the bare JSON payload between the callback parentheses.
        comments_dict: dict = json.loads(grps.group(2))
        # Missing or null 'comments' key -> nothing to emit for this page.
        for comment in comments_dict.get('comments') or []:
            comment_id = comment['id']  # renamed: avoid shadowing builtin id()
            if self.redis_conn.sismember(self.redis_key, comment_id):
                continue  # already scraped in a previous run
            # Record the id in Redis so reruns skip this comment.
            self.redis_conn.sadd(self.redis_key, comment_id)
            # Hand the built Item to the pipeline for processing.
            yield JDCommentItem(
                id=comment_id,
                nickname=comment['nickname'],
                score=comment['score'],
                content=comment['content'].replace("\n", " "),
                productColor=comment['productColor'],
                creationTime=comment['creationTime'],
            )

        if self.page == 1:
            # Fan out to the remaining comment pages discovered on page 1.
            for page in range(2, comments_dict['maxPage'] + 1):
                self.page = page
                url = self.url_format.format(page=page)
                # Fix: follow-up requests previously went out WITHOUT the
                # fake headers, defeating the header rotation; send them
                # consistently with start_requests.
                yield scrapy.Request(url=url, callback=self.my_parse,
                                     headers=self.header.generate())
