import scrapy
import re
import json
from ..items import CommentItem
from fake_headers import Headers


# pip install fake-headers


class JDCommentsSpider(scrapy.Spider):
    """Crawl comments for a JD.com product via the JSONP comment endpoint.

    One request is issued per comment page; the ``fetchJSON_comment98(...)``
    JSONP wrapper is stripped from each response and every comment is
    emitted as a ``CommentItem`` for the pipeline to persist.
    """

    name = 'JDCommentsSpider'
    allowed_domains = ['jd.com']

    # Target product and number of comment pages to fetch.  Override these
    # (e.g. via subclassing or ``-a`` spider arguments) to reuse the spider
    # for a different product; defaults preserve the original behavior.
    product_id = '100014352539'
    max_pages = 80

    # Static browser-like request headers so the endpoint serves data.
    # NOTE(review): the Cookie value is session-bound and will eventually
    # expire; requests may start failing once it does — refresh as needed.
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
        'Connection': 'keep-alive',
        'Cookie': 'shshshfpb=f3W7aXOimfXUdxcnPVcp8lg%3D%3D; shshshfpa=dd3e618e-7824-e463-e953-735fb2491a30-1572581065; __jdu=15945342777701271159704; user-key=0f674e76-283e-4314-878d-e20485b0bdf9; __jdv=76161171|direct|-|none|-|1639622083019; areaId=14; PCSYCityID=CN_340000_340100_0; ipLoc-djd=14-1116-3431-57939; jwotest_product=99; __jda=122270672.15945342777701271159704.1594534278.1639635327.1639703524.27; __jdc=122270672; token=d580f8297475335873c53a0ca9a442ab,2,910946; __tk=WkaFSLmLVcaCiAeFTLfKVkuCVUvJWceAiDl3iLl2Tce,2,910946; shshshfp=e39a2aab2d64e3d87934802270acb44f; ip_cityCode=1116; shshshsID=92d1c2eec0e0061568b20502af8aa0fb_4_1639703565703; __jdb=122270672.5.15945342777701271159704|27.1639703524; JSESSIONID=4C9F7710F36CC25E02E63C1F6D8706AD.s1; 3AB9D23F7A4B3C9B=PFFHAXCONWJKWHSOLBIBQWHWZCSNLF65GANEXSUBWYJQK3UFYOGIH3BMJJ56DKIL2GTESYLRTXH2CLWABB7Z37WOSY',
        'Host': 'club.jd.com',
        'Referer': 'https://item.jd.com/',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="96", "Google Chrome";v="96"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'Sec-Fetch-Dest': 'script',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Site': 'same-site',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
    }

    def start_requests(self):
        """Manually issue one request per comment page (0 .. max_pages-1)."""
        url_format = (
            'https://club.jd.com/comment/productPageComments.action'
            '?callback=fetchJSON_comment98&productId={}&score=0&sortType=5'
            '&page={}&pageSize=10&isShadowSku=0&fold=1'
        )
        for page in range(self.max_pages):
            # callback: the method that will parse this page's response.
            yield scrapy.Request(
                url=url_format.format(self.product_id, page),
                headers=self.headers,
                callback=self.parseComment,
            )

    def parseComment(self, response):
        """Strip the JSONP wrapper and yield one CommentItem per comment.

        Responses that do not match the expected ``fetchJSON_comment98(...)``
        wrapper (e.g. anti-bot pages or empty bodies) are logged and skipped
        instead of crashing with AttributeError on ``.group()``.
        """
        # re.DOTALL so a payload containing newlines still matches.
        match = re.match(r"fetchJSON_comment98\((.*)\);", response.text, re.DOTALL)
        if match is None:
            self.logger.warning('Unexpected comment response format at %s', response.url)
            return

        comment_dict = json.loads(match.group(1))
        # 'comments' may be absent or null on the last pages; treat both as empty.
        for comment in comment_dict.get('comments') or []:
            # Build the Item from the response; .get avoids KeyError on
            # comments that lack an optional field.
            commentItem = CommentItem()
            commentItem['id'] = comment.get('id')
            commentItem['content'] = comment.get('content')
            commentItem['creationTime'] = comment.get('creationTime')
            commentItem['score'] = comment.get('score')
            commentItem['nickname'] = comment.get('nickname')
            # Hand the populated Item to the pipeline for further processing.
            yield commentItem
