import json
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider

from crawl_jd.items import CrawlJdItem

class GoSpider(RedisCrawlSpider):
    """Distributed crawler for JD.com product reviews.

    Start URLs are pushed into the Redis list ``wdrcrawler:start_urls``
    (scrapy-redis).  The crawl rule follows product-detail pages
    (``item.jd.com/<sku>.html``); each product page triggers a request to
    the JSONP comment endpoint, whose first response reports ``maxPage``.
    Every comment page is then requested and parsed into one
    ``CrawlJdItem`` per comment.
    """

    name = 'go'
    redis_key = 'wdrcrawler:start_urls'
    allowed_domains = ['jd.com']
    rules = (
        # Dots escaped so '.' only matches a literal dot (the original
        # unescaped pattern matched any character in those positions).
        Rule(LinkExtractor(allow=(r'item\.jd\.com/\d+\.html',)),
             callback='parse_item', follow=True),
    )

    # Browser-fingerprint headers the comment endpoint expects; sent with
    # every comment request.
    _COMMENT_HEADERS = {
        'referer': 'https://item.jd.com/',
        'sec-fetch-dest': 'script',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }

    # Session-bound cookie, only attached to the first comment request
    # (matching the original request pattern).  NOTE(review): this is a
    # captured login session and will expire — refresh before deploying.
    _SESSION_COOKIE = '__jdu=112509052; shshshfpb=xHMHQiXN5kRmjMCT8HbWbLw%3D%3D; shshshfpa=db02b396-316e-9ea0-404f-d6fd2ea9a085-1601890041; user-key=57b48fc1-e12d-438f-a328-8811f9990c84; _pst=jd_7a5adce078de5; unick=jd_7a5adce078de5; pin=jd_7a5adce078de5; _tp=SHyA9QLzr6%2Bid1epECOTFtyblguVdTNwN8h5u%2BeXf0Q%3D; ipLocation=%u5c71%u4e1c; PCSYCityID=CN_370000_370800_370811; areaId=13; ipLoc-djd=13-2900-3533-0; unpl=V2_ZzNtbUUEExB3AUNQL05dAGILFVkSXkUdcwgUAX0eXlYzUEZYclRCFnQURldnGFsUZAMZXUdcRxNFCEdkeB5fA2AFEFlBZxBFLV0CFi9JH1c%2bbRJcRF9BEnwOT1FyHmw1ZAMiXUNnQxV2DEdceh1fNVcEIm1yV0AXdgBCZHopXTUlV05UQVNAFnxFRlR4HV0NZgcRbUNnQA%3d%3d; mt_xid=V2_52007VwMSUVlZWlsbSikOV2JURgJcXU4IH05LQAA0ABtODV5UCgNBTlkCYlBGUQ8IUQkvShhfBHsDEU5cX0NZH0IZWA5lByJQbVhiWRdPHFsCYAAXUW1YW1sf; __jdv=76161171|baidu|-|organic|not set|1607301041470; jwotest_product=99; cn=0; shshshfp=341babdf387033f8aa4a64c841cd16c8; __jda=122270672.112509052.1605270761.1607301041.1607308089.9; __jdc=122270672; 3AB9D23F7A4B3C9B=4F44W5EE2FM3ZZTM3UYDEQRE5NSWE7XVXL4KFCWB2W4X6TZNEQGD6F4FXIF5QT52X3AYVI3HSSXP2AHJSG37TL5WKQ; shshshsID=84219a92339daa10a607dc1c4e4b3eea_10_1607309357557; __jdb=122270672.10.112509052|9.1607308089; JSESSIONID=66C1010C4B932A0DA0049A0E1859ED03.s1'

    @staticmethod
    def _strip_jsonp(text):
        """Return the raw JSON inside a ``fetchJSON_comment98(...)`` wrapper.

        Slices between the outermost parentheses instead of chained
        ``str.replace`` calls, so a missing trailing ``;`` or padding
        whitespace does not break parsing.  Returns *text* unchanged when
        no wrapper is present.
        """
        start = text.find('(')
        end = text.rfind(')')
        if start == -1 or end == -1 or end <= start:
            return text
        return text[start + 1:end]

    def parse_item(self, response):
        """From a product-detail page, request page 1 of its comment feed.

        Extracts the numeric SKU from the URL and builds the JSONP comment
        URL for it.  Pages without a numeric SKU are skipped instead of
        raising IndexError as the original ``id[0]`` did.
        """
        skus = re.findall(r'/(\d+)\.html', response.url)
        if not skus:
            return
        url = ('https://club.jd.com/comment/productPageComments.action'
               '?callback=fetchJSON_comment98&productId=' + skus[0] +
               '&score=0&sortType=5&page=1&pageSize=10&isShadowSku=0'
               '&rid=0&fold=1')
        headers = dict(self._COMMENT_HEADERS)
        headers['cookie'] = self._SESSION_COOKIE
        yield scrapy.Request(url=url, headers=headers, callback=self.test1)

    def test1(self, response):
        """Fan out one request per comment page, 1 through maxPage.

        The original ``range(1, maxPage)`` silently dropped the final page;
        the upper bound is now inclusive.
        """
        data = json.loads(self._strip_jsonp(response.text))
        max_page = data.get('maxPage', 0)
        for page in range(1, max_page + 1):
            page_url = re.sub(r'page=\d+&', 'page=' + str(page) + '&',
                              response.url)
            yield scrapy.Request(url=page_url,
                                 headers=self._COMMENT_HEADERS,
                                 callback=self.zui,
                                 priority=10)

    def zui(self, response):
        """Parse one comment page, yielding a CrawlJdItem per comment."""
        data = json.loads(self._strip_jsonp(response.text))
        for comment in data.get('comments', []):
            # A fresh item per comment: the original reused one mutable
            # instance across yields, so pipelines holding references saw
            # every item overwritten with the last comment's values.
            item = CrawlJdItem()
            for field in ('content', 'creationTime', 'productColor',
                          'productSize', 'referenceName', 'referenceTime'):
                item[field] = comment[field]
            yield item

