# -*- coding: utf-8 -*-
import scrapy
import re
import json
from scrapy.http import Request
from multiprocessing.dummy import Pool as ThreadPool
from Jingdong.items import JingdongItem


class JdcommentsSpider(scrapy.Spider):
    """Search JD.com for a keyword and scrape the comments of the top results.

    Flow: search page -> product SKU ids -> JSONP comment endpoint,
    paginated via the ``maxPage`` field of the first comment page.
    """

    name = 'jdcomments'
    allowed_domains = ['jd.com']

    def start_requests(self):
        """Kick off the crawl with a single search request."""
        search_target = "笔记本"  # search keyword ("laptop")
        search_url = 'https://search.jd.com/Search?' \
                     'qrst=1&rt=1&enc=utf-8&vt=2&psort=3&stop=1&click=0&' \
                     'wq={0}&keyword={0}'.format(search_target)

        return [Request(url=search_url, callback=self.parse_items)]

    @staticmethod
    def cmt_url(p_id, page_num=0):
        """Build the JSONP comment-API URL for product *p_id*, page *page_num*."""
        return 'https://club.jd.com/comment/skuProductPageComments.action?' \
               'callback=fetchJSON_comment98vv8444&score=0&sortType=5&pageSize=10&fold=1&isShadowSku=0&' \
               'page={0}&productId={1}'.format(page_num, p_id)

    def parse_items(self, response):
        """Extract product SKU ids from the search-result page and request the
        first comment page of each (limited to the first 5 products)."""
        product_ids = response.xpath('//*[@id="J_goodsList"]/ul/li/@data-sku').extract()
        for p_id in product_ids[:5]:
            url = self.cmt_url(p_id=p_id)
            # max_page_num=True: only the first page triggers pagination fan-out.
            yield Request(url=url, meta={'p_id': p_id, 'max_page_num': True},
                          callback=self.cmts_item)

    def cmts_item(self, response):
        """Parse one JSONP comment page into items; on the first page of a
        product, also schedule requests for every remaining page."""
        comments = response.body.decode('gbk')  # endpoint responds in GBK
        p_id = response.meta['p_id']
        # Strip the JSONP wrapper: fetchJSON_comment98vv8444( ...json... )
        json_data = re.findall(r'\((.*)\)', comments, re.S)
        # BUGFIX: re.findall returns a (possibly empty) list, never False; the
        # old `is False` test let an empty match fall through to an IndexError.
        if not json_data or len(json_data[0]) < 1000:
            self.logger.debug(
                'suspicious comment response %s (body length %d, matches %d)',
                response.url, len(comments), len(json_data))
            return

        comments_dic = json.loads(json_data[0])

        if 'comments' not in comments_dic or 'maxPage' not in comments_dic:
            return

        for cmt in comments_dic['comments']:
            # BUGFIX: build a fresh item per comment. The original reused one
            # JingdongItem instance, so every yielded item shared (and kept
            # overwriting) the same underlying dict.
            item = JingdongItem()
            item['productId'] = p_id
            item['productName'] = cmt['referenceName']
            item['productColor'] = cmt['productColor']
            item['orderTime'] = cmt['referenceTime']
            item['commentTime'] = cmt['creationTime']
            item['score'] = cmt['score']
            item['userClientShow'] = cmt['userClientShow']
            item['userLevelName'] = cmt['userLevelName']
            item['commentplusafterDays'] = cmt['afterDays']
            item['commentContent'] = "NULL"
            item['commentplusContent'] = "NULL"
            yield item

        if response.meta['max_page_num'] and int(comments_dic['maxPage']) > 1:
            # BUGFIX: the old ThreadPool built Request lists inside a worker
            # function and pool.map discarded every result, so pages 2+ were
            # never fetched. Yield the requests instead so Scrapy's scheduler
            # (which already crawls concurrently) actually processes them.
            for page_num in range(1, int(comments_dic['maxPage'])):
                yield Request(url=self.cmt_url(p_id=p_id, page_num=page_num),
                              meta={'p_id': p_id, 'max_page_num': False},
                              callback=self.cmts_item)
