#coding:utf8
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
import scrapy
from scrapy.spiders import CrawlSpider
import json 
import items
import logging
import urllib
import re

class JdCommSpider(CrawlSpider):
    """Crawl JD.com search results for one shop keyword and scrape the
    comment (review) JSON pages of every product found.

    Flow: ``parse`` walks the paginated search results (30 products per
    full page), yielding one comment-page request per product;
    ``parse_comms`` walks each product's paginated comment JSON (10
    comments per full page), yielding one ``items.ScItem`` per comment.
    """
    # Shop keyword to search for; URL-encoded below before being
    # interpolated into the search URL.
    kd='Cityclover百广专卖店'
    #kd='aba旗舰店'  # alternate test shop, kept for convenience
    kd=urllib.quote(kd)
    # Comment-JSON fields copied verbatim into each item; a missing
    # field becomes '' (see parse_comms).
    fields=[u'id', u'guid', u'content', u'creationTime', u'referenceId', u'referenceTime', u'score',
        u'firstCategory',u'secondCategory', u'thirdCategory',u'userClient', u'isMobile', u'productColor',
        u'productSize', u'nickname', u'userProvince', u'userLevelId', u'userLevelName', u'userRegisterTime']

    name='JdCommSpider'
    allowed_domains=['jd.com']
    start_urls=['http://search.jd.com/s_new.php?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&default_sort=1&psort=4&page=1&click=0'%kd,]
    headers={
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language':'zh-CN,zh;q=0.8,zh-TW;q=0.6',
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36',
    }

    def parse(self,response):
        """Parse one search-result page.

        Yields a first-comment-page Request per product, then a Request
        for the next result page when this page is full (30 products).
        """
        self.logger.warning('ptPage:%s'%response.url)
        pts =response.xpath('//*[@id="J_goodsList"]/ul/li')
        for pt in pts:  # one <li> per product
            ptId=pt.xpath('@data-sku').extract()[0]
            commCount=pt.xpath('div/div[@class="p-commit"]/strong/a/text()')[0].extract()
            self.logger.warning('ptId,commCount:%s,%s'%(ptId,commCount))
            # Comment pages are 0-indexed; always start at page 0.
            commIndex=0
            link ='http://club.jd.com/productpage/p-%s-s-0-t-1-p-%d.html'%(ptId,commIndex)
            self.logger.warning('commPageLink:%s'%link)
            yield scrapy.http.Request(link,callback=self.parse_comms)

        # A full page holds 30 products; fewer means this was the last page.
        if len(pts)==30:
            url =response.url
            # Raw strings for the regex and an escaped dot-free pattern:
            # bump the page= query parameter by one to follow pagination.
            ptPageIndex =int(re.findall(r'page=(\d*)',url)[0])+1
            url =re.sub(r'page=(\d*)','page=%d'%ptPageIndex,url)
            yield scrapy.http.Request(url,callback=self.parse)

    def parse_comms(self,response):
        """Parse one comment JSON page; yield one ScItem per comment.

        Retries empty responses and follows pagination while each page
        is full (10 comments).
        """
        self.logger.warning('commPage:%s'%response.url)
        sbody = response.body
        if len(sbody)==0:
            # JD occasionally serves a blank body; retry the same URL.
            # dont_filter=True is required: without it Scrapy's dupefilter
            # drops this re-request of an already-seen URL, so the retry
            # would silently never happen.
            link=response.url
            self.logger.warning('commPage fail:%s,reload'%link)
            yield scrapy.http.Request(link,callback=self.parse_comms,dont_filter=True)
        else:
            # Comment endpoint responds in GB18030, not UTF-8.
            data = json.loads(sbody.decode('gb18030'))
            for comment in data['comments']:
                # Copy the known fields; absent keys default to ''.
                # (Replaces a bare "except:" that swallowed every error.)
                fdDict={}
                for fd in self.fields:
                    fdDict[fd]=comment.get(fd,'')
                yield items.ScItem(fdDict)

            # A full page holds 10 comments; fewer means the last page.
            if len(data['comments'])==10:
                url=response.url
                # Escape the dot so '.html' is matched literally.
                commPageIndex=int(re.findall(r't-1-p-(\d*)\.html',url)[0])+1
                link =re.sub(r't-1-p-(\d*)\.html','t-1-p-%d.html'%commPageIndex,url)
                yield scrapy.http.Request(link,callback=self.parse_comms)

