# ~*~ coding:utf-8 ~*~
from scrapy.selector import HtmlXPathSelector
from scrapy.selector import Selector
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy import log 
from ecspider.items import EcspiderItem
import json
from collections import OrderedDict
import re
import urllib
class jdSpider(BaseSpider):
    """Spider for jd.com (Jingdong) product search results.

    Reads ``contentId:query`` lines from ``./querylist.txt``, searches
    jd.com for each query, and emits one EcspiderItem per product SKU,
    enriched with price and review data from JD's JSON side-channels.

    Request pipeline:
        start_requests -> parse (discover pagination)
                       -> parsesearchlist (extract SKU links)
                       -> parseitem (product detail page)
                       -> parseprice (p.3.cn price JSON)
                       -> parsecomment (club.jd.com review JSON) -> item
    """
    name = "jingdong"
    searchurlprefix = "http://search.jd.com/Search?"
    searchurlsuffix = "&enc=utf-8"

    def start_requests(self):
        """Yield one search request per ``contentId:query`` line of
        ./querylist.txt, carrying contentId/query along in request meta."""
        # 'with' closes the file handle (the original leaked it).
        with open("./querylist.txt", "rb") as f:
            querylist = f.readlines()
        for line in querylist:
            # BUGFIX: maxsplit must be 1, not 2 -- with maxsplit=2 a query
            # containing ':' splits into 3 fields and the 2-way unpacking
            # raised ValueError. Only the first ':' separates id from query.
            contentId, query = line.strip().split(":", 1)
            queryenc = urllib.urlencode({"keyword": query})
            searchurl = self.searchurlprefix + queryenc + self.searchurlsuffix
            yield Request(searchurl, meta={"domain": "jd.com", "query": query, "contentId": contentId})

    def parse(self, response):
        """Find the result page count ("共N页") and request every page,
        handing each off to parsesearchlist."""
        meta_data = response.meta
        url = response.url
        html = response.body_as_unicode().encode("utf-8")
        # Raw string for the regex; page count defaults to 1 when the
        # "共N页" marker is absent (single page of results).
        res = re.findall(r"共(\d+)页", html, re.I | re.M | re.DOTALL)
        page_size = int(res[0]) if res else 1
        for page_no in range(1, page_size + 1):
            nexturl = "%s&page=%d" % (url, page_no)
            # dont_filter: page URLs share a prefix with the seed URL and
            # must not be dropped by the dupe filter.
            yield Request(nexturl, dont_filter=True, meta=meta_data, callback=self.parsesearchlist)

    def parsesearchlist(self, response):
        """Extract SKU detail-page links from one search result page and
        request each, handing them off to parseitem."""
        meta_data = response.meta
        hxs = HtmlXPathSelector(response)
        skus = hxs.select('//*[@id="plist"]/ul/li/div/div[1]/a/@href')
        for sku in skus:
            skuurl = sku.extract().encode("utf-8")
            yield Request(skuurl, meta=meta_data, callback=self.parseitem)

    def parseitem(self, response):
        """Scrape title/description/breadcrumb from a product page, stash
        them in meta as 'itemjson', then chain to the price endpoint."""
        meta_data = response.meta
        skuurl = response.url
        hxs = HtmlXPathSelector(response)
        sel = Selector(response)
        itemjson = {}
        # SKU id is the numeric part of http://item.jd.com/<id>.html
        itemjson["outer_id"] = re.findall(r"http://item.jd.com/(\d+)\.html", skuurl, re.I | re.M | re.DOTALL)[0]
        itemjson["domain"] = meta_data["domain"]
        itemjson["query"] = meta_data["query"]
        itemjson["contentId"] = meta_data["contentId"]
        itemjson["url"] = skuurl
        itemjson["title"] = hxs.select('//*[@id="name"]/h1/text()')[0].extract().encode("utf-8")
        # Join all detail-list text fragments into one ';'-separated blob.
        dss = [d.extract().encode("utf-8").strip()
               for d in sel.xpath("//ul[@class='detail-list']/li//text()")]
        itemjson["desc"] = ";".join(dss)
        itemjson["breadcrumb"] = ">".join(sel.xpath('//div[@class="breadcrumb"]//a/text()').extract()).encode("utf-8").strip()
        meta_data["itemjson"] = itemjson
        # Price lives in a separate JSONP endpoint, not in the page HTML.
        priceurl = "http://p.3.cn/prices/get?skuid=J_%s&callback=cnp" % itemjson["outer_id"]
        return Request(priceurl, meta=meta_data, callback=self.parseprice)

    def parseprice(self, response):
        """Parse the price out of the p.3.cn JSONP response and chain to
        the comment endpoint."""
        meta_data = response.meta
        itemjson = meta_data["itemjson"]
        res = response.body.strip()
        # BUGFIX: the original pattern '"p"\:"(\d+.*?)' had no closing
        # delimiter, so the lazy '.*?' always matched empty and the decimal
        # part of the price (e.g. ".00" in "399.00") was dropped. Anchor on
        # the closing quote to capture the full value.
        prices = re.findall(r'"p":"(.*?)"', res, re.I | re.M | re.DOTALL)
        # Guard against a malformed/empty response instead of IndexError;
        # the item still flows through with an empty price.
        itemjson["price"] = prices[0] if prices else ""
        # Referer header is required by club.jd.com or the request is refused.
        commenturl = "http://club.jd.com/productpage/p-%s-s-0-t-3-p-0.html" % itemjson["outer_id"]
        return Request(commenturl, meta=meta_data, headers={"Referer": itemjson["url"]}, callback=self.parsecomment)

    def parsecomment(self, response):
        """Parse the GBK-encoded review-summary JSON, fold the rating
        counts and first comment page into itemjson, and emit the item."""
        meta_data = response.meta
        itemjson = meta_data["itemjson"]
        # club.jd.com serves GBK, not UTF-8.
        obj = json.loads(response.body.strip().decode("gbk"))
        productCommentSummary = obj["productCommentSummary"]
        score1 = productCommentSummary["score1Count"]
        score2 = productCommentSummary["score2Count"]
        score3 = productCommentSummary["score3Count"]
        score4 = productCommentSummary["score4Count"]
        score5 = productCommentSummary["score5Count"]
        # Bucket 5/4 stars as good, 3/2 as middling, 1 as bad.
        itemjson["goodcount"] = score5 + score4
        itemjson["middlecount"] = score3 + score2
        itemjson["badcount"] = score1
        itemjson["commentcount"] = productCommentSummary["commentCount"]
        itemjson["averagescore"] = productCommentSummary["averageScore"]
        itemjson["comments"] = [
            {"content": comment["content"].encode("utf-8"), "score": comment["score"]}
            for comment in obj["comments"]
        ]
        item = EcspiderItem()
        # Python-2 json.dumps: 'encoding' names the codec of byte strings
        # inside itemjson; ensure_ascii=False keeps CJK text readable.
        item["itemjson"] = json.dumps(itemjson, encoding="utf-8", ensure_ascii=False)
        return item
    
    
        
        



