# ~*~ coding:utf-8 ~*~
from scrapy.selector import HtmlXPathSelector
from scrapy.selector import Selector
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy import log 
from ecspider.items import EcspiderItem
import json
from collections import OrderedDict
import re
import urllib
class jdSpider(BaseSpider):
    """Search dangdang.com for every query listed in ./querylist.txt,
    walk all result pages, and scrape each product detail page into an
    EcspiderItem.

    Each line of querylist.txt has the form "contentId:query".
    """
    name = "dangdang"
    searchurlprefix = "http://search.dangdang.com/?"
    searchurlsuffix = ""
    # Single shared browser-imitating User-Agent for every outgoing request
    # (previously duplicated inline on each Request call).
    ua_headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}

    def start_requests(self):
        """Yield one search request per non-empty line of ./querylist.txt."""
        # 'with' guarantees the handle is closed; the original leaked it.
        with open("./querylist.txt", "rb") as fp:
            for line in fp:
                line = line.strip()
                if not line:
                    continue
                # maxsplit=1 keeps any ':' inside the query itself.  The
                # original used maxsplit=2, which raised ValueError when
                # the query contained a colon.
                contentId, query = line.split(":", 1)
                queryenc = urllib.urlencode({"key": query})
                searchurl = self.searchurlprefix + queryenc + self.searchurlsuffix
                yield Request(searchurl,
                              meta={"domain": "dangdang.com",
                                    "query": query,
                                    "contentId": contentId},
                              headers=self.ua_headers)

    def parse(self, response):
        """Read the result-page count ("共N页") and request every page."""
        meta_data = response.meta
        url = response.url
        html = response.body_as_unicode().encode("utf-8")
        res = re.findall("共(\d+)页", html, re.I | re.M | re.DOTALL)
        # Fall back to a single page when the page counter is absent.
        page_size = int(res[0]) if res else 1
        for page_no in range(1, page_size + 1):
            nexturl = url + "&page_index=%d" % page_no
            # dont_filter: page URLs share a prefix and must not be deduped.
            yield Request(nexturl,
                          dont_filter=True,
                          meta=meta_data,
                          callback=self.parsesearchlist,
                          headers=self.ua_headers)

    def parsesearchlist(self, response):
        """Extract product detail-page URLs from one search-result page."""
        meta_data = response.meta
        sel = Selector(response)
        for sku in sel.xpath("//div[@class=\"inner\"]/a/@href"):
            # Dots escaped: the original pattern's bare '.' matched any char.
            m = re.search(r"product\.dangdang\.com/\d+\.html",
                          sku.extract().encode("utf-8"))
            if m is None:
                # Skip hrefs that are not product links instead of raising
                # IndexError on findall(...)[0] as the original did.
                continue
            yield Request("http://" + m.group(0),
                          meta=meta_data,
                          callback=self.parseitem,
                          headers=self.ua_headers)

    def parseitem(self, response):
        """Scrape one product page into an EcspiderItem (JSON payload)."""
        meta_data = response.meta
        skuurl = response.url
        hxs = HtmlXPathSelector(response)
        sel = Selector(response)
        itemjson = {}
        itemjson["outer_id"] = re.findall(r"http://product\.dangdang\.com/(\d+)\.html",
                                          skuurl)[0]
        itemjson["domain"] = meta_data["domain"]
        itemjson["query"] = meta_data["query"]
        itemjson["contentId"] = meta_data["contentId"]
        itemjson["url"] = skuurl
        itemjson["title"] = hxs.select("//h1/text()")[0].extract().encode("utf-8")
        # Review-score fields are not scraped from this page; left empty
        # so downstream consumers see a stable schema.
        itemjson["goodcount"] = ""
        itemjson["middlecount"] = ""
        itemjson["badcount"] = ""
        itemjson["commentcount"] = ""
        itemjson["averagescore"] = ""
        itemjson["comments"] = []
        # Each spec block becomes "text text ..."; blocks joined with ';'.
        dss = []
        for div in sel.xpath("//div[@class='mall_goods_foursort_style_frame']"):
            kvs = [t.extract().strip().encode("utf-8")
                   for t in div.xpath(".//text()")]
            dss.append(" ".join(kvs))
        itemjson["desc"] = ";".join(dss)
        itemjson["breadcrumb"] = ">".join(
            sel.xpath("//div[@class=\"breadcrumb\"]//a//text()").extract()
        ).encode("utf-8").strip()
        itemjson["price"] = hxs.select("//span[@id=\"salePriceTag\"]/text()")[0].extract().encode("utf-8")
        item = EcspiderItem()
        # Python 2 json.dumps: 'encoding' tells it byte strings are utf-8.
        item["itemjson"] = json.dumps(itemjson, encoding="utf-8", ensure_ascii=False)
        return item

