# ~*~ coding:utf-8 ~*~
from scrapy.selector import HtmlXPathSelector
from scrapy.selector import Selector
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy import log 
from ecspider.items import EcspiderItem
import json
from collections import OrderedDict
import re
import urllib
class amazonSpider(BaseSpider):
    """Crawl amazon.cn search results for each query in ./querylist.txt.

    Pipeline: start_requests (one search per query) -> parse (fan out over
    result pages) -> parsesearchlist (collect product links) -> parseitem
    (emit one EcspiderItem whose payload is a JSON document).
    """
    name = "amazon"
    searchurlprefix = "http://www.amazon.cn/s/"
    searchurlsuffix = ""

    # Compiled once: matches the per-star review counts in the ratings
    # histogram on a product page (rows are ordered 5-star .. 1-star).
    _histo_re = re.compile(
        "<div class=\"histoCount fl gl10 ltgry txtnormal\" "
        "style=\"text-decoration: none;\">(\\d+)</div>",
        re.I | re.M | re.DOTALL)

    @staticmethod
    def _first(selected, default=""):
        """Return the first extracted value of an XPath selection as a
        UTF-8 byte string, or *default* when the selection is empty.

        Replaces bare ``[0].extract()`` indexing, which raised IndexError
        (and dropped the whole item) whenever a node was missing.
        """
        if selected:
            return selected[0].extract().encode("utf-8")
        return default

    def start_requests(self):
        """Read "contentId:query" lines and issue one search request each."""
        # Context manager: the original leaked the file handle.
        with open("./querylist.txt", "rb") as querylist:
            for line in querylist:
                line = line.strip()
                if not line:
                    continue  # blank line would crash the unpack below
                # maxsplit=1: the query text may itself contain ':'.
                # The original split(":", 2) produced up to THREE fields
                # and raised ValueError on such lines.
                contentId, query = line.split(":", 1)
                queryenc = urllib.urlencode({"keywords": query})
                searchurl = self.searchurlprefix + queryenc + self.searchurlsuffix
                yield Request(searchurl,
                              meta={"domain": "amazon",
                                    "query": query,
                                    "contentId": contentId})

    def parse(self, response):
        """Read the pagination bar and fan out one request per result page."""
        meta_data = response.meta
        url = response.url
        hxs = HtmlXPathSelector(response)
        # The last-but-one <span> of the pagination bar holds the page count.
        page = hxs.select("//div[@id=\"pagn\"]/span[last()-1]/text()")
        if page:
            page_size = int(page[0].extract().encode("utf-8"))
        else:
            page_size = 1  # no pagination bar -> single page of results
        for page_no in range(1, page_size + 1):
            nexturl = url + ("&page=%d" % page_no)
            # dont_filter: page URLs share a prefix with the seed search URL
            # and would otherwise be dropped by the duplicate filter.
            yield Request(nexturl, dont_filter=True, meta=meta_data,
                          callback=self.parsesearchlist)

    def parsesearchlist(self, response):
        """Collect product links from one search-result page.

        atfResults / btfResults are the "above the fold" / "below the fold"
        result containers; the original duplicated the scraping code for each.
        """
        meta_data = response.meta
        hxs = HtmlXPathSelector(response)
        skus = []
        for container in ("atfResults", "btfResults"):
            links = hxs.select(
                "//div[@id=\"%s\"]//div[@class=\"image imageContainer\"]/a/@href"
                % container)
            for link in links:
                skus.append(link.extract().encode("utf-8"))
        for skuurl in skus:
            yield Request(skuurl, meta=meta_data, callback=self.parseitem)

    def parseitem(self, response):
        """Parse a product detail page into an EcspiderItem.

        The item carries a single "itemjson" field: a UTF-8 JSON document
        with query context, title/description, comment samples and the
        good/middle/bad rating breakdown from the histogram.
        """
        meta_data = response.meta
        skuurl = response.url
        html = response.body_as_unicode().encode("utf-8")
        hxs = HtmlXPathSelector(response)
        sel = Selector(response)
        itemjson = {}
        itemjson["domain"] = meta_data["domain"]
        itemjson["query"] = meta_data["query"]
        itemjson["contentId"] = meta_data["contentId"]
        itemjson["url"] = skuurl
        itemjson["breadcrumb"] = ">".join(
            sel.xpath("//div[@class=\"bucket\"]/div[@class=\"content\"]"
                      "/ul/li/a/text()").extract()).encode("utf-8").strip()
        # _first() defaults to "" so a page missing any of these nodes no
        # longer raises IndexError and loses the whole item.
        itemjson["title"] = self._first(
            hxs.select("//span[@id=\"btAsinTitle\"]/span[1]/text()")
        ).replace("\n", "").strip()
        itemjson["desc"] = " ".join(
            hxs.select("//div[@class='productDescriptionWrapper']/text()")
            .extract()).encode("utf-8").replace("\n", "").strip()
        itemjson["commentcount"] = self._first(
            hxs.select("//div[@id=\"acr\"]/div[1]/div[2]/a/text()")
        ).strip().split(" ")[0]
        itemjson["averagescore"] = self._first(
            hxs.select("//div[@id=\"acr\"]/div[1]/div[4]/text()")
        ).replace("平均", "").replace("星", "").strip()
        comments = []
        for c in hxs.select("//*[@id=\"revMHRL\"]/div"):
            score = self._first(c.select("div[3]/span[1]/@title")) \
                .strip().replace("平均", "").replace("星", "").strip()
            content = self._first(c.select("div[6]/div[1]/text()")).strip()
            comments.append({"content": content, "score": score})
        itemjson["comments"] = comments
        scoredict = {}
        scores = self._histo_re.findall(html)
        # Fill missing histogram rows with 0 instead of raising IndexError
        # (pages with few/no reviews render fewer than 5 rows).
        for i in range(5):
            count = int(scores[i].strip()) if i < len(scores) else 0
            scoredict["score%d" % (5 - i)] = count
        itemjson["goodcount"] = scoredict["score5"] + scoredict["score4"]
        itemjson["middlecount"] = scoredict["score2"] + scoredict["score3"]
        itemjson["badcount"] = scoredict["score1"]
        # Kept from the original in case downstream middleware reads it.
        meta_data["itemjson"] = itemjson
        item = EcspiderItem()
        # encoding= kwarg is Python 2 json; this file is Python 2 throughout
        # (urllib.urlencode, byte-string .encode chains).
        item["itemjson"] = json.dumps(itemjson, encoding="utf-8",
                                      ensure_ascii=False)
        return item

    
    
        
        



