#-*- coding:utf-8 -*-
import re
import json
import urllib
import traceback

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from scrapy import log

from e_commerce_site_crawler_system.items import ECommerceSiteCrawlerSystemItem


class YihaodianSpider(BaseSpider):
    """Spider for yhd.com (Yihaodian) product pages.

    Flow: ``start_requests`` reads seed URLs from ``./yihaodian.url``;
    ``parse`` scrapes product ids out of the inline JS on the product
    page, then chains three AJAX requests (description attributes ->
    price -> comment counts); every path ends by emitting one item per
    seed URL through ``construct_item``.

    Written for Python 2 / legacy Scrapy (``BaseSpider``, ``scrapy.log``,
    ``urllib.quote``, the ``unicode`` builtin).
    """

    name = "yihaodian"
    table_name = "yihaodian"

    # The product page embeds these values in inline JavaScript; raw
    # strings make the regex escapes explicit (patterns unchanged).
    pm_id_p = re.compile(r"pmId:(\d+?),")
    product_id_p = re.compile(r"productId:(\d+?),")
    merchant_id_p = re.compile(r"merchantId:(\d+?),")
    is_yihaodian_p = re.compile(r"isYiHaoDian:(\d+?),")
    uid_p = re.compile(r"paramSignature:\"(.+?)\",")

    def get_desc_attrib(self, attr, desc_attrib_sel):
        """Mutate ``attr`` with key/value pairs from the description pane.

        Each ``<dd title=...>`` cell reads like ``u"brand:xxx"``; split on
        the first ASCII or full-width colon.  Cells without a colon are
        skipped (best effort, as before), keys/values stored utf-8 encoded.
        """
        for li_sel in desc_attrib_sel.xpath("//div[@id='prodDetailCotentDiv']//dd[@title]"):
            item = u"".join(li_sel.xpath(".//text()").extract()).strip()
            try:
                # ValueError when the cell contains no colon to split on.
                k, v = re.split(u":|：", item, maxsplit=1)
            except ValueError:
                continue
            attr[k.encode("utf-8", "ignore").strip()] = v.strip().encode("utf-8", "ignore")

    def get_detail_attrib(self, attr, detail_attrib_sel):
        """Mutate ``attr`` with label/value pairs from the detail tab.

        Only rows where both the ``<label>`` text and the trailing text
        are non-empty are recorded, utf-8 encoded.
        """
        for li_sel in detail_attrib_sel.xpath("//div[@tabindex='1']/dl/dd"):
            k = u"".join(li_sel.xpath("./label/text()").extract()).strip()
            v = u"".join(li_sel.xpath("./text()").extract()).strip()
            if k and v:
                attr[k.encode("utf-8", "ignore").strip()] = v.encode("utf-8", "ignore").strip()

    def start_requests(self):
        """Yield one unfiltered request per non-blank line of ./yihaodian.url.

        Fixes: the old ``for/else`` logged "get 1 url" for an empty file
        (``index`` started at 0 and ``+ 1`` was added unconditionally),
        and blank lines produced schemeless requests.  Count what is
        actually yielded and skip blank lines.
        """
        count = 0
        with open("./yihaodian.url") as f:
            for line in f:
                url = line.strip()
                if not url:
                    continue
                count += 1
                yield Request(url, meta={"origin_url": url}, dont_filter=True)
        log.msg("get %s url to crawl of %s" % (count, self.name))

    def parse(self, response):
        """Extract ids from the product page and request the description AJAX.

        If any expected inline-JS value is missing, the product is treated
        as off the shelves and an item is emitted immediately.
        """
        meta = response.meta
        sel = Selector(response)
        body = response.body

        pm_id_m = self.pm_id_p.search(body)
        product_id_m = self.product_id_p.search(body)
        merchant_id_m = self.merchant_id_p.search(body)
        is_yihaodian_m = self.is_yihaodian_p.search(body)
        uid_m = self.uid_p.search(body)

        meta["on_shelves"] = 1
        meta["url"] = response.url
        if not all((pm_id_m, product_id_m, merchant_id_m, is_yihaodian_m, uid_m)):
            meta["on_shelves"] = 0
            return self.construct_item(meta)

        meta["outer_id"] = product_id_m.group(1).strip()
        meta["pm_id"] = pm_id_m.group(1).strip()
        merchant_id = merchant_id_m.group(1).strip()
        is_yihaodian = is_yihaodian_m.group(1).strip()
        uid = uid_m.group(1).strip()

        meta["title"] = u"".join(
            sel.xpath("//h1[@id='productMainName']/text()").extract()).strip() or None

        # uid is quoted twice on purpose -- the endpoint appears to decode
        # the signature parameter twice.  NOTE(review): confirm on site.
        url = ("http://item-home.yhd.com/item/ajax/ajaxProdDescTabView.do?"
               "productID=%s&"
               "merchantID=%s&isYihaodian=%s&"
               "uid=%s"
               "&pmId=%s") % (meta["outer_id"], merchant_id, is_yihaodian,
                              urllib.quote(urllib.quote(uid)), meta["pm_id"])
        return Request(url, meta=meta, callback=self.parse_params, dont_filter=True)

    def parse_params(self, response):
        """Collect product attributes from the description AJAX JSON, then
        request the price/stock REST endpoint."""
        meta = response.meta
        params_json = json.loads(response.body_as_unicode())
        # The payload wraps an HTML fragment in the "value" field; re-parse
        # it as an HtmlResponse so XPath selectors work on it.
        html = params_json["value"].encode("utf-8", "ignore")
        sel = Selector(HtmlResponse(url=response.url, body=html, encoding='utf-8'))
        params = {}
        self.get_desc_attrib(params, sel)
        self.get_detail_attrib(params, sel)
        meta["attr"] = json.dumps(params, ensure_ascii=False)
        url = ("http://busystock.i.yihaodian.com/restful/detail?"
               "mcsite=1&provinceId=2&pmId=%s") % meta["pm_id"]
        return Request(url, meta=meta, callback=self.parse_info, dont_filter=True)

    def get_comment_num(self, sel, xpath, start, end):
        """Return ``int(text[start:end])`` for the first ``xpath`` match,
        or None when nothing matched / the text is not numeric.

        ``end=None`` already means "slice to the end of the string", so the
        old ``if end is not None`` special case was redundant; the broad
        ``except Exception`` is narrowed to the only raiser, ``ValueError``.
        """
        num = "".join(sel.xpath(xpath).extract()).strip()
        try:
            return int(num[start:end])
        except ValueError:
            return None

    def parse_comment_num(self, response):
        """Parse comment counts from the experience AJAX payload, then emit
        the finished item (best effort: counts stay unset on failure)."""
        meta = response.meta
        try:
            params_json = json.loads(response.body_as_unicode())
            html = params_json["value"].encode("utf-8", "ignore")
            sel = Selector(HtmlResponse(url=response.url, body=html, encoding='utf-8'))
            meta["comment_num"] = \
                self.get_comment_num(sel, "//span[@id='all-comment_num']/text()", 0, None)
            # The per-grade counts look like "(123)" -- strip the parens.
            meta["good_comment_num"] = \
                self.get_comment_num(sel, "//li[@tag='good-comment']/span/text()", 1, -1)
            meta["medium_comment_num"] = \
                self.get_comment_num(sel, "//li[@tag='general-comment']/span/text()", 1, -1)
            meta["bad_comment_num"] = \
                self.get_comment_num(sel, "//li[@tag='bad-comment']/span/text()", 1, -1)
        except Exception:
            log.msg("get comment num of yihaodian %s failed!" % meta["origin_url"])
        return self.construct_item(meta)

    def parse_info(self, response):
        """Read the price from the stock/price REST payload, then request
        the comment-count endpoint; emit the item early on failure."""
        meta = response.meta
        # The endpoint answers the literal string "null" for unknown pmIds.
        if response.body.strip() == "null":
            log.msg("url %s can't get info from url:%s" % (meta["url"], response.url),
                    level=log.WARNING)
            return self.construct_item(meta)
        yihaodian_info = json.loads(response.body)
        try:
            meta["price"] = yihaodian_info["currentPrice"]
        except Exception:
            log.msg("url %s can't get price info from url:%s %s" %
                    (meta["url"], response.url, traceback.format_exc()), level=log.WARNING)
            return self.construct_item(meta)
        url = ("http://e.yhd.com/front-pe/productExperience/"
               "proExperienceAction!ajaxView_pe.do?product.id=%s") % meta["outer_id"]
        return Request(url, meta=meta, callback=self.parse_comment_num, dont_filter=True)

    def construct_item(self, info):
        """Build the output item from ``info``; missing fields become None
        and unicode values are utf-8 encoded (Python 2)."""
        i = ECommerceSiteCrawlerSystemItem()
        i["origin_url"] = info["origin_url"]
        for k in ("url", "on_shelves", "title", "price", "attr",
                  "comment_num", "good_comment_num",
                  "medium_comment_num", "bad_comment_num"):
            v = info.get(k)
            if isinstance(v, unicode):
                v = v.encode("utf-8", "ignore")
            i[k] = v
        return i

