# -*-coding:utf-8-*-
__author__ = 'PP'
import string
import traceback

from scrapy import Selector
from scrapy import Request
from scrapy.utils.project import get_project_settings
from pymongo.connection import MongoClient

from goods.items import GoodsItem
from goods.utils.rules import *

# Base spider for ZOL product pages: follows series/list pages to product
# detail and parameter pages, builds GoodsItem records, and skips URLs that
# are already stored in MongoDB.
class ZolBaseSpider(CrawlSpider):
    isSave = True  # whether the pipeline should persist scraped items

    def __init__(self, index, sort, mode, *a, **kw):
        """Set up MongoDB access and the crawl rules for one category.

        :param index: rule index selecting the category configuration
        :param sort: category sort key forwarded to Rules
        :param mode: crawl mode forwarded to Rules
        """
        try:
            settings = get_project_settings()
            self.baseUrl = settings['BASE_URL']
            mongodb_server = settings['MONGODB_SERVER']
            mongodb_port = settings['MONGODB_PORT']
            mongodb_db = settings['MONGODB_DB']
            self.saveName = settings['MONGODB_COLL']
            client = MongoClient(mongodb_server, mongodb_port)
            self.db = client[mongodb_db]
            rule = Rules(index, sort, mode)
            # rules/domains/start_urls must be assigned before CrawlSpider's
            # __init__ compiles them.
            self.allowed_domains = rule.getDomains()
            self.start_urls = rule.getStartUrls()
            self.rules = rule.getRules()
            super(ZolBaseSpider, self).__init__(*a, **kw)
        except Exception as e:
            # Was mislabelled "MongodbPipeline" (copy-paste); log under the
            # spider's own name.
            log.msg("ERROR(ZolBaseSpider):" + str(e), level=log.ERROR)
            traceback.print_exc()

    def processLinks(self, links):
        """Log the links extracted by a crawl rule and pass them through."""
        log.msg('[Links]%s' % links)
        return links

    def parseMore(self, response):
        """Parse a series page: follow every product link not yet crawled."""
        sel = Selector(response)
        hrefs = sel.xpath('//div[@id="param_list"]//td[@class="name"]/a/@href').extract()
        for href in hrefs:
            pageUrl = (self.baseUrl + href).replace('cn//', 'cn/')
            if self.check(pageUrl):  # skip products already stored in MongoDB
                req = Request(url=pageUrl, callback=self.parseIndex)
                req.meta['source'] = 'series'
                yield req

    def parseIndex(self, response):
        """Parse a product index page and request its full-parameter page.

        Yields a Request to the parameter page with a partially filled
        GoodsItem attached in meta['item'].
        """
        # dict.has_key() is Python-2-only and deprecated; .get() works on both.
        source = response.meta.get('source', 'list')
        pageurl = response.url.replace('cn//', 'cn/')
        sel = Selector(response)
        param = sel.xpath('//div[@class="product-param"]//a[@class="more"]/@href').extract()
        if len(param) > 0:
            category = sel.xpath('//div[@class="product-category"]//a/text()').extract()
            # Default to '' so a missing breadcrumb no longer raises NameError
            # when the item fields are assigned below.
            sort = category[0] if len(category) > 0 else ''
            brand = category[1] if len(category) > 1 else ''
            price = sel.xpath('//span[@id="J_PriceTrend"]/b[contains(@class, "price-type")]//text()').extract()
            rank = sel.xpath('//div[@class="rank-num"]/span/a/text()').extract()
            intro = sel.xpath('//div[@class="product-intro"]/p/text()').extract()
            item = GoodsItem()
            item['_source'] = source
            item['_saveDb'] = self.saveName
            item['_isSave'] = self.isSave
            item['sort'] = sort
            item['brand'] = brand
            if len(rank) > 0:
                item['rank'] = rank[0]
            item['intro'] = intro[0] if len(intro) > 0 else ''
            item['url'] = pageurl
            # string.join() was removed in Python 3; str.join works on both.
            item['price'] = ' '.join(price).strip() if len(price) > 0 else ''
            url = self.baseUrl + param[0]
            yield Request(url=url, meta={'item': item}, callback=self.parseParam)

    def parseParam(self, response):
        """Parse the detail-parameter page and yield the finished GoodsItem."""
        sel = Selector(response)
        item = response.meta['item']
        name = sel.xpath('//h1[@class="ptitle"]//text()').extract()
        desc = sel.xpath('//span[@class="card-month-sale"]/text()').extract()
        if len(name) > 0:
            title = name[0].strip()
            # rstrip() strips a *character set* and could eat trailing
            # characters of the product name itself; remove the literal
            # "detailed parameters" page-title suffix instead.
            suffix = u'详细参数'
            if title.endswith(suffix):
                title = title[:-len(suffix)].strip()
            item['name'] = title
        # Monthly-sales counter; defaults to 0 when the element is absent.
        item['desc'] = int(''.join(desc).strip()) if len(desc) > 0 else 0
        attrs = {}
        for row in sel.xpath('//ul[@class="category_param_list"]/li'):
            key = row.xpath('span[contains(@id, "newPmName_")]//text()').extract()
            value = row.xpath('span[contains(@id, "newPmVal_")]//text()').extract()
            if len(key) > 0 and len(value) > 0:
                # Attribute names: join fragments, then drop dots and spaces
                # so they are usable as MongoDB keys.
                k = ' '.join(key).strip().replace('.', '').replace(' ', '')
                v = ' '.join(value).strip()
                attrs[k] = v
        item['attrs'] = attrs
        yield item

    def processRequest(self, req):
        """Drop requests whose URL was already stored; otherwise pass through."""
        if self.check(req.url):
            return req
        return None

    def check(self, url):
        """Return True when *url* has not been crawled yet.

        Looks the URL up in the configured MongoDB collection; a hit means
        the product is already saved and the request should be skipped.
        """
        dbColl = self.db[self.saveName]
        old = dbColl.find_one({"url": url})
        if old:
            # .get() guards against legacy documents without a 'name' field.
            log.msg('[Skip]:%s' % old.get('name'), level=log.WARNING)
            return False
        return True