#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-27

Extracts structured page information from pages crawled off e-commerce sites.

@author: zhongfeng
'''

from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, CrawlerType, crawleRetries
from time import strftime
import chardet, re
from urlparse import urlparse
from threadpool import WorkRequest
from crawlerhttp import crawle
from cStringIO import StringIO
from itertools import chain

# Known page encoding per site (keyed by the second hostname label, e.g.
# '360buy').  Sites missing here are sniffed once with chardet and the
# detected encoding is cached back into this dict (see convertToUnicode).
encodingDict = {'360buy':'gb2312', 'newegg':'gb2312', 'dangdang':'gb2312', 'gome':'utf-8', 
                'amazon':'utf-8', 'coo8':'gb2312', 'suning':'utf-8','egou':'GBK',}#'efeihu':'utf-8'}

def reinqueue_proc(req, result):
    """Handle a failed price fetch: re-queue it once, then give up.

    req is the worker argument tuple (see getProductPrice/proc_normal_result):
    req[0] is the url summary, req[3] the thread pool.  On the first failure
    (stat still 0) the HTTP code is recorded on the summary and the request is
    pushed back onto the pool; a second failure is only logged.
    """
    urlsum = req[0]
    pool = req[3]
    if urlsum.stat == 0:
        # first failure: remember the code so the next failure is final
        urlsum.stat = result.code
        req = WorkRequest(getProductPrice, req, None,
                        callback=None)
        pool.putRequest(req)
    else:
        print "Failed %s:%d" % (urlsum.url, result.code)
           
def getProductPrice(*req):
    '''
    Worker entry point: crawl the price resource described by req[0]
    (with retries) and dispatch the response to proc_normal_result.
    Returns the crawl response.
    '''
    price_url_summary = req[0]
    response = crawleRetries(price_url_summary)
    proc_normal_result(req, response)
    return response
        
def proc_normal_result(req, result):
    '''
    Process a crawl response: on HTTP 200, run the captcha/price decoder
    (req[4]) over the body, store the price on the product detail (req[1])
    and append it to the shared result list (req[2]); otherwise hand the
    request to reinqueue_proc for a retry.
    '''
    price_decoder = req[4]
    if result.code != 200:
        reinqueue_proc(req, result)
        return
    product = req[1]
    collected = req[2]
    product.privPrice = price_decoder(StringIO(result.content))
    collected.append(product)

class ObuyUrlSummary(UrlSummary):
    '''
    Crawl-target link abstraction: a UrlSummary enriched with category
    name/level, its ancestor path and crawl bookkeeping.
    '''
    def __init__(self, url='', data=None, headers=None, crawlerType=CrawlerType.GET_URL, name='',
                isCrawle=True, isRecursed=True, catagoryLevel=0, retries = 4, parentPath=None,parent = None,
                stat=0, errReason='', include=None, exclude=None):
        super(ObuyUrlSummary, self).__init__(url, data, headers, crawlerType,retries)
        self.name = name                         # category name
        self.catagoryLevel = catagoryLevel       # category level
        self.parentPath = [] if parentPath is None else parentPath  # ancestor chain
        self.parent = parent
        self.isCrawle = isCrawle                 # whether this url should be crawled
        self.isRecursed = isRecursed             # whether to recurse into sub-urls
        self.stat = stat                         # final crawl status
        self.errReason = errReason               # failure reason
        # BUG FIX: both attributes were hard-coded to None, silently
        # discarding the constructor arguments.
        self.include = include                   # urls that subUrls must contain
        self.exclude = exclude                   # urls to drop from subUrls; include wins when both are set

    def getUrlSumAbstract(self):
        '''Short identifying tuple: (name, url, category level).'''
        return self.name, self.url, self.catagoryLevel

    def __str__(self):
        return str(vars(self))

    __repr__ = __str__

class ParserResult(object):
    '''
    Base type for parser outputs; subclasses override logstr() to render
    themselves as a log record.
    '''
    def logstr(self):
        '''Return this result as a log line; the base implementation yields None.'''
        pass
    
def convertToUnicode(dataStr, siteName):
    '''
    Decode a raw byte string to unicode using the site's known encoding.
    Unknown sites are sniffed with chardet and the detected encoding is
    cached into encodingDict; non-str input is returned untouched.
    '''
    if not isinstance(dataStr, str):
        return dataStr
    encoding = encodingDict.get(siteName, None)
    if encoding is None:
        encoding = chardet.detect(dataStr)['encoding']
        encodingDict[siteName] = encoding  # cache the sniffed encoding
    return dataStr.decode(encoding, 'ignore')

class Parser(object):
    '''
    Base page parser: decodes the raw page into unicode, builds a
    BeautifulSoup tree, and offers include/exclude filtering of extracted
    url-summary lists.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        self.rootUrlSummary = rootUrlSummary
        self.include = include
        self.exclude = exclude
        # second hostname label, e.g. 'www.360buy.com' -> '360buy'
        # (assumes a www-style host with at least two dots — TODO confirm)
        siteName = urlparse(rootUrlSummary.url).hostname.split('.')[1]
        self.dataStr = convertToUnicode(dataStr, siteName)
        # BeautifulSoup is the default parser; convert HTML entities up front
        self.soup = BeautifulSoup(self.dataStr, convertEntities=BeautifulSoup.HTML_ENTITIES)

    @staticmethod
    def compareUrlSumm(urla, urlb):
        '''Match two summaries on url when urla carries one, else on name.'''
        if urla.url is not None and len(urla.url) > 0:
            return urla.url == urlb.url
        if urla.name is not None and len(urla.name) > 0:
            return urla.name == urlb.name
        return False

    @staticmethod
    def urlSummContain(filterArr, finalUrlSum):
        '''True when finalUrlSum, or any ancestor on its parentPath, matches
        an entry of filterArr.'''
        for urlsumm in filterArr:
            if Parser.compareUrlSumm(urlsumm, finalUrlSum):
                return True
            for parent in finalUrlSum.parentPath:
                if Parser.compareUrlSumm(urlsumm, parent):
                    return True
        return False

    def filterUrlList(self, finalUrlList):
        '''Apply include/exclude filtering; include takes priority when both
        are set, and an unset (None/empty) filter passes the list through.'''
        if self.include:
            return [urlsum for urlsum in finalUrlList
                    if Parser.urlSummContain(self.include, urlsum)]
        if self.exclude:
            return [urlsum for urlsum in finalUrlList
                    if not Parser.urlSummContain(self.exclude, urlsum)]
        return finalUrlList

    def parserPageInfos(self):
        '''
        Site-specific hook: return a list of ParserResult objects.
        '''
        pass

    def parserSubUrlSums(self):
        '''Site-specific hook: return the sub-url summaries of this page.'''
        pass

def getParser(level, parserDict):
    '''Look up the parser registered for *level*; None when absent.'''
    if level in parserDict:
        return parserDict[level]
    return None
    
class ParserUtils(object):
    '''
    Static helpers for extracting (name, url) pairs, prices, digits and
    image URLs from parsed HTML tags / text.
    '''
    @staticmethod
    def parserTag_A(a):
        '''Return (text, href) of an <a> tag, both stripped.'''
        return a.getText().strip(), a['href'].strip()

    @staticmethod
    def getPrice(sPrice):
        '''Extract a numeric price, e.g. u'\uffe54899.00' -> '4899.00'.

        Returns '0.00' when the input is empty or holds no number.
        '''
        if not sPrice:
            return '0.00'
        sPrice = sPrice.replace(u',', '')  # drop thousands separators
        # BUG FIX: the dot was unescaped (u'[0-9]+.[0-9]+'), so any character
        # could sit between the digit runs ('4x99' parsed as the price '4x99').
        # The fixed pattern also accepts integer prices.
        ret = re.search(u'[0-9]+(?:\\.[0-9]+)?', sPrice)
        if ret is None:
            return '0.00'
        return ret.group()

    @staticmethod
    def getDigit(s):
        '''Extract the first number (integer or decimal) from *s* as a
        string; returns the int 0 when no number is present.'''
        s = s.replace(u',', '')
        # same escaped-dot fix as getPrice; equivalent to the intended
        # u'[0-9]+\\.[0-9]+|[0-9]+'
        sd = re.search(u'[0-9]+(?:\\.[0-9]+)?', s)
        if sd is None:
            return 0
        return sd.group()

    @staticmethod
    def getImgUrl(imgTag):
        '''src of the first <img> inside *imgTag*; '' when the tag is missing.'''
        if imgTag is None:
            return ''
        return imgTag.img['src']

class RootCatagoryPageParser(Parser):
    '''
    Base class for parsing a site's root category page; produces the
    ObuyUrlSummary of every third-level category.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(RootCatagoryPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def buildSort_N(self, url, name, parent, isCrawle=True,firstFinalPage = False):
        '''
        Build a category node one level below *parent*, inheriting the
        parent's ancestor chain.  A "first final page" node anchors on
        itself instead of its parent.
        '''
        node = ObuyUrlSummary(url=url, name=name, isCrawle=isCrawle)
        node.catagoryLevel = parent.catagoryLevel + 1
        # ancestor chain = the parent's chain plus the parent itself
        node.parentPath = list(parent.parentPath) + [parent]
        node.parent = node if firstFinalPage else parent
        return node

    def getBaseSort3UrlSums(self):
        '''Site-specific hook: collect the level-3 category summaries.'''
        pass

    def parserSubUrlSums(self):
        candidates = self.getBaseSort3UrlSums()
        return self.filterUrlList(candidates)
    
class Sort3PageParser(Parser):
    '''
    Level-3 category page parser:
      a. collects the UrlSummary of every follow-up page of this category
      b. collects the information of every product on the page
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def buildSort_4(self, url):
        '''Build a level-4 (pagination) node carrying this page's lineage.'''
        node = ObuyUrlSummary(url=url, name=self.rootUrlSummary.name,
                              catagoryLevel=4)
        node.parentPath = list(self.rootUrlSummary.parentPath) + [self.rootUrlSummary]
        node.parent = self.rootUrlSummary.parent
        return node

    def getTotal(self):
        '''Site-specific hook: total number of pages in this category.'''
        pass

    def nextPageUrlPattern(self):
        '''Site-specific hook: format string with one slot for the page number.'''
        pass

    def buildSort_4UrlSums(self):
        '''Summaries for pages 2..total; empty when the category fits one page.'''
        total = self.getTotal()
        if not (total > 1):
            return []
        # call nextPageUrlPattern per page on purpose: subclasses may not
        # return a constant
        return [self.buildSort_4(self.nextPageUrlPattern().format(str(page)))
                for page in range(2, total + 1)]

    def getSort4PageUrlSums(self):
        return self.buildSort_4UrlSums()

    def parserSubUrlSums(self):
        return self.filterUrlList(self.getSort4PageUrlSums())

    
def seEncode(ustr, encoding='gb18030'):
    '''
    Render *ustr* as a byte string: None -> '', unicode -> encoded with
    *encoding* (errors ignored), anything else -> str(ustr).
    '''
    if ustr is None:
        return ''
    return ustr.encode(encoding, 'ignore') if isinstance(ustr, unicode) else str(ustr)

class ProductDetails(ParserResult):
    '''
    Detailed information about a single product, renderable as a
    '|'-separated log record via logstr().
    '''
    def __init__(self, name='', imageUrl='', productId='', catagory=None, fullUrl='', pubPrice='0.00',
                  privPrice='0.00', adWords='', reputation='0', evaluateNum='0', updateTime=None):
        self.name = name                        # product name
        self.imageUrl = imageUrl                # product image URL
        self.productId = productId              # product id on the source site
        self.catagory = catagory                # category the product belongs to
        self.fullUrl = fullUrl                  # original product link
        self.pubPrice = pubPrice                # listed original price
        self.privPrice = privPrice              # seller price, before ad discounts
        self.adWords = adWords                  # promotion text (instant rebates, coupons, ...)
        self.reputation = reputation            # positive-feedback rate
        self.evaluateNum = evaluateNum          # number of reviews
        # stamp "now" unless the caller supplies an explicit time
        self.updateTime = updateTime if updateTime is not None else strftime("%Y-%m-%d %H:%M:%S")

    def __getCatagoryAbs(self):
        '''str((url, level)) of the category's parent anchor; '' when that
        anchor is not an ObuyUrlSummary.'''
        anchor = self.catagory.parent
        if isinstance(anchor, ObuyUrlSummary):
            return str((seEncode(anchor.url), anchor.catagoryLevel))
        return ''

    def __filterStr(self, s):
        '''Encode, strip '|' field separators and collapse whitespace runs.'''
        return ' '.join(seEncode(s).replace('|', ' ').split())

    def logstr(self):
        '''Render the product as one '|'-separated log record.'''
        fields = (self.productId, self.privPrice, self.updateTime, self.name,
                  self.evaluateNum, self.reputation, self.adWords, self.fullUrl,
                  self.imageUrl, self.__getCatagoryAbs())
        return '|'.join(self.__filterStr(field) for field in fields)

    def __str__(self):
        return str(vars(self))

    __repr__ = __str__


