#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-27

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''

from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, CrawlerType
from time import strftime
import chardet,re
from urlparse import urlparse


# Per-site charset hints keyed by site name (second hostname component).
# Unknown sites fall back to chardet detection in convertToUnicode(), and
# the detected charset is cached back into this dict.
encodingDict = {'360buy':'gb2312','newegg':'gb2312','dangdang':'gb2312','gome':'utf-8','amazon':'utf-8'}

class ObuyUrlSummary(UrlSummary):
    '''
    URL summary for a crawled category/listing page.

    Extends UrlSummary with category metadata (name, level, ancestor chain)
    and crawl bookkeeping (crawl/recurse flags, final status, error reason,
    include/exclude sub-URL filter lists).
    '''
    def __init__(self, url, data=None, headers=None, crawlerType=CrawlerType.GET_URL, name='',
                isCrawle=True, isRecursed=True, catagoryLevel=0, parentPath=None,
                stat=0, errReason='', include=None, exclude=None):
        super(ObuyUrlSummary, self).__init__(url, data, headers, crawlerType)
        self.name = name                         # category name
        self.catagoryLevel = catagoryLevel       # category level
        self.parentPath = [] if parentPath is None else parentPath  # ancestor summaries, root first
        self.isCrawle = isCrawle                 # whether this URL should be crawled
        self.isRecursed = isRecursed             # whether sub-URLs should be crawled recursively
        self.stat = stat                         # final crawl status
        self.errReason = errReason               # reason for a failed crawl
        # BUG FIX: both attributes were unconditionally set to None before,
        # silently discarding the include/exclude constructor arguments.
        self.include = include                   # sub-URLs that must be kept
        self.exclude = exclude                   # sub-URLs to drop; include wins when both are set

    def getUrlSumAbstract(self):
        '''Return a short (name, url, catagoryLevel) tuple for logging.'''
        return self.name, self.url, self.catagoryLevel

    def __str__(self):
        return str(vars(self))

    __repr__ = __str__

class ParserResult(object):
    '''Base type for parsed page results; subclasses override logstr().'''

    def logstr(self):
        '''Return a log line describing this result (no-op in the base class).'''
        return None

    
def convertToUnicode(dataStr, siteName):
    '''
    Decode a raw byte string to unicode using the site's known charset.

    Non-str input is returned unchanged. For sites missing from
    encodingDict the charset is detected with chardet and cached back
    into encodingDict for later calls. Undecodable bytes are ignored.
    '''
    if not isinstance(dataStr, str):
        return dataStr
    encoding = encodingDict.get(siteName, None)
    if encoding is None:
        encoding = chardet.detect(dataStr)['encoding']
        encodingDict[siteName] = encoding
    return dataStr.decode(encoding, 'ignore')

class Parser(object):
    '''
    Base page parser: decodes the raw page to unicode, builds a
    BeautifulSoup tree, and filters extracted sub-URLs through the
    include/exclude rules.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        self.rootUrlSummary = rootUrlSummary
        self.include = include      # summaries a sub-URL must match (directly or via an ancestor)
        self.exclude = exclude      # summaries to drop; include takes precedence when both are set
        # hostname like 'www.360buy.com' -> site key '360buy'
        siteName = urlparse(rootUrlSummary.url).hostname.split('.')[1]
        self.dataStr = convertToUnicode(dataStr, siteName)
        # BeautifulSoup is the default parser; convert HTML entities up front
        self.soup = BeautifulSoup(self.dataStr, convertEntities=BeautifulSoup.HTML_ENTITIES)

    @staticmethod
    def compareUrlSumm(urla, urlb):
        '''Match two summaries on a non-empty url first, then on a non-empty name.'''
        if urla.url is not None and len(urla.url) > 0:
            return urla.url == urlb.url
        elif urla.name is not None and len(urla.name) > 0:
            return urla.name == urlb.name
        else:
            return False

    @staticmethod
    def urlSummContain(urlsumm, sort_urlsumm):
        '''True if sort_urlsumm matches urlsumm directly or via any ancestor in its parentPath.'''
        if Parser.compareUrlSumm(urlsumm, sort_urlsumm):
            return True
        for parent in sort_urlsumm.parentPath:
            if Parser.compareUrlSumm(urlsumm, parent):
                return True
        return False

    def filterUrlList(self, finalUrlList):
        '''
        Apply the include/exclude rules to finalUrlList and return the result.

        include: keep entries matching ANY include summary.
        exclude: drop entries matching ANY exclude summary.

        BUG FIX: the original loops rebuilt the result from the full input
        list on every iteration, so only the LAST include/exclude entry ever
        took effect.
        '''
        filterResult = list(finalUrlList)
        if self.include is not None:
            # an explicitly empty include list keeps everything, matching
            # the original no-iteration behaviour
            if self.include:
                filterResult = [finalUrl for finalUrl in finalUrlList
                                if any(Parser.urlSummContain(inc, finalUrl)
                                       for inc in self.include)]
        elif self.exclude is not None:
            # filter progressively so every exclude entry is honoured
            for exc in self.exclude:
                filterResult = [finalUrl for finalUrl in filterResult
                                if not Parser.urlSummContain(exc, finalUrl)]
        return filterResult

    def parserPageInfos(self):
        '''Hook: subclasses return a list of ParserResult objects.'''
        pass

    def parserSubUrlSums(self):
        '''Hook: subclasses return the filtered sub-URL summaries.'''
        pass
    
class ParserUtils(object):
    '''
    HTML tag / text extraction helpers; tag helpers return (name, url).
    '''
    @staticmethod
    def parserTag_A(a):
        '''Return (text, href) of an <a> tag, both whitespace-stripped.'''
        return a.getText().strip(), a['href'].strip()

    @staticmethod
    def getPrice(sPrice):
        '''
        Extract the numeric price from a display string,
        e.g. u'\uffe54,899.00' -> u'4899.00' (thousands commas removed).

        BUG FIX: the decimal point is now escaped. The old pattern
        u'[0-9]+.[0-9]{2}' let '.' match ANY character, so malformed
        strings such as '4899x00' were silently accepted.
        '''
        sPrice = sPrice.replace(u',', '')
        p = re.compile(u'[0-9]+\\.[0-9]{2}')
        return p.search(sPrice).group()

class RootCatagoryPageParser(Parser):
    '''
    Base parser for a site's root category page; collects every
    level-3 category as an ObuyUrlSummary.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(RootCatagoryPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def buildSort_N(self, url, name, parent, isCrawle=True):
        '''Build a child category node one level below parent, copying its ancestor path.'''
        node = ObuyUrlSummary(url=url, name=name, parentPath=[], isCrawle=isCrawle)
        node.catagoryLevel = parent.catagoryLevel + 1
        node.parentPath = list(parent.parentPath) + [parent]
        return node

    def getBaseSort3UrlSums(self):
        '''Hook: subclasses return the raw list of level-3 ObuyUrlSummary objects.'''
        pass

    def parserSubUrlSums(self):
        '''Return the level-3 categories after include/exclude filtering.'''
        return self.filterUrlList(self.getBaseSort3UrlSums())
    
class Sort3PageParser(Parser):
    '''
    Parser for a level-3 category page.

    a. collects the UrlSummary objects for all follow-up (paginated)
       pages of the current category
    b. collects the information of every product on the page
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def buildSort_4(self, url):
        '''Build a level-4 (pagination) node sharing this category's name and path.'''
        node = ObuyUrlSummary(url=url, name=self.rootUrlSummary.name,
                              parentPath=[], catagoryLevel=4)
        node.parentPath += self.rootUrlSummary.parentPath
        return node

    def getTotal(self):
        '''Hook: subclasses return the total number of pages.'''
        pass

    def nextPageUrlPattern(self):
        '''Hook: subclasses return a str.format pattern for follow-up page URLs.'''
        pass

    def buildSort_4UrlSums(self):
        '''Build one level-4 UrlSummary per page number in 2..total.'''
        totalPage = self.getTotal()
        pages = []
        if totalPage > 1:
            pages = [self.buildSort_4(self.nextPageUrlPattern().format(str(num)))
                     for num in range(2, totalPage + 1)]
        return pages

    def getSort4PageUrlSums(self):
        '''Return the follow-up page UrlSummary list.'''
        return self.buildSort_4UrlSums()

    def parserSubUrlSums(self):
        '''Return follow-up page URLs after include/exclude filtering.'''
        return self.filterUrlList(self.getSort4PageUrlSums())

class ProductDetails(ParserResult):
    '''
    Detailed information for a single product.
    '''
    def __init__(self, name='', imageUrl=None, productId=None, catagory=None, fullUrl=None, pubPrice=None,
                  privPrice=None, adWords='', reputation=None, evaluateNum=None, updateTime=None):
        self.name = name                 # product name
        self.imageUrl = imageUrl         # product image URL
        self.productId = productId       # product id on the source site
        self.catagory = catagory         # category the product belongs to
        self.fullUrl = fullUrl           # original product page URL
        self.pubPrice = pubPrice         # advertised list price
        self.privPrice = privPrice       # actual selling price, before ad discounts
        self.adWords = adWords           # promotion text (instant rebates, coupons, ...)
        self.reputation = reputation     # positive-feedback ratio
        self.evaluateNum = evaluateNum   # number of reviews
        # default the update time to "now" when the caller supplied none
        if updateTime is None:
            updateTime = strftime("%Y-%m-%d %H:%M:%S")
        self.updateTime = updateTime

    def logstr(self):
        '''One pipe-separated log line: id|price|time|name|promo.'''
        fields = (str(self.productId), self.privPrice, self.updateTime, self.name, self.adWords)
        return '|'.join(fields)

    def __str__(self):
        return str(vars(self))
    __repr__ = __str__

    
    
