#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-27

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''

from pageparser import *
import re
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries
from utils import Future

# Root URL summary for the NewEgg category-list crawl entry point.
# Fix: the original used a `ur''` literal (Python-2-only syntax); the string
# contains no escape sequences, so `u''` yields the identical unicode value
# and remains valid on Python 3.3+.
newEggRoot = ObuyUrlSummary(url=u'http://www.newegg.com.cn/CategoryList.htm', name='newegg')

class NewEggAllSortParser(RootCatagoryPageParser):
    '''
    Parses http://www.newegg.com.cn/CategoryList.htm and turns every category
    found there into an ObuyUrlSummary tree (level 1 -> level 2 -> level 3).
    '''
    mainHost = r'http://www.newegg.com.cn'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        '''Return a list of third-level category ObuyUrlSummary objects,
        each linked to its level-2 and level-1 parents.'''
        sort3Sums = []
        catListSeg = self.soup.find(name='div', attrs={'class': 'allCateList'})
        # top-level categories are anchored by elements with ids like "pd12"
        for topSeg in catListSeg.findAll(attrs={'id': re.compile('pd[0-9]+')}):
            topName = topSeg.getText()
            topUrl = '#'.join((r'http://www.newegg.com.cn/CategoryList.htm', topSeg['id']))
            topSum = self.buildSort_N(topUrl, topName, self.rootUrlSummary, isCrawle=False)
            # second-level categories live in the sibling <dl>, one per <dt>
            for midSeg in topSeg.findNextSibling(name='dl')(name='dt'):
                midName, midUrl = ParserUtils.parserTag_A(midSeg.a)
                midSum = self.buildSort_N(midUrl, midName, topSum, isCrawle=False)
                # third-level categories: every <a> inside the following <dd>
                for leafSeg in midSeg.findNextSibling(name='dd').findAll(name='a'):
                    leafName, leafUrl = ParserUtils.parserTag_A(leafSeg)
                    # ask for 96 items per page to minimise paging
                    leafUrl = '?'.join((leafUrl, 'pageSize=96'))
                    sort3Sums.append(self.buildSort_N(leafUrl, leafName, midSum, firstFinalPage=True))
        return sort3Sums
        
class NewEggSort3PageParser(Sort3PageParser):
    '''
    Parser for third-level category listing pages.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        '''Build a paging URL template: ".../X.htm?..." -> ".../X-{}.htm?..."
        (the page number fills the "{}" placeholder later).'''
        base, tail = self.rootUrlSummary.url.rsplit('.', 1)
        return '%s%s.%s' % (base, '-{}', tail)

    def getTotal(self):
        '''Read the "current/total" pager text and return the total page
        count, capped at the configured maximum.'''
        pagerText = self.soup.find(name='div', attrs={'class': 'pageNav'}).find(name='ins').getText()
        total = int(pagerText.split('/')[-1])
        maxPage = SpiderConfig.getMaxPage()
        return maxPage if total > maxPage else total

    def getAdWords(self, prod, prodUrl):
        '''Return promotion words for a product cell. Only when the cell shows
        a cash-back/gift icon is the detail page actually fetched.'''
        iconSeg = prod.find(name='p', attrs={'class': 'extraIcon'})
        if not iconSeg:
            return ''
        iconText = iconSeg.getText()
        # skip the (slow) detail fetch unless a cash-back or gift marker is shown
        if iconText.find(u'返现') == -1 and iconText.find(u'赠品') == -1:
            return ''
        detailSum = ObuyUrlSummary(url=prodUrl)
        fetched = crawleRetries(urlSum=detailSum)
        detailParser = NewEggSortFinalParser(dataStr=fetched.content, rootUrlSummary=detailSum)
        return detailParser.parserPageInfos()

    def parserPageInfos(self):
        '''Extract a ProductDetails object for every item cell on the page.'''
        gridSeg = self.soup.find(attrs={'id': 'itemGrid1'})
        details = []
        if gridSeg is None:
            return details
        for cell in gridSeg.findAll(attrs={'class': 'itemCell noSeller'}):
            prodName, prodUrl = ParserUtils.parserTag_A(cell.find(name='p', attrs={'class': 'info'}).a)
            # start the (possibly network-bound) ad-word lookup in the background
            adWordsTask = Future(self.getAdWords, *(cell, prodUrl))
            prodId = prodUrl.rsplit('/', 1)[-1].split('.')[0]
            curPrice = ParserUtils.getPrice(cell.find(attrs={'class': 'current'}).strong.getText())
            pastSeg = cell.find(attrs={'class': 'bypast'})
            oldPrice = ParserUtils.getPrice(pastSeg.getText()) if pastSeg is not None else '0.00'
            imgUrl = cell.find(name='dt').findAll(name='img')[-1]['src']
            # defaults apply when no rating block is present
            score = '0.0'
            evalCount = '0'
            rankSeg = cell.find(name='dd', attrs={'class': 'rank '})  # NOTE: trailing space is in the site markup — confirm before "fixing"
            rankLink = rankSeg.a if rankSeg is not None else None
            if rankLink is not None:
                score = ParserUtils.getDigit(rankLink['title'])
                evalCount = ParserUtils.getDigit(rankLink.getText())
            adWords = adWordsTask()  # block until the background fetch finishes
            detail = ProductDetails(productId=prodId, fullUrl=prodUrl, imageUrl=imgUrl,
                    privPrice=curPrice, pubPrice=oldPrice, name=prodName,
                    adWords=adWords, reputation=score, evaluateNum=evalCount)
            detail.reputation = score
            detail.evaluateNum = evalCount
            detail.catagory = self.rootUrlSummary
            details.append(detail)
        return details

class NewEggSort4PageParser(NewEggSort3PageParser):
    '''
    Level-4 category pages are plain listing pages: only product information
    is extracted, so sub-URL discovery is disabled.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # listing pages have no further sub-categories to descend into
        return None
    
class NewEggSortFinalParser(Parser):
    '''Product detail-page parser: pulls cash-back and free-gift promotion text.'''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserPageInfos(self):
        '''Return "<cash-back>@<gift list>" for this detail page.'''
        return '@'.join((self.getCrashCut(), self.getExtGift()))

    def getCrashCut(self):
        '''Extract the cash-back amount; "0.00" when none is advertised.'''
        amount = '0.00'
        favSeg = self.soup.find(name='ul', attrs={'class': 'favourableInfo'})
        if favSeg:
            for itemSeg in favSeg(name='li'):
                # only the <li> whose label mentions cash-back carries the amount
                if itemSeg.label.getText().find(u'返现') != -1:
                    amount = ParserUtils.getDigit(itemSeg.getText())
                    break
        return amount

    def getExtGift(self):
        '''Concatenate every free-gift entry as "<index>.<text>" (0-based).'''
        giftSeg = self.soup.find(name='div', attrs={'class': 'presentArea'})
        if not giftSeg:
            return ''
        parts = ['%s.%s' % (idx, ddSeg.getText()) for idx, ddSeg in enumerate(giftSeg(name='dd'))]
        return ''.join(parts)
    
# Maps catagory level -> parser class for pages at that level
# (0: root category list, 3: third-level listing, 4: fourth-level listing).
parserDict = {0:NewEggAllSortParser, 3:NewEggSort3PageParser, 4:NewEggSort4PageParser}
            
''' test '''
import os
# Directory holding the saved HTML fixtures used by the manual tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testNewEggAllSortPage():
    fileName = os.path.join(testFilePath,'CategoryList.htm')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
    include = [ ObuyUrlSummary(url = r'http://http://www.newegg.com.cn/Category/536.htm',
                               name='服务器',catagoryLevel = 2)]
    firstPage = NewEggAllSortParser(content, rootUrlSum,include = include)
    for sort_3 in firstPage.getBaseSort3UrlSums():       
        for index, urlsum in enumerate(sort_3.parentPath):
            print '\t'*index,str(urlsum.getUrlSumAbstract())
        print sort_3.url ,sort_3.catagoryLevel 

def testSort3Page():    
    fileName = os.path.join(testFilePath,'newegg_2011-08-25_16-03-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1043.htm?pageSize=96',
                                    parentPath=[('test')], catagoryLevel=3)
    sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    #fileName = os.path.join(testFilePath,'1043.htm')
    #with open(fileName, 'r') as fInput:
    #    content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/970.htm?ep=1', 
                                   parentPath=[('test')], catagoryLevel=3)
    from crawlerhttp import crawle
    content = ''
    while True:
        result = crawle(sort_3_urlsum)
        if result.code == 200:
            content = result.content
            break
    sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print product.logstr()
    
if __name__ == '__main__':
    # Manual smoke tests; uncomment the one(s) to run.
    #testNewEggAllSortPage()
    #testSort3Page()
    testSort3Details()

    
    
