#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-8-27

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''


from pageparser import *
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries

# Root crawl entry for the Icson site: the portal page from which the whole
# category tree is discovered (catagoryLevel=0, recursion enabled).
icsonRoot = ObuyUrlSummary(url=r'http://www.icson.com/portal.html', name='icson', 
        isRecursed=True, catagoryLevel=0)

class IcsonAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://sz.icson.com/portal.html and turn every category entry
    found there into an ObuyUrlSummary record.
    '''
    mainHost = 'http://sz.icson.com/'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        '''Walk the three-level category tree; return the level-3 (leaf) summaries.'''
        leafSummaries = []
        portal = self.soup.find(attrs={'id': 'protal_list'})
        # Level-1 categories: the 'item_hd' header blocks inside the portal list.
        for header in portal.findAll(name='div', attrs={'class': 'item_hd'}):
            lvl1Name, lvl1Url = ParserUtils.parserTag_A(header.find(name='a'))
            lvl1Sum = self.buildSort_N(lvl1Url, lvl1Name, self.rootUrlSummary, isCrawle=False)
            # Level-2 entries live in the sibling 'item_bd' block, one <dl> each.
            body = header.findNextSibling(name='div', attrs={'class': 'item_bd'})
            for group in body(name='dl'):
                lvl2Name = group.dt.getText()
                lvl2Url = ''.join((self.mainHost, lvl2Name))
                lvl2Sum = self.buildSort_N(lvl2Url, lvl2Name, lvl1Sum, isCrawle=False)
                # Level-3 categories: every anchor inside the <dl> group.
                for anchor in group.findAll(name='a'):
                    lvl3Name, lvl3Url = ParserUtils.parserTag_A(anchor)
                    leafSummaries.append(
                        self.buildSort_N(lvl3Url, lvl3Name, lvl2Sum, firstFinalPage=True))
        return leafSummaries
        
class IcsonSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (product listing) pages.

    Extracts pagination information and per-product details (id, name,
    price, reputation, evaluation count, image URL) from a listing page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        '''
        Return a URL template for the follow-up listing pages; the '{}'
        placeholder is filled with the page number later via str.format.
        '''
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        # The first-page URL ends with '--------' (default parameters);
        # strip it and splice in the paged parameter segment instead.
        pageSeg = '-0-6-10-20-0-{}--'
        return '%s%s.%s' % (urlSegs[0].replace('--------', ''), pageSeg, urlSegs[1])

    def getTotal(self):
        '''
        Return the number of listing pages, capped at SpiderConfig.getMaxPage().

        The last page number is the text of the anchor just before the
        'page-next' link; no such link means there is only one page.
        '''
        nextSeg = self.soup.find(name='a', attrs={'class': 'page-next'})
        if nextSeg is not None:  # 'is not None': identity test, not equality
            t = nextSeg.findPreviousSibling(name='a').getText()
            totalPage = int(t)
        else:
            totalPage = 1
        # Honour the configured crawl limit.
        return min(totalPage, SpiderConfig.getMaxPage())

    def parserPageInfos(self):
        '''Parse every product item on the page into a list of ProductDetails.'''
        plist = self.soup.findAll(name='li', attrs={'class': 'item_list'})
        resultList = []
        for prod in plist:
            pNameSeg = prod.find(attrs={'class': 'wrap_info'})
            pName, url = ParserUtils.parserTag_A(pNameSeg.a)
            # Promotional ("hot") text shown under the product name.
            adWords = pNameSeg.find(name='p', attrs={'class': 'hot'}).getText()
            # Product id is the numeric tail of the URL: '...-<pid>.html'.
            pid = url.rsplit('-', 1)[-1].split('.')[0]
            t = prod.find(attrs={'class': 'price_icson'})
            if t is not None:
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00
            commSeg = prod.find(name='p', attrs={'class': 'comment'})
            repu = 0.0
            evalNum = 0
            if commSeg:
                repuSeg = commSeg.find(name='span', attrs={'class': 'icon_star'})
                if repuSeg:
                    # The star rating is rendered as a CSS width percentage
                    # on the <b> element; convert it to a 0-5 scale.
                    repu = ParserUtils.getDigit(repuSeg.b['style'])
                    repu = float(repu) * 5 / 100
                evalNum = ParserUtils.getDigit(commSeg.a.getText())
            imgSeg = prod.find(name='a', attrs={'class': 'link_pic'})
            imgUrl = ParserUtils.getImgUrl(imgSeg)
            prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl,
                                        privPrice=currentPrice, name=pName, adWords=adWords,
                                        reputation=repu, evaluateNum=evalNum)
            prodDetail.catagory = self.rootUrlSummary
            resultList.append(prodDetail)
        return resultList

class IcsonSort4PageParser(IcsonSort3PageParser):
    '''
    Parser for level-4 pages: plain product listings. Product details are
    extracted exactly as on level-3 pages, but no sub-category URLs exist.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # Level-4 pages are leaves: nothing deeper to collect.
        return None

parserDict = {0:IcsonAllSortParser, 3:IcsonSort3PageParser, 4:IcsonSort4PageParser}
    
            
''' test '''
import os
# Directory of saved HTML fixtures used by the manual test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testIcsonAllSortPage():
    fileName = os.path.join(testFilePath,'portal.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://sz.icson.com/portal.html', name='Icson')
    firstPage = IcsonAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.parserSubUrlSums():       
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel 

def testSort3Page():    
    fileName = os.path.join(testFilePath,'icson_2011-08-27_14-13-21.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://list.icson.com/311--------.html',
                                    parentPath=[('test')], catagoryLevel=3)
    sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    fileName = os.path.join(testFilePath,'icson_2011-08-27_14-13-21.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://list.icson.com/311--------.html',
                                    parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(sort_3_urlsum)
    fileName = os.path.join(testFilePath,'icson_test.html')
    with open(fileName, 'w') as fInput:
        fInput.write(result.content)
    sort3Page = IcsonSort3PageParser(result.content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print product.logstr()


if __name__ == '__main__':
    # Manual smoke tests; uncomment the offline fixture-based ones as needed.
    #testIcsonAllSortPage()
    #testSort3Page()
    testSort3Details()

    
    
