#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-8-02

Page-information extraction for content crawled from dangdang.com:
parses crawled category/listing pages and extracts category URLs and
product details.

@author: zhongfeng
'''


from pageparser import *
from spiderconfigparser import SpiderConfig

# Root URL summary for the dangdang.com category site; used as the top-level
# parent of every category URL summary built by the parsers below.
dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')

class DangDangAllSortParser(RootCatagoryPageParser):
    '''
    Parses http://category.dangdang.com/?ref=www-0-C to collect every
    product category (books excluded) and build ObuyUrlSummary objects
    for the level-3 (leaf) categories.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        '''
        Walk the three-level category tree on the "all categories" page and
        return a list of ObuyUrlSummary objects, one per level-3 category.
        The 'book' category is deliberately skipped.  Returns an empty list
        (instead of crashing, as the original did) when the expected page
        structure is missing.
        '''
        finalUrlList = []
        allSort = self.soup.find(attrs={'class':'categories_mainBody'})
        if allSort is None:  # layout changed or bad input: nothing to parse
            return finalUrlList
        for t in allSort.findAll(name='div', attrs={'id':re.compile(r'[a-z]*')}):  # level-1 categories
            name = t['id']
            if name == 'book':  # books are not parsed here
                continue
            url = ''.join((r'http://category.dangdang.com/', name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(attrs={'class':''.join([name, '_details'])})
            if sort_2 is None:  # this level-1 block has no detail section: skip it
                continue
            for tt in sort_2(name='li'):  # level-2 categories
                name, url = ParserUtils.parserTag_A(tt.a)
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.a.findNextSiblings(name='a'):  # level-3 categories
                    name, url = ParserUtils.parserTag_A(ttt)
                    # 'store=eq0' is appended to every leaf URL -- presumably a
                    # store filter; TODO confirm its meaning against the site.
                    url = '&'.join((url, 'store=eq0'))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum, firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
        
class DangDangSort3PageParser(Sort3PageParser):
    '''
    Parser for a level-3 category listing page: extracts the total page
    count and the product entries shown on the page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        '''Return the URL template for follow-up pages; '{}' is the page-number slot.'''
        pageSeg = 'p={}'
        return '%s&%s' % (self.rootUrlSummary.url, pageSeg)

    def getTotal(self):
        '''
        Return the number of listing pages, capped at SpiderConfig.getMaxPage().
        Returns 1 when the page-count element is absent OR when its text does
        not match the expected pattern (fix: the original raised AttributeError
        on a non-matching text because p.search() returned None).
        '''
        regx = u'共([0-9]*)页'  # "N pages in total"
        p = re.compile(regx)
        s = self.soup.find(name='span', attrs={'id':'all_num'})
        if s is None:  # alternate layout, see dangdang_2011-08-04_10-00-04.html
            st = self.soup.find(name='input', attrs={'id':'jumpto'})
            if st is not None:
                s = st.findNextSibling(name='span')
        if s is None:
            return 1
        m = p.search(s.getText())
        if m is None:  # element found but its text did not match the pattern
            return 1
        totalNum = int(m.group(1))
        if totalNum > SpiderConfig.getMaxPage():
            totalNum = SpiderConfig.getMaxPage()
        return totalNum

    def parserPageInfos(self):
        '''
        Extract every product entry on the page and return a list of
        ProductDetails objects, each tagged with this page's category
        summary.  Malformed entries are skipped instead of crashing.
        '''
        plist = self.soup.find(name='ul', attrs={'class':'mode_goods clearfix'})
        resultList = []
        if plist is None:  # alternate layout (note the trailing space in the class name)
            prodSeg = self.soup.findAll(attrs={'class':'listitem '})
        else:
            prodSeg = plist.findAll(name='li')
        for prod in prodSeg:
            pNameSeg = prod.find(attrs={'class':'name'})
            if pNameSeg is None:
                pNameSeg = prod.find(attrs={'class':'title'})
            if pNameSeg is None or pNameSeg.a is None:  # fix: skip malformed entry instead of AttributeError
                continue
            pName, url = ParserUtils.parserTag_A(pNameSeg.a)
            # the product id is the value of the last '='-separated URL parameter
            pid = url.rsplit('=', 1)[-1]
            t = prod.find(attrs={'class':'price_d'})
            if t is not None:
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00
            t = prod.find(attrs={'class':'price_m'})
            if t is not None:
                pastPrice = ParserUtils.getPrice(t.getText())
            else:
                pastPrice = 0.00
            starLevelSeg = prod.find(name='p', attrs={'class':'starlevel'})
            repu = 0.0
            evalNum = 0
            if starLevelSeg:
                # a full star counts 1.0, a half star 0.5
                for starImg in starLevelSeg.findAll(name='img'):
                    if starImg['src'] == 'images/star_all.png':
                        repu += 1.0
                    elif starImg['src'] == 'images/star_half.png':
                        repu += 0.5
                # NOTE(review): getText() yields a string while the default is
                # the int 0 -- consumers seem to accept both; verify downstream.
                evalNum = starLevelSeg.find(name='span').a.getText()
            imgUrlSeg = prod.find(attrs={'class':re.compile('.*pic')})
            imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
            prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice, pubPrice=pastPrice,
                                        name=pName, adWords='', reputation=repu, evaluateNum=evalNum)
            prodDetail.catagory = self.rootUrlSummary
            resultList.append(prodDetail)
        return resultList

class DangDangSort4PageParser(DangDangSort3PageParser):
    '''
    Parser for a level-4 category page.  Such pages are plain product
    listings, so only product details are extracted and no further
    sub-category URLs are produced.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # A level-4 page has no deeper sub-categories; yielding None here
        # disables sub-URL extraction inherited from the parent parser.
        return None

# Maps category depth -> parser class used by the crawler framework:
# 0 = the root "all categories" page, 3 and 4 = listing pages.
parserDict = {0:DangDangAllSortParser, 3:DangDangSort3PageParser, 4:DangDangSort4PageParser}
            
''' test '''
import os
# Directory holding the saved HTML fixtures used by the ad-hoc tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testDangDangAllSortPage():
    fileName = os.path.join(testFilePath,'dangcat.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
    pserver  = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976', 
                               name='奶粉',catagoryLevel = 2)
    firstPage = DangDangAllSortParser(content, rootUrlSum,include = [pserver])
    for sort_3 in firstPage.parserSubUrlSums():       
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel 

def testSort3Page():    
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                    parentPath=[('test')], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    fileName = os.path.join(testFilePath,'dangdang_2011-08-04_10-31-18.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0', 
                                   parentPath=[], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print type(product.logstr())
        print product.logstr()

def testRegx():
    regx = u'共([0-9]*)页'
    p = re.compile(regx)
    fileName = os.path.join(testFilePath,'4001011.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
    content = content.decode('gb18030','ignore')
    print p.search(s).group(1)

if __name__ == '__main__':
    # Manual test driver: uncomment the ad-hoc test you want to exercise.
    #testRegx()
    #testDangDangAllSortPage()
    #testSort3Page()
    testSort3Details()

    
    
