#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-11-22

Extracts page information (category tree and product listings) from pages
crawled from www.efeihu.com.

@author: zhongfeng
'''

from pageparser import *
import urlparse
from spiderconfigparser import SpiderConfig

# Root URL summary for www.efeihu.com; serves as the top of the category
# tree that the parsers below build.
efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')

class EfeihuAllSortParser(RootCatagoryPageParser):
    '''
    Parses the site-map ("all sorts") page of efeihu.com and walks the
    three-level category hierarchy it contains.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        '''
        Walk the category tree on the page and return the list of
        third-level (leaf) URL summaries; levels 1 and 2 are built along
        the way but not crawled themselves.
        '''
        leafSums = []
        siteSort = self.soup.find(attrs={'id':'sitesort'})
        level1Divs = siteSort.findAll(name='div', attrs={'id':re.compile(r'sort_hd_[0-9]*')})
        for level1 in level1Divs:  # first-level categories
            anchor = level1.h3.a
            sort1Sum = self.buildSort_N(anchor['href'], anchor.contents[0],
                                        self.rootUrlSummary, isCrawle=False)
            subItem = level1.find(attrs={'class':'subitem'})
            for level2 in subItem(name='dl'):  # second-level categories
                l2Name, l2Url = ParserUtils.parserTag_A(level2.dt.a)
                sort2Sum = self.buildSort_N(l2Url, l2Name, sort1Sum, isCrawle=False)
                for level3 in level2.dd(name='em'):  # third-level categories
                    l3Name, l3Url = ParserUtils.parserTag_A(level3.a)
                    leafSums.append(self.buildSort_N(l3Url, l3Name, sort2Sum,
                                                     firstFinalPage=True))
        return leafSums
        
class EfeihuSort3PageParser(Sort3PageParser):
    '''
    Parser for third-level category pages (paged product listings) on
    www.efeihu.com.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        # Listing URLs end with the page number as '--1'; swapping it for
        # '--{}' yields a str.format() template for the remaining pages.
        return self.rootUrlSummary.url.replace('--1', '--{}')

    def getTotal(self):
        '''
        Return the number of listing pages for this category, capped at
        SpiderConfig.getMaxPage().  Returns 1 when the pager widget is
        absent (single-page category).
        '''
        pager = self.soup.find(name='div',
                               attrs={'id':'ctl00_ContentPlaceHolder1_ucProductItemWithPager1_AspNetPager_down'})
        if pager is None:
            pageNum = 1
        else:
            # The last 'btn_next' anchor links to the final page; its URL's
            # file-name stem ends with '-<lastPage>', so the page count is
            # the trailing '-'-separated token.
            lastAnchor = pager(name='a', attrs={'class':'btn_next'})[-1]
            name, url = ParserUtils.parserTag_A(lastAnchor)
            pageNum = int(url.split('/')[-1].split('.')[0].split('-')[-1])

        return min(pageNum, SpiderConfig.getMaxPage())

    def __getSingleProdDetail(self, prod):
        '''
        Extract one ProductDetails record from a single product node
        (<li class="m_pro">) of the listing page.
        '''
        infoSeg = prod.find(attrs={'class':'infor'})
        pNameHref = infoSeg.find(name='a', attrs={'class':'name'})
        pName, url = ParserUtils.parserTag_A(pNameHref)
        url = ''.join(('http://www.efeihu.com', url))
        # Product id is the file-name stem of the detail-page URL.
        pid = url.split('/')[-1].split('.')[0]
        adwords = infoSeg.find(name='p', attrs={'class':'promtn'}).getText()
        # Current (selling) price; 0.00 when the element is missing.
        t = infoSeg.find(name='span', attrs={'class':'price_e'})
        if t is not None:
            currentPrice = ParserUtils.getPrice(t.getText())
        else:
            currentPrice = 0.00
        # Struck-through original price; 0.00 when the element is missing.
        t = infoSeg.find(name='span', attrs={'class':'price_del'})
        if t is not None:
            pastPrice = ParserUtils.getPrice(t.getText())
        else:
            pastPrice = 0.00
        evalNum = ParserUtils.getDigit(infoSeg.find(name='div', attrs={'class':'comment'}).a.getText())
        imgUrlSeg = prod.find(name='a', attrs={'class':'img'})
        imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
        prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice,
                                    pubPrice=pastPrice, name=pName, adWords=adwords, evaluateNum=evalNum)
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail

    def parserPageInfos(self):
        '''Parse every product entry on the page into ProductDetails objects.'''
        plist = self.soup.find(name='ul', attrs={'id':'prolist'})
        return [self.__getSingleProdDetail(prod)
                for prod in plist(name='li', attrs={'class':'m_pro'})]

class EfeihuSort4PageParser(EfeihuSort3PageParser):
    '''
    Fourth-level category pages are plain listing pages: only product
    information is extracted, so sub-URL discovery is a no-op.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # Listing pages have no further sub-categories to enumerate.
        return None

# Dispatch table: category depth -> parser class for pages at that level
# (0 = site-map/all-sorts page, 3 and 4 = product listing pages).
parserDict = {0:EfeihuAllSortParser, 3:EfeihuSort3PageParser, 4:EfeihuSort4PageParser}
            
''' test '''
# --- manual test helpers -------------------------------------------------
# Resolve the test-resource directory relative to this module so the
# tests below work regardless of the current working directory.
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')

from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
    fileName = os.path.join(testFilePath,'efeihu.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
    firstPage = EfeihuAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.parserSubUrlSums():       
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel 

def testSort3Page():
    
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.efeihu.com/Products/89-0-0-0-0-0-40--1.html',
                                    parentPath=[('test')], catagoryLevel=3)
    
    content = getContentFromUrlSum(sort_3_urlsum)
    sort3Page = EfeihuSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
    
    for product in sort3Page.parserPageInfos():
        print product.logstr()



# Manual entry point: runs the live network test against efeihu.com.
if __name__ == '__main__':
    testSort3Page()
    