#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-8-25

Extracts structured page information (categories, product listings, prices)
from pages crawled from the suning.com website.

@author: zhongfeng
'''

from pageparser import *
from spiderconfigparser import SpiderConfig
from threadpool import ThreadPool
from suning.image_price import captcha_suning


# Root node of the crawl tree for the whole suning.com site.
sunningRoot = ObuyUrlSummary(url=ur'http://www.suning.com', name='suning')

# Host prefix prepended to relative links found in category pages.
mainHost = r'http://www.suning.com' 

class SuningAllSortParser(RootCatagoryPageParser):
    '''
    Parses the full category tree from http://www.suning.com/ and builds
    ObuyUrlSummary nodes for category levels 1 through 3.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        '''Walk the element with id "allsortlist" and return the list of
        level-3 category ObuyUrlSummary objects; level-1/2 nodes are built
        only as parents and are not returned.'''
        finalUrlList = []
        allSort = self.soup.find(attrs = {'id':'allsortlist'})
        for t in allSort.findAll(name = 'h3'):  # level-1 categories
            name,url = ParserUtils.parserTag_A(t.a)
            if not url.startswith('http'):
                url = ''.join((mainHost,url))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='div')
            for tt in sort_2(name='dl'):  # level-2 categories
                name,url = ParserUtils.parserTag_A(tt.dt.a)
                if not url.startswith('http'):
                    # BUGFIX: was ''.join((mainHost, name)), which glued the
                    # category *name* onto the host instead of the relative url
                    # (compare the level-1 and level-3 branches).
                    url = ''.join((mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.dd.findAll(name = 'a'):  # level-3 categories
                    name, url = ParserUtils.parserTag_A(ttt)
                    if not url.startswith('http'):
                        url = ''.join((mainHost,url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
        
class SuningSort3PageParser(Sort3PageParser):
    '''
          三级页面解析类
    '''    
    pricePageNum = 10
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    
    def nextPageUrlPattern(self):
        #http://www.suning.com/webapp/wcs/stores/servlet/trd_10052_10051_1_20011_.html
        nextPagePattern = 'http://www.suning.com/emall/thirdsearch?storeId=%s&catalogId=%s&categoryId=%s&langId=-7&ip_state=&suggestionWordList=&isCatalogSearch=1&isList=0&sortType=4&currentPage={}'
        urlSegs = self.rootUrlSummary.url.rsplit('/', 1)[-1]
        retArr = urlSegs.split('_')
        pageSeg = nextPagePattern % (retArr[1],retArr[2],retArr[4])
        return pageSeg
    
    def getTotal(self):
        pageSeg = self.soup.find(name='i',attrs={'id':'pageTotal'})
        totalPage = 1
        if pageSeg is not None:
            totalPage = int(pageSeg.getText())
        totalPage = totalPage - 1
        if totalPage > SpiderConfig.getMaxPage():
            totalPage = SpiderConfig.getMaxPage()
        return totalPage
    

    def parseSingleProd(self, li):
        try:
            aSeg = li.find(name='span').a
            url = aSeg['href'].strip()
            if ''.find('www') == -1:
                url = '/'.join(('http://www.suning.com/emall', url))
            name = aSeg['title'].strip()
            adWords = li.find(name='p', attrs={'class':'sell'}).getText()
            pid = url.rsplit('/', 1)[-1].split('_')[-2]
            imgUrl = li.find(name='img')['src2']
            priceSeg = li.find(name='p', attrs={'class':'price'})
            priceImgUrl = ''
            if priceSeg != None:
                priceImgUrl = priceSeg.img['src2']
                cityId = '9173' #默认是南京的价格
                priceImgUrl = priceImgUrl.replace('~', cityId)
            commentSeg = li.find(name='div', attrs={'class':'comment'})
            evalSeg = None
            repuSeg = None
            if commentSeg is not None:
                evalSeg = commentSeg.a
                repuSeg = commentSeg.div
            if evalSeg is not None:
                evaluateNum = ParserUtils.getDigit(evalSeg.getText())
                repu = ParserUtils.getDigit(repuSeg['class'])
            else:
                evaluateNum = '0'
                repu = '0'
            prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice='', name=name, 
                       evaluateNum=evaluateNum, reputation=repu, adWords=adWords)
            prodDetail.catagory = self.rootUrlSummary
            return priceImgUrl,prodDetail
        except Exception,e:
            print e

    def parserPageInfos(self):
        plist = self.soup.find(name='div', attrs={'id':'proShow'})
        resultList = []       
        if plist is None:
            return resultList
        try:
            pool = ThreadPool(self.pricePageNum)
            for li in plist(name='li'):
                priceImgUrl,prodDetail = self.parseSingleProd(li)
                pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
                req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_suning], None,
                        callback=None)
                pool.putRequest(req)
            pool.wait()
        except Exception,e:
            raise e
        finally:
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList


class SuningSort4PageParser(SuningSort3PageParser):
    '''
    Level-4 category pages are plain listing pages: reuse the level-3
    product extraction, but do not enumerate any deeper sub-categories.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # Listing pages have no sub-category links to collect.
        return None

# Maps category depth to the parser class used for pages at that level.
parserDict = {0:SuningAllSortParser, 3:SuningSort3PageParser, 4:SuningSort4PageParser}
            
''' test '''
import os
# Directory holding the saved HTML fixtures used by the test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testAllSortPage():
    fileName = os.path.join(testFilePath,'suningAllSort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    
    rootUrlSum = ObuyUrlSummary(url=r'http://www.suning.com/', name='suning')
    excludeUrlSums = [ObuyUrlSummary(url=r'http://www.suning.com/webapp/wcs/stores/servlet/tcd_10052_22001_.html', name='suning' ,catagoryLevel=1)]
    firstPage = SuningAllSortParser(content, rootUrlSum,exclude=excludeUrlSums)
    for sort_3 in firstPage.parserSubUrlSums():       
        for index, urlsum in enumerate(sort_3.parentPath):
            pass
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel 

def testSort3Page():    
    fileName = os.path.join(testFilePath,'trd_10052_10051_1_20011_.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.suning.com/webapp/wcs/stores/servlet/trd_10052_10051_1_20011_.html',
                                    parentPath=[('test')], catagoryLevel=3)
    sort3Page = SuningSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    fileName = os.path.join(testFilePath,'suning_2011-08-25_13-05-11.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.suning.com/emall/strd_10052_10051_1_20003_.html',
                                    parentPath=[('test')], catagoryLevel=3)
    ret = crawleRetries(sort_3_urlsum)
    outputFileName = os.path.join(testFilePath,sort_3_urlsum.url.rsplit('/')[-1])
    with open(outputFileName,'w') as outputFile:
        outputFile.write(ret.content)
    
    sort3Page = SuningSort3PageParser(ret.content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print product.logstr()
    
if __name__ == '__main__':
    # Ad-hoc manual test entry point; removed a block of commented-out
    # urllib/regex scratch code. Enable other helpers as needed.
    #testAllSortPage()
    #testSort3Page()
    testSort3Details()