#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-8-20

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''

from pageparser import *
from threadpool import ThreadPool, WorkRequest
from cStringIO import StringIO
from gome.image_price import captcha_gome
from spiderconfigparser import SpiderConfig


# Root URL summary for the gome.com.cn category tree; catagoryLevel 0 marks it
# as the entry point (the all-categories page) that GomeAllSortParser expands.
gomeRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/allSort.html',name='gome',  
                                 isRecursed = True,catagoryLevel = 0) 

class GomeAllSortParser(RootCatagoryPageParser):
    '''
    Parses http://www.gome.com.cn/allSort.html (the all-categories page)
    and wraps every category anchor it finds as an ObuyUrlSummary.
    '''
    mainHost = r'http://www.gome.com.cn' 

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude) 

    def getBaseSort3UrlSums(self):
        '''Return a list of ObuyUrlSummary objects (catagoryLevel 3),
        one per category link inside the 'alcatBox' container.

        Anchors that ParserUtils.parserTag_A cannot parse are silently
        skipped (best effort, same as the original behaviour).
        '''
        urlSums = []
        allSort = self.soup.find(attrs = {'class':'alcatBox'})
        for listBox in allSort.findAll(name = 'div',attrs = {'class':'listBox'}):  # one box per top-level category
            for anchor in listBox.findAll(name = 'a'):
                try:
                    catName, catUrl = ParserUtils.parserTag_A(anchor)
                except Exception:
                    continue  # malformed anchor: skip it
                urlSum = self.buildSort_N(catUrl, catName, self.rootUrlSummary, firstFinalPage=True)
                urlSum.catagoryLevel = 3
                urlSums.append(urlSum)
        return urlSums
        
class GomeSort3PageParser(Sort3PageParser):
    '''
          三级页面解析类
    '''    
    pricePageNum = 10
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    
    def nextPageUrlPattern(self):
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-{}-4-1-sc_'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    
    def nextPageUrlPattern1(self):
        nextPat = 'http://search.gome.com.cn/product.do?topCtgyId=%s&order=%s&ctgyId=%s&p={}&ctgLevel=3&scopes='
        urlSegs = self.rootUrlSummary.url.rsplit('/',1)[-1].split('.')[0].split('-')
        #urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        #pageSeg = '&order=3&scopes=&p={}'
        #return '%s%s' % (self.rootUrlSummary.url,pageSeg)
        return nextPat % (urlSegs[0],urlSegs[2],urlSegs[1])
    
    def getTotal(self):
        toolSeg = self.soup.find(name='div',attrs={'class':'listNav'})
        if toolSeg is None:
            return 0
        pageSeg = toolSeg.find(attrs={'class':'fr'}).getText()
        totalPage = int(pageSeg.split('/')[-1])
        if totalPage > SpiderConfig.getMaxPage():
            totalPage = SpiderConfig.getMaxPage()
        return totalPage
    
    def getAdwordsDict(self):
        regx =ur'''var d =\[\{name0:'(.*?)',.*?\$\('#promImg_([0-9]+)'\)'''
        p = re.compile(regx,re.DOTALL)
        idAdDict = {}
        for match in p.finditer(self.dataStr):
            idAdDict[match.group(2)] = match.group(1)
        return idAdDict
    
    def parserPageInfos(self):      
        pTipsSeg = self.soup.find(name='div', attrs={'class':'listNav'}).find(name='div',attrs={'class':'tips'})
        resultList = []
        if pTipsSeg is None:
            raise Exception("Page Error")
            return resultList
        try:
            pool = ThreadPool(self.pricePageNum)
            plist = pTipsSeg.findNextSibling(name='ul')
            idAdDict = self.getAdwordsDict()
            for li in plist(name='li'):
                pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'title'}).a)
                
                imgUrl = li.find(name='div',attrs={'class':'pic'}).img['gome-src']
            
                #repuSeg = li.find(name='div',attrs={'class':'extra'}).div['class']  
                #reputation = ParserUtils.getDigit(repuSeg)
                #adWordsSeg = li.find(name='span',attrs={'id':re.compile(r'promImg_[0-9]+')}).find(name = 'img')
                #if adWordsSeg:
                #    adWords = adWordsSeg['title']
#                print adWords
                pid = url.rsplit('/',1)[-1].split('.')[0]
                if url and not url.startswith('http'):
                    url = ''.join((r'http://www.gome.com.cn',url))
                
                priceImgUrlSeg = li.find(name='span',attrs={'class':'price'})
                
                priceImgUrl = ParserUtils.getImgUrl(priceImgUrlSeg)
                adWords = idAdDict.get(pid,'')
                prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords,
                                            imageUrl=imgUrl,fullUrl=url)
                prodDetail.catagory = self.rootUrlSummary
                pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
                req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_gome], None,
                        callback=None)
                pool.putRequest(req)
            pool.wait()
        except Exception,e:
            raise e
        finally:
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList

class GomeSort4PageParser(GomeSort3PageParser):
    '''
    Level-4 category pages are plain product listing pages: product
    extraction is inherited from GomeSort3PageParser, and there are no
    deeper sub-category URLs to produce.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        '''No deeper category levels exist; intentionally yields nothing.'''
        return None

parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,4:GomeSort4PageParser} 
            
''' test '''
# Resolve the test-resources directory relative to this module so the manual
# test helpers below can load/save HTML fixture files.
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import crawle, crawleRetries

def testAllSortPage():
    fileName = os.path.join(testFilePath,'AllSort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    
    rootUrlSum = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome')
    result = crawle(urlSum = rootUrlSum)
    content = result.content
    
    firstPage = GomeAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():       
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel 

def testSort3Page():    
    fileName = os.path.join(testFilePath,'gome_2011-12-24_22-37-57.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        
    sort_3_urlsum = ObuyUrlSummary(url=r'http://search.gome.com.cn/product/10000000-10000073-3.html',
                                    parentPath=[('test')], catagoryLevel=3)
    #http://search.gome.com.cn/product.do?topCtgyId=10000000&order=3&ctgyId=10000070&p=2&ctgLevel=3&scopes=
    #result = crawle(urlSum = sort_3_urlsum)
    #content = result.content
    
    sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    fileName = os.path.join(testFilePath,'gome_test.html')
    #with open(fileName, 'r') as fInput:
    #    content = fInput.read()
        
    sort_3_urlsum = ObuyUrlSummary(url=r'http://search.gome.com.cn/product/10000004-10000057-3-1-4-2-sc_.html', 
                                   parentPath=[('test')], catagoryLevel=3)
    
    result = crawleRetries(urlSum = sort_3_urlsum)
    content = result.content
    with open(fileName, 'w') as fInput:
        fInput.write(content)
    sort3Page = GomeSort3PageParser(content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print product.logstr()
        
def testRegx():
    fileName = os.path.join(testFilePath,'gome_test.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    import chardet,re
    content = content.decode('utf-8')
    #print content
    regx =ur'''var d =\[\{name0:'(.*?)',.*?\$\('#promImg_([0-9]+)'\)'''
    p = re.compile(regx,re.DOTALL)
    for match in p.finditer(content):
        print match.group(2),match.group(1)
    
if __name__ == '__main__':
    # Manual smoke tests -- toggle the commented calls as needed.
    #testAllSortPage()
    #testSort3Page()
    testSort3Details()
    testRegx()

    
    
