#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-8-7

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''

from cStringIO import StringIO
from coo8.image_price import captcha_coo8
from crawlerhttp import crawleRetries
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
from spiderconfigparser import SpiderConfig


# Root catalog URL descriptor — the entry point of the whole coo8.com crawl
# (category level 0, recursed into sub-categories by the crawler).
coo8Root = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8', 
        isRecursed=True, catagoryLevel=0)

class Coo8AllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.coo8.com/allcatalog/ and build ObuyUrlSummary objects
    for the whole category tree. Only the level-3 (leaf) summaries are
    returned; level-1 and level-2 summaries are created as their parents.
    '''
    mainHost = r'http://www.coo8.com/'

    # NOTE: the previous __init__ override only forwarded its arguments to
    # the superclass with the identical signature, so it was removed; the
    # inherited constructor is used instead.

    def getBaseSort3UrlSums(self):
        '''
        Walk the three category levels of the catalog page.

        Returns a list of ObuyUrlSummary objects, one per level-3 category.
        '''
        finalUrlList = []
        allSort = self.soup.find(name='div', attrs={'class': 'cateItems'})
        for t in allSort.findAll(name='div', attrs={'class': re.compile('hd.*')}):  # level-1 category
            sort_1 = t.find(name='h2')
            name = sort_1['id']
            url = ''.join((self.mainHost, name, '/'))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='div', attrs={'class': re.compile('bd.*')})
            for tt in sort_2(name='dl'):  # level-2 category
                name = tt.dt.h3.getText()
                url = ''.join((self.mainHost, sort_1_urlsum.name, name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt(name='dd'):  # level-3 category
                    try:
                        name, url = ParserUtils.parserTag_A(ttt.a)
                    except Exception:
                        # best-effort: skip malformed <dd> entries (e.g. no <a> tag)
                        continue
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum, firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
        
class Coo8Sort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 category (product listing) pages.
    '''
    # number of worker threads used to fetch and decode the price captcha images
    pricePageNum = 8

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        '''
        Build a URL template for the follow-up listing pages.

        The literal '{}' placeholder is intentionally left in the result for
        the caller to fill in with a page number via str.format.
        '''
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-{}-0-101101'
        return '%s%s.%s' % (urlSegs[0].replace('-0-0-0-0', ''), pageSeg, urlSegs[1])

    def getTotal(self):
        '''
        Return the number of listing pages, capped at SpiderConfig.getMaxPage().
        Falls back to 1 when the pager block is missing or its text does not
        match the expected pattern (the original code raised AttributeError
        on a failed regex search).
        '''
        p = re.compile(u'共([0-9]*)页')  # "N pages in total"
        pageSeg = self.soup.find(name='div', attrs={'class': 'pageInfo'})
        if pageSeg is None:
            return 1
        m = p.search(pageSeg.getText())
        if m is None:
            # pager present but unparseable: assume a single page
            return 1
        totalNum = int(m.group(1))
        if totalNum > SpiderConfig.getMaxPage():
            totalNum = SpiderConfig.getMaxPage()
        return totalNum

    def getAdWords(self, prod, prodUrl):
        '''
        If the listing entry advertises a promotion, fetch the product detail
        page and extract the promotion text; otherwise return ''.
        '''
        extraIconSeg = prod.find(name='p', attrs={'class': 'text-tag-wrap'})
        adWords = ''
        if extraIconSeg:
            extraMsg = extraIconSeg.getText()
            # Only fetch the detail page when the tag text mentions cash-back
            # (返现), free gift (赠品) or coupon (返券).
            if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1 \
                                       or extraMsg.find(u'返券') != -1:
                sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
                result = crawleRetries(urlSum=sort_5_urlsum)
                parser = Coo8SortFinalParser(dataStr=result.content, rootUrlSummary=sort_5_urlsum)
                adWords = parser.parserPageInfos()
        return adWords

    def parserPageInfos(self):
        '''
        Extract a ProductDetails object for every product on the listing page.

        Prices are rendered as captcha images on coo8, so decoding each one
        (getProductPrice with captcha_coo8) is dispatched to a thread pool,
        which appends finished products to resultList.
        '''
        resultList = []
        plist = self.soup.find(name='div', attrs={'class':'srchContent'})
        if plist is None:
            # layout not recognised (or empty category): nothing to parse
            return resultList
        # Create the pool BEFORE the try block: the original created it inside
        # try, so a failure in ThreadPool() caused a NameError in finally.
        pool = ThreadPool(self.pricePageNum)
        # The original `except Exception, e: raise e` was removed: in
        # Python 2 re-raising the bound exception resets the traceback, and
        # the handler added nothing. try/finally keeps the cleanup only.
        try:
            for li in plist(name='li'):
                pNameSeg = li.find(name='p', attrs={'class': 'name'}).a
                pName = pNameSeg['title']
                imgUrlSeg = li.find(name='p', attrs={'class': 'pic'}).img
                imgUrl = ''
                if imgUrlSeg:
                    imgUrl = imgUrlSeg['src']
                # product id is the file name of the detail-page URL
                pid = pNameSeg['href'].rsplit('/')[-1].split('.')[0]
                url = pNameSeg['href']
                if url and not url.startswith('http'):
                    url = ''.join((r'http://www.coo8.com', pNameSeg['href']))
                adWords = self.getAdWords(prod=li, prodUrl=url)
                priceImgUrl = li.find(name='p', attrs={'class': 'price'}).img['src']
                prodDetail = ProductDetails(fullUrl=url, productId=pid, adWords=adWords, name=pName, imageUrl=imgUrl)
                prodDetail.catagory = self.rootUrlSummary
                pimgUrlSumm = ObuyUrlSummary(url=priceImgUrl)
                req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool, captcha_coo8], None,
                        callback=None)
                pool.putRequest(req)
            pool.wait()
        finally:
            # always stop the worker threads, even when parsing fails
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList

class Coo8Sort4PageParser(Coo8Sort3PageParser):
    '''
    Level-4 category pages are plain listing pages: product extraction is
    inherited from Coo8Sort3PageParser, while sub-category discovery is
    explicitly disabled.
    '''
    # NOTE: the redundant __init__ override (identical signature, pure
    # pass-through to super) was removed; the inherited constructor is used.

    def parserSubUrlSums(self):
        # Level-4 pages have no deeper sub-categories to enumerate.
        return None
    
class Coo8SortFinalParser(Parser):
    '''
    Parser for a single product detail page; extracts the promotion text
    (cash-back plus coupon/free-gift descriptions).
    '''
    # NOTE: the redundant __init__ override (pure pass-through to super with
    # the identical signature) was removed; the inherited constructor is used.

    def parserPageInfos(self):
        '''
        Return '<cash-back>@<coupon/gift>' for the page.

        Either part may be empty; when both are missing the result is the
        bare separator '@' (callers split on '@', see the module self-test).
        '''
        crashCut = self.getCrashCut()
        exGift = self.getCouponAndExGift()
        return '@'.join((crashCut, exGift))

    def getCrashCut(self):
        '''Return the cash-back ("D-fanxian") text, or '' when absent.'''
        crashCutSeg = self.soup.find(name='span', attrs={'class': 'D-fanxian'})
        if crashCutSeg:
            return crashCutSeg.getText()
        return ''

    def getCouponAndExGift(self):
        '''Return the coupon / free-gift ("zengpin") text, or '' when absent.'''
        adSeg = self.soup.find(name='dl', attrs={'id': 'zengpin'})
        if adSeg:
            return adSeg.getText()
        return ''
    
# Dispatch table: category level -> parser class used by the crawler.
parserDict = {0:Coo8AllSortParser, 3:Coo8Sort3PageParser, 4:Coo8Sort4PageParser}

''' test '''
# Directory holding the saved HTML fixtures used by the manual tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    fileName = os.path.join(testFilePath, 'coo8_2011-11-07_21-02-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8')
    firstPage = Coo8AllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        print sort_3.url

def testSort3Page():    
    fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/280-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
    sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/353-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(sort_3_urlsum)
    content = result.content
    sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)    
    for prod in sort3Page.parserPageInfos():
        print prod.logstr()                                                     

def testSortFinal():
    urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/product/159376.html', parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(urlsum)
    finalPage = Coo8SortFinalParser(result.content, urlsum)    
    print finalPage.parserPageInfos()

    
if __name__ == '__main__':
    #testAllSortPage()
    #testSort3Page()
    #testSort3Details()
    #testSortFinal()
    s = '@'
    print s.split('@')

    
    
