#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-27

Extracts structured page information from pages crawled from the site.

@author: zhongfeng
'''

from cStringIO import StringIO
from j360buy.image_price import captcha_360buy
from crawlerhttp import crawle
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib


class J360buyAllSortParser(RootCatagoryPageParser):
    '''
    Parses http://www.360buy.com/allSort.aspx, collecting the whole
    category tree and assembling it into ObuyUrlSummary objects.
    '''
    mainHost = r'http://www.360buy.com'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buyAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        '''Walk the three category levels and return the level-3 (leaf)
        url summaries; level-1/2 nodes are built but flagged not-to-crawl.'''
        leafSums = []
        allSort = self.soup.find(name='div', attrs={'id':'allsort'})
        for topDiv in allSort.findAll(name='div', attrs={'id':re.compile('JDS_[0-9]+')}):
            # level-1 category: the block header
            header = topDiv.find(name='div', attrs={'class':'mt'})
            name, url = ParserUtils.parserTag_A(header.h2.a)
            lvl1Sum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            body = topDiv.find(name='div', attrs={'class':'mc'})
            for dl in body(name='dl'):
                # level-2 category: each <dl> inside the block body
                name, url = ParserUtils.parserTag_A(dl.dt.a)
                lvl2Sum = self.buildSort_N(''.join((self.mainHost, url)), name, lvl1Sum, isCrawle=False)
                for em in dl.dd(name='em'):
                    # level-3 category: the crawlable leaves
                    name, url = ParserUtils.parserTag_A(em.a)
                    leafSums.append(self.buildSort_N(''.join((self.mainHost, '/', url)), name, lvl2Sum))
        return leafSums
        
class J360buySort3PageParser(Sort3PageParser):
    '''
    360Buy三级页面解析类
    '''    
    pricePageNum = 8
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buySort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    
    def nextPageUrlPattern(self):
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    
    def getTotal(self):
        pageSeg = self.soup.find(name='div', attrs={'id':'filter'}).find(attrs={'class':'pagin pagin-m fr'})
        totalPage = int(pageSeg.span.string.split('/')[-1])
        return totalPage
    
    def __getAdWords(self, plist):
        adQueryDict = eval(re.compile(r'{.*}').search(str(plist.script)).group())
        baseUrl = 'http://www.360buy.com/JdService.aspx?callback=GetJdwsmentsCallback&action=GetJdwsment'
        url = '&'.join((baseUrl, urllib.urlencode(adQueryDict)))
        result = crawle(url)
        ct = re.compile(r'{.*}').search(result.content)
        if ct is None:
            return []
        jObj = json.loads(ct.group())
        return jObj['html']
    
    def parserPageInfos(self):      
        def getProductPrice(*req):
            priceImgUrl = req[0]
            result = crawle(priceImgUrl)
            proc_normal_result(req, result)
            print 'Get price:%s' % priceImgUrl
            return result
        
        def proc_normal_result(req, result):
            args = req
            if result.code == 200:
                prodDetail = args[1]
                resultList = args[2]
                prodDetail.privPrice = captcha_360buy(StringIO(result.content))
                resultList.append(prodDetail)
            else:
                print args[0]
        resultList = []  
        plist = self.soup.find(name='div', attrs={'id':'plist'})
        if plist is None:
            raise Exception("Page Error")
            return resultList
        try:
            pool = ThreadPool(self.pricePageNum)

            pid_ad = dict([[int(wa['Wid']), wa['AdTitle']] for wa in self.__getAdWords(plist)])       
            for li in plist(name='li', attrs={'sku':re.compile('[0-9]+')}):
                pid = int(li['sku'])
                pName = li.find(name='div', attrs={'class':'p-name'}).a.getText()
                priceImgUrl = li.find(name='div', attrs={'class':'p-price'}).img['src']
                adWords = pid_ad.get(pid, '')
                prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords)
                req = WorkRequest(getProductPrice, [priceImgUrl, prodDetail, resultList, pool], None,
                        callback=None)
                pool.putRequest(req)
            pool.wait()
        except Exception,e:
            raise e
        finally:
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList

class J360buySort4PageParser(J360buySort3PageParser):
    '''
    Level-4 category pages are plain listing pages: only Product
    information is extracted, so sub-url discovery is disabled.
    '''

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buySort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # intentionally a no-op: level-4 pages have no deeper categories
        pass

''' test '''
# Fixture directory holding the saved html pages used by the manual tests.
curModDir = os.path.abspath(os.path.dirname(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test360BuyAllSortPage():
    fileName = os.path.join(testFilePath, 'allSort.aspx')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://www.360buy.com/allSort.aspx', name='360buy')
    firstPage = J360buyAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        print '/'.join(sort_3.getSavePathL()) 
        print sort_3.catagoryLevel

def testSort3Page():    
    fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
    sort3Page = J360buySort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4

def testSort3Details():
    '''Manual check: run the full product/price extraction on a saved
    level-3 page fixture (network access needed for the price images).'''
    with open(os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html'), 'r') as fInput:
        content = fInput.read()
    urlSum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
    parser = J360buySort3PageParser(content, urlSum)
    parser.parserPageInfos()
    
if __name__ == '__main__':
    # Manual smoke tests -- enable the one you want to run:
    #test360BuyAllSortPage()
    #testSort3Page()
    testSort3Details()

    
    
