#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-27

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''

from pageparser import *
import re
from copy import deepcopy

class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Parses the full category directory page at
    http://www.amazon.cn/gp/site-directory and assembles each
    second-level category into an ObuyUrlSummary.
    '''
    mainHost = r'http://www.amazon.cn'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def __getBaseSort2UrlSums(self):
        # Walk every top-level grouping inside the site-directory element and
        # collect URL summaries for all of its second-level category links.
        collected = []
        directory = self.soup.find(attrs={"id": "siteDirectory"})
        for grouping in directory.findAll(name='div', attrs={"class": "popover-grouping"}):
            # First-level category: its heading text doubles as the URL suffix.
            topName = grouping.find(name='div', attrs={"class": "popover-category-name"}).h2.getText()
            topUrl = self.mainHost + topName
            parentSum = self.buildSort_N(topUrl, topName, self.rootUrlSummary, isCrawle=False)
            for sibling in grouping.findNextSiblings(name='div'):
                # Second-level category anchor.
                subName, subUrl = ParserUtils.parserTag_A(sibling.a)
                subUrl = self.mainHost + subUrl
                if subName.startswith(u'所有'):
                    # Skip the aggregated "all ..." shortcut links.
                    continue
                collected.append(self.buildSort_N(subUrl, subName, parentSum, isCrawle=True))
        return collected

    def parserSubUrlSums(self):
        '''Return the filtered list of second-level category URL summaries.'''
        return self.filterUrlList(self.__getBaseSort2UrlSums())
    
    
class AmazonSort2Parser(RootCatagoryPageParser):
    '''
    Parses a second-level category page and extracts its third-level
    category links, assembling them into ObuyUrlSummary objects.
    '''
    mainHost = r'http://www.amazon.cn'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def __isCat(self, catName):
        # A widget holds the category listing when its heading contains u'分类'.
        return catName.find(u'分类') >= 0

    def getBaseSort3UrlSums(self):
        '''Return an ObuyUrlSummary for every third-level category link.

        Returns an empty list when the page has no category widget.
        (Bug fix: the original loop leaked its variable, so when no heading
        matched it silently parsed the LAST widget, and raised NameError when
        no widget existed at all.)
        '''
        finalUrlList = []
        catSection = None
        for section in self.soup.findAll(name='div', attrs={"class": "unified_widget blurb"}):
            if self.__isCat(section.h2.getText()):
                catSection = section
                break
        if catSection is None:
            # No category widget on this page: nothing to extract.
            return finalUrlList
        for t in catSection.findAll(name='div', attrs={'class': 'title'}):
            name, url = ParserUtils.parserTag_A(t.a)
            url = ''.join((self.mainHost, url))
            finalUrlList.append(self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True))
        return finalUrlList
        
class AmazonSort3PageParser(Sort3PageParser):
    '''
    Parses a third-level (product listing) page: extracts product
    details and, when present, the URL of the next result page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserPageInfos(self):
        '''Return a list of ProductDetails parsed from every result_<n> entry.'''
        resultList = []
        for prod in self.soup.findAll(name='div', attrs={'id': re.compile(r'result_[0-9]+')}):
            pName, url = ParserUtils.parserTag_A(prod.find(name='div', attrs={'class': 'title'}).a)
            # No stable product id is available on the listing page, so the
            # product name doubles as the id.
            pid = pName
            # Hoisted: the original looked up the newPrice div twice.
            priceSeg = prod.find(name='div', attrs={'class': 'newPrice'})
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            # The struck-through original price is optional.
            bypastSeg = priceSeg.strike
            pastPrice = '0.00' if bypastSeg is None else ParserUtils.getPrice(bypastSeg.getText())
            resultList.append(ProductDetails(productId=pid, privPrice=currentPrice,
                                             pubPrice=pastPrice, name=pName, adWords=''))
        return resultList

    def parserSubUrlSums(self):
        '''Return a one-element list holding the next-page URL summary, or [].'''
        nextUrl = self.__getNextPageUrl()
        if nextUrl is None:
            return []
        urlSum = deepcopy(self.rootUrlSummary)
        urlSum.url = nextUrl
        return [urlSum]

    def __getNextPageUrl(self):
        '''Return the absolute URL of the next result page, or None when on the last page.'''
        nextPageSeg = self.soup.find(attrs={'id': 'pagnNextLink'})
        fullUrl = None
        if nextPageSeg is not None:  # bug fix: removed leftover debug print
            name, url = ParserUtils.parserTag_A(nextPageSeg)
            # The "next" link is a /gp/search path; rebase it as a fragment on
            # the fragment-free part of the current category URL.
            url = url.replace(r'/gp/search', '#')
            baseUrl = self.rootUrlSummary.url.rsplit('#')[0]
            fullUrl = ''.join((baseUrl, url))
        return fullUrl

            
''' test '''
import os
# Resolve the test-resource directory relative to this module's location so
# the smoke tests below work regardless of the current working directory.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    fileName = os.path.join(testFilePath, 'amazonSite.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/site-directory/ref=topnav_sad', name='Amazon')
    include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
                               name='服务器', catagoryLevel=2)]
    firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
    for sort_2 in firstPage.parserSubUrlSums():       
        #for index, urlsum in enumerate(sort_3.parentPath):
            #print '\t' * index, str(urlsum.getUrlSumAbstract())
        print sort_2.url , sort_2.catagoryLevel 
             

def testSort2Page():    
    fileName = os.path.join(testFilePath, '888465051.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/%E7%94%B5%E8%84%91%E5%8F%8A%E9%85%8D%E4%BB%B6/b/ref=sd_allcat_pc?ie=UTF8&node=888465051',
                                    parentPath=[('test')], catagoryLevel=2)
    sort3Page = AmazonSort2Parser(content, sort_2_urlsum)
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url

def testSort3Page():    
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                    parentPath=[('test')], catagoryLevel=3)
    sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url

def testSort3Details():
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print product.logstr()
        
def testComment():
    from BeautifulSoup import BeautifulSoup, Comment
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    comments = soup.findAll(text=lambda text:isinstance(text, Comment))
    for comment in comments:
        print comment.extract()
        
def testJson():
    import json
    fileName = os.path.join(testFilePath, 'watch_json.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            jsonRet =  jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort3PageParser(jsonRet, sort_3_urlsum)  
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet =  jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            soup = BeautifulSoup(jsonRet)
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort3PageParser(comment.extract(), sort_3_urlsum)  
            for product in sort3Page.parserPageInfos():
                print product.logstr()
                
if __name__ == '__main__':
    # Manual smoke tests: uncomment the scenario to exercise. Each requires
    # the corresponding saved HTML file under test_resources/.
    #testAllSortPage()
    #testSort2Page()
    #testSort3Page()
    #testSort3Details()
    #testComment()
    testJson()
    
