#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-27

主要用于从网站上爬取信息后，抽取页面信息；

@author: zhongfeng
'''

from BeautifulSoup import BeautifulSoup, Comment
from copy import deepcopy
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse

class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Collect every top-level category from http://www.amazon.cn and wrap
    each one as an ObuyUrlSummary.
    '''
    mainHost = r'http://www.amazon.cn'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def __getBaseSort1UrlSums(self):
        # The search dropdown carries one <option> per top-level category.
        dropdown = self.soup.find(name='select', attrs={"id": "searchDropdownBox"})
        base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
        summaries = []
        for option in dropdown.findAll(name='option'):
            searchAlias = option['value']
            catName = searchAlias.split('=')[-1]
            # 'aps' is the "all departments" pseudo-entry; skip it.
            if catName == 'aps':
                continue
            catUrl = base_url.format(urllib.quote(searchAlias))
            summaries.append(self.buildSort_N(catUrl, catName, self.rootUrlSummary))
        return summaries

    def parserSubUrlSums(self):
        return self.filterUrlList(self.__getBaseSort1UrlSums())
    
    
class AmazonSort1Parser(RootCatagoryPageParser):
    '''
    Parse a first-level category page and extract all second-level
    categories as ObuyUrlSummary objects.
    '''
    mainHost = r'http://www.amazon.cn'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort1Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def __isCat(self, catName):
        # A refinement header marks the category section when its text
        # contains the Chinese word for "category".
        return catName.find(u'类别') >= 0

    def __getBaseSort2UrlSums(self):
        finalUrlList = []
        sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
        if sort2 is None:
            # Page layout changed or fetch was incomplete; nothing to extract.
            return finalUrlList
        # Locate the <h2> heading the category refinement section.
        # BUG FIX: the original for/break loop silently reused the LAST <h2>
        # when none matched (and raised NameError when no <h2> existed).
        catSeg = None
        for seg in sort2(name='h2'):
            if self.__isCat(seg.getText().strip()):
                catSeg = seg
                break
        if catSeg is None:
            return finalUrlList

        allSort2Seg = catSeg.findNextSibling(name='ul')
        if allSort2Seg is None:
            return finalUrlList
        for t in allSort2Seg.findAll(name='a'):
            name, url = ParserUtils.parserTag_A(t)
            url = ''.join((self.mainHost, url))
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
            finalUrlList.append(sort_2_urlsum)
        return finalUrlList

    def parserSubUrlSums(self):
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
    
        
class AmazonSort2PageParser(Sort3PageParser):
    '''
    Parser for a second-level (search-result) page: extracts product
    details and builds the AJAX URL of the next result page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parseProduct(self, prod):
        # Extract a single product from a result <div>.
        # Returns a ProductDetails, or None when the div has no title
        # block (e.g. a placeholder/ad cell).
        titleSeg = prod.find(name='div', attrs={'class':'title'})
        if titleSeg is None:
            return
        pName, url = ParserUtils.parserTag_A(titleSeg.a)
        # NOTE(review): the product name doubles as its id here —
        # confirm downstream consumers expect that.
        pid = pName
        priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
        pastPrice = '0.00'
        currentPrice = '0.00'
        if priceSeg != None:
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            # <strike> holds the pre-discount price when present.
            bypastSeg = priceSeg.strike
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
        prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice, 
            name=pName, adWords='')
        return prodDetail

    def parserPageInfos(self):
        # Collect products from the visible result divs, then also from the
        # 'results-atf-next' section whose markup is shipped inside an HTML
        # comment and must be re-parsed as its own document.
        resultList = []       
        soupRoot = self.soup
        for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            prodDetail = self.parseProduct(prod)
            if prodDetail != None:
                resultList.append(prodDetail)
        
        resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
        if resultsAtfNextSeg != None:
            # The payload is a Comment node; parse its text as HTML
            # (decoding entities) to reach the product divs inside.
            resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))  
            spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
            for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
                prodDetail = self.parseProduct(prod)
                if prodDetail != None:
                    resultList.append(prodDetail)
        return resultList
    
    def __nextPagePattern(self):
        # Template of Amazon's AJAX search endpoint; placeholders are
        # (ref fragment, tab name, pageTypeID).
        return  r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=&section=BTF&fromApp=undefined&fromPage=undefined&version=2'
        
    def __getNextPageUrl(self):
        # Build the AJAX URL for the next result page, or return None on
        # the last page (no 'pagnNextLink' anchor).
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            t= urlparse.urlparse(url)
            qsDict = urlparse.parse_qs(t.query)
            # The last node id of the 'rh' browse ladder (…,n:<id>) is
            # used by Amazon as the pageTypeID of the AJAX call.
            pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
             
            ref = url.replace(r'/gp/search/','')
            # NOTE(review): assumes parentPath[1] is the first-level
            # category whose name is the search tab — confirm with callers.
            tab = self.rootUrlSummary.parentPath[1].name
            fullUrl = self.__nextPagePattern().format(ref,tab,pageTypeID)    
        return fullUrl
        
    def parserSubUrlSums(self):
        # A result page yields at most one follow-up URL: the next page.
        nextPageUrl = self.__getNextPageUrl()
        if nextPageUrl is None:
            return []
        else:
            urlSum = self.buildSort_4(nextPageUrl)
            urlSum.catagoryLevel = 3
            return [urlSum]

class AmazonSort3JsonParser(Parser):
    '''
    Parser for the "&&&"-delimited JSON fragments returned by Amazon's
    AJAX search endpoint; delegates the embedded HTML payloads to
    AmazonSort2PageParser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3JsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
        # BUG FIX: these attributes were only bound when the matching key
        # appeared in the response, so parserPageInfos()/parserSubUrlSums()
        # could raise AttributeError on a partial response.
        self.pageNextSeg = None
        self.resultsBtf = None
        self.resultsAtf = None
        # Normalise whitespace inside each fragment and drop empty ones.
        segList = self.dataStr.split('&&&')
        segList = [' '.join(seg.split()).strip() for seg in segList]
        segList = [seg for seg in segList if seg != '']
        for jsonObj in (json.loads(seg) for seg in segList):
            # 'in' replaces the deprecated dict.has_key().
            if 'pagination' in jsonObj:
                self.pageNextSeg = jsonObj['pagination']['data']['value']
            if 'results-btf' in jsonObj:
                self.resultsBtf = jsonObj['results-btf']['data']['value']
            if 'results-atf-next' in jsonObj:
                self.resultsAtf = jsonObj['results-atf-next']['data']['value']

    def parserPageInfos(self):
        # Parse both HTML payloads (below-the-fold and above-the-fold),
        # skipping whichever sections the response did not include.
        result = []
        for htmlSeg in (self.resultsBtf, self.resultsAtf):
            if htmlSeg is not None:
                result.extend(AmazonSort2PageParser(htmlSeg, self.rootUrlSummary).parserPageInfos())
        return result

    def parserSubUrlSums(self):
        # No pagination fragment means no next page.
        if self.pageNextSeg is None:
            return []
        return AmazonSort2PageParser(self.pageNextSeg, self.rootUrlSummary).parserSubUrlSums()
    
    
''' test '''
# Directory holding the HTML/JSON fixtures used by the manual tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    fileName = os.path.join(testFilePath, 'amazon.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()

    rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
    include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
                               name='服务器', catagoryLevel=2)]
    firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
    for sort_1 in firstPage.parserSubUrlSums():       
        #for index, urlsum in enumerate(sort_3.parentPath):
            #print '\t' * index, str(urlsum.getUrlSumAbstract())
        print sort_1.url , sort_1.catagoryLevel 
             

def testSort1Page():    
    fileName = os.path.join(testFilePath, 'toys_games.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                    parentPath=[('test')], catagoryLevel=1)
    sort2Page = AmazonSort1Parser(content, sort_1_urlsum)
    for sort_2 in sort2Page.parserSubUrlSums():
        print sort_2.url

def testSort2Page():    
    fileName = os.path.join(testFilePath, 'amazon_2011-08-12_15-58-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                    parentPath=[rootObuyUrlSummary], catagoryLevel=1)
    sort_1_urlsum.name = 'toys-and-games'
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                                    parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
    sort2Page = AmazonSort2PageParser(content, sort_2_urlsum)
    for sort_2 in sort2Page.parserSubUrlSums():
        print sort_2.url
    for product in sort2Page.parserPageInfos():
        print product.logstr()

def testSort3Details():
    fileName = os.path.join(testFilePath, 'toys_games_1.json')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                    parentPath=[rootObuyUrlSummary], catagoryLevel=1)
    sort_1_urlsum.name = 'toys-and-games'
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                                    parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
    sort3Page = AmazonSort3JsonParser(content, sort_3_urlsum)  
    for product in sort3Page.parserPageInfos():
        print product.logstr()
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url
        
def testComment():
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    comments = soup.findAll(text=lambda text:isinstance(text, Comment))
    for comment in comments:
        print comment.extract()
        
def testJson():
    # Manual check: split the "&&&"-delimited AJAX response into JSON
    # fragments and parse the HTML payloads embedded in each one.
    import json
    fileName = os.path.join(testFilePath, 'toys_games_1.json')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # Normalise whitespace inside each fragment and drop empty ones.
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('pagination'):
            print jsonObj['pagination']['data']['value']
        if jsonObj.has_key('results-btf'):
            # Below-the-fold results: plain HTML, parse directly.
            print '+++++++++++++++++'
            jsonRet =  jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort2PageParser(jsonRet, sort_3_urlsum)  
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            # Above-the-fold results: the HTML lives inside an HTML
            # comment node, so extract the comment text before parsing.
            print '--------------'
            jsonRet =  jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            soup = BeautifulSoup(jsonRet)
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort2PageParser(comment.extract(), sort_3_urlsum)  
            for product in sort3Page.parserPageInfos():
                print product.logstr()
                
if __name__ == '__main__':
    # Manual smoke tests: uncomment the scenario to run against the
    # fixtures in test_resources/.
#    testAllSortPage()
    #testSort1Page()
    testSort2Page()
    #testSort3Details()
    #testComment()
    #testJson()
   
#/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051
#/gp/search/ref=sr_pg_2?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=2&bbn=647071051&ie=UTF8&qid=131311239
#ref=sr_pg_2?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=2&bbn=647071051&ie=UTF8&qid=1313112393&tab=toys-and-games&pageTypeID=1982054051&fromHash=&fromRH=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&section=BTF&fromApp=undefined&fromPage=undefined&version=2
#ref=sr_pg_3?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=3&bbn=647071051&ie=UTF8&qid=1313112553&tab=toys-and-games&pageTypeID=1982054051&fromHash=%2Fref%3Dsr_pg_2%3Frh%3Dn%253A647070051%252Cn%253A%2521647071051%252Cn%253A1982054051%26page%3D2%26bbn%3D647071051%26ie%3DUTF8%26qid%3D1313112393&section=BTF&fromApp=gp%2Fsearch&fromPage=results&version=2
#ref=sr_pg_5?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=5&bbn=647071051&ie=UTF8&qid=1313112793&tab=toys-and-games&pageTypeID=1982054051&fromHash=%2Fref%3Dsr_pg_4%3Frh%3Dn%253A647070051%252Cn%253A%2521647071051%252Cn%253A1982054051%26page%3D4%26bbn%3D647071051%26ie%3DUTF8%26qid%3D1313112677&section=BTF&fromApp=gp%2Fsearch&fromPage=results&version=2