#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-10-18

@author: zhongfeng
'''

from crawlerhttp import crawle
from logfacade import LoggerFactory

from urlparse import urlparse
import os,sys
import time
from multiprocessing import Pool,Queue

logger = LoggerFactory.getLogger()

def __procSubUrlRequests(parser,result,q):
    '''Extract the sub-URL summaries found on the current page.

    parser -- a page parser exposing parserSubUrlSums()
    result -- the fetch result for the page (kept for interface
              compatibility; not used here)
    q      -- the spider queue (kept for interface compatibility; the
              caller, main_spide, now performs the enqueueing itself)

    Returns the list of sub-URL summaries, or None when the parser
    found nothing.
    '''
    parserResult = parser.parserSubUrlSums()
    if parserResult is not None:
        return parserResult
       
def __procPageInfos(parser,urlsum):
    '''Parse the page's detailed records (e.g. product info) and log
    each one to a per-site logger named after the URL's second
    hostname label (e.g. "example" for www.example.com).
    '''
    infoList = parser.parserPageInfos()
    if infoList is None:
        return
    # assumes the hostname has at least two dot-separated labels -- TODO confirm
    siteName = urlparse(urlsum.url).hostname.split('.')[1]
    siteLogger = LoggerFactory.getLogger(logName=siteName)
    for pageInfo in infoList:
        # NOTE(review): stdlib Logger.isEnabledFor expects an int level,
        # not the string 'INFO' -- confirm LoggerFactory's contract.
        if siteLogger.isEnabledFor('INFO'):
            siteLogger.info(pageInfo.logstr())

def proc_normal_result(urlsum, result, mSpider):
    '''Dispatch a fetched page to the parser registered for its
    category level.

    urlsum  -- the URL summary that was fetched
    result  -- the fetch result (must expose .code and .content)
    mSpider -- the ObuySpider instance (supplies parserDict, q,
               procDetails)

    Returns the list of child URL summaries discovered on the page
    when urlsum.isRecursed is true and parsing succeeds; otherwise
    None.
    '''
    if result.code != 200:
        # BUG FIX: the original format string had one placeholder but two
        # arguments, which raised TypeError instead of logging the error.
        logger.error('Get From URL:%s Error code:%s' % (urlsum.url, result.code))
        return None
    ParserClass = mSpider.parserDict.get(urlsum.catagoryLevel, None)
    if ParserClass is None:
        # No parser registered for this category level: nothing to do.
        return None
    parser = ParserClass(result.content, urlsum, urlsum.include,
                         urlsum.exclude)
    # BUG FIX: subUrlSums was unbound (UnboundLocalError at the return
    # below) whenever urlsum.isRecursed was false.
    subUrlSums = None
    try:
        if urlsum.isRecursed:
            subUrlSums = __procSubUrlRequests(parser, result, mSpider.q)
        if mSpider.procDetails:
            __procPageInfos(parser, urlsum)
        return subUrlSums
    except Exception as e:
        logger.error('ParserException.Reason:%s,URL:%s' % (e, urlsum.url))
        
def main_spide(mSpider):
    '''Worker loop: drain the spider queue, fetch each URL, and feed
    any newly discovered sub-URLs back into the queue.

    Exits when the queue has been empty for 30 seconds.
    '''
    q = mSpider.q
    while True:
        try:
            urlsum = q.get(timeout=30)
        except Exception:
            # Queue stayed empty for the full timeout: crawl is done.
            break
        logger.info( "Q Size: %d|URL: %s" % (q.qsize(),urlsum.url))
        result = crawle(urlsum)
        subUrlSums = proc_normal_result(urlsum, result, mSpider)
        # BUG FIX: proc_normal_result returns None on error paths and for
        # non-recursed pages; iterating None raised TypeError and killed
        # the worker. Also renamed the loop variable, which shadowed urlsum.
        if subUrlSums:
            for childSum in subUrlSums:
                q.put(childSum)
 
class ObuySpider(object):
    '''Queue-driven crawler: seeds a shared queue with a root URL
    summary and runs main_spide over a multiprocessing Pool.
    '''
    def __init__(self,rootUrlSummary = None,parserDict =None,threadNum = 5,
                 procDetails = True,include = None,exclude = None,rootPageResult = None):
        '''rootUrlSummary -- seed URL summary object (include/exclude
                             filters are attached to it)
        parserDict     -- maps catagoryLevel -> parser class
        threadNum      -- size of the worker process pool
        procDetails    -- whether to parse detailed page information
        '''
        self.rootUrlSummary = rootUrlSummary
        self.parserDict = parserDict
        self.procDetails = procDetails       # whether to parse detailed page info
        if self.rootUrlSummary is not None:
            # BUG FIX: the original dereferenced rootUrlSummary
            # unconditionally, crashing with the documented default of None.
            self.rootUrlSummary.include = include
            self.rootUrlSummary.exclude = exclude
        self.pool = Pool(threadNum)
        self.stat = dict()                   # per-category page counters
        self.rootPageResult = rootPageResult
        # NOTE(review): a multiprocessing.Queue passed as a task argument to
        # Pool.apply_async raises RuntimeError on CPython ("Queue objects
        # should only be shared between processes through inheritance");
        # a Manager().Queue() is the usual fix -- confirm how this spider
        # is actually launched.
        self.q = Queue()

    def spide(self):
        '''Seed the queue with the root URL, run the crawl to completion,
        then print the per-category statistics.'''
        self.q.put(self.rootUrlSummary)
        self.pool.apply_async(main_spide,(self,))
        # BUG FIX: Pool.join() raises ValueError unless close() (or
        # terminate()) has been called first.
        self.pool.close()
        self.pool.join()
        self.__printStatResult()

    def __printStatResult(self):
        # items()/print() behave identically on Python 2 here but also keep
        # the block valid on Python 3 (iteritems and the print statement
        # were removed).
        for k,v in self.stat.items():
            print('Catagory:%s,Num:%d' % (k.name,v))

if __name__ == '__main__':
    # NOTE(review): this only re-creates the module-level logger and starts
    # no crawl; presumably the real entry point constructs an ObuySpider
    # elsewhere and calls spide() -- confirm.
    logger = LoggerFactory.getLogger()
