#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-28

@author: zhongfeng
'''

from crawlerhttp import crawle
from logfacade import LoggerFactory
from threadpool import ThreadPool, WorkRequest
from urlparse import urlparse
import os, sys
import time
import threading
from threading import stack_size

# Cap the stack of every subsequently created thread at 32768 * 32 = 1 MiB
# (the platform default can be much larger), so a large crawler thread pool
# uses less virtual memory.
stack_size(32768 * 32)
   
class ObuySpider(object):
    """Thread-pooled web spider.

    Starting from ``rootUrlSummary`` the spider fetches pages on a
    ``ThreadPool`` of ``threadNum`` workers.  Each fetched page is handed to
    the parser class registered in ``parserDict`` under the page's
    ``catagoryLevel``; sub-URLs found by the parser are queued back into the
    pool, and (optionally) per-page detail records are logged per site.
    """

    def __init__(self, rootUrlSummary=None, parserDict=None, threadNum=5,
                 procDetails=True, include=None, exclude=None, rootPageResult=None):
        """
        :param rootUrlSummary: seed URL summary; the ``include``/``exclude``
            filters are attached to it as attributes.
        :param parserDict: maps ``catagoryLevel`` -> parser class.
        :param threadNum: number of worker threads in the pool.
        :param procDetails: whether to parse detail info from pages.
        :param rootPageResult: pre-fetched result for the root page; when set,
            the level-0 page is processed directly instead of being crawled.
        """
        self.rootUrlSummary = rootUrlSummary
        self.parserDict = parserDict
        self.procDetails = procDetails
        if rootUrlSummary is not None:  # guard: tolerate a missing seed
            self.rootUrlSummary.include = include
            self.rootUrlSummary.exclude = exclude
        self.pool = ThreadPool(threadNum)
        # per-category counters; filled via writeStat() and dumped by
        # __printStatResult() when the crawl finishes
        self.stat = dict()
        self.rootPageResult = rootPageResult

    def init_urls(self):
        """Seed the work queue with the root URL summary."""
        return self.putSpideRequest(self.rootUrlSummary)

    def spide(self):
        """Run the crawl: seed, wait for the pool to drain, print stats."""
        self.init_urls()
        self.pool.wait()
        self.__printStatResult()

    def procSubUrlRequests(self, parser, result):
        """Enqueue every sub-URL summary extracted by *parser*."""
        parserResult = parser.parserSubUrlSums()
        if parserResult is None:
            return
        logger = LoggerFactory.getLogger()  # hoisted: loop-invariant
        for subUrlSum in parserResult:
            # NOTE(review): stdlib logging expects a numeric level for
            # isEnabledFor(); assuming LoggerFactory accepts the level
            # name 'DEBUG' -- confirm.
            if logger.isEnabledFor('DEBUG'):
                logger.debug('SubUrlSum put Q: %s ,level: %s'
                             % (subUrlSum.url, subUrlSum.catagoryLevel))
            self.putSpideRequest(subUrlSum)

    def procPageInfos(self, parser, urlsum):
        """Parse page detail records (e.g. product info) and log one line
        each to a per-site logger named after the second hostname label."""
        resultList = parser.parserPageInfos()
        if resultList is not None:
            siteName = urlparse(urlsum.url).hostname.split('.')[1]
            logger = LoggerFactory.getLogger(logName=siteName)
            for parserResult in resultList:
                if logger.isEnabledFor('INFO'):
                    logger.info(parserResult.logstr())

    def putSpideRequest(self, urlsum):
        """Wrap *urlsum* in a WorkRequest and submit it to the thread pool."""
        req = WorkRequest(self.main_spide, [urlsum], None,
                          callback=None, exc_callback=self.handle_exception)
        self.pool.putRequest(req)

    def saveErrorPage(self, url, content):
        """Dump *content* under error_page/<site>/<date>/ next to the script
        so pages that failed to parse can be inspected later."""
        curModDir = os.path.abspath(os.path.dirname(sys.argv[0]))
        siteName = urlparse(url).hostname.split('.')[1]
        curtDate = time.strftime("%Y-%m-%d")
        errorFilePath = os.path.join(curModDir, 'error_page', siteName, curtDate)
        if not os.path.exists(errorFilePath):
            os.makedirs(errorFilePath)
        curtime = time.strftime("%Y-%m-%d_%H-%M-%S")
        fileName = '%s_%s.html' % (siteName, curtime)
        fullPath = os.path.join(errorFilePath, fileName)
        with open(fullPath, 'w') as output:
            output.write(content)

    def handle_exception(self, request, exc_info):
        """Thread-pool exception callback: log the failure and drop it."""
        logger = LoggerFactory.getLogger()
        logger.error("**** Exception occured in request #%s: %s,%s" %
                     (request.requestID, exc_info, request))

    def proc_result(self, request, result):
        """Hook for subclasses; the base spider ignores pool results."""
        pass

    def reinqueue_proc(self, urlsum, result):
        """Retry a failed URL while it has retries left; otherwise record
        the HTTP status on the summary and give up."""
        logger = LoggerFactory.getLogger()
        if urlsum.retries > 0:
            urlsum.retries = urlsum.retries - 1
            logger.error("urlsum reinqueue:%s" % urlsum.url)
            self.putSpideRequest(urlsum)
        else:
            urlsum.stat = result.code
            logger.error("Failed %s:%d" % (urlsum.url, result.code))

    def writeStat(self, stat, urlsum, retSize):
        """Add *retSize* to the counter kept for *urlsum* in *stat*
        (no-op for non-positive sizes)."""
        if retSize > 0:
            stat[urlsum] = stat.get(urlsum, 0) + retSize

    def procParserResult(self, result, urlsum, parser):
        """Fan out parser output: queue sub-URLs for recursive summaries
        and, when enabled, log page detail records."""
        if urlsum.isRecursed:
            self.procSubUrlRequests(parser, result)
        if self.procDetails:
            self.procPageInfos(parser, urlsum)

    def proc_normal_result(self, reqArgs, result):
        """Handle one fetch result: retry empty/failed responses, otherwise
        parse the page with the parser registered for its category level."""
        urlsum = reqArgs[0]
        logger = LoggerFactory.getLogger()
        if result.content == '':
            self.reinqueue_proc(urlsum, result)
            return
        if result.code == 200:
            ParserClass = self.parserDict.get(urlsum.catagoryLevel, None)
            if ParserClass is None:
                return  # no parser registered for this level: nothing to do
            parser = ParserClass(result.content, urlsum, urlsum.include,
                                 urlsum.exclude)
            try:
                self.procParserResult(result, urlsum, parser)
            except Exception as e:  # keep crawling; save the page for triage
                logger.error('ParserException.Reason:%s,URL:%s' % (e, urlsum.url))
                self.saveErrorPage(urlsum.url, result.content)
        else:
            # BUG FIX: format string had one placeholder for a two-element
            # tuple, so this logging line itself raised TypeError.
            logger.error('Get From URL:%s Error code:%d' % (urlsum.url, result.code))
            self.reinqueue_proc(urlsum, result)

    def main_spide(self, *req):
        """Worker entry point: fetch (or reuse the pre-fetched root result)
        and process a single URL summary."""
        urlsum = req[0]
        logger = LoggerFactory.getLogger()
        logger.info("Q Size: %d|Name:%s |URL: %s"
                    % (self.pool._requests_queue.qsize(), urlsum.name, urlsum.url))
        # the root page may be injected directly (e.g. loaded from a file)
        if urlsum.catagoryLevel == 0 and self.rootPageResult is not None:
            self.proc_normal_result(req, self.rootPageResult)
            return self.rootPageResult
        result = crawle(urlsum)
        self.proc_normal_result(req, result)
        return result

    def __printStatResult(self):
        """Print the per-category counters collected during the crawl."""
        # items()/print() keep this block valid on both Python 2 and 3
        for k, v in self.stat.items():
            print('Catagory:%s,Num:%d' % (k.name, v))

from spiderconfigparser import getExcludeUrlSums,getIncludeUrlSums,SpiderConfig
from upload import fileUpload

def main(root, parserDict, SpiderClass=ObuySpider):
    """Run one crawl of *root* when crawling is enabled in the config,
    then shut logging down and, if configured, upload the result files.

    :param root: root URL summary passed to the spider.
    :param parserDict: maps catagoryLevel -> parser class.
    :param SpiderClass: spider implementation to instantiate.
    """
    if SpiderConfig.isStartSpider():
        crawler = SpiderClass(
            rootUrlSummary=root,
            parserDict=parserDict,
            include=getIncludeUrlSums(),
            exclude=getExcludeUrlSums(),
            threadNum=SpiderConfig.getThreadNum(),
        )
        crawler.spide()
    LoggerFactory.shutdown()
    if SpiderConfig.isUpload():
        fileUpload()

if __name__ == '__main__':
    # NOTE(review): running this module directly only initializes a logger;
    # main() is never invoked here. Presumably site-specific spider scripts
    # import this module and call main() themselves -- confirm this is
    # intentional.
    logger = LoggerFactory.getLogger()
