#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2011-7-28

@author: zhongfeng
'''

from crawlerhttp import crawle
from logfacade import LoggerFactory
from threadpool import ThreadPool, WorkRequest
from urlparse import urlparse
import os
import time
import threading
from threading import stack_size

# Limit the stack size for threads created after this call to 1 MiB
# (32768*32 = 1048576 bytes) so a large worker pool stays cheap on memory.
stack_size(32768*32)

# Module-level default logger from the project's logging facade.
logger = LoggerFactory.getLogger()

def __procSubUrlRequests(parser,result,spider):
    '''SubUrl 入队'''
    parserResult = parser.parserSubUrlSums()
    if parserResult is not None:
        for subUrlSum in parserResult:
            print 'SubUrlSum put Q: %s ,level: %s' \
                             % (subUrlSum.url,subUrlSum.catagoryLevel)
            _putSpideRequest(subUrlSum,spider)
                 
def __procPageInfos(parser,urlsum):
    '''Parse the page's detail records (e.g. product info) and write each
    one to a per-site log (log name taken from the URL's hostname).'''
    records = parser.parserPageInfos()
    if records is None:
        return
    site = urlparse(urlsum.url).hostname.split('.')[1]
    siteLogger = LoggerFactory.getLogger(logName=site)
    for record in records:
        siteLogger.info(record.logstr())
    
def _putSpideRequest(urlsum,spider):
    '''Wrap urlsum in a WorkRequest (worker: main_spide) and submit it to
    the spider's thread pool.'''
    request = WorkRequest(main_spide, [urlsum, spider], None,
                          callback=proc_result,
                          exc_callback=handle_exception)
    spider.pool.putRequest(request)
    
def __saveErrorPage(url,content):
    '''Dump the raw HTML of a page that failed to parse for later debugging.

    The file lands in error_page/<site>/<date>/<site>_<timestamp>.html
    next to this module; missing directories are created on demand.'''
    moduleDir = os.path.dirname(os.path.abspath(__file__))
    site = urlparse(url).hostname.split('.')[1]
    dirPath = os.path.join(moduleDir, 'error_page', site,
                           time.strftime("%Y-%m-%d"))
    if not os.path.exists(dirPath):
        os.makedirs(dirPath)
    stamp = time.strftime("%Y-%m-%d_%H-%M-%S")
    fullPath = os.path.join(dirPath, '%s_%s.html' % (site, stamp))
    with open(fullPath,'w') as output:
        output.write(content)

def handle_exception(request, exc_info):
    if not isinstance(exc_info, tuple):
        # Something is seriously wrong...
        print request
        print exc_info
        raise SystemExit
    print "**** Exception occured in request #%s: %s" % \
     (request.requestID, exc_info)

def proc_result(request, result):
    '''Thread-pool success callback: intentionally a no-op.

    Result handling is done synchronously inside main_spide (via
    proc_normal_result), so by the time this callback fires there is
    nothing left to do.
    '''
    pass

def __reinqueue_proc(urlsum,result,spider):
    '''Retry a failed URL exactly once.

    A urlsum with stat == 0 has never failed before: record the failure
    code and put it back on the queue. Otherwise it already had its retry,
    so just log the permanent failure.'''
    if urlsum.stat != 0:
        logger.error( "Failed %s:%d" % (urlsum.url, result.code))
        return
    urlsum.stat = result.code
    logger.info("urlsum reinqueue:%s" % urlsum.url)
    _putSpideRequest(urlsum,spider)

def proc_normal_result(reqArgs, result):
    urlsum = reqArgs[0]
    spider = reqArgs[1]
    if result.content == '':
        __reinqueue_proc(urlsum, result, spider)
        return
    if result.code == 200:
        if result.content == '':
            __reinqueue_proc(urlsum, result, spider)
            return
        print "**** Result from request #%s: %d" % (urlsum.url, result.code)
        ParserClass = spider.parserDict.get(urlsum.catagoryLevel,None)
        if ParserClass is None:
            return
        parser = ParserClass(result.content,urlsum,urlsum.include,
                         urlsum.exclude)
        try:
            if urlsum.isRecursed:
                __procSubUrlRequests(parser,result,spider) 
            if spider.procDetails:
                __procPageInfos(parser,urlsum)
        except Exception,e:
            logger.error('ParserException.Reason:%s,URL:%s'% (e.message,urlsum.url))
            __saveErrorPage(urlsum.url,result.content)
            __reinqueue_proc(urlsum,result,spider)
    else:
        __reinqueue_proc(urlsum,result,spider)
        
def main_spide(*req):
    print "(active worker threads: %i)" % (threading.activeCount()-1, )
    urlsum = req[0]
    result = crawle(urlsum)
    proc_normal_result(req, result)
    return result   
 
class ObuySpider(object):
    '''Recursive site spider driven by a worker thread pool.

    rootUrlSummary -- seed url summary; the include/exclude filters are
                      attached to it before crawling starts
    parserDict     -- maps a urlsum's catagoryLevel to its parser class
    threadNum      -- number of worker threads in the pool
    procDetails    -- whether page detail info is parsed and logged
    '''
    def __init__(self,rootUrlSummary = None,parserDict =None,threadNum = 5,
                 procDetails = True,include = None,exclude = None):
        self.rootUrlSummary = rootUrlSummary
        self.parserDict = parserDict
        self.procDetails = procDetails       # parse page details or not
        # Guard against the default rootUrlSummary=None: the original code
        # raised AttributeError here when no seed was supplied.
        if self.rootUrlSummary is not None:
            self.rootUrlSummary.include = include
            self.rootUrlSummary.exclude = exclude
        self.pool = ThreadPool(threadNum)

    def spide(self):
        '''Seed the pool with the root url and block until all work drains.'''
        _putSpideRequest(self.rootUrlSummary,self)
        self.pool.wait()

if __name__ == '__main__':
    # Running the module directly only initialises the default logger;
    # actual crawling is driven by callers constructing an ObuySpider.
    logger = LoggerFactory.getLogger()
