#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Created on 2006-02-11

抓取核心类，用于抓取页面；
可以支持登陆抓取等；

@author: zhongfeng
'''
from __future__ import with_statement

import cookielib
import gzip
import socket
import time
import urllib
import urllib2
import zlib
from copy import deepcopy

from threadpool import ThreadPool,makeRequests

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

from enum import Enum

#设置超时
timeout = 15
socket.setdefaulttimeout(timeout)

#默认错误
UnKnownErrCode=700

#http  headers 
commonHeaders = {"Accept":"*/*", "User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; GTB6.5)"}

CrawlerType = Enum('GET_URL', 'GET_MESSAGE', 'POST_MESSAGE')
proxiesDic = {'http':'61.152.108.19:8080'}

class UrlSummary(object):
    ''' URLSummary 用于表示 Url信息抓取摘要信息，包括http请求的headers设定，采用get or post方式等.'''

    def __init__(self, url, data=None, headers=None,
                 crawlerType=CrawlerType.GET_URL):
        assert url != None #url不能为None
        self.url = url
        self.headers = deepcopy(commonHeaders) if headers is None else headers 
        self.crawlerType = crawlerType 
        self.data = data
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__

class CrawlResult(object):
    ''' 用于保存crawl page结果，包括内容，返回的状态码等 '''
    def __init__(self, url=None, code=UnKnownErrCode, content='', headers=None):
        self.url = url
        self.code = code
        self.content = content
        self.headers = headers
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__

# deflate support
def deflate(data):     # zlib only provides the zlib compress format, not the deflate format;
    try:               # so on top of all there's this workaround:
        return zlib.decompress(data, - zlib.MAX_WBITS)
    except zlib.error:
        return zlib.decompress(data)

class ContentEncodingProcessor(urllib2.BaseHandler):
    """A handler to add gzip capabilities to urllib2 requests """
    # add headers to requests
    def http_request(self, req):
        req.add_header("Accept-Encoding", "gzip,deflate")
        return req
# decode
    def http_response(self, req, resp):
        old_resp = resp
        if resp.headers.get("content-encoding") == "gzip":
            gz = gzip.GzipFile(fileobj=StringIO(resp.read()),
                           mode="r")
            resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        if resp.headers.get("content-encoding") == "deflate":
            gz = StringIO(deflate(resp.read()))
            resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp

class HTTPRefererProcessor(urllib2.BaseHandler):
    """A handler to add Referer capabilities to urllib2 requests """    
    def __init__(self):
        self.referer = None
    def http_request(self, req):
        if not req.has_header("Referer"):
            if self.referer is None:
                self.referer = req.get_host()
            req.add_unredirected_header("Referer", self.referer)
        return req
    def http_response(self, req, resp):
        self.referer = resp.geturl()
        return resp

class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
    '''用于处理 301 /302 重定向,可以记录到发生了重定向行为的code'''
    def http_error_301(self, req, fp, code, msg, headers):
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        #result.code = code
        return result
    
    def http_error_302(self, req, fp, code, msg, headers):
        result = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        #result.code = code
        return result

class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    ''' 用于处理301，302以外的httperror '''
    def http_error_default(self, req, fp, code, msg, headers):
        result = urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
        result.code = code
        return result

def __buildCookieProcessor():
    ''' 处理cookies用于登陆使用 '''
    cj = cookielib.CookieJar()
    return urllib2.HTTPCookieProcessor(cj)
        
def getHandlers(debug = False,cookies = True,proxy = None,extraHandlers = None):
    smartRediHandler = urllib2.HTTPRedirectHandler()           #重定向处理
    defaultErrorHandler = DefaultErrorHandler()         #默认错误处理
    contentEncodingProc = ContentEncodingProcessor()    #gzip,deflate解码
    httpRefererProc = HTTPRefererProcessor()            #防盗链破解，request添加referer header
    handlers = [smartRediHandler, defaultErrorHandler,
                contentEncodingProc, httpRefererProc] 
    if proxy != None:
        handlers.append(urllib2.ProxyHandler(proxy))     #设置代理
    if debug:
        handlers.append(urllib2.HTTPHandler(debuglevel=debug)) #urllib2的调试功能
    if cookies:
        handlers.append(__buildCookieProcessor())         #Cookies支持，用于登陆
    if extraHandlers != None:
        handlers.extend(list(extraHandlers))
    return handlers
   
def createOpener(handlers = None):
    if handlers is None:
        handlers = getHandlers()
    #设定handlers
    opener = urllib2.build_opener(*handlers)
    #urllib2.install_opener(opener)
    return opener

class CrawlerHttp(object):
    ''' 提供抓取数据服务的facade接口类 '''
    def __init__(self, urlSummary, opener=None):
        self.urlSummary = urlSummary
        if opener is None:
            self.opener = createOpener()
        else:
            self.opener = opener
    
    def __createRequest(self):
        request = None
        url = self.urlSummary.url
        data = None
        if self.urlSummary.data != None:
            data = urllib.urlencode(self.urlSummary.data)
        if self.urlSummary.crawlerType == CrawlerType.POST_MESSAGE:
            request = urllib2.Request(url, data)
        elif self.urlSummary.crawlerType == CrawlerType.GET_MESSAGE :
            fullUrl = ''.join([url, '?', data])
            request = urllib2.Request(fullUrl)
        else:
            request = urllib2.Request(url)    
        headers = self.urlSummary.headers
        if headers:
            for k, v in headers.items():
                request.add_header(k, v)       
        return request
                   
    def __getResponseStreamData(self,resp):
        dataArr = []
        try:
            if resp != None:
                while True:                  
                    data = resp.read(102400)#onetimesize 100k
                    if not data:
                        break
                    dataArr.append(data)
        except IOError, e:
            raise e
        return ''.join(dataArr)
        
    def fetch(self, isGetData = True, islogin = False, retries=0):
        '''Fetch data and metadata from a URL, file, stream, or string'''
        result = CrawlResult()
        resp = None
        try:
            req = self.__createRequest()
            resp = self.opener.open(req)
            if isGetData:              #如果为false，则不读取，仅获得response的headers等信息
                result.content = self.__getResponseStreamData(resp)
        except IOError, e:
            print 'Couldn\'t fulfill the request.Error code:%s, Reason: %s,URL: %s' % \
                (getattr(e,'code',UnKnownErrCode),getattr(e,'reason','Unknown Error'), req.get_full_url())         
        finally:
            if resp != None:
                result.headers = getattr(resp, 'headers', None)
                result.url = getattr(resp, 'url', '')
                result.code = getattr(resp, 'code', UnKnownErrCode)
                if islogin:
                    result.loginResponse = resp #登陆操作时，response不能关闭，待后续操作完成后关闭
                else:
                    resp.close()
            if result.code >= 400 and retries != 0:
                print 'sleep 3 seconds.try again'
                self.fetch(isGetData,islogin,retries - 1)
        return result
       
def crawle(urlSum, debug = False,proxy = None):
    handlers = getHandlers(debug = debug,proxy = proxy)
    opener = createOpener(handlers)
    return crawleDepOpener(urlSum,opener)

def crawleDepOpener(urlSum,opener,reservelogin = False):
    if not isinstance(urlSum, UrlSummary):
        urlSum = UrlSummary(urlSum)# May Be Url String
    crawler = CrawlerHttp(urlSum,opener)
    crawlResult = crawler.fetch(islogin = reservelogin)
    return crawlResult

def login(urlSum,debug = False,proxy = None):
    handlers = getHandlers(debug = debug,proxy = proxy,cookies = True)
    opener = createOpener(handlers)
    loginResult = crawleDepOpener(urlSum,opener,reservelogin = True)
    return (opener,loginResult)
    
def logout(loginResult):
    return loginResult.loginResponse.close()

def crawleOnLogin(loginUrlSum,desUrlSum,debug = False,proxy = None):
    opener,loginResult = login(loginUrlSum,debug,proxy)
    try:
        if loginResult.code == 200:
            result = crawleDepOpener(desUrlSum,opener)
    finally:
        logout(loginResult)
        pass
    return result

class MutiDownloader(object):
    ''' multi-thread downloading tool '''
    def __init__(self, threadNum = 1):
        self.threadNum = threadNum
        self.dataAll = [ t for t in range(threadNum)]
    
    def _getResourceFileSize(self,urlSummary):
        crawler = CrawlerHttp(urlSummary)
        result = crawler.fetch(isGetData = False)
        contentLen = None
        if result.code == 200:
            contentLen = result.headers.get('Content-Length')
        
        return int(contentLen) if contentLen else -1
    
    @staticmethod
    def splitBlocks(totalsize, blockNum):
        blocksize = totalsize/blockNum
        ranges = []
        for i in range(0, blockNum - 1):
            ranges.append((i*blocksize, i*blocksize +blocksize - 1))
        ranges.append(( blocksize*(blockNum - 1), totalsize -1 ))     
        return ranges
    
    @staticmethod
    def downloadPart(urlSum,partNum):
        return crawle(urlSum,debug = True)
   
    def save_result(self,request, result):
        partNum = request.args[1]
        self.dataAll[partNum] = result.content

    # this will be called when an exception occurs within a thread
    @staticmethod
    def handle_exception(request, exc_info):
        if not isinstance(exc_info, tuple):
            # Something is seriously wrong...
            print request
            print exc_info
            raise SystemExit
        print "**** Exception occured in request #%s: %s" % \
          (request.requestID, exc_info)
    
    def download(self,urlSummary):
        totalSize = self._getResourceFileSize(urlSummary)
        ranges = MutiDownloader.splitBlocks(totalSize,self.threadNum)
        urlSums = [deepcopy(urlSummary).headers.__setitem__('Range','bytes={}-{}'.format(*ranges[i]))
                        for i in range(self.threadNum)]
        urlRequests = [([k,v],{})for k,v in enumerate(urlSums)]
        requests = makeRequests(MutiDownloader.downloadPart, urlRequests, 
                                self.save_result, MutiDownloader.handle_exception)
        pool = ThreadPool(self.threadNum)
        for request in requests:
            pool.putRequest(request)
        pool.wait()
        #return ''.join(self.dataAll)
        
def __detectChardet(crawlResult):
    import chardet
    if crawlResult.code == 200:
        print 'Page %s :Content code is %s' % (crawlResult.url, chardet.detect(crawlResult.content))

if __name__ == '__main__':
    
    #===========================================================================
    # bookKey = {'url':'search-alias=stripbooks', 'field-keywords':'Java(TM) and JMX: Building Manageable Systems'}
    # firstSearchUrl = UrlSummary(url='http://www.amazon.com/s/ref=nb_sb_noss', data=bookKey, crawlerType=CrawlerType.GET_MESSAGE)
    # bookSearchPageResult = crawle(firstSearchUrl)
    # print bookSearchPageResult.content
    # regx = r'(http://www.amazon.com/[-a-zA-Z]*/[a-z]*/[0-9]*)/ref=sr_1_1'
    # 
    # f = file(r'c:/ff.html', 'w')
    # f.write(bookSearchPageResult.content)
    # f.close()
    # amazonUrl = 'http://www.amazon.com/Head-First-Servlets-JSP-Certified/dp/0596516681'
    # import chardet
    # DEBUG = 1
    #===========================================================================
    urlSummary = UrlSummary(url="http://www.coo8.com/allcatalog/")
    result = crawle(urlSummary)
    print result.content
    with open(r'c:allcatalog.html', 'w') as outputFile:
        outputFile.write(result.content)
        #downloader = MutiDownloader()
        #downloader.download(urlSummary)
    #===========================================================================
    # newSmthLoginData = {'id':'dao123mao', 'passwd':'902910','x':'38','y':'1'}
    # newsmthUrlSum = UrlSummary(url='http://www.newsmth.net/bbslogin2.php', data=newSmthLoginData, crawlerType=CrawlerType.POST_MESSAGE)
    # #opener = login(newsmthUrlSum,debug = True)
    # desUrlSum = UrlSummary('http://www.newsmth.net/bbsmailbox.php?path=.DIR&title=%CA%D5%BC%FE%CF%E4')
    # result = crawleOnLogin(newsmthUrlSum,desUrlSum,debug = True)
    # print result.code, result.content
    # with open(r'c:tt1.html', 'w') as outputFile:
    #    outputFile.write(result.content)
    # import re
    # regx = r'<span class="tag"><a href="(/tag/.*\?ref_=tag_dpp_cust_itdp_t)" title="([0-9]*) customers tagged this product'
    # p = re.compile(regx)
    # for t in p.finditer(result.content):
    #    print t.group(1),t.group(2)
    # import os
    # print os.sys.path
    # bookKey = {'key':u'Java编程思想','catalog':'01'}
    # bookKey['key'] = bookKey['key'].encode('gb2312')
    # print bookKey['key']
    # firstSearchUrl = UrlSummary(url='http://search.dangdang.com/book/search_pub.php',data=bookKey,crawlerType = CrawlerType.GET_MESSAGE)
    # bookSearchPageResult = crawle(firstSearchUrl)
    # from extract.pageparser  import DangDangSearchPageParser ,BookDetailParser
    # t = DangDangSearchPageParser(bookSearchPageResult.content)
    # urlSummary = t.parserResult()
    # 
    #===========================================================================
    #bookDetailPageResult = crawle(urlSummary)
    #import chardet
    #print chardet.detect(bookDetailPageResult.content)
    
    #bookDetailParser = BookDetailParser(bookDetailPageResult.content)
    #bookDetail = bookDetailParser.parserResult()
    #from persistence.dbsaver import insertBookDetail
    #print insertBookDetail(bookDetail)
    #print chardet.detect(bookDetail.contentAbs)
    #print bookDetail.contentAbs.decode('GB2312')
    #f = file(r'c:/ff.html','w')
    #f.write(bookDetailPageResult.content)
    #f.close()
 
