﻿#coding:utf-8
'''
WebCrawler
version 2.0
author: zheng lingjie
description: 借助eventlet实现高性能网络爬虫。原理是IO多路复用
'''


from StringIO import StringIO
import gzip

import os
import sys
import time
from time import sleep
import threading
import subprocess
import re
import json

from string import maketrans 

from bs4 import BeautifulSoup
from Queue import Queue     #同步队列，可用于多线程环境

import eventlet     #高性能网络并发库
from eventlet.green import urllib2

class WebCrawler:
    '''Web crawler built on eventlet green threads (cooperative I/O
    multiplexing).  Class-level constants below are shared by all instances.
    '''
    
    # Browser-like User-Agent header sent with every request.
    USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36'
    # Regex capturing the scheme + domain ("root") portion of a URL in group 1.
    RE_ROOT_URL = '^(https?://(\w+)\.(\w+)\.(\w+)).*'
    # Translation table: replace characters that are illegal in file names
    # with underscores when a URL is used as a file name.
    TRANTABLE_URL = maketrans('\/:*?"<>|','_________')
    # Translation table: replace punctuation in extracted page text with spaces.
    TRANTABLE_TEXT = maketrans('''\/:;*?"<>|$()[]{}_#'.,''', '                      ')
    
    # Name of the log file recording every URL saved to disk.
    FILE_SAVED_URL_LIST =  'saved_url_list.lst'
       
    
    def __init__(self, url):
        '''Initialize crawler state with *url* as the seed URL.'''
        self.__seedUrl = url
        self.__rootUrl = ''     # scheme+domain of the seed URL, e.g. http://www.example.com
        self.waitUrlQueue = Queue()   # URLs waiting to be crawled (thread-safe queue)
        self.doneUrlSet = set()       # URLs already dispatched, to avoid re-crawling
        
        self.folder_name = '' # directory where downloaded pages are saved
        
        # Regular expressions used to filter extracted links.
        self.__addRE = []
        self.__delRE = []
        
        self.waitUrlQueue.put(self.__seedUrl)
        
        # Green-thread pool (10 concurrent fetches) providing I/O multiplexing.
        self.pool = eventlet.GreenPool(10)
        
        # Extract the domain (root) part of the seed URL.
        regex = re.compile(self.RE_ROOT_URL)
        regex_match = regex.match(self.__seedUrl)
        if regex_match:
            self.__rootUrl = regex_match.group(1)
               
        self.__file_saved_url_list = None   # handle for the saved-URL log; opened in work()
        
        self.__soup = None                  # transient BeautifulSoup reference
        
        self.__schedulingThread = None      # SchedulingThread started by work()
        
    def __urlfetch(self, url, cookie=None):
        '''Fetch *url* and return a (response, body) pair.

        The body is gunzipped when the server answers with
        Content-Encoding: gzip.  On any error the exception is printed and
        ('', '') is returned, so callers always get a 2-tuple.
        '''
                    
        req_headers = {'User-Agent': self.USER_AGENT, 'Accept-Encoding': 'gzip'}
        if cookie:
            req_headers['Cookie'] = cookie
            
        try:
            request = urllib2.Request(url=url, headers=req_headers)
            response = urllib2.urlopen(request)
            
            content_encoding = response.info().get('Content-Encoding')
            if content_encoding == 'gzip':
                buf = StringIO( response.read())        # response.read() yields the gzip-compressed payload
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()                         # decompressed response body
            else:
                data = response.read()
            return response, data
        
        except Exception,e:
            print e
            return '',''
    
    
    def __paraseHtml(self, html):
        '''Parse an HTML document and enqueue the links it contains.

        Links matching the optional "add" regex (or all links when none is
        configured) are pushed onto self.waitUrlQueue; relative links are
        prefixed with the seed's root URL.
        '''
        
        regex_add = None
        regex_del = None
        
        self.__soup = BeautifulSoup(html) # html is the page source; type(html) == str
        
        # Collect every anchor tag; matching hrefs go onto self.waitUrlQueue.
        a_list = self.__soup.findAll('a')
        
        
        if not self.__addRE == []:
            regex_add = re.compile(self.__addRE[0])
        if not self.__delRE == []:
            regex_del = re.compile(self.__delRE[0])     # the "delete" filter is currently unused
        for a in a_list:
            if not regex_add:
                # No user-supplied filter: keep every link.
                # External links always start with http://.
                temp_url = str(a.get('href'))
                if temp_url.find('http://') == 0:   # absolute http:// link
                    # External link; enqueue as-is.
                    self.waitUrlQueue.put(temp_url)
                else:
                    self.waitUrlQueue.put(self.__rootUrl + temp_url)
            else:
                # A user-supplied filter regex is present.
                if regex_add.match(str(a.get('href'))):
                    temp_url = str(a.get('href'))
                    if temp_url.find('http://') == 0:   # absolute http:// link
                        # External link; enqueue as-is.
                        self.waitUrlQueue.put(temp_url)
                    else:
                        self.waitUrlQueue.put(self.__rootUrl + temp_url)
        
        self.__soup.clear(decompose=True)   # done with the soup; release its memory
        
        '''
        #不应在此处做调度，否则会演变成函数的递归调用。python语言默认的最大递归调用深度是1000，超出会报错
        while not self.waitUrlQueue.empty():
            url = self.waitUrlQueue.get()
            #若该url未爬取过，则添加到待爬取队列
            if not (url in self.doneUrlSet):
                #将已处理网址添加到self.doneUrlSet
                self.doneUrlSet.add(url)
                
                self.pool.spawn_n(self.urlCrawlTask, url)
        '''
    
    def __getTextFromHtml(self, html):
        '''Extract the plain-text content of an HTML page as one string.'''
        '''
        soup = BeautifulSoup(html) # html为html源代码字符串，type(html) == str
        tags = soup.findAll({'a':True}, target="_blank")    #获取所有包含target="_blank"属性的，且名为‘a’的tag
        for tag in tags:
            print tag.renderContents()    #提取tag的文本内容
        '''
        # Generic text extraction: take every text node in the document.
        self.__soup = BeautifulSoup(html) # html is the page source; type(html) == str
        texts = self.__soup.findAll(text=True)
        self.__soup.clear(decompose=True)   # done with the soup; release its memory

        text_list = []
        for text in texts:
            if not text == '\n' and not text == ' ':
                # Replace punctuation with spaces.  The node must be encoded
                # to utf-8 first, otherwise translate() raises on non-ASCII.
                s = text.encode('utf-8').translate(self.TRANTABLE_TEXT)
                text_list.append(s)
        text_str = ' '.join(text_list)
        return text_str
    
    def __make_folder(self):
        '''Create (and chdir into) a timestamped directory for saved pages.'''
        
        time_str = time.strftime('%Y-%m-%d_%H-%M',time.localtime(time.time()))
        self.folder_name = 'crawled_at_' + time_str
        
        folder_to_make = os.getcwd() + '\\' + self.folder_name.decode('utf-8').encode('gbk')        # re-encode to gbk so the Windows folder name is not garbled
        if not os.path.exists(folder_to_make):
            os.makedirs(folder_to_make)
        os.chdir(folder_to_make)
    
    #-------------------------------GreenPool task function----------------------------------
    def urlCrawlTask(self,args):
        '''Crawl one page: fetch *args* (a URL), save its extracted text,
        log the URL, and enqueue the links found on the page.'''
                
        _,html = self.__urlfetch(args)
        
        # Save the extracted page text to disk.
        file_name = args.translate(self.TRANTABLE_URL)      # strip characters that are illegal in file names
        html_file = open(file_name.decode('utf-8').encode('gbk'),'w')
        html_file.write(self.__getTextFromHtml(html))
        html_file.close()
        
        # Record the downloaded URL in the saved-URL log.
        self.__file_saved_url_list.write(args + ' ' + file_name + '\n')
        print "urlCrawlTask finished: ", args
        
        self.__paraseHtml(html)     # parse the page and enqueue its links
                        
        
                  
    
    #------------------------------------Public interface--------------------------------------------
    def setRE(self, re, mode='ADD'):
        '''Set the regular expressions used to filter links.

        mode='ADD' means URLs matching the regex are added to the crawl
        queue.  *re* is a list of pattern strings (only the first is used).
        '''
        if mode == 'ADD':
            self.__addRE = re
        else:
            self.__delRE = re
    
    def work(self):
        '''Run the crawl: fetch the seed page, then loop via the scheduler.'''
        
        # Create the output directory.
        self.__make_folder()
        
        # Download the page behind the seed URL.
        url = self.waitUrlQueue.get()
        _,html = self.__urlfetch(url)
                
        # Save the extracted page text to disk.
        file_name = url.translate(self.TRANTABLE_URL)      # strip characters that are illegal in file names
        html_file = open(file_name.decode('utf-8').encode('gbk'),'w')
        html_file.write(self.__getTextFromHtml(html))
        html_file.close()
        
        # Open the saved-URL log and record the seed URL.
        self.__file_saved_url_list = open(self.FILE_SAVED_URL_LIST.decode('utf-8').encode('gbk'),'a')
        self.__file_saved_url_list.write(url + ' ' + file_name + '\n')
        
        
        # Mark the seed URL as processed.
        self.doneUrlSet.add(url)
        
        # Kick off the crawl loop by parsing the seed page.
        self.__paraseHtml(html)     # parse the page and enqueue its links
        
        # Start the scheduler thread that dispatches queued URLs to the pool.
        self.__schedulingThread = SchedulingThread(self)
        self.__schedulingThread.start()
        
        # Wait for the scheduler (and thus the whole crawl) to finish.
        #self.pool.waitall()
        self.__schedulingThread.join()
                
        #sleep(1)
        
        self.__file_saved_url_list.close()
        

class SchedulingThread(threading.Thread):
    '''Scheduler thread: keeps pulling URLs off the owning WebCrawler's
    waitUrlQueue and dispatches unseen ones to its green-thread pool.
    It stops after the queue has stayed empty for three consecutive polls.
    '''

    def __init__(self, parent):
        threading.Thread.__init__(self)
        self.__crawler = parent
        self.running = True

    def run(self):
        # Three consecutive empty polls mean the crawl has fully drained.
        idle_polls = 0
        while self.running:
            queue = self.__crawler.waitUrlQueue
            if queue.empty():
                if idle_polls >= 3:
                    # Nothing left to crawl: let the pool finish, then stop.
                    self.running = False
                    self.__crawler.pool.waitall()
                else:
                    idle_polls += 1
            else:
                idle_polls = 0
                # Drain the queue, dispatching every URL not crawled before.
                while not queue.empty():
                    candidate = queue.get()
                    if candidate not in self.__crawler.doneUrlSet:
                        # Mark as done before dispatch so duplicates are skipped.
                        self.__crawler.doneUrlSet.add(candidate)
                        self.__crawler.pool.spawn_n(
                            self.__crawler.urlCrawlTask, candidate)
            sleep(0.2)

    

if __name__ == '__main__':
    
    # Crawl the Boost website; follow every link whose href matches '/.*'.
    url = 'http://www.boost.org'
    crawler = WebCrawler(url)
    #crawler.setRE(['^/html/\d+/\d+/\d+\.html$'], mode='ADD')
    crawler.setRE(['/.*'], mode='ADD')
    crawler.work()