﻿#coding:utf-8
'''
WebCrawler
version 1.0
author:zheng lingjie
description: 借助自定义的线程池实现多线程并行下载的网络爬虫。
'''

import urllib2
from StringIO import StringIO
import gzip

import os
import sys
import time
from time import sleep
import subprocess
import threading
import re
import json

from string import maketrans 

from bs4 import BeautifulSoup
from Queue import Queue     #同步队列，可用于多线程环境

from ThreadPoolImpl import ThreadPool

class WebCrawler:
    '''Multi-threaded web crawler built on a custom thread pool.

    Starting from a seed URL it downloads pages, extracts links with
    BeautifulSoup, and hands every unseen URL to a ThreadPool so pages
    are fetched in parallel.  The extracted text of each page is saved
    to a file named after its URL.  (Python 2 code: uses urllib2,
    string.maketrans and byte-string encode/decode tricks.)
    '''
    
    # Browser User-Agent header sent with every request.
    USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36'
    # Regex extracting the root (scheme + host) of a URL.
    # NOTE(review): only matches hosts with exactly three dot-separated
    # parts (e.g. www.example.com); shorter hosts leave __rootUrl empty -- confirm.
    RE_ROOT_URL = '^(https?://(\w+)\.(\w+)\.(\w+)).*'
    # Translation table: when a URL is used as a file name, replace
    # characters that are illegal in file names with '_'.
    TRANTABLE_URL = maketrans('\/:*?"<>|','_________')
    # Translation table: replace punctuation in page text with spaces.
    TRANTABLE_TEXT = maketrans('''\/:;*?"<>|$()[]{}_#'.,''', '                      ')
    
    # File that records every URL that has been downloaded.
    FILE_SAVED_URL_LIST =  'saved_url_list.lst'
       
    
    def __init__(self, url):
        '''Initialize crawler state; *url* is the seed address.'''
        self.__seedUrl = url
        self.__rootUrl = ''     #root (scheme + host) of the seed URL, filled in below
        self.__waitUrlQueue = Queue()       #URLs waiting to be crawled; Queue is thread-safe
        self.__doneUrlSet = set()           #URLs already handed to the pool
        self.__doneUrlSetLock = threading.Condition(threading.Lock())
        
        #initialize the thread pool
        self.__pool = ThreadPool(3)
        self.__runningThreadCnt = 0     #number of tasks currently running in the pool; when it drops to 0, __closePoolLock is released and the main thread may close the pool
        self.__threadCntLock = threading.Condition(threading.Lock())
        self.__closePoolLock = threading.Condition(threading.Lock())
        
        #regular expressions used to filter extracted links
        self.__addRE = []  
        self.__delRE = []
        
        self.__waitUrlQueue.put(self.__seedUrl)
        
        #extract the root (scheme + host) part of the seed URL
        regex = re.compile(self.RE_ROOT_URL)
        regex_match = regex.match(self.__seedUrl)
        if regex_match:
            self.__rootUrl = regex_match.group(1)
               
        
    def __urlfetch(self, url, cookie=None):
        '''Fetch *url* and return (response, data).

        data is the response body, gzip-decoded when the server sent
        Content-Encoding: gzip.  On any error the exception is printed
        and ('', '') is returned, so callers must tolerate empty results.
        '''
    
        req_headers = {'User-Agent': self.USER_AGENT, 'Accept-Encoding': 'gzip'}
        if cookie:
            req_headers['Cookie'] = cookie
            
        try:
            request = urllib2.Request(url=url, headers=req_headers)
            response = urllib2.urlopen(request)
            
            content_encoding = response.info().get('Content-Encoding')
            if content_encoding == 'gzip':
                buf = StringIO( response.read())        # response.read(): the gzip-encoded response body
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()                         # f.read(): the gzip-decoded response body
            else:
                data = response.read()
            return response, data
        
        except Exception,e:
            print e
            return '',''
    
    
    def __paraseHtml(self, html):
        '''Parse an HTML document, extract links and queue unseen URLs as pool tasks.'''
        
        regex_add = None
        regex_del = None
        
        soup = BeautifulSoup(html) # html is the page source string, type(html) == str
        
        #collect all <a> links; relative links are prefixed with the root URL,
        #then every accepted link is pushed onto self.__waitUrlQueue
        a_list = soup.findAll('a')
        if not self.__addRE == []:
            regex_add = re.compile(self.__addRE[0])     #only the first pattern is used
        if not self.__delRE == []:
            regex_del = re.compile(self.__delRE[0])     #currently unused
        for a in a_list:
            if not regex_add:
                #no user-supplied regex: keep every link.
                #external links always start with http://
                temp_url = str(a.get('href'))
                if temp_url.find('http://') == 0:   #starts with http://
                    #an absolute (external) link
                    self.__waitUrlQueue.put(temp_url)
                else:
                    self.__waitUrlQueue.put(self.__rootUrl + temp_url)
            else:
                #user supplied a filtering regex: keep only matching links
                if regex_add.match(str(a.get('href'))):
                    temp_url = str(a.get('href'))
                    if temp_url.find('http://') == 0:   #starts with http://
                        #an absolute (external) link
                        self.__waitUrlQueue.put(temp_url)
                    else:
                        self.__waitUrlQueue.put(self.__rootUrl + temp_url)
                    
        
        while not self.__waitUrlQueue.empty():
            url = self.__waitUrlQueue.get()
            #queue the URL as a crawl task only if it has not been crawled yet.
            #NOTE(review): this membership test happens outside
            #__doneUrlSetLock, so two pool threads may both pass it and
            #queue the same URL twice -- confirm duplicates are acceptable
            if not (url in self.__doneUrlSet):
                #record the URL as handled in self.__doneUrlSet
                self.__doneUrlSetLock.acquire()
                self.__doneUrlSet.add(url)
                self.__doneUrlSetLock.release()
                
                self.__pool.queueTask(self.__urlCrawlTask, url, self.__taskCallback)
    
    
    def __getTextFromHtml(self, html):
        '''Extract the visible text content of an HTML page as one space-joined string.'''
        '''
        soup = BeautifulSoup(html) # html为html源代码字符串，type(html) == str
        tags = soup.findAll({'a':True}, target="_blank")    #获取所有包含target="_blank"属性的，且名为‘a’的tag
        for tag in tags:
            print tag.renderContents()    #提取tag的文本内容
        '''
        #generic text extraction: take every text node, replace punctuation
        #with spaces, then join all fragments
        soup = BeautifulSoup(html) # html is the page source string, type(html) == str
        texts = soup.findAll(text=True)
        text_list = []
        for text in texts:
            if not text == '\n' and not text == ' ':
                #replace punctuation with spaces
                s = text.encode('utf-8').translate(self.TRANTABLE_TEXT)     #must encode to utf-8 first, otherwise translate() raises on unicode
                text_list.append(s)
        text_str = ' '.join(text_list)
        return text_str
    
    def __make_folder(self):
        '''Create a timestamped output folder for crawled pages and chdir into it.'''
        
        time_str = time.strftime('%Y-%m-%d_%H-%M',time.localtime(time.time()))
        folder_name = 'crawled_at_' + time_str
        
        folder_to_make = os.getcwd() + '\\' + folder_name.decode('utf-8').encode('gbk')        #re-encode to gbk so the created folder name is not garbled (Windows)
        if not os.path.exists(folder_to_make):
            os.makedirs(folder_to_make)
        os.chdir(folder_to_make)
    
    #-------------------------------thread pool task functions----------------------------------  
    def __urlCrawlTask(self,args):
        '''Crawl one page; *args* is the URL to fetch.

        Fetches the page, queues its extracted links, saves the page text
        to a file named after the URL, and appends the URL to the
        saved-URL list.  A counter guarded by __threadCntLock tracks the
        number of running tasks: __closePoolLock is held while at least
        one task runs and released when the last one finishes, which
        signals work() that the pool may be closed.
        '''
        
        self.__threadCntLock.acquire()
        self.__runningThreadCnt += 1
        if self.__runningThreadCnt == 1:
            self.__closePoolLock.acquire()      #first running task: hold the close lock
        self.__threadCntLock.release()
        
        _,html = self.__urlfetch(args)
        self.__paraseHtml(html)     #parse the page and queue extracted links
                
        #save the extracted page text
        file_name = args.translate(self.TRANTABLE_URL)      #replace characters illegal in file names when using the URL as a file name
        html_file = open(file_name.decode('utf-8').encode('gbk'),'w')
        html_file.write(self.__getTextFromHtml(html))
        html_file.close()
        
        #append the downloaded URL to the saved_url_list file
        file_saved_url_list = open(self.FILE_SAVED_URL_LIST.decode('utf-8').encode('gbk'),'a')
        file_saved_url_list.write(args + ' ' + file_name + '\n')
        file_saved_url_list.close()
        
                
        self.__threadCntLock.acquire()
        self.__runningThreadCnt -= 1
        if self.__runningThreadCnt == 0:
            self.__closePoolLock.release()      #last running task: allow the pool to be closed
        self.__threadCntLock.release()
        
        return "urlCrawlTask finished: ", args
    
    def __taskCallback(self,args):
        '''Callback invoked by the thread pool after a task completes.'''        
        print "Callback called for -- ", args
                   
    
    #------------------------------------public interface--------------------------------------------    
    def setRE(self, re, mode='ADD'):
        '''Set the regular expressions used to filter extracted links.

        mode='ADD' means URLs matching the pattern are added to the
        crawl queue; any other mode sets the (currently unused) delete
        pattern.  re is a list of pattern strings; only the first
        element is used.  Note: the parameter shadows the re module
        inside this method.
        '''
        if mode == 'ADD':
            self.__addRE = re
        else:
            self.__delRE = re
    
    def work(self):
        '''Run the crawl: fetch the seed page, then let the pool crawl the rest.'''
        
        #create the output folder
        self.__make_folder()
        
        #download the page behind the seed URL
        url = self.__waitUrlQueue.get()
        _,html = self.__urlfetch(url)
        self.__paraseHtml(html)     #parse the page and queue extracted links
                
        #save the extracted page text
        file_name = url.translate(self.TRANTABLE_URL)      #replace characters illegal in file names when using the URL as a file name
        html_file = open(file_name.decode('utf-8').encode('gbk'),'w')
        html_file.write(self.__getTextFromHtml(html))
        html_file.close()
        
        #append the downloaded URL to the saved_url_list file
        file_saved_url_list = open(self.FILE_SAVED_URL_LIST.decode('utf-8').encode('gbk'),'a')
        file_saved_url_list.write(url + ' ' + file_name + '\n')
        file_saved_url_list.close()
        
        #record the seed URL as handled in self.__doneUrlSet
        self.__doneUrlSetLock.acquire()
        self.__doneUrlSet.add(url)
        self.__doneUrlSetLock.release()
        
        # When all tasks are finished, allow the threads to terminate.
        # NOTE(review): the sleep gives the first pool task time to grab
        # __closePoolLock; if no task starts within 1s the pool may be
        # closed before tasks run -- confirm this race is acceptable
        sleep(1)
        self.__closePoolLock.acquire()
        self.__pool.joinAll()
        self.__closePoolLock.release()
        '''
        #test
        while not self.__waitUrlQueue.empty():
            print self.__waitUrlQueue.get()
        if 'http://bt.ktxp.com/today.html' in self.__doneUrlSet:
            print 'yes'
        '''


    

if __name__ == '__main__':
    # Crawl the Boost website, keeping extracted links that match '/.*'.
    crawler = WebCrawler('http://www.boost.org/')
    crawler.setRE(['/.*'], mode='ADD')
    crawler.work()