#  crawler of cs6913
#  written in Python 2 (uses urllib2/httplib)
#  author Liang Xinzhao
import urllib2,httplib
import re
import pickle
import time
import urlparse
import hashlib
import robotexclusionrulesparser
import threading
from common import *
from linkparser import LinkParser

        
class Crawler:
    robotList = {}
    crawledRecord = {}
    crawledContent= {}
    urlRecordLock = threading.RLock()
    urlNotDone = {}
    urlList = []
    errorCounter = {}
    crawledAmount = 0
    def __init__(self):
        
        self._md5 = hashlib.md5()
        self.searchEngineUrl = {GOOGLE:'http://www.google.com/search?q='}
        self.customUrl = ""
        self.rerp = robotexclusionrulesparser.RobotExclusionRulesParser()
        
    #load crawled record
    @staticmethod
    def loadRecord(recordType):
        try:
            recordHandle = open(recordType,'r')
            record = pickle.load(recordHandle)
            recordHandle.close()
            
            if type(record) != 'dict':
                return {}
            else:
                return record
        except Exception,e:
            print(e)
        return {}
    #add a keyword for search engine
    def addSearchEngineUrl(self,keyword,searchEngine = GOOGLE):
        self.customUrl =  self.searchEngineUrl[searchEngine] + keyword
        
    
    #   add a list of urls to the Crawler.urlList
    #   param:  urlList
    def addUrlList(self,urlList, normalSite= NORMAL_SITE):
        for url in urlList:
            #    prevent adding circulate links
            pathname = self.url2Pathname(url)
            if Crawler.urlNotDone.has_key(pathname) == False:
                if normalSite == NORMAL_SITE :
                    self.parseRobot(url)
                    if self.isBlockedByRobot(url):
                        return False
                Crawler.urlNotDone[pathname] = url
                Crawler.urlList.append(url)
    
    #   crawl down a url
    #   param:  flag, it is a search engine or a normal site 
    #   param:  url, it is required if it is crawling normal_site
    #   return : html if success, False if error occurs!
    def crawlUrl(self,flag = NORMAL_SITE,url='',outputFlag = False):
        if str(url).isspace():
            return False
        if flag != NORMAL_SITE:
            url = self.customUrl

        #   only normal site ,we follow robots.txt. because google disallow spider.
        pathname = self.url2Pathname(url)
        
        if self.isEarlyVisited(pathname) == True:
            self.reportError(url, msg[ERROR_EARLY_VISIT])
            return False 
        Crawler.urlRecordLock.acquire()
        Crawler.crawledRecord[pathname] = time.time()
        Crawler.urlRecordLock.release()   
         
        req = urllib2.Request(url) 
        req.add_header("User-Agent",CRAWL_USERAGENT)
        try:
            fileHandle = urllib2.urlopen(req,None,CRAWL_TIMEOUT)
        except httplib.InvalidURL,e:
            self.reportError(url,str(e.message))
            return False
        except urllib2.HTTPError, e:
            self.reportError(url, str(e.code))
            return False
        except urllib2.URLError, e:
            self.reportError(url, str(e.reason))
            return False
        except ValueError, e:
            self.reportError(url, str(e.message))
            return False
        try:
            typeResult = self.isWantedType(fileHandle.headers["content-type"])
        except e:
            self.reportError(url, msg[ERROR_CONTENT_TYPE_NOT_FOUNDED])
            typeResult = False

        if  typeResult == False:
            self.reportError(url, msg[ERROR_WANTED_CONTENT_TYPE])
            return False
        
        try:
            htmlcode = fileHandle.read()
        except Exception,e:
            self.reportError(url, msg[ERROR_READ_DOWNLOADED_PAGE])
            return False
        fileHandle.close()
        htmlcode = str(htmlcode)
        self._md5.update(htmlcode)
        hashkey = self._md5.hexdigest()
        
        #    detect duplicate content
        if Crawler.crawledContent.has_key(hashkey):
            self.reportError(url,msg[ERROR_DUPLICATE_CONTENT]+" same with"+ Crawler.crawledContent[hashkey] )
            return False
        else:
            Crawler.urlRecordLock.acquire()
            Crawler.crawledContent[hashkey] = url
            Crawler.urlRecordLock.release()
            
        #    record the crawled urls
        
        if(outputFlag == True):
            self.output2Disk(htmlcode,self.url2Pathname(url),typeResult)
        return htmlcode
    
    #check the response header, whether it is wanted type of file.
    def isWantedType(self,contentType):
        for typeId in TYPE_WANTED:
            regString = TYPE_DICT[typeId]
            textHtmlRegerEx = re.compile(r'^'+regString+'+')
            if textHtmlRegerEx.match(contentType) != None:
                return typeId
        return False
        
    #   crawl down the list of links
    #   parse the downloaded pages and add new url to list
    #   param:  outputFlag, whether save in harddisk 
    def crawlAllUrl(self,outputFlag = False,crawlAmountLimit = CRAWL_AMOUNT_LIMIT): 
        while len(Crawler.urlList)>0:
            Crawler.urlRecordLock.acquire()#lock the queue when loading the first element
            url = Crawler.urlList.pop()
            pathname = self.url2Pathname(url)
            Crawler.urlNotDone.pop(pathname)
            
            if Crawler.crawledAmount >= crawlAmountLimit:
                Crawler.urlRecordLock.release()
                break
            Crawler.urlRecordLock.release()
            
            result = self.crawlUrl(NORMAL_SITE,url,outputFlag)
            try:
                urlArr = urlparse.urlparse(url)
                #if can not crawl the url, accumulate to the errorCounter
                if result == False:
                    Crawler.urlRecordLock.acquire()
                    if Crawler.errorCounter.has_key(urlArr.netloc):
                        Crawler.errorCounter[urlArr.netloc]+=1
                    else:
                        Crawler.errorCounter[urlArr.netloc] = 1
                    Crawler.urlRecordLock.release()
                    continue
                    if Crawler.errorCounter[urlArr.netloc]> MIN_ERRORS_ALLOWED_FOR_A_SITE:
                        continue
                _path = urlArr.path
                rightMostSlashIndex = _path.rfind('/')
                replaced = _path[rightMostSlashIndex : len(_path)]
                #try to parse relative address
                if replaced.find('.') != -1:
                    _path = _path.replace(replaced,'')
                hostPath = urlArr.scheme + '://' + urlArr.netloc + _path 
                 
                parser = LinkParser()
                parser.setFlag(NORMAL_SITE)
                parser.setHostPath(hostPath)
                parser.feed(result)
                urlList = parser.hrefsList
                    
                Crawler.urlRecordLock.acquire()
                self.addUrlList(urlList)
                Crawler.crawledAmount += 1
                Crawler.urlRecordLock.release()

                parser.close()
                    
            except Exception, e:
                #print(e)
                self.reportError(url, msg[ERROR_HTML_PARSE])
               
    
    #return whether the url has been visted recently
    def isEarlyVisited(self,path):
        if Crawler.crawledRecord.has_key(path):
            if time.time() - Crawler.crawledRecord[path] <= EARLY_VISITED_BEFORE:
                return True
        return False
    
    #   output2Disk
    #   write the content to hardisk
    def output2Disk(self,content="",filename="default",ext = DEFAULT_FILE_EXT,outputResult = True):
        if ext != DEFAULT_FILE_EXT:
            ext = EXT_DICT[ext]
        completeName = str(filename+ext)
        try:
            localFile = open( PAGE_STORE_DIRECTORY + completeName , 'wb')
            localFile.write(content)
            localFile.close()
        except Exception,e:
            self.reportError(e)
        if FLAG_PRINT_SUC == True:
            print(completeName+" Crawled!")
    
    #   url2Pathname
    #   convert the url 2 valid file name
    def url2Pathname(self,url=""):
        p = re.compile(r'[\W]')
        url = p.sub('',url)
        if len(url) >200:
            url = url[0:199]
        return url
    
    #    write the error into log file
    def reportError(self,url="",message = ""):
        log = open(LOG_OF_ERRORS_PATH , 'a+')
        log.write(url+"  :  "+message+'\n')
        log.close()
        print(url+ msg[ERROR_GENERAL])       
    
    
    #   search the robots.txt and parse
    def parseRobot(self,url):
        urlArr = urlparse.urlparse(url)
        hostPath = urlArr.scheme + '://' + urlArr.netloc
        if Crawler.robotList.has_key(hostPath):
            return ERROR_ROBOT_DECTECTED_EARLIER
        
        try:
            robotPath = hostPath+ '/robots.txt'
            Crawler.urlRecordLock.acquire()
            self.rerp.fetch(robotPath)
            Crawler.robotList[hostPath] = self.rerp
            Crawler.urlRecordLock.release()
        except:
            Crawler.robotList[hostPath] = None
            Crawler.urlRecordLock.release()

    
    #   see that if the url is disallowed in robots.txt
    def isBlockedByRobot(self,url):
        urlArr = urlparse.urlparse(url)
        hostPath = urlArr.scheme + '://' + urlArr.netloc
        if Crawler.robotList.has_key(hostPath) == False:
            return False
        else:
            if Crawler.robotList[hostPath] == None:
                return False
            _rerp = Crawler.robotList[hostPath]
            q = urlArr.query
            if len(q)>0:
                q = "?" +q
            if _rerp.is_allowed(CRAWL_USERAGENT, urlArr.path + q):
                return False
            else: 
                return True
    #    dump the record object into file
    def dumpRecord(self,recordType):
        try:
            os.remove(recordType)
            recordHandle = open(recordType,'wb')

            if recordType == LOG_OF_CRAWLED_URL :
                pickle.dump(Crawler.crawledRecord,recordHandle)
            elif recordType == LOG_OF_CRAWLED_CONTENT:
                pickle.dump(Crawler.crawledContent,recordHandle)
            recordHandle.close()
        except Exception, e:
            print(msg(ERROR_DUMP_RECORD))
    #    when object closed  save the crawling record  ,save the robot detected results   
    def flush(self):
        print("flushing record")
        self.dumpRecord(LOG_OF_CRAWLED_URL)
        self.dumpRecord(LOG_OF_CRAWLED_CONTENT)
    
