#coding:utf-8

# standard library
import os
import pickle
import random
import sys
import threading
import time
import traceback
import urllib2
import urlparse
import Queue

# third-party
import threadpool
import bsddb

# project-local
from logutility import *
from parserutility import parserLink
#from fetcher import initHTTPRequest
from crawlerSettings import *
from crawlerBase import initHTTPRequest,http_headers
from crawlerObject import LObject,PostProcesser
from timeout import timelimit,TimeoutError

infoLogger=None
errLogger=None
storage_mutex=None

def loadfile(filename):
    """Read *filename* and return its lines stripped of surrounding
    whitespace; return [] when the file cannot be opened."""
    try:
        # 'with' guarantees the handle is closed even on a read error
        # (the original code leaked the open file object).
        with open(filename) as source:
            return [line.strip() for line in source]
    except IOError:
        return []

def filter_url(filterList, url):
    """Return True when any filter in *filterList* rejects *url*."""
    return any(urlfilter.filter(url) for urlfilter in filterList)


class CrawlerStatus:
    # Status codes persisted (as strings) in the already-seen bsddb.
    # Values between BEGIN and SUCCESS double as a per-url retry counter
    # for failed fetches (see StatusUpdate.updateStatus).
    BEGIN=1      # url queued, not yet fetched
    FAILED=4     # sentinel passed to updateStatus; never stored directly
    SUCCESS=256  # fetched and stored successfully
    DEAD=256     # NOTE(review): same value as SUCCESS -- dead and successful
                 # urls are indistinguishable in the db; confirm intended
    
class Frontier(LObject):
    """Abstract URL frontier: the work queue of URLs waiting to be crawled.

    Subclasses implement geturl()/addurl(); this base only manages the
    seed list loaded from a seed file and provides a shared lock.
    """

    def __init__(self, name, seedfile):
        # Abstract: must be instantiated through a subclass.
        if self.__class__ is Frontier:
            raise NotImplementedError("frontier can not be created, abstarct class")
        LObject.__init__(self, name)
        self._lock = threading.RLock()
        self._seedfile = seedfile
        self._seeds = []
        #self.loadseed()

    def loadseed(self):
        """Load seed URLs from the seed file, dropping blanks and duplicates."""
        fresh = [seed for seed in loadfile(self._seedfile) if seed]
        self._seeds = list(set(self._seeds + fresh))

    def geturl(self):
        """Return the next URL to crawl, or None; subclass hook."""
        return None

    def addurl(self, url):
        """Queue *url* for crawling; subclass hook."""
        pass

    def getcnt(self):
        """Number of URLs currently queued."""
        return len(self._seeds)

    def empty(self):
        """True when no URLs are queued."""
        return self._seeds == []

class BasicFrontier(Frontier):
    """LIFO frontier backed by a plain list, guarded by the shared lock."""

    def __init__(self, name="basic frontier", seedfile=None):
        Frontier.__init__(self, name, seedfile)

    def geturl(self):
        """Pop and return the most recently added URL, or None when empty."""
        with self._lock:
            if not self._seeds:
                return None
            return self._seeds.pop()

    def addurl(self, url):
        """Append *url* under the lock."""
        with self._lock:
            self._seeds.append(url)

class BasicQueueFrontier(Frontier):
    """FIFO frontier backed by a thread-safe Queue.Queue.

    The Queue does its own locking, so the inherited RLock is unused here.
    """

    def __init__(self, name="basic frontier", seedfile=None):
        Frontier.__init__(self, name, seedfile)
        self._seeds = Queue.Queue()

    def geturl(self):
        """Return the oldest queued URL, or None when the queue is empty.

        Bug fix: the old empty()-then-get() pair was racy -- with several
        consumer threads, another thread could drain the queue between
        the two calls and leave get() blocked forever.  A non-blocking
        get is atomic.
        """
        try:
            return self._seeds.get_nowait()
        except Queue.Empty:
            return None

    def addurl(self, url):
        """Enqueue *url*."""
        self._seeds.put(url)

    def getcnt(self):
        """Approximate number of queued URLs (qsize is advisory)."""
        return self._seeds.qsize()

    def empty(self):
        """True when the queue is (momentarily) empty."""
        return self._seeds.empty()

class RandomFrontier(Frontier):
    """Frontier that hands out queued URLs in uniformly random order."""

    def __init__(self, name="random frontier", seedfile=None):
        Frontier.__init__(self, name, seedfile)

    def geturl(self):
        """Remove and return a uniformly random queued URL, or None.

        Bug fix: the old code drew an index and then called
        list.remove(url), which rescans the list (an extra O(n) pass)
        and, when the list holds duplicate urls, removes the first equal
        element instead of the slot that was drawn.  pop(index) removes
        exactly the chosen slot.
        """
        with self._lock:
            cnt = self.getcnt()
            if not cnt:
                return None
            return self._seeds.pop(random.randint(0, cnt - 1))

    def addurl(self, url):
        """Append *url* under the lock."""
        with self._lock:
            self._seeds.append(url)

class AlreadySeen(LObject):
    """Abstract record of URLs the crawler has already encountered."""

    def __init__(self, name):
        # Abstract: must be instantiated through a subclass.
        if self.__class__ is AlreadySeen:
            raise NotImplementedError("alreadyseen can not be created, abstarct class")
        LObject.__init__(self, name)

class DBAlreadySeen(AlreadySeen):
    """Already-seen set persisted in a bsddb btree; writes are lock-guarded."""

    def __init__(self, name, dbname):
        AlreadySeen.__init__(self, name)
        self._lock = threading.RLock()
        self._dbname = dbname
        self._db = bsddb.btopen(dbname)

    def addurl(self, url):
        """Record *url* as seen; return True only when it was newly added."""
        with self._lock:
            if self._db.has_key(url):
                return False
            self._db[url] = str(CrawlerStatus.BEGIN)
            return True

    def clean(self):
        """Close the underlying database."""
        self._db.close()

    def getStatus(self, url):
        """Return the stored status/retry-count for *url* (DEAD when unknown)."""
        return int(self._db.get(url, CrawlerStatus.DEAD))

    def updateStatus(self, url, status):
        """Persist *status* (stringified) for *url*."""
        self._db[url] = str(status)

    def sync(self):
        """Flush pending writes to disk."""
        self._db.sync()

class StatusUpdate(LObject):
    """Records crawl outcomes and re-queues failed URLs for retry.

    The already-seen db doubles as a retry counter: for a failed url the
    stored "status" is the number of attempts so far (starting from
    CrawlerStatus.BEGIN == 1), bumped on each FAILED report until it
    reaches retry_times (from crawlerSettings), when the url is marked
    DEAD and no longer re-queued.
    """
    def __init__(self,name="status update",frontier=None,alreadyseen=None):
        LObject.__init__(self,name)
        self._frontier=frontier
        self._alreadyseen=alreadyseen

    def updateStatus(self,url,status):
       # FAILED is a sentinel: never stored directly, it means "bump the
       # failure counter and retry if we still can".
       if(status==CrawlerStatus.FAILED):
           failed_times=self._alreadyseen.getStatus(url)
           if(failed_times>=retry_times):
               # Give up: mark permanently dead, do not re-queue.
               self._alreadyseen.updateStatus(url,CrawlerStatus.DEAD)
           else:
               # Store the incremented attempt count and retry the url.
               self._alreadyseen.updateStatus(url,failed_times+1)
               self._frontier.addurl(url)
       else:
           # Any non-FAILED status (e.g. SUCCESS) is stored verbatim.
           self._alreadyseen.updateStatus(url,status)

class FrontierScheduler(LObject):
    """Feeds newly discovered URLs into the frontier, rejecting anything
    caught by the url filters and de-duplicating via already-seen."""

    def __init__(self, name, frontier, alreadyseen, filters):
        LObject.__init__(self, name)
        self._frontier = frontier
        self._alreadyseen = alreadyseen
        self._filters = filters

    def loadseeds(self):
        pass

    def addurl(self, url):
        """Queue *url* unless it is empty, filtered out, or already seen."""
        if not url:
            return
        if filter_url(self._filters, url):
            return
        if self._alreadyseen.addurl(url):
            self._frontier.addurl(url)

    def addurls(self, urls):
        """Queue every url in *urls*, then flush the already-seen db."""
        for candidate in urls:
            self.addurl(candidate)
        self._alreadyseen.sync()

    def clean(self):
        """Close the already-seen database."""
        self._alreadyseen.clean()
        
class URLFilter(LObject):
    """Abstract URL filter.

    satisfy() tests some property of the url; _positive says whether a
    satisfied url should be KEPT.  filter() returns True when the url
    must be discarded -- i.e. when the test result disagrees with the
    filter's polarity.
    """

    def __init__(self, name, filename, positive):
        # Abstract: must be instantiated through a subclass.
        if self.__class__ is URLFilter:
            raise NotImplementedError("urlfilter can not be created, abstarct class")
        LObject.__init__(self, name)
        self._filename = filename
        self._positive = positive
        self.prepare()

    def prepare(self):
        """Hook for subclasses to load their filter data."""
        pass

    def satisfy(self, url):
        return self._positive

    def filter(self, url):
        """True when *url* should be dropped."""
        # Collapses the original four-way if/else: drop exactly when the
        # test outcome and the polarity disagree (truthiness XOR).
        return bool(self.satisfy(url)) != bool(self._positive)

class UrlFormatFilter(URLFilter):
    """Keeps only urls whose host name has at least two dot-separated parts."""

    def __init__(self, name="url format filter", filename=None, positive=True):
        URLFilter.__init__(self, name, filename, positive)

    def satisfy(self, url):
        """True when the url has a non-empty host with more than one label."""
        host = urlparse.urlparse(url).netloc
        return bool(host) and len(host.split(".")) > 1

class DBFilter(URLFilter):
    """Filter driven by membership in an existing bsddb, opened read-only."""

    def __init__(self, name="DBFilter", filename=None, positive=None):
        self._db = None
        URLFilter.__init__(self, name, filename, positive)

    def prepare(self):
        """Open the database read-only; on any failure behave as empty."""
        try:
            self._db = bsddb.btopen(self._filename, "r")
        except Exception:
            self._db = None

    def satisfy(self, url):
        """True when *url* is present in the database."""
        if not self._db:
            return False
        return self._db.has_key(url)

class ProtocalFilter(URLFilter):
    """Filter on the url scheme against a whitelist loaded from a file."""

    def __init__(self, name="url filter", filename=None, positive=None):
        self._allowList = []
        URLFilter.__init__(self, name, filename, positive)

    def prepare(self):
        """Load the allowed schemes, one per line."""
        # extend() of an empty list is a no-op, so no guard is needed.
        self._allowList.extend(loadfile(self._filename))

    def satisfy(self, url):
        """True when the url's scheme appears in the allow list."""
        return urlparse.urlparse(url).scheme in self._allowList

class ExtFilter(URLFilter):
    """Filter on the url path's file extension against a list file."""

    def __init__(self, name="ext filter", filename=None, positive=None):
        self._allowList = []
        URLFilter.__init__(self, name, filename, positive)

    def prepare(self):
        """Load the extensions, one per line (presumably ".jpg"-style)."""
        self._allowList.extend(loadfile(self._filename))

    def satisfy(self, url):
        """True when the url path's extension appears in the list."""
        extension = os.path.splitext(urlparse.urlparse(url).path)[1]
        return extension in self._allowList

class Fetcher(LObject):
    """Abstract page fetcher."""

    def __init__(self, name):
        # Abstract: must be instantiated through a subclass.
        if self.__class__ is Fetcher:
            raise NotImplementedError("fetcher can not be created, abstarct class")
        LObject.__init__(self, name)

    def getpage(self, url, data=None, headers=None):
        """Fetch *url* and return a response object; subclass hook."""
        pass

class TwistedFetcher(Fetcher):
    # Placeholder for a Twisted-based fetcher: getpage is not implemented,
    # so this class currently inherits the no-op from Fetcher.
    def __init__(self,name="twisted fetcher"):
        Fetcher.__init__(self,name)



class UrllibFetcher(Fetcher):
    """Fetcher built on urllib2.urlopen."""
    def __init__(self,name="urllib fetcher"):
        Fetcher.__init__(self,name)


    def getpage(self,url,data=None,headers=None):
        """Fetch *url* and return the open urllib2 response object.

        Falls back to the crawler-wide default headers (http_headers from
        crawlerBase) when none are supplied.  May raise urllib2.URLError;
        callers (fetchpage) handle that.
        """
        if headers==None: headers=http_headers
        req=urllib2.Request(url,data,headers)
        if debug: print 'init req ok :', url
        # NOTE(review): no timeout passed to urlopen; this relies on the
        # @timelimit wrapper around fetchpage -- confirm acceptable.
        response=urllib2.urlopen(req)
        return response
 
class Writer(LObject):
    """Abstract key/value writer for crawl output."""

    def __init__(self, name, filename):
        # Abstract: must be instantiated through a subclass.
        if self.__class__ is Writer:
            raise NotImplementedError("writer can not be created, abstarct class")
        LObject.__init__(self, name)
        self._filename = filename
        self._writer = None
        self.prepare()

    def prepare(self):
        """Open/initialise the backing store; subclass hook."""
        pass

    def write(self, key, value):
        """Persist one key/value pair; subclass hook."""
        pass

    def close(self):
        """Release the backing store; subclass hook."""
        pass

class DBWriter(Writer):
    def __init__(self,name="db writer",filename=None):
        Writer.__init__(self,name,filename)
        self._lock=threading.RLock()

    def prepare(self):
        try:
            self._writer=bsddb.btopen(self._filename)
        except Exception,e:
            print 'can not create db write %s' % traceback.format_exc()
            sys.exit(-1)
      
    def write(self,key,value):
        #print 'writing for %s' % key
        self._lock.acquire()
        try:
            self._writer[key]=value
            randsync=random.randint(1,10)
            if(randsync>9):
                self._writer.sync()
        except Exception,e:
            raise e
        finally:
            self._lock.release()

    def close(self):
        if(self._writer!=None):
            self._writer.close()

class Storage(LObject):
    """Abstract sink for fetched pages."""

    def __init__(self, name, writer):
        # Abstract: must be instantiated through a subclass.
        if self.__class__ is Storage:
            raise NotImplementedError("storage can not be created, abstarct class")
        LObject.__init__(self, name)
        self._writer = writer

    def store(self, url, info, content):
        """Persist one fetched page; subclass hook."""
        pass

    def clean(self):
        """Close the underlying writer."""
        self._writer.close()

class LinkStorage(Storage):
    """Stores, per url, the pickled list of out-links parsed from the page."""
    def __init__(self,name="link storage", writer=None):
        Storage.__init__(self,name,writer)


    def store(self,url,info,content):
        # Extract out-links with the project's parser (parserutility) and
        # persist the pickled list under the page's url.  'info' (HTTP
        # headers) is intentionally unused here.
        links=parserLink(url,content)
        self._writer.write(url,pickle.dumps(links))

class PageStorage(Storage):
    """Stores the raw page body keyed by url."""
    def __init__(self,name="page storage", writer=None):
        Storage.__init__(self,name,writer)
    
    def store(self,url,info,content):
        # 'info' (the HTTP headers) is intentionally ignored; only the
        # body is written.
        pageContent=content
        if debug:print 'writing ', url
        self._writer.write(url,pageContent)
            
#def multiSavepage(request,response):
    #global storage_mutex
    #url=request.args[0]
    #storage=request.args[2]
    #storage_mutex.acquire()
    #savepage(url,response,storage)
    #storage_mutex.release()
    #slap=random.randint(minSlap,maxSlap)
    #time.sleep(slap)  

#def savepage(url,response,storage):
    #if(response):
        #try:
            #storage.store(url,response)
        #except Exception,e:
            #errLog(errLogger,"[storage-failed]: %s %s" % (url,traceback.format_exc().replace("\n"," ")))
        #else:
            #infoLog(infoLogger,"[storage-ok]: %s" % url)

def getresult(url,response):
    info=response.info()
    if debug: print 'get info ok : ', url
    content=response.read()
    if debug: print 'get content ok: ', url
    return (info,content)

@timelimit(60)
def fetchpage(url,fetcher,storage,statusUpdater,postProcessers):
    global infoLogger,errLogger,storage_mutex
    if debug:print 'getting url %s' % url
    try:
        response=fetcher.getpage(url)
    except urllib2.URLError,e:
        code=-1
        errmsg=None
        if(hasattr(e,"code")):
            code=e.code
        elif(hasattr(e,"reason")):
            code=e.reason[0]
        if(code=="104"):
            time.sleep(500)
            errmsg="104 error for url"
            errLog(errLogger,"[fetch-error]:%s %s" % (url,errmsg))
        else:
            errmsg=traceback.format_exc().replace('\n'," ")
            errLog(errLogger,"[fetch-error]:%s %s" % (url,errmsg))
            statusUpdater.updateStatus(url,CrawlerStatus.FAILED)
    except Exception,e:
        errmsg=traceback.format_exc().replace('\n'," ")
        errLog(errLogger,"[fetch-error]:%s %s" % (url,errmsg))
        statusUpdater.updateStatus(url,CrawlerStatus.FAILED)
    else:
        if(response.getcode()==200):
            infoLog(infoLogger,"[fetch-ok]: %s" % url)
        #return response
            #storage_mutex.acquire()
            try:
                info,content=getresult(url,response)
                storage.store(url,info,content)
            except Exception,e:
                errLog(errLogger,"[storage-failed]: %s %s" % (url,traceback.format_exc().replace("\n"," ")))
                statusUpdater.updateStatus(url,CrawlerStatus.FAILED)
            else:
                infoLog(infoLogger,"[storage-ok]: %s" % url)
                statusUpdater.updateStatus(url,CrawlerStatus.SUCCESS)
                for postProcesser in postProcessers:
                    try:
                        postProcesser.process(url,info,content)
                    except Exception,e:
                        errLog(errLogger,"[process-failed]: %s %s" % (url,traceback.format_exc().replace("\n"," ")))
            finally:
                response.close()
                #storage_mutex.release()
            
             
            slap=random.randint(minSlap,maxSlap)
            time.sleep(slap)
        
        
def thread_exception(request, exc_info):
    """threadpool exception callback: mark the failing url FAILED, and
    abort the process when exc_info is not a normal exception triple."""
    url = request.args[0]
    updater = request.args[3]
    updater.updateStatus(url, CrawlerStatus.FAILED)
    if not isinstance(exc_info, tuple):
        print("**** Exception occured in request #%s: %s" % (request.requestID, exc_info))
        raise SystemExit

def config(seedfile,storagefile):
    """Wire up all crawler components for one run.

    Builds the FIFO frontier, urllib fetcher, bsddb-backed page storage,
    url filters, already-seen db, scheduler, and status updater; seeds
    the frontier from *seedfile*; initialises global HTTP settings.

    Returns [frontier, frontierScheduler, fetcher, storage, urlFilters,
    statusUpdater, postProcessers].
    """
    frontier=BasicQueueFrontier(seedfile=seedfile)
    fetcher=UrllibFetcher()
    writer=DBWriter(filename=storagefile)
    storage=PageStorage(writer=writer)
    urlFilters=[]
    
    # Keep urls that have a dotted host, an allowed scheme
    # ("allow.protocal"), and not a denied extension ("deny.ext").
    fFilter=UrlFormatFilter()
    pFilter=ProtocalFilter(filename="allow.protocal",positive=True)
    extFilter=ExtFilter(filename="deny.ext",positive=False)
    #seenFilter=DBFilter(filename=alreadyseendb,positive=False)
    
    urlFilters.append(fFilter)
    urlFilters.append(pFilter)
    urlFilters.append(extFilter)
    #urlFilters.append(seenFilter)
    
    dbAlreadySeen=DBAlreadySeen("db already seen",".alreadyseen.bdb")
    frontierScheduler=FrontierScheduler("frontier scheduler",frontier,dbAlreadySeen,urlFilters)
    statusUpdater=StatusUpdate("status updater",frontier,dbAlreadySeen)
  
    # Seed the frontier through the scheduler so filters and the
    # already-seen db apply to the seed urls too.
    seeds=loadfile(seedfile)
    print 'seeds num %d' % len(seeds)
    #for seed in seeds:
    frontierScheduler.addurls(seeds)

    # No post-processors are currently enabled.
    postProcessers=[]
    #rmrbPostProcesser=RMRBPostProcesser(dbname="rmrbobject.bdb",frontierScheduler=frontierScheduler)
    #postProcessers.append(rmrbPostProcesser) 
   
    #cctvPostProcesser=CCTVPostProcesser(frontierScheduler=frontierScheduler)
    #postProcessers.append(cctvPostProcesser) 

    print str(frontier)
    initHTTPRequest()
    #if(withDNSCache):
        #addDNSCache(DNSCacheSize)   
 
    return [frontier,frontierScheduler,fetcher,storage,urlFilters,statusUpdater,postProcessers]

def clean(frontierScheduler,storage,postProcessers):
    if debug: print 'frontierScheduler clean'
    frontierScheduler.clean()
    if debug: print 'storage clean'
    storage.clean()
    if debug: print 'postProcesser clean'
    for postProcesser in postProcessers:
        postProcesser.clean()

def multiThreadFetcher(seedfile,storagefile,numThread):
    frontier,frontierScheduler,fetcher,storage,urlFilters,statusUpdater,postProcessers=config(seedfile,storagefile)
    if(numThread<1):
        singleThreadFetcher(seedfile,storagefile)
    else:
        pool=threadpool.ThreadPool(numThread)
        requestsArgs=[]
        num=0
        while(True):
            url=frontier.geturl()
            if(url):
                if(not filter_url(urlFilters,url)):
                    num=num+1
                    if(num%1000==0):
                        print num
                    args=([url,fetcher,storage,statusUpdater,postProcessers],None)
                    requestsArgs.append(args)
            else:
                break

        print 'total request ', len(requestsArgs)
        if(requestsArgs):
            requests=threadpool.makeRequests(fetchpage,[args for args in requestsArgs],None,thread_exception)
            for req in requests:
                pool.putRequest(req)

            print 'starting crawl'
            pool.wait()
  
            print 'crawling end'
            clean(frontierScheduler,storage,postProcessers)

def multiThreadFetcher2(seedfile,storagefile,numThread):
    """Crawl with a thread pool, feeding it while the crawl is running.

    Unlike multiThreadFetcher, this keeps pulling from the frontier
    during the crawl, so urls added back by retries (or post-processors)
    get fetched in the same run.
    """
    frontier,frontierScheduler,fetcher,storage,urlFilters,statusUpdater,postProcessers=config(seedfile,storagefile)
    if(numThread<1):
        # Degenerate pool size: fall back to the sequential crawler.
        # NOTE(review): config() above already opened the bsddb files and
        # singleThreadFetcher calls config() again -- confirm this double
        # open is safe.
        singleThreadFetcher(seedfile,storagefile)
    else:
        pool=threadpool.ThreadPool(numThread)
        requestsArgs=[]
        num=0
        print 'starting crawl'
        while(True):
            url=frontier.geturl()
            if(url):
                if(not filter_url(urlFilters,url)):
                    num=num+1
                    if(num%1000==0):
                        print num
                    # One pool request per url; thread_exception marks the
                    # url FAILED if the worker thread raises.
                    args=([url,fetcher,storage,statusUpdater,postProcessers],None)
                    pool.putRequest(threadpool.makeRequests(fetchpage,[args,],None,thread_exception)[0])
            else:
                # Frontier drained for now: poll until either new urls
                # appear (retries may add some) or no results are pending.
                time.sleep(5)
                while 1:
                    try:
                        pool.poll()
                    except threadpool.NoResultsPending:
                        print 'no results pending'
                        break
                    else:
                        time.sleep(1) 
                        if(not frontier.empty()):
                            break
                # Only finish when the frontier is still empty after the
                # polling loop; otherwise resume pulling urls.
                if(frontier.empty()):
                    pool.wait()
                    break

        print 'crawling end' 
        clean(frontierScheduler,storage,postProcessers)

def singleThreadFetcher(seedfile,storagefile):
    frontier,frontierScheduler,fetcher,storage,urlFilters,statusUpdater,postProcessers=config(seedfile,storagefile)
    while(True):
        url=frontier.geturl()
        if(url):
            if(not filter_url(urlFilters,url)):
               try:
                   fetchpage(url,fetcher,storage,statusUpdater,postProcessers)
               except TimeoutError,e:
                   statusUpdater.updateStatus(url,CrawlerStatus.FAILED)
               #savepage(url,response,storage)
               #slap=random.randint(minSlap,maxSlap)
               #time.sleep(slap)
            else:
               print 'filterred:' ,url 
        else:
            break

    print 'crawling end'
    clean(frontierScheduler,storage,postProcessers)
   
    

if __name__=='__main__':
    import sys
    print 'usage python %s [--multi/--single] [infologger] [errlogger] [bRestart] [seedfile] [storagefile] [numThread]' % sys.argv[0]
  
    type=sys.argv[1]
    infologname=sys.argv[2]
    errlogname=sys.argv[3] 
    bRestart=int(sys.argv[4])
    infoLogger,errLogger=setupLogging(infologname,errlogname,bRestart)
    
    seedfile=sys.argv[5]
    storagefile=sys.argv[6]
    if(type=="--single" or type=="-s"):
        storage_mutex=threading.Lock()
        singleThreadFetcher(seedfile,storagefile)
    elif(type=="--multi" or type=="-m"):
        numThread=int(sys.argv[7])
        storage_mutex=threading.Lock()
        multiThreadFetcher2(seedfile,storagefile,numThread)
    else:
        print 'wrong type'
