#! /usr/bin/env python
# -*- coding: utf-8 -*-
#http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz

'''
This is a simple crawler script: it walks pages breadth-first from a root URL,
indexes the links it finds, and downloads the images on each page.
'''
import os
import urllib2
import urllib
import sys
import logging
import logging.handlers
from  urlparse import urljoin
from BeautifulSoup import BeautifulSoup as perser
import worker
LOG_SAVE_DIR     = '/tmp'
LOG_MAX_BYTES    = 3000000
LOG_BACKUP_COUNT = 6
logger = None

def initLog(logName):
    """Create and return a logger named logName that writes rotated log
    files under LOG_SAVE_DIR (size/count come from module constants)."""
    log = logging.getLogger(logName)
    fmt = logging.Formatter(
        '[%(asctime)s %(process)d %(thread)d] %(levelname)s '
        '(%(filename)s:%(lineno)d) %(message)s')
    handler = logging.handlers.RotatingFileHandler(
        '%s/%s.log' % (LOG_SAVE_DIR, logName),
        maxBytes=LOG_MAX_BYTES,
        backupCount=LOG_BACKUP_COUNT)
    handler.setFormatter(fmt)
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)
    return log

class crawler:
    """Breadth-first web crawler.

    Indexes every in-domain page URL it reaches and queues each page's
    images for download on a worker thread pool.

    Relies on module-level names set up elsewhere in this file:
    ``logger`` (created by initLog in __main__), ``local_dir`` (image
    save directory from argv), and the file-level ``worker`` import.
    """

    def __init__(self, *roots):
        self.roots = roots                    # root URLs; crawl stays inside these prefixes
        self.now_root = ""                    # root currently being crawled
        self.urlset = set()                   # URLs already indexed
        self.opener = urllib2.build_opener()  # shared opener for page fetches
        self.wm = worker.WorkerManager(80)    # thread pool for image downloads

    def crawl(self, depth=2):
        """Crawl each root breadth-first for `depth` levels and return the
        set of indexed URLs."""
        for root in self.roots:
            self.now_root = root
            # BUGFIX: the frontier must restart for each root. Previously
            # `pages` was created once outside the loop, reassigned to a
            # set inside, and then `pages.append(root)` raised
            # AttributeError on the second root.
            pages = [root]
            for _ in range(depth):
                newpages = set()
                for page in pages:
                    newpages.update(self.get_urls(page))
                pages = newpages
        self.wm.wait_for_complete()
        return self.urlset

    def get_image(self, url, local_filename):
        """Download one image to local_filename (runs on a worker thread)."""
        urllib.urlretrieve(url, local_filename)
        print(url)

    def get_urls(self, page):
        """Fetch `page`, index it, queue its images for download, and
        return the set of new in-domain links found on it (or [] on a
        fetch/parse failure)."""
        newpages = set()
        try:
            c = self.opener.open(page)
        except Exception:  # BUGFIX: narrowed from bare except (which also ate KeyboardInterrupt)
            print("Could not open %s" % page)
            return []
        self.add_toIndex(page)
        try:
            # decode with 'replace' so multi-byte (e.g. Japanese) pages never raise
            soup = perser(c.read().decode('utf-8', 'replace'))
        except Exception:
            # links that point at binary resources (jpg, png, ...) fail to parse
            print("soup err %s" % page)
            return []
        for link in soup('a'):  # anchor tags
            if 'href' not in dict(link.attrs):
                continue
            url = urljoin(page, link['href'])
            if url.find("'") != -1:
                continue
            print("LINK: %s" % url)
            url = url.split('#')[0]  # drop the fragment
            if url[0:4] == 'http' and not self.isindexed(url) and self.is_samedomain(url):
                newpages.add(url)
                print("URL: %s" % url)
        for img in soup('img'):
            if 'src' not in dict(img.attrs):
                continue
            if ".gif" in img['src']:
                continue
            # BUGFIX: join a relative src against the *current* page, not the
            # global crawl root `address` -- otherwise relative image paths on
            # sub-pages resolved against the wrong base URL.
            url = urljoin(page, img['src'])
            print(url)
            # NOTE: `local_dir` is a module-level global set in __main__
            local_filename = local_dir + "/" + os.path.basename(img['src'])
            self.wm.add_job(self.get_image, url, local_filename)
        return newpages

    def isindexed(self, url):
        """Return True if url has already been indexed."""
        return url in self.urlset

    def add_toIndex(self, url):
        """Record url in the index if it is in-domain and not seen yet."""
        if self.is_samedomain(url) and not self.isindexed(url):
            self.urlset.add(url)
            print("Indexing %s" % url)
            logger.info(url)  # module-level logger created in __main__

    def is_samedomain(self, url):
        """In-domain check: url must start with the current root prefix."""
        return url.startswith(self.now_root)

def main(root_url, depth):
    """Crawl from root_url down to the given depth and print the set of
    URLs that were indexed."""
    print(crawler(root_url).crawl(depth))

if __name__ == '__main__' :
    # Usage: crawler.py <root_url> <image_save_dir>
    if len(sys.argv) < 3:
        sys.stderr.write("usage: %s <root_url> <image_save_dir>\n" % sys.argv[0])
        sys.exit(1)
    logger = initLog("crawler.log")
    # Module-level assignments are already global; the previous `global`
    # statements here were no-ops and have been removed.
    address = sys.argv[1]    # crawl root, read by crawler via main()
    local_dir = sys.argv[2]  # image save directory, read by crawler.get_urls
    # BUGFIX: os.makedirs instead of os.system("mkdir -p %s") -- avoids
    # shell injection through the directory argument and is portable.
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)
    depth = 50
    main(address, depth)
