# jsb/plugs/common/spider.py
#
#


""" 
    Spider plugin.. Spider websites and makes an index into them.

    taken from http://code.activestate.com/recipes/576551-simple-web-crawler/

    -- BHJTW 15-11-2011 Adapted for JSONBOT

"""

__version__ = "0.2"
__copyright__ = "CopyRight (C) 2008-2011 by James Mills"
__license__ = "MIT"
__author__ = "James Mills"
__author_email__ = "James Mills, James dot Mills st dotred dot com dot au"

## jsb imports

from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.url import geturl2
from jsb.lib.threadloop import ThreadLoop
from jsb.lib.callbacks import callbacks

from jsb.imports import getBeautifulSoup
soup = getBeautifulSoup()

## basic imports

import logging
import re
import sys
import time
import math
import urllib2
import urlparse
import optparse
from cgi import escape
from traceback import format_exc
from Queue import Queue, Empty as QueueEmpty

class Crawler(ThreadLoop):

    """ ThreadLoop that crawls a site breadth-first and replies link stats. """

    def __init__(self):
        ThreadLoop.__init__(self)

    def handle(self, url, depth, event=None, *args):
        """ crawl *url*, following at most *depth* same-host pages, and
            reply found/followed/rate statistics on *event* (if given).
        """
        sTime = time.time()
        # depth may arrive as a string (e.g. straight from command args)
        try: depth = int(depth)
        except (TypeError, ValueError): depth = 30
        event and event.reply("crawling %s (max depth: %d)" % (url, depth))
        root = url.split("#")[0]
        host = urlparse.urlparse(root)[1]
        urls = []
        linknr = 0
        follownr = 0
        page = Fetcher(root)
        page.fetch()
        q = Queue()
        for u in page.urls: q.put(u)
        followed = [root]
        n = 0
        while True:
            # BUGFIX: use get_nowait() — a plain get() blocks forever on an
            # empty queue, so QueueEmpty would never be raised and the loop
            # could never terminate.
            try: u = q.get_nowait()
            except QueueEmpty: break
            n += 1
            if u not in followed:
                try:
                    h = urlparse.urlparse(u)[1]
                    if re.match(".*%s" % host, h):
                        followed.append(u)
                        follownr += 1
                        try:
                            t = root + "/" + u.split("/", 3)[3]
                        except IndexError: t = u
                        t = t.split("#")[0]
                        page = Fetcher(t)
                        page.fetch()
                        for i, uu in enumerate(page):
                            if uu not in urls:
                                linknr += 1
                                # BUGFIX: queue the newly discovered link uu,
                                # not t (the page it was found on) — queueing t
                                # repeatedly broke the breadth-first crawl.
                                q.put(uu)
                                urls.append(uu)
                        if n > depth and depth > 0:
                            break
                except Exception as e:
                    logging.warn("ERROR: Can't process url '%s' (%s)" % (u, e))
        eTime = time.time()
        # guard against a zero elapsed time in the rate division below
        tTime = (eTime - sTime) or 0.001
        event and event.reply("found:    %d" % linknr)
        event and event.reply("followed: %d" % follownr)
        event and event.reply("stats:    (%d/s after %0.2fs)" % (int(math.ceil(float(linknr) / tTime)), tTime))
 

class Fetcher(object):

    """ fetch a single page and collect the absolute urls of its <a> tags. """

    def __init__(self, url):
        self.url = url    # page to fetch
        self.urls = []    # links harvested so far

    def __getitem__(self, x):
        # sequence protocol: also gives us iteration and "in" for free
        return self.urls[x]

    def fetch(self):
        """ download self.url and append each new anchor href to self.urls. """
        content = geturl2(self.url)
        if not content: return
        parsed = soup.BeautifulSoup(content)
        for anchor in parsed('a'):
            href = anchor.get("href")
            if href is None: continue
            # skip parent-relative paths and hrefs already part of our url
            if ".." in href or href in self.url: continue
            target = urlparse.urljoin(self.url, escape(href))
            target = target.split("#")[0]
            if target not in self:
                self.urls.append(target)

# module-level crawler instance; started in init() and stopped in shutdown()
crawler = Crawler()

def getLinks(url):
    page = Fetcher(url)
    page.fetch()
    for i, url in enumerate(page):
        print "%d. %s" % (i, url)

def dumbcb(bot, event):
    """ callback that deliberately does nothing. """
    pass

def init():
    """ plugin init: start the crawler thread and register a START callback. """
    crawler.start()
    callbacks.add("START", dumbcb)
    
def shutdown():
    """ plugin shutdown: stop the crawler thread. """
    crawler.stop()

def handle_spider(bot, event):
    """ spider <url> [<depth>] .. push a crawl job onto the crawler.

        *depth* defaults to 30 when omitted or not a number.
    """
    if not event.args:
        event.missing("<url> [<depth>]")
        # BUGFIX: return here — without it event.args[0] below raises IndexError
        return
    url = event.args[0]
    # BUGFIX: convert depth to int; the raw string argument would later break
    # the "%d" format in Crawler.handle
    try: depth = int(event.args[1])
    except (IndexError, ValueError): depth = 30
    crawler.put(5, url, depth, event)
    event.reply("events pushed to the crawler")
    
# register the spider command (OPER-only) and a usage example
cmnds.add("spider", handle_spider, "OPER")
examples.add("spider", "run the spider on a site.", "spider http://jsonbot.org/handbook")
