# (setq py-indent-offset 2)

import urllib
import re
from java.lang import *

def crawl(site, depth):
  try: 
	global hashtable
	if depth < MAX_DEPTH:
	  key = String(site).hashCode()
	  if hashtable.has_key(key):
		print '[%s] already crawled.' % site
		return
	  value = [site, []]
	  hashtable[key] = value
	  print 'crawling [%s]...' % site, 
	  url = urllib.urlopen(site)
	  content = url.read()
	  hits = pattern.findall(content)
	  print 'done.'
	  value[1] = hits
	  for hit in hits:
		crawl(hit, depth + 1)
  except:
	pass
	  
# --- crawler configuration and entry point -------------------------------
# Maximum recursion depth for crawl(); levels 0..MAX_DEPTH-1 are visited.
MAX_DEPTH = 3
# Extracts absolute http:// targets from double-quoted href attributes.
# Non-greedy so each match stops at the closing quote.
pattern = re.compile(r'href="(http://.*?)"')
# Seed URL where the crawl starts.
base = r'http://www.ceid.upatras.gr'
# Visited map shared with crawl(): Java-hash(url) -> [url, [hits...]].
# Doubles as the "already crawled" set.
hashtable = {}

# Kick off the crawl at depth 0 from the seed URL.
crawl(base, 0)
    


