#!/usr/bin/env python
# author: ahmed.iitkgp@gmail.com
# 25th dec 2010
# basic crawler for Lucene indexing
# GNU License

import sys
import re
import urllib2
import urlparse

toCrawl   = set([sys.argv[1]])
crawled   = set([])
linkRegX  = re.compile('<a\s*href=[\'|"](.*?)[\'|"].*?>')
parsedUrl = urlparse.urlparse(sys.argv[1])
baseUrl   = parsedUrl[1]

while len(toCrawl) > 0:
	try:
		nowCrawling = toCrawl.pop()
	except KeyError:
		raise StopIteration
	try:
		response = urllib2.urlopen(nowCrawling)
	except:
		continue
		
	pageContent = response.read()
	startPos = pageContent.find('<title>')
	if startPos != -1:
		endPos = pageContent.find('</title>', startPos + len('<title>'))
		if endPos != -1:
			title = pageContent[startPos+len('<title>'):endPos]
			#print title
			
	crawled.add(nowCrawling)
	print "done:",len(crawled),"----2be:",len(toCrawl),  "----now:",nowCrawling	
	linksFound = linkRegX.findall(pageContent)
	currentUrl = urlparse.urlparse(nowCrawling)
	for link in (linksFound.pop(0) for _ in xrange(len(linksFound))):
		if link.startswith('/'):
			link = 'http://' + currentUrl[1] + link
		elif link.startswith('#'):
			link = 'http://' + url[1] + currentUrl[2] + link
		elif not link.startswith('http'):
			link = 'http://' + currentUrl[1] + '/' + link
		if link not in crawled:
			url = urlparse.urlparse(link)
			base = url[1]
			#print base, baseUrl
			if base == baseUrl:
				toCrawl.add(link)
				

# Information about the crawling process :
print "",
print "Total links crawled =", len(crawled)
