import urlsplit, linkDigg, time, os
from threading import Thread
import  pgdb as PgSQL

"""
Search the base url of a site and gathers all the links within them that
are searchable
"""


baseUrl = ""
threads = []
maxThreads = None
cacheFolder = "data"
dbuser=None
dbtable=None
dbpasswd=None

def baseSearch(	url=None,
				threads=1,
				user=None,
				passwd=None,
				table=None,
				folder=None):
	global baseUrl, maxThreads, dbpasswd, dbuser, dbtable, cacheFolder
	#Define Globals
	dbpasswd 	= passwd
	dbuser		= user
	dbtable		= table
	cacheFolder	= folder
	baseUrl 		= urlsplit.baseUrl(url)
	maxThreads 	= threads - 1
	
	print "Base Site:\t", baseUrl
	print "Max Threads:\t", threads
	
	page = linkDigg.search(baseUrl)
	
	if not page.header:
		return

	for link in page.links:
		if len(link.href) > 0:
			if link.href.find(baseUrl) > -1:
				checkThreads(searchThreaded(link.href))
			elif link.href[0] == "/":
				link.href = urlsplit.subUrl(baseUrl,link.href)
				checkThreads(searchThreaded(link.href))
			else:
				if link.href[0:7] != "http://" and link.href[0:8] != "https://":
					link.href = urlsplit.subUrl(url,link.href)
					checkThreads(searchThreaded(link.href))
	

class searchThreaded(Thread):
	"""
	Worker thread that crawls one url: caches the fetched markup under
	cacheFolder and queues every further on-site link for crawling.
	"""
	def __init__ (self, url):
		Thread.__init__(self)
		self.url = url

	def run(self):
		global threads, baseUrl, cacheFolder

		target = self.url
		cacheName = time.time()

		#Skip pages that were already crawled
		if checkUrl(target):
			return

		#Register the page before crawling so other threads skip it
		addUrl(target, cacheName)

		#Fetch the page; a missing header means the fetch failed
		page = linkDigg.search(target)
		if not page.header:
			return

		#Write the crawled markup to the cache folder
		cache = open('%s/%s.html'%(cacheFolder,cacheName),'w')
		cache.write(page.data)
		cache.close()

		#Queue every on-site link for further crawling
		for link in page.links:
			href = link.href
			if not href:
				continue
			if href.find(baseUrl) > -1:
				#Full url already on this site, e.g. http://base/foo
				checkThreads(searchThreaded(href))
			elif href[0] == "/":
				#Site-absolute link, e.g. /foo
				link.href = urlsplit.subUrl(baseUrl, href)
				checkThreads(searchThreaded(link.href))
			elif href[0:7] != "http://" and href[0:8] != "https://":
				#Relative link, e.g. ./foo or ../foo or foo
				link.href = urlsplit.subUrl(target, href)
				checkThreads(searchThreaded(link.href))
						
				


def addUrl(url,filename):
	"""
	Record url in the stabber_page table so it won't be re-crawled.

	url      -- page url that is about to be crawled
	filename -- cache file name (a timestamp) inside cacheFolder

	Does nothing if the url is already present.  NOTE(review): the
	check-then-insert pair is not atomic, so two threads may still
	race on the same url; a unique index on url would close that gap.
	"""
	global dbpasswd, dbuser, dbtable, cacheFolder

	absfile = "%s/%s/%s"%(os.path.abspath("."),cacheFolder,filename)
	if cacheFolder[0] == "/": #cacheFolder is already absolute (*nix)
		absfile = "%s/%s"%(cacheFolder,filename)

	## SQL_START ##
	cx = PgSQL.connect(host="localhost", user=dbuser, password=dbpasswd, database=dbtable)
	try:
		cu = cx.cursor()
		#Parameterized query: url comes from crawled pages and must not
		#be interpolated into the SQL string (injection risk).
		cu.execute("select * from stabber_page where url=%s", (url,))
		if cu.fetchone():
			return
		cu.execute("""insert into stabber_page (url,cache,last_crawl)
				values (%s,%s,NOW())""", (url, absfile))
		cx.commit()
	finally:
		#The original leaked the connection on the early return path;
		#always close it.
		cx.close()
	## SQL_END ##
	
def checkUrl(url):
	"""
	Return True if url has already been recorded in stabber_page,
	False otherwise.
	"""
	global dbpasswd, dbuser, dbtable
	## SQL_START ##
	cx = PgSQL.connect(host="localhost", user=dbuser, password=dbpasswd, database=dbtable)
	try:
		cu = cx.cursor()
		#Parameterized query to avoid SQL injection via crawled urls.
		cu.execute("select * from stabber_page where url=%s", (url,))
		return cu.fetchone() is not None
	finally:
		#The original never closed this connection; one leaked per call.
		cx.close()
	## SQL_END ##

def checkThreads(newThread):
	"""
	Start newThread and track it in the global thread pool.

	If the pool already holds maxThreads entries the new thread is
	joined immediately (blocking), which throttles the crawl rate.
	Finished threads are then pruned from the pool.
	"""
	global threads, maxThreads
	newThread.start()
	if len(threads) >= maxThreads:
		newThread.join()
	threads.append(newThread)
	#Prune dead threads.  The original popped items while iterating
	#with enumerate(), which skips the element after every removal and
	#leaves dead threads in the pool; rebuild the list instead.
	#is_alive() (py2.6+) replaces isAlive(), removed in Python 3.9.
	threads[:] = [t for t in threads if t.is_alive()]
