
import re
import sys
import time
import urllib
import urllib.request

from utils.minisax import AdvHTMLParser
from utils.googleSearch import GoogleSearch

class TagResult:
	"""Lightweight record for one parsed HTML tag.

	Holds the tag name, its attribute list as (key, value) pairs (or
	None), and any captured text content (or None).
	"""

	def __init__(self, name, arg, text):
		self.name = name   # lower-cased tag name, e.g. 'a' or 'title'
		self.arg = arg     # list of (attr, value) tuples, or None
		self.text = text   # accumulated text content, or None

	def get(self, name):
		"""Return the value of attribute *name*, or None when the tag
		has no attributes or no attribute with that name."""
		for key, value in (self.arg or []):
			if key == name:
				return value
		return None
	
class Soup(AdvHTMLParser):
	"""Minimal BeautifulSoup-like facade over AdvHTMLParser.

	Can extract anchor tags and the document title from an HTML
	string; other tags are not supported.
	"""

	def __init__(self, markup):
		AdvHTMLParser.__init__(self)
		self.markup = markup  # raw HTML to parse on demand
		self.result = []      # TagResult objects from the last query
		self.tag = ""         # tag name currently being collected
		self.title = ""       # accumulated <title> text
		self.last = ""        # most recently opened tag name

	def findAll(self, tag):
		"""Return a list of TagResult for *tag* ('a' or 'title');
		any other tag yields None."""
		wanted = tag.lower()
		if wanted == "a":
			return self.findAnchors()
		if wanted == "title":
			return self.findTitle()
		return None

	def findAnchors(self):
		# Re-feed the markup, collecting every <a> start tag.
		self.result = []
		self.tag = 'a'
		self.feed(self.markup)
		return self.result

	def findTitle(self):
		# Parse lazily: feed the markup only when no title text has
		# been captured by a previous feed.
		if not self.title:
			self.feed(self.markup)
		self.result = [TagResult('title', None, self.title)]
		return self.result

	def handle_starttag(self, tag, attrs):
		tag = tag.lower()
		if tag == self.tag:
			self.result.append(TagResult(tag, attrs, None))
		self.last = tag

	def handle_endtag(self, tag):
		self.last = None

	def handle_data(self, data):
		# Accumulate text only while the innermost open tag is <title>.
		if self.last == "title":
			self.title = self.title + data
		
class Crawler:
	"""Breadth-first web crawler.

	Starting from a root URL it fetches pages, extracts anchor links
	and e-mail addresses, and follows discovered links level by level
	up to a maximum depth. Subclasses customise behaviour through the
	OnProcessPage / OnProcessTitle / OnProcessEmail hooks.
	"""

	# Loose e-mail pattern; compiled once here instead of once per page.
	_RE_EMAIL = re.compile(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}')

	def __init__(self, sleep=0.5):
		self.sleep = sleep  # politeness delay (seconds) between fetches; 0 disables
		self.stop = False   # set True externally to abort the crawl

	def Process(self, url, maxDepth, hostOnly):
		"""Crawl starting at *url*, following links for *maxDepth* levels.

		hostOnly -- when True, only URLs under the start URL's
		scheme+host are fetched.
		"""
		self.hostOnly = hostOnly
		self.parsed = set()    # URLs already attempted (dedup)
		self.pending = list()  # frontier: discovered, not yet fetched
		self.root = self.GetRoot(url)

		self.ProcessUrl(url)
		for _depth in range(maxDepth):
			self.ProcessPendingUrl()

	def ProcessUrl(self, url):
		self.DoProcessUrl(url)

	def ProcessPendingUrl(self):
		# Drain the current frontier; DoProcessUrl refills self.pending
		# with newly discovered links for the next depth level.
		# Fix: the old code never cleared self.pending, so every level
		# re-iterated all URLs seen so far.
		urls, self.pending = self.pending, []
		for url in urls:
			self.DoProcessUrl(url)

	def DoProcessUrl(self, url):
		"""Fetch and process one URL, honouring stop/dedup/host filters."""
		if self.stop:
			return
		if url in self.parsed:
			return
		if self.hostOnly and not url.startswith(self.root):
			return

		try:
			html = self.GetPage(url)
			self.ProcessPage(url, html)
			if self.sleep != 0:
				time.sleep(self.sleep)
		except Exception as e:
			# Fix: original 'except (Exception, e):' is Python 2 syntax
			# that raises NameError in Python 3. Crawl is best-effort:
			# report the failure and continue with the next URL.
			print("FAIL:" + url + ":" + str(e))

	def GetPage(self, url):
		"""Download *url* and return its body as text.

		The URL is marked as parsed even if the fetch then fails, so a
		bad URL is never retried.
		"""
		self.parsed.add(url)
		# Fix: original called undefined 'urllib2' (a Python 2 module).
		# Decode to str so Soup.feed and the e-mail regex accept it;
		# assumes UTF-8-compatible pages -- TODO honour Content-Type charset.
		with urllib.request.urlopen(url) as resp:
			return resp.read().decode('utf-8', 'replace')

	def ProcessPage(self, url, html):
		self.OnProcessPage(url, html)
		soup = Soup(markup=html)
		self.ProcessPageLinks(soup, url, html)
		self.ProcessTitle(soup, url, html)

	def ProcessTitle(self, soup, url, html):
		for tag in soup.findAll("title"):
			self.OnProcessTitle(url, tag.text)

	def ProcessPageLinks(self, soup, url, html):
		"""Extract links and e-mail addresses from one page."""
		tags = soup.findAll("a")
		for tag in tags:
			self.PreProcessLinks(url, tag)
		for tag in tags:
			self.ProcessLinks(url, tag)

		for email in self._RE_EMAIL.findall(html):
			self.OnProcessEmail(url, email)

	def PreProcessLinks(self, url, tag):
		# Validation pass over an anchor before ProcessLinks runs; it
		# currently classifies the href but takes no action (hook for
		# subclass-style extension).
		href = tag.get('href')
		if href is None:
			return
		href = href.strip()
		if not href:
			return
		if href[0] == "#":
			return
		return

	def ProcessLinks(self, url, tag):
		"""Queue the anchor's target, or report it as an e-mail."""
		href = tag.get('href')
		if href is None:
			return
		href = href.strip()
		if not href:
			return

		# mailto: links carry an address, not a page to crawl.
		if "mailto:" in href:
			email = href.lower().replace("mailto:", "")
			self.OnProcessEmail(url, email)
			return

		if href[0] == '#':  # in-page fragment, not a new page
			return

		self.pending.append(self.GetRelPath(url, href))

	def GetRoot(self, url):
		"""Return the scheme+host prefix ('http://example.com'), or
		None when *url* has no '://' separator."""
		ndxHost = url.find('://')
		if ndxHost == -1:
			return None

		ndxObj = url.find('/', ndxHost + len('://'))
		return url if ndxObj == -1 else url[:ndxObj]

	def GetRelPath(self, url, href):
		"""Resolve *href* against the page *url* into an absolute URL.

		Fix: absolute links are now returned as-is. The old code
		returned the current page URL whenever the href merely
		*contained* 'http', which silently dropped every absolute link
		(host filtering is DoProcessUrl's job, not this method's).
		"""
		if href.startswith(('http://', 'https://')):
			return href
		if href[0] == '/':
			# Host-relative: anchor to the crawl root.
			return self.root + href
		if url[-1] == '/':
			return url + href
		# Relative to the page's directory: drop the last path segment.
		return url[:url.rfind('/')] + '/' + href

	def OnProcessTitle(self, url, title):
		"""Hook: called with each page title. Default: no-op."""
		return

	def OnProcessPage(self, url, html):
		"""Hook: called with each fetched page body. Default: no-op."""
		return

	def OnProcessEmail(self, url, email):
		"""Hook: called for each e-mail address found. Default: no-op."""
		return

class BizCrawler(Crawler):
	"""Crawler specialisation that accumulates a comma-separated list
	of unique e-mail addresses and remembers the last page title seen."""

	def __init__(self, sleep=0.5):
		Crawler.__init__(self, sleep)
		self.email = ""  # comma-separated unique addresses, in discovery order
		self.title = ""  # title of the most recently processed page

	def OnProcessPage(self, url, html):
		# TODO: Parse the page for an email.
		# TODO: Parse the page for a phone number. The major challenge is that there are so many phone number
		#       formats depending on the country.
		return

	def OnProcessPhone(self, url, email):
		"""Hook for future phone-number extraction; currently a no-op."""
		return

	def OnProcessEmail(self, url, email):
		# Fix: exact-match duplicate check. The old substring test
		# (self.email.find(email) != -1) wrongly dropped any new
		# address that happened to be a substring of one already
		# collected (e.g. 'b@x.com' after 'ab@x.com').
		known = self.email.split(",") if self.email else []
		if email in known:
			return
		known.append(email)
		self.email = ",".join(known)

	def OnProcessTitle(self, url, title):
		self.title = title
		
# NOTE(review): disabled manual test driver, kept for reference inside a
# no-op string literal. It still uses Python 2 'print' syntax and would
# need updating before being re-enabled.
'''if __name__ == '__main__':
	c	= BizCrawler();
	#c.Process("http://www.tnrcn.com/", 50, True)
	c.Process("http://www.airequipments.com/", 0, True)
	print c.title	
	#c.Process("http://www.airequipments.com/contact_us.html", 0, True)
	#company	= "TNR International Co., Ltd"
	#url = GoogleSearch().getCompanyHomepage(company)
	#c.Process(url, 2, True)
	#print c.email
'''
	
	
	
