# -*- coding: utf8 -*-
import dialog
import urllib, re, os, sys, time
from urlparse import urlparse

class Spider:
	"""Level-by-level web spider restricted to URLs under a starting path.

	``_urls`` holds the frontier for the current depth level,
	``_urls_cached`` collects links discovered for the next level, and
	``_old_urls`` remembers every URL already visited or rejected so no
	page is processed twice.  Only links that resolve under
	``_url_header`` (the "directory" of the starting URL) are followed.
	"""

	def __init__(self, firstUrl, depth):
		"""Prepare the crawl frontier.

		firstUrl -- starting URL; "http://" is prepended when missing.
		depth    -- how many link levels to follow before stopping.
		"""
		print("depth: %s" % depth)
		self._urls = []         # frontier for the current depth level
		self._urls_cached = []  # links queued for the next depth level
		self._old_urls = []     # everything already seen (visited or rejected)
		if not firstUrl.startswith("http://"):
			firstUrl = "http://" + firstUrl
		# _url_header is the directory prefix every crawled URL must share.
		parts = firstUrl.split('/')
		if '.' in parts[-1]:
			# Last path component looks like a file name -> drop it.
			self._url_header = '/'.join(parts[:-1])
		else:
			self._url_header = '/'.join(parts)
			if not firstUrl.endswith('/'):
				firstUrl += '/'
		if not self._url_header.endswith('/'):
			self._url_header += '/'
		self._urls.append(firstUrl)
		self._depth = depth
		self._done = False

	def isDone(self):
		"""Return True once the crawl has finished (or aborted on error)."""
		return self._done

	def scanOnePage(self):
		"""Fetch one pending URL and harvest in-scope links from it.

		Returns a list of newly discovered URLs, or None when the frontier
		is exhausted or the depth budget is spent (isDone() then reports
		True).  A fetch failure (IOError) aborts the whole crawl.
		"""
		# Current level exhausted: spend one unit of depth and promote
		# the next level's links to the active frontier.
		if not self._urls:
			self._depth = self._depth - 1
			self._urls = self._urls_cached
			self._urls_cached = []
		if not self._urls:
			self._done = True
			return None
		if self._depth <= 0:
			self._done = True
			return None
		cur_url = self._urls.pop()
		self._old_urls.append(cur_url)
		# Directory of the current page, used to resolve relative links.
		my_url_head = cur_url[:cur_url.rindex('/') + 1]
		result = []
		try:
			server = urllib.urlopen(cur_url)  # Python 2 API (file-like page)
			page = ''.join(server.readlines())
			page = re.sub(r'\n|\r|\t|&nbsp;', '', page)
			anchors = re.findall(r'< *a +href[^>]*>', page)
			hrefs = re.findall(r'href= *"[^"]*"', ''.join(anchors))
			while hrefs:
				link = hrefs.pop()
				# Strip the href=" prefix and the closing quote.
				link = re.sub(r'(href *=[^"]*"|"[^"]*)', '', link)
				# Drop fragments and query strings.
				link = re.sub(r"(#.*|\?.*)", '', link)
				if urlparse(link)[1]:
					# Absolute URL: keep it only when it stays in scope.
					if link.startswith(self._url_header):
						result.append(link)
				elif not link.startswith("/"):
					# Relative link: resolve against the current directory.
					# NOTE(review): root-relative links ("/...") are silently
					# dropped here — confirm that is intended.
					link = my_url_head + link
					while link.endswith("//"):
						link = link[:-1]
					result.append(link)
		except IOError:
			print("scan failed")
			self._done = True
		# Filter out anything already queued or visited.
		result2 = []
		while result:
			candidate = result.pop()
			if candidate in self._old_urls or candidate in self._urls or \
					candidate in self._urls_cached:
				continue
			result2.append(candidate)
			# Only pages likely to contain more HTML feed the next level;
			# everything else is marked as seen and never fetched.
			if re.search(
					r"(\?.*|\.(html|php|asp|jsp|action|htm|py|pl|cgi))",
					candidate) or candidate.endswith('/'):
				self._urls_cached.append(candidate)
			else:
				self._old_urls.append(candidate)
		return result2

def main():
	"""Drive the spider: pull start URL/depth from dialog, push results back."""
	print("start file scanner....")
	spider = Spider(dialog.getDest(), dialog.getDepth())
	while dialog.keepRunning():
		found = spider.scanOnePage()
		if not found:
			# No links this round: either mid-crawl (keep going) or done.
			if spider.isDone():
				print("finished...")
				break
			continue
		print("get once")
		for url in found:
			dialog.putUrl(url)

if __name__ == "__main__":
	# Entry point; the trailing semicolon served no purpose and was removed.
	main()
