#!/usr/bin/env python

"""Extract list of URLs in a web page

This program is part of "Dive Into Python", a free Python book for
experienced programmers.  Visit http://diveintopython.org/ for the
latest version.
"""

__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"

from sgmllib import SGMLParser
import urllib
import urlparse

class URLLister(SGMLParser):
	"""SGML parser that collects the href target of every <a> tag it is fed.

	After feed()/close(), the gathered targets are available (in document
	order) in the `urls` list attribute.
	"""

	def reset(self):
		"""Reset parser state and start over with an empty URL list."""
		SGMLParser.reset(self)
		self.urls = []

	def start_a(self, attrs):
		"""Handle an opening <a> tag: record each href attribute value."""
		for name, value in attrs:
			if name == 'href':
				self.urls.append(value)

if __name__ == "__main__":
	got_urls = {}

	#netloc = "www.baidu.com"
	netloc = "clashofclans.wikia.com"
	init_url = "http://%s" % netloc

	pending_urls = [init_url, ]
	while len(pending_urls) != 0:
		new_urls = {} #set
		for u in pending_urls:
			#print "GET %s" % u
			try:
				usock = urllib.urlopen(u)
				parser = URLLister()
				parser.feed(usock.read())
				parser.close()
				usock.close()
	
				for u2 in parser.urls:
					#print "u2", u2
					pr = urlparse.urlparse(u2)
					valid_path = True
					if pr.path.find(".") != -1:
						valid_path = False
					elif pr.path.find(":") != -1:
						valid_path = False
					u3 = ""
					if pr.scheme=="http" and pr.netloc==netloc and valid_path:
						pr2 = (pr.scheme, pr.netloc, pr.path, "", "", "")
						u3 = urlparse.urlunparse(pr2)
					elif pr.scheme=="" and pr.netloc=="" and pr.path.startswith("/") and valid_path:
						pr2 = ("http", netloc, pr.path, "", "", "")
						u3 = urlparse.urlunparse(pr2)
					if u3 != "":
						#print "u3", u3
						new_urls[u3] = True
			except:
				pass
			got_urls[u] = True
	
		#print new_urls
		pending_urls = []
		for u,_ in new_urls.items():
			if not got_urls.has_key(u) or not got_urls[u]:
				pending_urls.append(u)

	for u,_ in got_urls.items():
		print u 

