"""Extract list of URLs in a web page

This program is part of "Dive Into Python", a free Python book for
experienced programmers.  Visit http://diveintopython.org/ for the
latest version.
"""

__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"

from sgmllib import SGMLParser

class URLLister(SGMLParser):
	"""SGML parser that records hrefs together with the text around them.

	``handle_data`` fires for *every* text node in the document, not only
	for text inside an ``<a>`` element, so each entry flushed into
	``urls_names`` at ``end_a`` has the shape::

	    [text-since-last-anchor..., href, anchor-text...]

	Callers index into each entry positionally (the ``__main__`` block of
	this file uses ``entry[1]`` as the href and ``entry[2]`` as the link
	text), so that accumulation behavior is deliberately preserved.
	"""

	def reset(self):
		# (Re)initialise accumulators; SGMLParser.__init__ calls reset(),
		# so this also serves as the constructor-time setup.
		SGMLParser.reset(self)
		self.urls_names = []  # completed entries, one list per </a> seen
		self.url_name = []    # in-progress accumulator for the next entry

	def start_a(self, attrs):
		# Pull every href attribute off the opening <a> tag (attrs is a
		# list of (name, value) pairs; names arrive lowercased).
		href = [v for k, v in attrs if k == 'href']
		if href:
			self.url_name.extend(href)

	def handle_data(self, text):
		# NOTE(review): intentionally collects ALL document text, even
		# outside anchors — downstream indexing relies on it.
		self.url_name.append(text)

	def end_a(self):
		# Flush the current accumulator on </a> and start a fresh one.
		self.urls_names.append(self.url_name)
		self.url_name = []

if __name__ == "__main__":
	import urllib
	usock = urllib.urlopen("http://www.douban.com/people/beck917/contact_list")
	parser = URLLister()
	parser.feed(usock.read())
	parser.close()
	usock.close()
	people = []
	people_name = []
	#print parser.urls_names[50]
	
	for url_name in parser.urls_names: 
		#url_list = url[1].split('/')
		#print url
		#print "\r\n"
		try:
		    url_list = url_name[1].split('/')
		    if(url_list[1] == 'people'):
				people.append([url_list[2],url_name[2]])
				print url_name[2].decode('utf-8').encode('gbk')
		except  Exception, e:
			pass
		
	print people
	

		
