

# this file was modified / mangled in the process of using it.
# it won't do anything at all if run except choke on all the
# files / classes it can't find, but has been left here for
# reference.  The take-home lesson is that if you are scraping
# HTML you ought to use the BeautifulSoup library.

# if you choose to use the default Python parser, it will
# evaporate instantly on contact with html which was not written
# to the exact standard put forth in the W3C specification.
# for instance: all html which is not on the W3C website.




from BeautifulSoup import BeautifulSoup
import body_parser
import urllib




# Shared parser instance: each scraped page below is fed into it via
# feed(), and its accumulated state is flushed with dump_to_db(name).
p = body_parser.BodyParser()
# contents = open('tmp')
# soup = BeautifulSoup(contents)
# p.feed(soup.prettify())
# 
# p.dump_to_db()


links = open('filteredlinks')
for line in links :
	try :
		print line.rstrip()
		if len(line.rstrip()) == 0 :
			continue		# urllib chokes on trying to open a local file called ''
	
		contents = urllib.urlopen(line.rstrip())
		soup = BeautifulSoup(contents)
		p.feed(soup.prettify())

		nondelineated = line.split('/')[-1].split('-')[:-1]
		name = ''
		for word in nondelineated :
			name += word
			name += ' '

		p.dump_to_db(name)

	except :
		pass # table already exists






# page = '&pg='
# for curr in range(2,119) :
# 	print "starting page " + str(curr)
# 	url = prefix + page + str(curr) + suffix
# 	contents = urllib.urlopen(url)
# 	soup = BeautifulSoup(contents)
# 	p.feed(soup.prettify())


