#!/usr/bin/python
# coding: utf-8

URLS_IN = ["http://www.presidentelavagna.com/index.php"]
FILE_OUT = "paginas_candidatos.txt"

from xml.dom import ext
from xml.dom import implementation
from xml.dom.minidom import parse



from mechanize import Browser
import re
import datetime


def getUpTo(rootUrl,nestLevel) :
	br = Browser()
	br.open(rootUrl)
	visited =  [rootUrl] # list of strings
	toVisit = [] # list of links
	for l in br.links(url_regex) :
		toVisit.append(l)
	while len(toVisit)>0 :
		assert(br.viewing_html())
		current = toVisit.pop()
		visited.append(current.url)
		if nestLevel>0 :
			print "Following link:", current.url
			br.follow_link(current)
			for l in br.links() :
				toVisit.append(l)
		else :
			br.back()
	return visited

#
# Main
#

import sys

# Crawl one level deep from the first seed URL and print every URL reached.
print getUpTo(URLS_IN[0],1)
# NOTE(review): this exit makes everything below in the file unreachable.
# sys is imported above but the interactive builtin exit() is used here
# instead of the conventional sys.exit(0).
exit(0)

# --- Unreachable (see exit(0) above): per-seed link harvesting. ---
br = Browser()
for url in URLS_IN :
	br.open(url)
	if br.viewing_html() :
		# Collect every link URL found on the page
		links = []
		for link in br.links(): 
		    links.append(link.url)
		# Deduplicate (set() discards the original ordering)
		links = list(set(links))
		print links
		exit(0)


#for j in range(len(linksToComments)) :
#	print linksToComments[j]
#exit(0)

# NOTE(review): everything below is unreachable -- exit(0) is called earlier
# in this script.  It also references names not defined anywhere in this
# file (mensaje, linksToComments, parse_date); presumably they came from a
# sibling module or an earlier revision -- TODO confirm before reviving.

# Record object holding one scraped comment; fields autor, fecha, titulo,
# id, permalink, texto are assigned below -- assumed schema, verify against
# the mensaje class definition.
m = mensaje()
#Create a doctype using document type name, sysid and pubid
dt = implementation.createDocumentType('noname', '', '')
doc = implementation.createDocument("any", "any", dt)
fd = open(FILE_OUT, "w")

# XML header, in utf-8
fd.write("""<?xml version='1.0' encoding='utf-8'?>
<corpus>
""")

# Process all urls, linksToComments is a list of URL strings
for j in range(len(linksToComments)) :
	print "Getting link", j, "out of", len(linksToComments)
	linkurl = linksToComments[j]
	br.open(linkurl)
	base_url = re.sub('#comments$', '', linkurl) # Erase trailing trash

	# Guess charset to .encode() properly. Now deprecated.
	charset = re.findall("charset=([\S]+)", br.response().info()["Content-Type"])
	assert(len(charset)==1)
	charset = charset[0]

	# Scrape the page title and the per-comment content/footer pairs
	# straight out of the raw HTML with regexes.
	body = br.response().read()
	title = re.findall("<title>(.*?)</title>", body, re.DOTALL)[0]
	#title = title.decode(charset) # Better use unicode() method
	comments_content = re.findall("<div class=\"comment-content\">(.*?)</div>", body, re.DOTALL)
	comments_footer = re.findall("<p class=\"comment-footer\">(.*?)</p>", body, re.DOTALL)
	# Content and footer lists must line up one-to-one by comment
	assert(len(comments_content) == len(comments_footer))

	# Postprocessing
	for i in range(len(comments_content)) :
		print "  Getting comment", i, "out of", len(comments_content)
		c = comments_content[i]

		# Remove HTML tags, by John Lenton, http://mail.python.org/pipermail/python-list/2005-February/305119.html
		c = re.sub('<[^!>](?:[^>]|\n)*>', '', c)
		# Collapse all whitespace runs to a single space
		c = re.sub('[\s]+', ' ', c)

		# Footer is expected to look like:
		#   "... Publicado por: AUTHOR | <a href="#comment-ID">DATE</a>"
		f = comments_footer[i]
		parts = re.search(".*Publicado por:(.*?)\|.*<a href=\"(.*?)\">(.*?)</a>", f, re.DOTALL)
		author = parts.groups()[0].strip()
		author = re.sub('<[^!>](?:[^>]|\n)*>', '', author)
		ident = parts.groups()[1][1:] # strip the leading '#' of the "#comment-..." fragment
		permalink = base_url + parts.groups()[1]
		date = parse_date(parts.groups()[2].strip())

		# Populate the record; every field is decoded from latin-1 to
		# unicode before serialization.  (Original lines mix tabs and
		# spaces for indentation -- kept byte-identical.)
        	m.autor = unicode(author, 'iso-8859-1')
        	m.fecha = unicode(str(date), 'iso-8859-1')
        	m.titulo = unicode(title, 'iso-8859-1')
        	m.id = unicode(ident, 'iso-8859-1')
		m.permalink = unicode(permalink, 'iso-8859-1')
		#m.en_respuesta_a = unicode("missing", 'iso-8859-1')
        	m.texto = unicode(c, 'iso-8859-1')
		d = m.get_dom(doc)
		ext.PrettyPrint(d,fd,encoding='utf-8') # Finally everything to utf-8

fd.write("</corpus>")
fd.close()
