#!/usr/bin/python
# coding: utf-8

URL_IN = "http://weblogs.clarin.com/elecciones-2007/archives.html"
FILE_OUT = "clarin_elecciones_presidenciales.txt"

from xml.dom import ext
from xml.dom import implementation
from xml.dom.minidom import parse


#
# Rafa's "mensaje" (message) class, kept so the output format stays compatible
#

class mensaje:
    """One blog comment, serializable to/from a small XML fragment.

    Layout: <mensaje><metadatos autor=... fecha=.../><texto>...</texto></mensaje>
    All metadata fields are stored as plain attributes of the instance.
    """

    # Metadata fields mirrored as XML attributes of <metadatos>.
    _CAMPOS = ("autor", "fecha", "titulo", "id", "en_respuesta_a", "permalink")

    def __init__(self):
        # Every field defaults to the empty string; "texto" is the body.
        self.autor = ""
        self.fecha = ""
        self.titulo = ""
        self.id = ""
        self.en_respuesta_a = ""
        self.texto = ""
        self.permalink = ""

    def get_dom(self, doc):
        """Build and return a <mensaje> Element using `doc` as node factory."""
        root = doc.createElement("mensaje")
        meta = doc.createElement("metadatos")
        texto = doc.createElement("texto")
        root.appendChild(meta)
        root.appendChild(texto)
        texto.appendChild(doc.createTextNode(self.texto))
        for campo in self._CAMPOS:
            self._setattribute(meta, campo)
        return root

    def _setattribute(self, son, varname):
        # Copy one instance field onto the element as an XML attribute.
        son.setAttribute(varname, self.__dict__[varname])

    def _loadvar(self, son, varname):
        # Read one XML attribute back into the instance, if present.
        # FIX: attributes.has_key() is a deprecated dict API (removed in
        # Python 3); Element.hasAttribute() is the supported equivalent.
        if son.hasAttribute(varname):
            self.__dict__[varname] = son.attributes[varname].value

    def load_from_dom(self, root):
        """Populate this instance from a <mensaje> Element (inverse of get_dom)."""
        for son in root.childNodes:
            if son.nodeName == "metadatos":
                for campo in self._CAMPOS:
                    self._loadvar(son, campo)
            elif son.nodeName == "texto":
                # Only read the body when there is a real text child.
                if len(son.childNodes) != 0 and son.childNodes[0].nodeType == son.childNodes[0].TEXT_NODE:
                    self.texto = son.childNodes[0].data


from mechanize import Browser
import re
import datetime

#
# From "Septiembre 17, 2007  1:10 AM" to ('Septiembre', '17', '2007', '1', '10', 'AM') to  2007-09-17 01:10:00 [datetime]
#
def parse_date(date_str):
    """Parse a Spanish blog timestamp into a datetime.

    Example: "Septiembre 17, 2007  1:10 AM" -> datetime(2007, 9, 17, 1, 10).

    Raises AttributeError if the string does not match, KeyError for an
    unknown month name, ValueError for out-of-range date components.
    """
    month_sp = {'enero': 1, 'febrero': 2, 'marzo': 3, 'abril': 4,
                'mayo': 5, 'junio': 6, 'julio': 7, 'agosto': 8,
                'septiembre': 9, 'setiembre': 9, 'octubre': 10,
                'noviembre': 11, 'diciembre': 12}
    # Commented regexp, as http://pleac.sourceforge.net/pleac_python/patternmatching.html#AEN297
    date_str_parts = re.search(r"""([\w]+)          # capture month
                    [\s]*([\d]+)                    # spaces and capture day
                    .*?([\d]+)                      # whatever including colon and capture year
                    .*?([\d]+):([\d]+)              # trash and capture hh:mm
                    .*?([\w]+)                      # finally AM/PM
                    """, date_str, re.VERBOSE)
    g = date_str_parts.groups()
    year = int(g[2])
    month = month_sp[g[0].lower()]
    day = int(g[1])
    hour = int(g[3])
    minute = int(g[4])
    # BUGFIX: the original tested g[5].capitalize() == "PM", but
    # "PM".capitalize() is "Pm", so the test never matched and every PM time
    # was recorded as AM. Also handle the 12-hour edge cases: 12 AM is
    # midnight (0) and 12 PM stays 12 (naively adding 12 would crash datetime).
    ampm = g[5].upper()
    if ampm == "PM" and hour != 12:
        hour += 12
    elif ampm == "AM" and hour == 12:
        hour = 0
    return datetime.datetime(year, month, day, hour, minute, 0)

#
# Main
#

import sys

# Fetch the archive index page and make sure we actually got HTML back.
br = Browser()
br.open(URL_IN)
assert br.viewing_html()

# Gather every link pointing into the 2007 archive (the topic pages whose
# comments we want). The URLs come back with some trailing noise.
linksToComments = [link.url for link in br.links(url_regex=".*/archives/2007/.*")]

# De-duplicate by loading the URLs as dictionary keys (keys are unique);
# same trick as http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
linksToComments = dict.fromkeys(linksToComments).keys()

# One reusable mensaje instance; the crawl loop below refills its fields
# for every comment before serializing it.
m = mensaje()
# Create a throwaway doctype/document via the (PyXML) xml.dom
# `implementation` imported at the top; `doc` is only used as a node
# factory inside mensaje.get_dom().
dt = implementation.createDocumentType('noname', '', '')
doc = implementation.createDocument("any", "any", dt)
# Output file stays open for the whole crawl; closed at the end of the script.
fd = open(FILE_OUT, "w")

# XML header, in utf-8. The matching </corpus> is written at the very end.
fd.write("""<?xml version='1.0' encoding='utf-8'?>
<corpus>
""")

# Process all urls, linksToComments is a list of URL strings
for j in range(len(linksToComments)) :
	print "Getting link", j, "out of", len(linksToComments)
	linkurl = linksToComments[j]
	br.open(linkurl)
	base_url = re.sub('#comments$', '', linkurl) # Erase trailing trash

	# Guess charset to .encode() properly. Now deprecated.
	charset = re.findall("charset=([\S]+)", br.response().info()["Content-Type"])
	assert(len(charset)==1)
	charset = charset[0]

	body = br.response().read()
	title = re.findall("<title>(.*?)</title>", body, re.DOTALL)[0]
	#title = title.decode(charset) # Better use unicode() method
	comments_content = re.findall("<div class=\"comment-content\">(.*?)</div>", body, re.DOTALL)
	comments_footer = re.findall("<p class=\"comment-footer\">(.*?)</p>", body, re.DOTALL)
	assert(len(comments_content) == len(comments_footer))

	# Postprocessing
	for i in range(len(comments_content)) :
		print "  Getting comment", i, "out of", len(comments_content)
		c = comments_content[i]

		# Remove HTML tags, by John Lenton, http://mail.python.org/pipermail/python-list/2005-February/305119.html
		c = re.sub('<[^!>](?:[^>]|\n)*>', '', c)
		# One whitespace
		c = re.sub('[\s]+', ' ', c)

		f = comments_footer[i]
		parts = re.search(".*Publicado por:(.*?)\|.*<a href=\"(.*?)\">(.*?)</a>", f, re.DOTALL)
		author = parts.groups()[0].strip()
		author = re.sub('<[^!>](?:[^>]|\n)*>', '', author)
		ident = parts.groups()[1][1:] # take the "comment-" out
		permalink = base_url + parts.groups()[1]
		date = parse_date(parts.groups()[2].strip())

        	m.autor = unicode(author, 'iso-8859-1')
        	m.fecha = unicode(str(date), 'iso-8859-1')
        	m.titulo = unicode(title, 'iso-8859-1')
        	m.id = unicode(ident, 'iso-8859-1')
		m.permalink = unicode(permalink, 'iso-8859-1')
		#m.en_respuesta_a = unicode("missing", 'iso-8859-1')
        	m.texto = unicode(c, 'iso-8859-1')
		d = m.get_dom(doc)
		ext.PrettyPrint(d,fd,encoding='utf-8') # Finally everything to utf-8

fd.write("</corpus>")
fd.close()
