import sys
import nltk
import glob
import fnmatch
import os
from BeautifulSoup import BeautifulStoneSoup
import matplotlib                                          

from nltk.corpus.reader.wordnet import POS_LIST
from nltk.corpus import stopwords

def get_count(word_count_tuple):
  """Sort key for (word, count) pairs: return the count element."""
  word, count = word_count_tuple
  return count
  
def pos2wn(pos):
	"""Translate a Penn Treebank POS tag into a WordNet POS letter.

	'JJ' maps to 'a' (adjective); otherwise the tag's lower-cased initial
	is used when it is a valid WordNet POS, with noun ('n') as fallback.
	"""
	if pos == 'JJ':
		return 'a'
	inicial = pos[0].lower()
	return inicial if inicial in set(POS_LIST) else 'n'

def get_tokens(filename):
	"""Tokenize the cable body of *filename*.

	Keeps only the text from the "<table class='cable'>" marker onward,
	strips the HTML, normalizes leftover entities via BeautifulStoneSoup,
	lower-cases the text and splits it with nltk.wordpunct_tokenize.

	Returns [] when the file contains no cable table.
	"""
	# 'with' guarantees the file handle is closed (the original leaked it).
	with open(filename, 'r') as fh:
		original = fh.read()
	ind = original.find("<table class='cable'>")
	if ind == -1:
		return []
	sin_html = nltk.clean_html(original[ind:])
	# Re-parse to convert the XML special chars clean_html leaves behind.
	texto = BeautifulStoneSoup(sin_html,
		convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
	texto = texto.lower().replace('&#x000a;', '\n')
	return nltk.wordpunct_tokenize(texto)

def contar_palabras(filename):
	"""Return the number of whitespace-separated words in the cable body of *filename*.

	Returns 0 for files without a "<table class='cable'>" marker.  The
	original implicitly returned None in that case, which crashed the
	word-count accumulation in exercises 1/2 (``total + None``).
	"""
	# 'with' guarantees the file handle is closed (the original leaked it).
	with open(filename, 'r') as fh:
		original = fh.read()
	ind = original.find("<table class='cable'>")
	if ind == -1:
		return 0
	sin_html = nltk.clean_html(original[ind:])
	# Re-parse to convert the XML special chars clean_html leaves behind.
	texto = BeautifulStoneSoup(sin_html,
		convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
	texto = texto.lower().replace('&#x000a;', '\n')
	return len(texto.split())

def tiene_mas_de_tres_vocales(palabra):
    """True when *palabra* contains MORE than three (lower-case) vowels.

    The original used ``>= 3``, which also accepted words with exactly
    three vowels — contradicting both the function's name and the
    statement of exercise 5 ("mas de 3 vocales").
    """
    return sum(1 for c in palabra if c in "aeiou") > 3

def es_puntuacion(c):
	"""True when token *c* is pure punctuation (or empty) and should be filtered.

	The original had a stray ``or len(c) == 1`` clause that classified
	EVERY single character — letters and digits included — as punctuation,
	making the explicit character-list check unreachable in practice.
	"""
	# Multi-character punctuation artifacts left by the tokenizer.
	if c in ('""', '--', ').', '.""', ''):
		return True
	return len(c) == 1 and c in "-.'?!,\":;()|-/"
   

def get_archivos(cuales='ejemplo'):
	"""Return the list of cable HTML file paths selected by *cuales*.

	Selectors:
	  'ejemplo'    -- *.html in the current directory (default)
	  'bsas'       -- files under wikis/ whose name contains BUENOSAIRES
	  'bsas_pocos' -- like 'bsas' but only BUENOSAIRES170* files
	  'todos'      -- every *.html under wikis/
	  anything else is taken as a subdirectory of wikis/cable/ to walk.

	The original repeated the same os.walk loop four times; the walking
	logic now lives in a single private helper.
	"""
	print('Buscando archivos: ' + cuales)

	def _walk_html(raiz, contiene=None):
		# Collect every *.html under raiz whose basename contains *contiene*
		# (no filter when contiene is None).
		encontrados = []
		for root, dirnames, filenames in os.walk(raiz):
			for filename in fnmatch.filter(filenames, '*.html'):
				if contiene is None or filename.find(contiene) != -1:
					encontrados.append(os.path.join(root, filename))
		return encontrados

	if cuales == 'bsas':
		return _walk_html('wikis', 'BUENOSAIRES')
	if cuales == 'ejemplo':
		return glob.glob('*.html')
	if cuales == 'todos':
		return _walk_html('wikis')
	if cuales == 'bsas_pocos':
		return _walk_html('wikis', 'BUENOSAIRES170')
	# Any other value names a wikis/cable/ subdirectory (e.g. '2004/07').
	return _walk_html('wikis/cable/' + cuales)

# A single command-line argument selects which exercise to run.
if len(sys.argv) != 2:
	print('Uso: ./ejercicios_nltk.py nro_ejercicio')
	sys.exit(1)


# Exercise number, kept as the raw string given on the command line.
ej = sys.argv[1]

print('Ejercicio ' + ej)

# Lemmatizer shared by exercises 9 and 10.
wnl = nltk.WordNetLemmatizer()

# Default file set: the sample *.html files in the working directory.
files = get_archivos()


def _filtrar_tokens(tokens, stop_words=None):
	# Drop punctuation tokens and, when given, stopwords.
	if stop_words is not None:
		tokens = [w for w in tokens if not w in stop_words]
	return [t for t in tokens if not es_puntuacion(t)]

def _tokens_de_archivos(archivos):
	# Tokenize every file, announcing each one as the original script did.
	juntados = []
	for f in archivos:
		print('Procesando archivo ' + f)
		juntados.extend(get_tokens(f))
	return juntados

#1)Cargar los documentos y separar las palabras.
#2)Contar la cantidad de palabras totales y la cantidad de palabras por documento.
if ej == '2' or ej == '1':
	total_palabras = 0
	cant_palabras = {}
	for f in files:
		# contar_palabras may yield None for files without a cable table;
		# treat that as 0 so the running total never crashes.
		cant_palabras[f] = contar_palabras(f) or 0
		total_palabras = total_palabras + cant_palabras[f]

	print('Archivo			|	Cant. Palabras')
	for k in cant_palabras:
		print(k + '	|	' + str(cant_palabras[k]))
	print('Total			|	' + str(total_palabras))

#3)Realizar un grafico de frecuencias de las 50 palabras mas frecuentes
elif ej == '3':
	tokens = _filtrar_tokens(_tokens_de_archivos(files))
	nltk.FreqDist(tokens).plot(50)

#4)Realizar un grafico de frecuencias de las 50 palabras mas frecuentes que no sean stopwords
elif ej == '4':
	stop_words = stopwords.words('english')
	tokens = _filtrar_tokens(_tokens_de_archivos(files), stop_words)
	nltk.FreqDist(tokens).plot(50)

#5)Buscar todas las palabras que tengan mas de 3 vocales
elif ej == '5':
	tokens = [w for w in _tokens_de_archivos(files) if tiene_mas_de_tres_vocales(w)]
	tokens = _filtrar_tokens(tokens)
	nltk.FreqDist(tokens).plot(50)

#6)Buscar los documentos provenientes de Buenos Aires
elif ej == '6':
	files = get_archivos('bsas')
	for f in files:
		print('Encontre archivo ' + f)

#7)Graficar la frecuencia de palabras interesantes (no stopwords) para los documentos de Bs. As.
elif ej == '7':
	stop_words = stopwords.words('english')
	files = get_archivos('bsas')
	tokens = _filtrar_tokens(_tokens_de_archivos(files), stop_words)
	nltk.FreqDist(tokens).plot(50)

#8)Hacer un grafico de dispersion de palabras clave (dispersion_plot) para los doc de Bs. As.
elif ej == '8':
	stop_words = stopwords.words('english')
	files = get_archivos('bsas')
	tokens = []
	for f in files:
		print('Procesando archivo ' + f)
		# Filter each file's tokens once and then accumulate.  The original
		# re-counted the WHOLE accumulated token list after every file,
		# inflating the counts of words from earlier files.
		tokens.extend(_filtrar_tokens(get_tokens(f), stop_words))

	word_count = {}
	for token in tokens:
		word_count[token] = word_count.get(token, 0) + 1

	items = sorted(word_count.items(), key=get_count, reverse=True)
	top_words = [palabra for (palabra, cuenta) in items[:50]]
	nltk.Text(tokens).dispersion_plot(top_words)

#9)Tomar un subconjunto de documentos, y lematizarlos
#10)Tomando los textos lematizados hacer analisis de frecuencia.
elif ej == '9' or ej == '10':
	stop_words = stopwords.words('english')
	stem_words = []
	files = get_archivos('2004/07')
	for f in files:
		print('Procesando archivo ' + f)
		with open(f, 'r') as fh:
			original = fh.read()
		ind = original.find("<table class='cable'>")
		if ind == -1:
			continue
		sin_html = nltk.clean_html(original[ind:])
		texto = BeautifulStoneSoup(sin_html,
			convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
		texto = texto.lower().replace('&#x000a;', '\n')

		# Lemmatize sentence by sentence so pos_tag sees sentence context.
		# NOTE: the original reset stem_words inside this loop, so only the
		# LAST file's lemmas survived; lemmas now accumulate across files.
		for s in nltk.sent_tokenize(texto):
			for (w, p) in nltk.pos_tag(nltk.wordpunct_tokenize(s)):
				stem_words.append(wnl.lemmatize(w, pos2wn(p)))

	stem_words = _filtrar_tokens(stem_words, stop_words)
	nltk.FreqDist(stem_words).plot(50)

else:
	print('Fail: nro_ejercicio in [1..10]')