# This Python file uses the following encoding: utf-8
import nltk
import sys
import nltk
import glob
import fnmatch
import os
from BeautifulSoup import BeautifulStoneSoup
import matplotlib     
import matplotlib.pyplot as plt  
import math       
import random            
import copy         
import pylab       

from nltk.corpus.reader.wordnet import POS_LIST
from nltk.corpus import stopwords
  
# Absolute path to the plain-text copy of Darwin's "On the Origin of Species".
# NOTE(review): hard-coded Windows path -- must be adjusted per machine.
filename =  'C:\\Documents and Settings\\Administrador\\Escritorio\\INCC\\darwin\\origin.txt';

# Reference list of high-information words expected for this text,
# kept for manual comparison against the computed rankings.
top_words = ['on','species','varieties','hybrids','forms','islands','of','will','selection','genera','plants','seeds','sterility','fertility','characters','breeds','groups','water','the','formations']
  
def pos2wn(pos):
	"""Map a Penn Treebank POS tag to a WordNet POS letter.

	'JJ' (adjective) maps to 'a'; otherwise the lowercased first letter
	of the tag is used when it is a valid WordNet POS, falling back to
	'n' (noun) for everything else.
	"""
	# Guard: an empty tag would raise IndexError on pos[0]; treat it as a noun.
	if not pos:
		return 'n'
	if pos == 'JJ':
		return 'a'
	if pos[0].lower() in set(POS_LIST):
		return pos[0].lower()
	return 'n'
	
	

def es_puntuacion(c):
	"""Return True when token *c* should be discarded as punctuation.

	Any zero- or one-character token counts as punctuation (note this
	also drops one-letter words), as do a few multi-character residues
	left behind by the tokenizer.
	"""
	return len(c) <= 1 or c in ('""', '--', ').', '.""')
   		
	
def cargar_tokens(clean_stop_words = False):
	"""Load the text at the module-level *filename*, tokenize it and
	return the list of alphabetic, non-punctuation tokens.

	clean_stop_words -- when True, English stop words are also removed.
	"""
	global filename

	# Use a set so the membership test in the filter below is O(1)
	# instead of an O(n) list scan per token.
	if clean_stop_words:
		stop_words = set(stopwords.words('english'))
	else:
		stop_words = set()

	print('Procesando archivo origin.txt')
	# Close the file handle deterministically instead of leaking it.
	with open(filename, 'r') as fh:
		original = fh.read()
	# Skip the first 600 characters (file header / front matter).
	# NOTE(review): the text is NOT lowercased here, so lowercase English
	# stop words will not match capitalized occurrences -- confirm intended.
	original = original[600:]
	tokens = nltk.wordpunct_tokenize(original)

	tokens = [t for t in tokens
				if not es_puntuacion(t)
				and t.isalpha()
				and t not in stop_words]

	print(str(len(tokens)) + ' tokens cargados')
	return tokens
	
	
#3) Armar una funcion que reciba una lista de tokens, una lista de palabras y un tamano de ventana y devuelva una lista de probabilidades de encontrar la palabra en cada ventana para cada palabra pasada por parametro, .	
#3) Armar una funcion que reciba una lista de tokens, una lista de palabras y un tamano de ventana y devuelva una lista de probabilidades de encontrar la palabra en cada ventana para cada palabra pasada por parametro, .
def occurrence_probability(tokenized_text, words, window_size, ret_freq = False):
	"""Split *tokenized_text* into consecutive windows of *window_size*
	tokens and, for each word in *words*, compute the relative frequency
	of the word in each window.

	Returns a dict word -> {window index -> probability}, where each
	word's per-window frequencies are normalized to sum to 1.  Words that
	never occur get an empty inner dict.  With ret_freq=True the raw
	(unnormalized) relative frequencies are returned instead.
	"""
	# N = total token count; P = number of complete windows.
	N = len(tokenized_text)
	P = N // window_size   # floor division: identical on Python 2 and 3

	f = {}       # f[word][i]  = relative frequency of word in window i
	sum_f = {}   # sum_f[word] = sum of f[word][i] over all windows
	for word in words:
		sum_f[word] = 0
		f[word] = {}
		for i in range(P):
			# BUGFIX: the slice previously ended at (i+1)*window_size - 1,
			# silently dropping the last token of every window.
			window = tokenized_text[i * window_size:(i + 1) * window_size]
			f[word][i] = 1.0 * window.count(word) / window_size
			sum_f[word] = sum_f[word] + f[word][i]

	if ret_freq:
		return f

	# Normalize frequencies into probabilities per word; absent words
	# (sum_f == 0) keep an empty distribution, as before.
	p = {}
	for word in words:
		p[word] = {}
		for i in range(P):
			if sum_f[word] != 0:
				p[word][i] = 1.0 * f[word][i] / sum_f[word]

	return p
	
#4) Calcular la entropia de la distribucion de palabras seleccionadas para distintos tamanos de ventana
#5) Generar una version randomizada del texto, y medir la entropia de las palabras randomizadas.
#4) Calcular la entropia de la distribucion de palabras seleccionadas para distintos tamanos de ventana
#5) Generar una version randomizada del texto, y medir la entropia de las palabras randomizadas.
def entropy(t_text, words, window_size, randomize = False):
	"""Compute, for each word, its normalized Shannon entropy over the
	distribution of occurrences across windows of *window_size* tokens.

	NOTE(review): S accumulates sum(p * log p) WITHOUT the customary
	minus sign, so returned values are negative for words spread over
	more than one window -- the plotting code downstream already flags
	this; the convention is kept as-is to preserve behavior.

	randomize=True shuffles a copy of the text first (the caller's list
	is never mutated), giving the randomized null-model entropy.
	Requires at least two complete windows (math.log(P) must be nonzero).
	"""
	# Work on a deep copy so random.shuffle never mutates the caller's list.
	tokenized_text = copy.deepcopy(t_text)
	if randomize:
		print('Randomizando texto')
		random.shuffle(tokenized_text)

	N = len(tokenized_text)
	# Number of complete windows; floor division for Py2/Py3 parity.
	P = N // window_size

	p = occurrence_probability(tokenized_text, words, window_size)

	S = {}
	for word in words:
		S[word] = 0
		for i in p[word]:
			if p[word][i] != 0:
				S[word] = S[word] + (p[word][i] * math.log(p[word][i]))
		# Normalize by log(P) so |S| lies in [0, 1].
		S[word] = S[word] * (1.0 / math.log(P))

	return S
	
#6) Calcular la "information value" de las palabras seleccionadas, cuya definicion es: "The difference between the two entropies multiplied by the frequency of the word gives the word’s 'information value' in the text. Information value, just as in binary computing, is measured in bits."
def get_count(ctuple):
	"""Key function for sorted(): extract the numeric value (index 1)
	from a (word, value) pair so items can be ordered by their counts."""
	return ctuple[1]


def information_value(tokenized_text, words, window_size):
	"""Rank *words* by information value: the difference between ordered
	and randomized text entropy, weighted by the word's frequency.

	Returns the full list of (word, value) pairs sorted ascending, and
	prints the first 20 words of that ordering.
	"""
	print('Calculando entropias de texto ordenado')
	ordered_entropy = entropy(tokenized_text, words, window_size)

	# Entropy of shuffled (null-model) copies of the text, averaged over
	# cant_randoms independent shuffles (currently a single shuffle).
	cant_randoms = 1
	random_entropy = {}
	for i in range(cant_randoms):
		print('Calculando entropias de texto randomizado ' + str(i))
		random_entropy[i] = entropy(tokenized_text, words, window_size, True)

	random_mean = {}
	for word in words:
		total = 0
		for i in range(cant_randoms):
			total = total + random_entropy[i][word]
		random_mean[word] = 1.0 * total / cant_randoms

	# info value = word frequency * (ordered entropy - mean random entropy)
	N = len(tokenized_text)
	information_value = {}
	for word in words:
		frec = 1.0 * tokenized_text.count(word) / N
		information_value[word] = frec * (ordered_entropy[word] - random_mean[word])

	# Ascending sort by value (reverse=False), matching the original ordering.
	items = sorted(information_value.items(), key=get_count, reverse=False)

	print('Top 20 words (information values):')
	for item in items[:20]:
		print(item[0])

	return items

	
def get_top_n(words, n, count = False):
	"""Return the n most frequent items of *words*.

	words -- iterable of hashable tokens.
	n     -- how many top items to return.
	count -- when True, return (word, count) pairs instead of bare words.

	Order among equal counts is unspecified (as in the original).
	"""
	# Tally occurrences of each word.
	word_count = {}
	for word in words:
		word_count[word] = word_count.get(word, 0) + 1

	# Sort (word, count) pairs by count, most frequent first.
	items = sorted(word_count.items(), key=lambda wc: wc[1], reverse=True)

	if count:
		return items[:n]
	return [w for (w, c) in items[:n]]
	
#7) Distinguir las palabras del texto en articulos, sustantivos y adjetivos usando un POS-tagger. Verificar si la entropia separa a estos grupos de palabras.
def verify_rol_clustering():
	"""Exercise 7: POS-tag the 2000 most frequent words and plot
	(1 - S[w]) * n(w) against n(w) per POS class, to check whether the
	normalized entropy S separates articles, nouns and adjectives."""
	global filename
	

	tokenized_text = cargar_tokens()
	#tokenized_text = tokenized_text[:6000]
	
	# Fake shortcut: take any 50 distinct words instead of the top 2000.
	#words = list(set(tokenized_text))[:50]
	
	# This is what (I understood) the clustering paper did
	# (it says something else, but from context it is this).
	#print 'Obteniendo 2000 palabras con mayor tasa de ocurrencia...'
	words = get_top_n(tokenized_text, 2000)
	#words = list(set(tokenized_text))	
	print 'Calculando entropia...'        
	# Entropy per word over windows of 2500 tokens.
	S = entropy(tokenized_text, words, 2500)
	
	     
	print 'Taggeando las 2000 palabras...'
	wnl = nltk.WordNetLemmatizer()
	original = open(filename,'r').read()
	original = original[600:].lower()
	sentences = nltk.sent_tokenize(original)
	pos_tagged = []
	i = 1
	print 'Len words: '+ str(len(words))
	# For each sentence, find the first still-pending top word it contains;
	# POS-tag that sentence and record the tag of the matched word.
	for s in sentences:
	
		if len(words) == 0:
			break

		analizar = False
		for w in s.split():
			if w in words:
				analizar = True
				words.remove(w)
				break
				
		if analizar:
			tokens = nltk.wordpunct_tokenize(s)
			#and (p[0].lower() == 'a' or p[0].lower() == 'n' or p[0].lower() == 'd')
			# NOTE(review): `w` leaks from the loop above -- this keeps only
			# tagger tokens equal to the matched word; `aux` could be empty
			# (tokenizer mismatch), which would make aux[0] below raise.
			aux =  [(w2,p[0].lower()) for (w2,p) in nltk.pos_tag(tokens) if w == w2 ]
		
			pos_tagged = pos_tagged + aux  
			print str(i) + ')'+ aux[0][0] + '\t'+aux[0][1] + '\t'+ str(S[w]) 
			i = i + 1
	
	pos_tagged = list(set(pos_tagged))
	
	# Plot S grouped by POS tag: j=adjective (blue), n=noun (red),
	# d=determiner (magenta); values are matplotlib style strings.
	tags = {'j': 'bo', 'n':'ro', 'd':'mo'}
	
	x = {}
	y = {}
	for unTag in tags:
		x[unTag] = []
		y[unTag] = []
		
	pylab.axes([0.1, 0.15, 0.8, 0.75])
	
	print 'Calculando (1-S)n y n...'
	
	for (w,t) in pos_tagged:
		# NOTE(review): dict.keys().count(t) only works on Python 2
		# (list); on Python 3 use `t in tags` instead.
		if tags.keys().count(t) != 0:
		 # Something is off with S: it is negative (entropy() omits the
		 # customary minus sign), so (1-S)*n can exceed n.
		 n = 1.0 * tokenized_text.count(w)
		 y[t].append((1-S[w]) * n)
		 x[t].append(n)
			
	for unTag in tags:	
		pylab.plot(x[unTag], y[unTag], tags[unTag], label= unTag)
		pylab.legend()
		pylab.show()
	
	return 
	
	# NOTE(review): everything below is unreachable (after the return
	# above) -- an older variant that plotted 1-S alone.
	# Computing 1-S
	for (w,t) in pos_tagged:
		if tags.keys().count(t) != 0:
		   x[t].append(1-S[w])	
	
	
	# Plotting 1-S
	for unTag in tags:
			pylab.plot(x[unTag],tags[unTag], label= unTag)
			pylab.legend()
			pylab.show()	
		
	
	 		
	
	 		
def main():
	"""Script entry point: run the POS/entropy clustering verification."""
	verify_rol_clustering()


if __name__ == "__main__":
	main()