import glob
import math
from collections import Counter

import nltk

# Shared WordNet lemmatizer used by the tokenizing helpers below.
wnl = nltk.stem.WordNetLemmatizer()
# English stopwords plus corpus-specific noise tokens (punctuation,
# separator runs, and cable-specific artifacts) dropped before counting.
stopset = set(nltk.corpus.stopwords.words('english'))
stopset |= set(['diego','diego2','(',')',':',',','.','-','\"', '----------------------------', '*','xxxxxxxxxxxx','\'s'])

	
def lemmatizeCable(cable):
	"""Tokenize `cable`, drop every token found in the module-level
	`stopset`, and return the remaining tokens lemmatized with WordNet,
	preserving their original order."""
	return [wnl.lemmatize(tok)
	        for tok in nltk.word_tokenize(cable)
	        if tok not in stopset]
	
def probabilidadPalabra(palabra, corpus):
	"""Relative frequency of `palabra` in `corpus` (a sequence of tokens).

	Returns 0.0 for an empty corpus instead of raising ZeroDivisionError
	(the original crashed on an empty token list).
	"""
	if not corpus:
		return 0.0
	return corpus.count(palabra)/float(len(corpus))
	
def probabilidadConjunta(x, y, corpus, windowSize):
	"""Joint probability estimate of x and y co-occurring within
	`windowSize` tokens of each other, over `corpus`.

	NOTE(review): `contarPalabras` is not defined anywhere in this file;
	unless it is provided elsewhere, calling this raises NameError — verify.
	"""
	pares = contarPalabras(x, y, corpus, windowSize)
	return pares / float(len(corpus))

def informacionConjunta(x, y, corpus, windowSize):
	"""Pointwise mutual information (in bits) between x and y over corpus.

	Returns 0 when the pair never co-occurs, since log(0) is undefined.
	"""
	conjunta = probabilidadConjunta(x, y, corpus, windowSize)
	if not conjunta:
		return 0
	marginales = probabilidadPalabra(x, corpus) * probabilidadPalabra(y, corpus)
	return math.log(conjunta / marginales, 2)


#armado de la matriz de coocurrencia
def matrizCoocurrencia(words, windowSize):
	"""Build the ordered co-occurrence matrix of `words` and convert the
	raw pair counts to pointwise mutual information (PMI), in bits.

	words:      sequence of tokens (order matters; pairs are directional).
	windowSize: maximum forward distance between two tokens for them to
	            count as co-occurring.
	Returns:    dict-of-dicts where
	            matriz[w1][w2] = log2(P(w1,w2) / (P(w1) * P(w2))).
	"""
	matriz = {}
	cantPalabras = len(words)

	# Count ordered pairs (words[j], words[j+i]) for every offset 1..windowSize.
	for i in range(1, windowSize + 1):
		for j in range(cantPalabras - i):
			fila = matriz.setdefault(words[j], {})
			word2 = words[j + i]
			fila[word2] = fila.get(word2, 0) + 1

	# Replace raw counts with PMI.  collections.Counter (stdlib) is all
	# that is needed for plain frequency counts; nltk.FreqDist was overkill.
	freqs = Counter(words)
	for w1 in matriz:
		for w2 in matriz[w1]:
			probConjunta = matriz[w1][w2] / float(cantPalabras)
			probW1 = freqs[w1] / float(cantPalabras)
			probW2 = freqs[w2] / float(cantPalabras)
			matriz[w1][w2] = math.log(probConjunta / (probW1 * probW2), 2)

	return matriz

### TODO: CHECK! || Aliasing en matriz
def matrizInfoConjunta(matriz, words):
	"""Convert a raw co-occurrence count matrix to pointwise mutual
	information (bits), IN PLACE.

	matriz: dict-of-dicts of pair counts.  It is mutated and also
	        returned (this resolves the old "aliasing" TODO: callers do
	        NOT get an independent copy).
	words:  the token sequence the counts were taken from.
	Returns: the same `matriz` object, values replaced by PMI.
	"""
	cantPalabras = len(words)
	# collections.Counter (stdlib) suffices for plain frequency counts;
	# nltk.FreqDist is not needed here.
	freqs = Counter(words)
	for w1 in matriz:
		for w2 in matriz[w1]:
			probConjunta = matriz[w1][w2] / float(cantPalabras)
			probW1 = freqs[w1] / float(cantPalabras)
			probW2 = freqs[w2] / float(cantPalabras)
			matriz[w1][w2] = math.log(probConjunta / (probW1 * probW2), 2)

	return matriz




def show(matriz, writefile=False, filename='matrizDump'):
	"""Pretty-print the matrix: one `key =>` header per row, then one
	indented `word<TAB>value` line per entry whose value exceeds 5.

	matriz:    dict-of-dicts as produced by matrizCoocurrencia.
	writefile: when True, write to `filename` instead of stdout.

	Fixes: the original never closed `filedump`, leaking the file handle
	(and risking unflushed output).  Print calls use single-argument
	parentheses so the same text is emitted under Python 2 and 3.
	"""
	filedump = open(filename, 'w') if writefile else None
	try:
		for claves in matriz.keys():
			if filedump:
				filedump.write(claves + " =>\n")
			else:
				print(claves + " =>")
			for significados in matriz[claves].keys():
				# Only "strong" entries (> 5) are worth showing.
				if matriz[claves][significados] > 5:
					linea = "\t\t" + significados + "\t" + str(matriz[claves][significados])
					if filedump:
						filedump.write(linea + "\n")
					else:
						print(linea)
	finally:
		if filedump:
			filedump.close()



def LSA(path):
	""" Latent semantic analysis over a set of documents.

	FIXME(review): this function looks unfinished.  `matriz` is never
	assigned anywhere in the body, so the final `return matriz` raises
	NameError; `wordset` is built but never used; and the commented-out
	loop suggests the term/document matrix construction was never written.
	Do not call this as-is.
	"""
	
	# Every *.html file under `path` is treated as one cable document.
	files = glob.glob(path+'/*.html')
	totalWords = []        # lemmatized, stopword-filtered tokens over ALL files
	completeText = ''      # NOTE(review): assigned here but never used
			
	for fn in files:
		
		f = open(fn,'r')
		text = f.read()
		f.close()
		text = text.lower()

		# Locate the start of the cable payload inside the HTML page.
		# NOTE(review): the bare `except` hides every error, not just a
		# missing marker (str.index raises ValueError) — narrow it.
		try:
			ind = text.index("<table class='cable'>")
		except:
			print ('Hubo un error en la busqueda')
			continue
		solo_cable = text[ind:-1]
		# Strip encoded line feeds, then all HTML markup.
		solo_cable = solo_cable.replace('&#x000a;','')
		solo_cable_limpio = nltk.clean_html(solo_cable)
		
			
		# Same filtering/lemmatizing pipeline as lemmatizeCable().
		for w in nltk.word_tokenize(solo_cable_limpio):
			if w not in stopset:
				totalWords.append(wnl.lemmatize(w))
				#words.append(w)
		
	
		
	wordset = set(totalWords)  # vocabulary; currently unused (see FIXME)
	
	#for i in range(len(wordset)):
	
								
	return matriz  # FIXME: NameError — `matriz` is not defined in this scope
	
	
	
def procesarTodos(path, writefile=False):
	"""Process every *.html cable under `path`: extract the cable table,
	strip the HTML, tokenize, and accumulate the filtered tokens of ALL
	files.

	writefile: when True, also dump each cleaned cable next to its source
	           file with a .txt extension.
	Returns:   list of tokens (stopwords/punctuation removed, NOT
	           lemmatized) from every successfully parsed file.

	Fixes vs. the original: `return words` sat INSIDE the for loop, so
	only the first file was ever processed; the bare `except` is narrowed
	to ValueError (what str.index raises); the dead `completeText`
	variable is gone.
	"""
	files = glob.glob(path + '/*.html')
	words = []

	for fn in files:
		f = open(fn, 'r')
		try:
			text = f.read()
		finally:
			f.close()
		text = text.lower()

		# Skip files that do not contain a cable table.
		try:
			ind = text.index("<table class='cable'>")
		except ValueError:
			print ('Hubo un error en la busqueda')
			continue

		solo_cable = text[ind:-1]
		solo_cable = solo_cable.replace('&#x000a;', '')
		solo_cable_limpio = nltk.clean_html(solo_cable)

		for w in nltk.word_tokenize(solo_cable_limpio):
			if w not in stopset:
				words.append(w)

		if writefile:
			sn = fn.replace('html', 'txt')
			save_file = open(sn, 'w')
			save_file.write(str(solo_cable_limpio))
			save_file.close()

	# Moved out of the loop: return only after every file is processed.
	return words

def wordEntropy(word, text, parts):
	"""Entropy (in bits) of the distribution of `word` across `parts`
	consecutive, equally sized chunks of `text`.

	word:  token/substring to count (anything supporting .count on `text`).
	text:  string or token list.
	parts: number of chunks; the last chunk absorbs any remainder.
	Returns 0 when `word` does not occur in `text` at all.

	Fixes vs. the original: `N/parts` used `//` semantics only under
	Python 2 — under Python 3 it yields a float slice index (TypeError);
	the leftover debug `print` statements that dumped every chunk to
	stdout are removed.
	"""
	N = len(text)
	s = N // parts  # chunk size; floor division keeps slice indices ints
	n = text.count(word)
	if n == 0:
		return 0

	result = 0.0
	for i in range(parts):
		inicio = i * s
		# The last chunk runs to the end so no tail is dropped.
		fin = N if i == parts - 1 else (i + 1) * s

		nj = text[inicio:fin].count(word)
		if nj:
			p = nj / float(n)
			result += p * math.log(p, 2)

	return -result
