'''
LSA Full Context
Yields a full context obtained using LSA.
The new context is calculated using the reduced sigma and the original U and Vt.
The new context dimensions are the same as those of the original matrix.
'''
from scipy import linalg,array,dot,mat,transpose
import matplotlib
import pylab
import numpy
from math import *
from pprint import pprint
import sys
import random

class LSA:
    """Latent Semantic Analysis (LSA).

    Apply transforms to a document-term matrix to bring out latent
    relationships.  These are found by analysing relationships between
    the documents and the terms they contain.
    """

    def __init__(self, matrix):
        """Store the document-term matrix.

        matrix -- 2D sequence: rows are documents, columns are terms.
        """
        # Force a float dtype up front: tfidfTransform assigns float
        # weights in place, and on an integer array those assignments
        # would be silently truncated to 0.
        self.matrix = numpy.array(matrix, dtype=float)
        rows, cols = self.matrix.shape
        # Per-column inverse document frequency cache; -1 marks
        # "not yet computed" (filled in by tfidfTransform).
        self.idfs = {}
        for col in range(cols):
            self.idfs[col] = -1

    def __repr__(self):
        """Render the matrix one bracketed row per line."""
        rows, cols = self.matrix.shape
        lines = []
        for row in range(rows):
            cells = "".join("%+0.2f " % self.matrix[row][col]
                            for col in range(cols))
            lines.append("[" + cells + "]\n")
        return "".join(lines)

    def __getTermDocumentOccurences(self, col):
        """Return the number of documents (rows) in which term `col` occurs."""
        rows, cols = self.matrix.shape
        occurrences = 0
        for row in range(rows):
            if self.matrix[row][col] > 0:  # term appears in this document
                occurrences += 1
        return occurrences

    def tfidfTransform(self):
        """Apply TermFrequency(tf)*InverseDocumentFrequency(idf) in place.

        This evaluates how important a word is to a document in a corpus.

        With a document-term matrix: matrix[x][y]
            tf[x][y]  = frequency of term y in document x
                        / frequency of all terms in document x
            idf[y]    = log(total number of documents in corpus
                        / number of documents with term y)
        Note: this is not the only way to calculate tf*idf.
        """
        rows, cols = self.matrix.shape
        documentTotal = rows
        # Pre-compute every column's idf once, from the *pristine*
        # occurrence counts.  The previous implementation recomputed the
        # occurrences inside the cell loop, so cells already zeroed by
        # earlier rows (idf == 0 columns) changed the idf seen by later
        # rows, giving order-dependent results -- and the pass was
        # O(rows^2 * cols).
        for col in range(cols):
            occurrences = self.__getTermDocumentOccurences(col)
            if occurrences > 0:
                # occurrences <= documentTotal, so the ratio is >= 1 and
                # the log is never negative.
                self.idfs[col] = log(documentTotal / float(occurrences))
        total = rows * cols
        count = 0
        for row in range(rows):  # for each document
            wordTotal = sum(self.matrix[row])
            for col in range(cols):  # for each term
                count += 1
                if count % 2000000 == 0:
                    # Coarse progress indicator for very large matrices.
                    print(str(count) + "/" + str(total) + " completados")
                if self.matrix[row][col] != 0:
                    termFrequency = self.matrix[row][col] / float(wordTotal)
                    self.matrix[row][col] = termFrequency * self.idfs[col]

    def lsaTransform(self, dimensions=1):
        """Replace the matrix with its rank-`dimensions` approximation.

        Calculate the SVD of the matrix: U . SIGMA . VT = MATRIX.
        Zero every singular value past `dimensions` to build SIGMA',
        then reconstruct: U . SIGMA' . VT = MATRIX'.
        """
        rows, cols = self.matrix.shape
        print("Matrix dimensions " + str(dimensions) + "/ rows " + str(rows))
        if dimensions <= cols:  # it's a valid reduction
            print("--> Calculating")
            # sigma comes out as a 1-D array of singular values rather
            # than a diagonal matrix.
            u, sigma, vt = linalg.svd(self.matrix)
            print("--> Reducing Matrix (" + str(len(sigma)) + "iterations)")
            # Dimension reduction: keep the first `dimensions` singular
            # values, zero the rest.
            for index in range(dimensions, len(sigma)):
                sigma[index] = 0
            # Reconstruct MATRIX' and save the transform.
            reconstructed = linalg.diagsvd(sigma, len(self.matrix), len(vt))
            self.matrix = numpy.dot(numpy.dot(u, reconstructed), vt)
        else:
            # The check is against the number of columns, so report cols
            # (the old message interpolated `rows`).
            print("dimension reduction cannot be greater than %s" % cols)

if __name__ == '__main__':

    # Three positional arguments are required, so argv must hold at least
    # four entries (the old check of `< 2` let a 2- or 3-argument call
    # crash on argv[2]/argv[3]).
    if len(sys.argv) < 4:
        print("\n*** usage: " + sys.argv[0] + " [dbfile] [dimensions] [offset]")
        sys.exit(1)
    dbfile = sys.argv[1]
    dimensions = int(sys.argv[2])
    offset = float(sys.argv[3])

    print("Leyendo matriz")
    matrix = []
    lcounter = 0
    recycle = ""
    fs0 = []  # non-zero values of the original matrix (for statistics)
    matrixfile = open(dbfile, "r")
    try:
        for line in matrixfile:
            lcounter += 1
            # The first 6 lines and the closing marker are header/footer
            # text that is recycled verbatim into the output files;
            # everything in between is matrix data.
            if lcounter >= 7 and line.replace("\n", "") != "[END Relational Context]":
                if lcounter == 7:
                    # Placeholder where the transformed matrix is spliced
                    # back into the recycled template.
                    recycle += "{replace_here}"
                values = line.replace("\n", "").split(" ")
                row = []
                for value in values:
                    if value != "":
                        cell = float(value)
                        row.append(cell)
                        if cell > 0:
                            fs0.append(cell)
                matrix.append(row)
            else:
                recycle += line
    finally:
        matrixfile.close()

    print("Creando LSA")
    # Create
    lsa = LSA(matrix)
    print("Calculando tfidf")
    # Prepare
    lsa.tfidfTransform()
    print("Calculando svd with " + str(dimensions) + " dimensions")
    # Perform
    lsa.lsaTransform(dimensions)

    print("Escribiendo nuevo contexto para valores superiores a " + str(offset))
    # Column means of the transformed matrix: a cell maps to binary 1
    # only when it exceeds its column's average.
    averages = [numpy.mean(column) for column in lsa.matrix.T]

    salida = ""     # binarised matrix
    salida_nb = ""  # raw (non-binary) values
    fs1 = []        # values kept by the threshold (for statistics)
    for row in lsa.matrix:
        for col, value in enumerate(row):
            salida_nb += str(value) + " "
            if value > averages[col]:
                salida += "1 "
                fs1.append(value)
            else:
                salida += "0 "
        salida += "\n"
        salida_nb += "\n"

    # Strip any directory prefix from the input name before building the
    # output file names.
    dbfile = dbfile[dbfile.rfind("/") + 1:]
    basename = "contextos/" + dbfile.replace(".rcf", "") + "-d" + str(dimensions)

    f1 = open(basename + "-p" + str(offset) + "-binary.rcf", "w")
    try:
        f1.write(recycle.replace("{replace_here}", salida))
    finally:
        f1.close()

    f1 = open(basename + "values.rcf", "w")
    try:
        f1.write(recycle.replace("{replace_here}", salida_nb))
    finally:
        f1.close()

    print("Estadisticas")
    print("Estado Original: ")
    print("--> Valores distintos de 0: " + str(len(fs0)))
    print("--> Promedio de los valores: " + str(numpy.mean(fs0)))

    print("Estado Modificado: ")
    print("--> Valores distintos de 0: " + str(len(fs1)))
    print("--> Promedio de los valores: " + str(numpy.mean(fs1)))

    # Float division: the old integer division reported 0% whenever
    # len(fs1) < len(fs0).
    perc = 100.0 * len(fs1) / len(fs0)
    print("Densidad del contexto aumentada en " + str(perc) + "%")
    print("Ejecucion finalizada")
