from scipy import linalg,array,dot,mat,transpose,log

class LSA:
	""" Latent Semantic Analysis (LSA).

	    Apply transforms to a document-term matrix to bring out latent
	    relationships. These are found by analysing relationships between
	    the documents and the terms they contain.

	    Layout convention (from the tf-idf docstring below):
	    matrix[x][y] is the count of term y in document x, i.e. one row
	    per document and one column per term.
	"""

	def __init__(self, matrix):
		""" Store the document-term matrix and prepare the per-term idf cache.

		    The matrix is converted to float dtype up front: with an
		    integer dtype the in-place tf-idf assignment in
		    tfidfTransform() would silently truncate every weight to 0.
		"""
		self.matrix = array(matrix, dtype=float)
		rows, cols = self.matrix.shape
		# idf cache, one entry per term (column); -1 means "not yet computed".
		# Safe sentinel: a real idf is log(docs/occurrences) >= log(1) = 0.
		self.idfs = {}
		for col in range(cols):
			self.idfs[col] = -1

	def __repr__(self):
		""" Make the matrix look pretty: one bracketed row per line. """
		rows, cols = self.matrix.shape
		prettyRows = []
		for row in range(rows):
			cells = "".join("%+0.2f " % self.matrix[row][col] for col in range(cols))
			prettyRows.append("[" + cells + "]\n")
		return "".join(prettyRows)

	def __getTermDocumentOccurences(self, col):
		""" Return how many documents (rows) the term in column `col` occurs in. """
		termDocumentOccurences = 0
		rows, cols = self.matrix.shape
		for row in range(rows):
			if self.matrix[row][col] > 0:  # term appears in this document
				termDocumentOccurences += 1
		return termDocumentOccurences

	def tfidfTransform(self):
		""" Apply TermFrequency(tf)*inverseDocumentFrequency(idf) to each matrix element.
		    This evaluates how important a word is to a document in a corpus.

		    With a document-term matrix: matrix[x][y]
			tf[x][y]  = frequency of term y in document x / frequency of all terms in document x
			idf[y]    = log( total number of documents in corpus / number of documents with term y )
		    Note: this is not the only way to calculate tf*idf.
		"""
		documentTotal = len(self.matrix)
		rows, cols = self.matrix.shape
		total = rows * cols
		count = 0
		for row in range(rows):  # for each document
			wordTotal = float(self.matrix[row].sum())

			for col in range(cols):  # for each term
				count += 1
				if count % 2000000 == 0:
					print(str(count) + "/" + str(total) + " completados")

				if self.matrix[row][col] != 0:
					termFrequency = self.matrix[row][col] / wordTotal

					# idf depends only on the column, so compute it once
					# per term and cache it, instead of rescanning the
					# whole column for every non-zero cell.
					if self.idfs[col] == -1:
						termDocumentOccurences = self.__getTermDocumentOccurences(col)
						self.idfs[col] = log(documentTotal / float(termDocumentOccurences))

					self.matrix[row][col] = termFrequency * self.idfs[col]

	def lsaTransform(self, dimensions=1):
		""" Reduce the matrix to its rank-`dimensions` LSA representation.

		    Calculates the SVD of the matrix (U . SIGMA . VT = MATRIX),
		    keeps only the `dimensions` largest singular values, and
		    stores U_k . SIGMA_k — one row per document, `dimensions`
		    columns.  Note the VT factor is deliberately NOT multiplied
		    back in: the result lives in reduced concept space, not the
		    original term space.
		"""
		rows, cols = self.matrix.shape
		print("Matrix dimensions " + str(dimensions) + "/ rows " + str(rows))

		# There are only min(rows, cols) singular values, so that is the
		# largest valid reduction.
		if dimensions <= min(rows, cols):
			print("--> Calculating")
			# sigma comes out as a 1-d array of singular values, largest first
			u, sigma, vt = linalg.svd(self.matrix)
			print("--> Reducing Matrix (" + str(len(sigma)) + "iterations)")

			# Dimension reduction: keep the `dimensions` largest singular values
			ssigma = [sigma[i] for i in range(dimensions)]
			# First `dimensions` columns of U
			un = u[:, :dimensions]

			# Reduced document representation: U_k . SIGMA_k
			self.matrix = dot(un, linalg.diagsvd(ssigma, len(ssigma), dimensions))
		else:
			print("dimension reduction cannot be greater than %s" % min(rows, cols))

