from scipy import linalg,array,dot,mat,transpose
import matplotlib
import pylab
import numpy
from math import *
from pprint import pprint
import sys
import random

class LSA:
    """ Latent Semantic Analysis (LSA).

        Apply transforms to a term-document matrix to bring out latent
        relationships.  These are found by analysing relationships between
        the documents and the terms they contain.

        Matrix orientation (as established by the helpers below):
        rows are terms, columns are documents.
    """

    def __init__(self, matrix):
        # Store as floats: tf*idf weights are fractional, and writing them
        # back into an integer array would silently truncate them to 0.
        self.matrix = array(matrix, dtype=float)
        rows, cols = self.matrix.shape
        # Per-column cache pre-filled with the -1 sentinel.
        # NOTE(review): never read anywhere in this file; kept for
        # backward compatibility with external callers.
        self.idfs = {}
        for i in range(cols):
            self.idfs[i] = -1

    def __repr__(self):
        """ Make the matrix look pretty """
        rows, cols = self.matrix.shape
        pieces = []
        for row in range(rows):
            pieces.append("[")
            for col in range(cols):
                pieces.append("%+0.2f " % self.matrix[row][col])
            pieces.append("]\n")
        return "".join(pieces)

    def __getTermDocumentOccurences(self, row):
        """ Count how many documents (columns) term `row` occurs in. """
        rows, cols = self.matrix.shape
        occurrences = 0
        for col in range(cols):
            if self.matrix[row][col] > 0:  # term appears in this document
                occurrences += 1
        return occurrences

    def __countWords(self, col):
        """ Total number of words in document `col` (sum of its column). """
        rows, cols = self.matrix.shape
        wordTotal = 0
        for row in range(rows):
            wordTotal += self.matrix[row][col]
        return wordTotal

    def tfidfTransform(self):
        """ Apply TermFrequency(tf)*InverseDocumentFrequency(idf) to each
            matrix element in place.  This evaluates how important a word
            is to a document in a corpus.

            With a term-document matrix: matrix[term][doc]
            tf  = frequency of the term in the document /
                  total words in the document
            idf = log10(total documents in corpus /
                        documents containing the term)
            Note: this is not the only way to calculate tf*idf.
        """
        rows, cols = self.matrix.shape
        # BUG FIX: documents are the *columns* of this matrix (see
        # __countWords / __getTermDocumentOccurences), so the corpus size
        # is `cols`; the old code used len(self.matrix) == rows.
        documentTotal = cols
        total = rows * cols
        count = 0

        # Pre-compute idf per term (row) and word totals per document (col).
        idf = {}
        wt = {}
        for row in range(rows):
            occurrences = self.__getTermDocumentOccurences(row)
            # Guard all-zero rows: a term occurring in no document gets
            # idf 0 instead of raising ZeroDivisionError.
            if occurrences:
                idf[row] = numpy.log10(abs(documentTotal / float(occurrences)))
            else:
                idf[row] = 0.0
        for col in range(cols):
            wt[col] = self.__countWords(col)

        for row in range(rows):        # for each term
            for col in range(cols):    # for each document
                count += 1
                if count % 2000000 == 0:
                    # progress report for very large matrices
                    print(str(count) + "/" + str(total) + " completados")
                if self.matrix[row][col] != 0:
                    termFrequency = self.matrix[row][col] / float(wt[col])
                    self.matrix[row][col] = termFrequency * idf[row]

    def lsaTransformDim(self, dimensions=1):
        """ Project the documents into a `dimensions`-sized latent space.

            Calculates the SVD U . SIGMA . VT of the matrix, keeps the top
            `dimensions` singular values/vectors, and replaces self.matrix
            with V_k . SIGMA_k: one row per document, one column per latent
            dimension.  Note this *changes the shape* of self.matrix.
        """
        rows, cols = self.matrix.shape
        print("Matrix dimensions " + str(dimensions) + "/ rows " + str(rows))
        if dimensions <= cols:  # it is a valid reduction
            print("--> Calculating SVD")
            # sigma comes out as a 1-D array rather than a matrix
            u, sigma, vt = linalg.svd(self.matrix, full_matrices=True)
            # Top-`dimensions` right singular vectors, one row per document.
            v = vt[:dimensions].T
            print("--> Reducing Matrix (" + str(len(sigma)) + "iterations)")
            # Dimension reduction: build SIGMA'.
            ssigma = sigma[:dimensions]
            # BUG FIX: removed leftover debug code that dotted the matrix
            # with a hard-coded 12-element query vector (crashed for any
            # matrix with rows != 12) and computed 1/sigma (raised
            # ZeroDivisionError on rank-deficient matrices).
            # Document coordinates in the reduced latent space.
            self.matrix = dot(v, linalg.diagsvd(ssigma, dimensions, dimensions))
        else:
            # BUG FIX: the validity check above compares against `cols`,
            # so the message reports `cols` (the old one said rows).
            print("dimension reduction cannot be greater than %s" % cols)

    def lsaTransform(self, dimensions=1):
        """ Calculate SVD of the matrix: U . SIGMA . VT = MATRIX.
            Zero out all but the top `dimensions` singular values,
            producing SIGMA', then reconstruct the full-size matrix:
            U . SIGMA' . VT = MATRIX'.  The shape is preserved.
        """
        rows, cols = self.matrix.shape
        print("Matrix dimensions " + str(dimensions) + "/ rows " + str(rows))
        if dimensions <= cols:  # it is a valid reduction
            print("--> Calculating SVD")
            # sigma comes out as a 1-D array rather than a matrix
            u, sigma, vt = linalg.svd(self.matrix, full_matrices=True)
            print("--> Reducing Matrix (" + str(len(sigma)) + "iterations)")
            # Dimension reduction: zero every singular value beyond the
            # kept dimensions.
            for ind in range(dimensions, len(sigma)):
                sigma[ind] = 0
            # Reconstruct MATRIX' at the original (rows x cols) shape.
            self.matrix = dot(dot(u, linalg.diagsvd(sigma, rows, cols)), vt)
        else:
            # BUG FIX: the validity check above compares against `cols`,
            # so the message reports `cols` (the old one said rows).
            print("dimension reduction cannot be greater than %s" % cols)


if __name__ == '__main__':

    # Usage: script <matrix-file.rcf> <dimensions>
    # BUG FIX: sys.argv[2] is required below, so the guard must demand
    # three entries (the old check `< 2` allowed an IndexError).
    if len(sys.argv) < 3:
        exit()
    dimensions = int(sys.argv[2])
    dbfile = sys.argv[1]

    print("Leyendo matriz")
    # The .rcf file embeds the numeric matrix from line 7 until the
    # "[END Relational Context]" marker.  Every non-matrix line is copied
    # verbatim into `recycle`, with two placeholders spliced in so the
    # transformed matrix and the new dimension header can be written back.
    matrix = []
    recycle = ""
    fs0 = []  # every value > 0 of the original matrix (for the stats below)
    lcounter = 0
    matrixfile = open(dbfile, "r")
    try:
        for line in matrixfile:
            lcounter += 1
            if lcounter == 6:
                recycle += "{dimensions_here}"
            elif lcounter >= 7 and line.replace("\n", "") != "[END Relational Context]":
                if lcounter == 7:
                    recycle += "{replace_here}"
                values = line.replace("\n", "").split(" ")
                row = []
                for value in values:
                    if value != "":
                        number = float(value)
                        row.append(number)
                        if number > 0:
                            fs0.append(number)
                matrix.append(row)
            else:
                recycle += line
    finally:
        # BUG FIX: the input file was never closed.
        matrixfile.close()

    print("Creando LSA")
    # Create
    lsa = LSA(matrix)
    print("Calculando tfidf")
    # Prepare -- kept disabled, as in the original pipeline.
    # lsa.tfidfTransform()
    print("Calculando svd with " + str(dimensions) + " dimensions")
    # Perform
    lsa.lsaTransformDim(dimensions)

    print("Writing new context ")
    # Binarise each value against its column mean: >= mean -> "1", else "0".
    salida = ""     # binary context body
    salida_nb = ""  # raw (rounded) values context body
    fs1 = []        # values at-or-above their column mean (for the stats)
    dims = ""
    for i in range(dimensions):
        dims += "d" + str(i + 1) + " | "
    dims += "\n"
    averages = []
    for columnValues in lsa.matrix.T:
        averages.append(numpy.mean(columnValues))

    for rowValues in lsa.matrix:
        ctr = 0
        for k in rowValues:
            salida_nb += str(round(k, 2)) + " & "
            if k >= averages[ctr]:
                salida += "1 "
                fs1.append(round(k, 2))
            else:
                salida += "0 "
            ctr += 1
        salida += "\n"
        salida_nb += "\n"

    # Strip any directory prefix; output always goes under contextos/.
    dbfile = dbfile[dbfile.rfind("/") + 1:]
    basename = "contextos/" + dbfile.replace(".rcf", "") + "-d" + str(dimensions)

    f1 = open(basename + "-binary.rcf", "w")
    f1.write(recycle.replace("{replace_here}", salida).replace("{dimensions_here}", dims))
    f1.close()

    f1 = open(basename + "-values.rcf", "w")
    f1.write(recycle.replace("{replace_here}", salida_nb).replace("{dimensions_here}", dims))
    f1.close()

    print("Estadisticas")
    print("Estado Original: ")
    print("--> Valores distintos de 0: " + str(len(fs0)))
    print("--> Promedio de los valores: " + str(numpy.mean(fs0)))

    print("Estado Modificado: ")
    print("--> Valores distintos de 0: " + str(len(fs1)))
    print("--> Promedio de los valores: " + str(numpy.mean(fs1)))

    # BUG FIX: force float division; under Python 2 the old integer
    # division always reported 0%.  Also guard an empty fs0.
    perc = 100.0 * len(fs1) / len(fs0) if fs0 else 0.0
    print("Densidad del contexto aumentada en " + str(perc) + "%")

    print("Ejecucion finalizada")
	
