# -*- coding: utf-8 -*-

import math
from MetaArray import *
from numpy import * 
from scipy import linalg , dot 
from interfacedb import *


DEFAULT_DIMS_DIVISOR = 2  # fallback: keep len(docs)/2 SVD dimensions when none are specified or recoverable
TEST_FILE_LOCATION = "testMatrix/"  # directory where matrices are written in test mode
DEFAULT_FILE_LOCATION = ""  # default: write matrices to the current working directory

class UpdateLSSP:
    """Class for building a Term Document Matrix (TDM) and the four corresponding Matrices (U,Vt,S,SVD) of a Latent Semantic Space (LSSP).
    These will be stored directly in binary form to allow better performance when applying a custom Latent Semantic Analysis.
    Matrices are stored as extend numpy package ndarrays using the class MetaArray from Luke Campagnola (available at http://www.scipy.org/Cookbook/MetaArray).
    The module has been written for extensibility and portability rather than performance.
    Example and Doctest : 
    >>> from localTest import *
    >>> local_test = LocalTest()
    >>> lssp  = UpdateLSSP(test_mode = True, dimensions = 9, tf_idf = False)
    >>> sha1(lssp.TDM.view(ndarray)).hexdigest()
    '7a943a5fa8ad6f7baec4d2e99ce1536d12ec43d9'
    >>> sha1(lssp.U.view(ndarray)).hexdigest()
    '4c425ebe7174af7c38c93d5bf54377aee61ee8b0'
    >>> sha1(lssp.Vt.view(ndarray).__str__()).hexdigest()
    '90d32bc92ac5fa4e03071f69a5916c1bd6112e58'
    >>> sha1(lssp.S.view(ndarray)).hexdigest()
    'c2956ff81238e85a64b7f15dd25970c9fc3af5f3'
    >>> sha1(lssp.SVD.view(ndarray)).hexdigest()
    'a2381191ab625e9fdb0a33e4c7d2eed956396d10'
    >>> local_test.set_test_number(2) #Test of tf-idf
    >>> lssp  = UpdateLSSP(test_mode = True, dimensions = 3 )
    >>> sha1(lssp.TDM.view(ndarray)).hexdigest()
    '549eb32e0fb6d4006b16c008ce49ba57ce63333f'
    >>> local_test.set_test_number(3) #Landauer Example
    >>> lssp  = UpdateLSSP(test_mode = True , dimensions =9)
    >>> sha1(lssp.TDM.view(ndarray)).hexdigest()
    '4187dcdb1be1222c571a872363a4493f146ae6f1'
    """ 
    
    new_docs = None
    words = None
    docs = None
    iDB  = None
    TDM = None 
    SVD = None 
    U = None 
    Vt = None 
    S = None 
    meta_headers = None
    dimensions = None
    binarydb_location = None 
    tf_idf = None
    
    def __ordered_set(self,alist):
        """Returns the list passed in parameter without duplicates in the same order.
        """ 
        duplicate = set()
        return [element for element in alist if element not in duplicate and not duplicate.add(element)]

    def __get_new_docs(self):
        """Retrieves the new documents ids and their contents from the DB.
        The documents must have no stopwords and must be stemmed.
        This operation is needed before updating the TDM and LSSP."""
        self.iDB = InterfaceDB(self.test_mode,self.extra_db_parameters)
        doc_id = self.iDB.get_next_new_doc_id()
        if doc_id != 0:
            self.new_docs = {}
            while doc_id != 0:
                self.new_docs[ 'doc'+str(doc_id) ] =  self.iDB.get_words(doc_id) 
                doc_id = self.iDB.get_next_new_doc_id()
            return True
        else:
            return False

    def __update_words(self):
        """Completes the words list with the new words contained in the new documents."""
        self.words.extend(' '.join(self.new_docs.values()).split())#add new words
        self.words = self.__ordered_set(self.words)        
    
    def __update_docs(self):
        """Completes the document list with the new documents ids."""
        self.docs.extend(self.new_docs.keys())#add new docs ids
    
    def __update_meta_headers(self):
        """Sets up to date the header information corresponding to the MetaArray object, for matrices TDM and SVD.
        Before this method is called, the list of documents and words must be updated, ie  by calling  __update_words() and __update_docs()."""
        self.meta_headers = [axis('Words', cols=self.words),axis('Docs', cols=self.docs), {'dimensions': self.dimensions}]
        
    def __updateTDM(self): 
        """Private method to update the Term Document Matrix (TDM).
        Before this method is called, the list of documents and words must be updated, ie  by calling  __update_words() and __update_docs().
        Also header information for the MetaArray object must be up to date, ie by calling __update_meta_headers().
        The TDM is a MetaArray, which is an extended class of the numpy ndarray class (see numpy package for more info and the class MetaArray from Luke Campagnola).
        A TDM contains statistical data of the concepts in all documents. 
        The rows represent the concepts and the cols the documents.
        Therefore a cell(i,j) will represent the number of occurences of word i in document j."""
        self.TDM = MetaArray((len(self.words),len(self.docs)), dtype=float, info=self.meta_headers)
        self.TDM[:] = 0 #fill with zeros
        
        for doc in self.docs:
            words = self.iDB.get_words(doc[3:]).split()#[3:] because 'doc542'[:3] = 542
            unique_words = set(words)
            for word in unique_words :
                self.TDM["Words":word,"Docs":doc] = words.count(word)
        self.TDM.write(self.binarydb_location +'TDM')

    def __updateTDM_TFIDF(self): 
        """Same method as __updateTDM() but uses tf-idf (term frequency-inverse document frequency)
        scheme to normalize TDM cells according to the following formula :
        tf(i,j) = (the number of occurences of word i in document j)/(the number of occurences of all terms in document j)
        idf(i) =  log(number of documents in the corpus/ number of documents where the term i appears)
        cell(i,j) = tf(i,j)*idf(i)
        """
        self.__updateTDM()

        temp = MetaArray((len(self.words),len(self.docs)), dtype=float, info=self.meta_headers)
        temp[:] = 0 #fill with zeros
        for doc in self.docs:
            words = self.iDB.get_words(doc[3:]).split()#[3:] because 'doc542'[:3] = 542
            unique_words = set(words)
            for word in unique_words :
                tf = float(self.TDM["Words":word,"Docs":doc]) / len(words)
                idf = math.log10(float(len(self.docs)) / (1 + len(argwhere(self.TDM["Words":word,:].view(ndarray))) ))
                temp["Words":word,"Docs":doc] = tf*idf
        self.TDM = temp
        self.TDM.write(self.binarydb_location +'TDM')
        
    
    def __updateSVD(self):
        """Private method for updating the matrices of the Singular Value Decomposition (U,Vt,S,SVD).
           Their dimension will be adjusted according to the constructor "dimension" parameter.
           Before this method is called, the list of documents and words must be updated, ie  by calling  __update_words() and __update_docs().
           Also header information for the MetaArray object must be up to date, ie by calling __update_meta_headers().
           These matrices are MetaArrays, which is an extended class of the numpy ndarray class (see numpy package for more info and the class MetaArray from Luke Campagnola).
           The rows represent the concepts and the cols the documents.
           Therefore a cell(i,j) will represent the intersection of singularity of word i in document j according to the dimensionality reduction."""
        if self.dimensions == None :
            self.dimensions = int(len(self.docs)/DEFAULT_DIMS_DIVISOR ) 
        if self.dimensions <= 0 or (len(self.docs)> 0 and self.dimensions > len(self.docs)) : 
	        raise Exception("Dimensions must be a positive integer inferior to the number of documents.")  
        U,S,Vt = linalg.svd(self.TDM.view(ndarray), full_matrices= False) #view(ndarray) according to performance tip in http://www.scipy.org/Cookbook/MetaArray
        S = S[0:self.dimensions] #S is a vector
        U = U[:,0:self.dimensions]
        Vt = transpose(transpose(Vt)[:,0:self.dimensions])
        S = diag(S) # S has a shape of (K,K)
        SVD = dot(U, dot(S, Vt))
        self.SVD = MetaArray(SVD, info=self.meta_headers)
        self.U = MetaArray(U)
        self.Vt = MetaArray(Vt)
        self.S = MetaArray(S)
        self.SVD.write(self.binarydb_location + 'SVD')
        self.U.write(self.binarydb_location + 'U')
        self.Vt.write(self.binarydb_location + 'Vt')
        self.S.write(self.binarydb_location + 'S')
	         
    def __init__(self, dimensions = None, force_update = None, test_mode = None, matrix_file_location = None,extra_db_parameters= None, tf_idf = True):
        """
        Updates all matrices in the LSSP with the new documents in the DB.
        If present, the old TDM matrix is reconstructed before updating, this is done to get old word and document lists (ie force_update parameter) .
        If no new documents are found, no action will be performed and a warning will be printed.
        If dimensions parameter isn't set it will be by default the same dimension that the previous LSSP.
        If no previous LSSP is found dimensions parameter will be by default the total number of documents divided by 2.
        When the force_update parameter is None (default behaviour) it will try to find a previous TDM matrix and update it.
        When the force_update parameter is True it will raise an error if no previous TDM matrix is found.
        When the force_update parameter is False (test mode behaviour) it will ignore any previous TDM matrix.
        The test_mode parameter will use the test DB (for more info see interfacedb) and force_update = False.
        The matrix_file_location defines the location where the matrices will be stored, by default is the current directory matrix_file_location ="" 
        unless if test_mode is True, then matrix_file_location ="testMatrix/".
        The extra_db_parameters is an extra parameter for database (DB) extraction only, and thus will only be used by the Interfacedb class (for more info see interfacedb.py). 
        The tf_idf parameter defaults to True and updates the TDM with tf-idf values instead of number of occurences (see __updateTDM_TFIDF() for more info )
        """
        
        self.dimensions  = dimensions
        self.tf_idf = tf_idf
        self.test_mode = test_mode
        self.extra_db_parameters = extra_db_parameters
        if matrix_file_location == None  : 
            if test_mode != None:
                self.binarydb_location = TEST_FILE_LOCATION
                force_update = False
            else: 
                self.binarydb_location = DEFAULT_FILE_LOCATION
        else : 
            self.binarydb_location = matrix_file_location
            
        if force_update != True and  self.__get_new_docs() : 
            try : 
                if force_update == False:
                    raise Exception("No update is necessary")
                self.TDM = MetaArray(file=self.binarydb_location+'TDM')
                self.words  = [ elem['name'] for elem in self.TDM._info[0]['cols'] ] #word list is the cols of TDM axis 0
                self.docs = [ elem['name'] for elem in self.TDM._info[1]['cols'] ]
                if self.dimensions == None:
                    self.dimensions = self.TDM._info[-1]['dimensions'] #extra information contained at the end of metaArray
            except Exception, e:
                if force_update == True:
                    raise e
                else:
                    self.words = []  
                    self.docs  = []
                     
            self.__update_words()
            self.__update_docs()
            self.__update_meta_headers() 
            if self.tf_idf :
                self.__updateTDM_TFIDF()
            else :
                self.__updateTDM()
            self.__updateSVD()
        else:
            if force_update != True :
                print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
                print "!!!!!!!!!!!!!!!!Warning : No new document found, skipping all LSSP updates.!!!!!!!!!!!!!!!!!!!!!!!!!!!"
                print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
            self.TDM = MetaArray(file=self.binarydb_location+'TDM')
            self.SVD = MetaArray(file=self.binarydb_location+'SVD')

            

if __name__ == "__main__":
    import doctest
    # sha1 must be a module-level global: the class docstring's doctests call it.
    from hashlib import sha1
    doctest.testmod()
