import nltk
from math import log
class Indexer:
    """Build per-data-node feature vectors from word-frequency data.

    Supported index methods: "EntropyWeight" (log-entropy weighting),
    "WordFrequency" (global word counts for present words) and
    "BooleanWeight" (presence flags).  The method name is resolved to the
    matching ``indexBy<Method>`` bound method at construction time.
    """

    def __init__(self, indexMethod="EntropyWeight"):
        """Select the indexing strategy; default is "EntropyWeight".

        Raises AttributeError at construction (rather than later, at
        indexing time) if no ``indexBy<indexMethod>`` method exists.
        """
        self._indexMethod = indexMethod
        # Resolve e.g. "EntropyWeight" -> self.indexByEntropyWeight.
        self._index = getattr(self, "indexBy" + indexMethod)
        self._globalWordFreqDist = None    # corpus-wide FreqDist, built lazily
        self._sortedGlobalWordList = None  # cached sorted vocabulary list

    def _composeGlobalWordFreqDist(self, dataNodeDict):
        """Aggregate every node's word frequencies into one corpus-wide
        nltk.FreqDist and store it on the instance."""
        globalWordFreqDist = nltk.FreqDist()
        for dataNode in dataNodeDict.values():
            for word, count in dataNode.wordsFreqDist.items():
                # FreqDist.inc() was removed in NLTK 3; item arithmetic
                # (missing keys read as 0) works in both old and new NLTK.
                globalWordFreqDist[word] = globalWordFreqDist[word] + count
        print("--- globalWordFreqDist has been built, with %d items..." % len(globalWordFreqDist))
        self._globalWordFreqDist = globalWordFreqDist

    def getSortedGlobalWordList(self):
        """Return the alphabetically sorted global vocabulary (cached).

        Requires _composeGlobalWordFreqDist() to have run first.
        """
        if self._sortedGlobalWordList is None:
            # sorted() returns a fresh list on both Python 2 (list keys)
            # and Python 3 (view keys); the old keys()+sort() breaks on 3.
            self._sortedGlobalWordList = sorted(self._globalWordFreqDist.keys())
        return self._sortedGlobalWordList

    def getGlobalWordFreqDist(self, dataNodeDict=None):
        """Return the corpus-wide frequency distribution.

        ``dataNodeDict`` was never used; it is kept (now optional) only
        for backward compatibility with existing callers.
        """
        return self._globalWordFreqDist

    def getFeatureDictMatrix(self, dataNodeDict, attachment):
        """Return {nodeID: {word: value}} feature vectors for every node.

        ``attachment`` is the method-specific auxiliary data produced by
        getMethodAttachment() (e.g. the average-entropy dict).
        """
        print("--- start building featureDictMatrix (%d vectors) ... " % len(dataNodeDict))
        return dict((nodeID, self._index(dataNode, attachment))
                    for nodeID, dataNode in dataNodeDict.items())

    def generateFeatureMatrixFile(self, filename, dataNodeDict, sortedGlobalWords, attachment):
        """Write the feature matrix to ``filename`` as CSV
        (header line: NodeID,<word>,<word>,...).

        Does nothing if ``filename`` already exists.  Node IDs are
        assumed to be integers (they are formatted with %d).
        """
        import os
        if os.path.exists(filename):
            return
        nodeIDs = sorted(dataNodeDict.keys())
        print("--- start building featureDictMatrix (%d vectors) ... " % len(nodeIDs))
        # 'with' guarantees the file is closed even if indexing raises
        # mid-loop (the original leaked the handle on error).
        with open(filename, 'w+') as featureMatrixFile:
            featureMatrixFile.write("NodeID,%s\n" % ','.join(sortedGlobalWords))
            for nodeID in nodeIDs:
                indexDict = self._index(dataNodeDict[nodeID], attachment)
                indexLine = "%d,%s\n" % (nodeID, ','.join(str(indexDict[key]) for key in sortedGlobalWords))
                featureMatrixFile.write(indexLine)
        print("FeatureMatrixFile:' %s 'saved." % filename)

    def getMethodAttachment(self, dataNodeDict):
        """Build the global frequency distribution, then return the
        method-specific auxiliary data consumed by the indexBy* methods.

        Raises ValueError for an unknown index method (the original died
        with an UnboundLocalError on ``attachment`` instead).
        """
        self._composeGlobalWordFreqDist(dataNodeDict)
        if self._indexMethod == "EntropyWeight":
            return self._getAverageEntropyDict(dataNodeDict)
        elif self._indexMethod in ("WordFrequency", "BooleanWeight"):
            return self._getWordFrequencyDict(dataNodeDict)
        raise ValueError("Unknown index method: %r" % self._indexMethod)

    def indexByEntropyWeight(self, dataNode, attachment):
        """Log-entropy weight per global word:
        log(f + 1) * (1 + averageEntropy[word]) for words present in this
        node, 0 otherwise.  ``attachment`` is the dict produced by
        _getAverageEntropyDict()."""
        averageEntropyDict = attachment
        nodeWordFreqDist = dataNode.wordsFreqDist
        indexDict = {}
        for word in self.getSortedGlobalWordList():
            # Membership on the dist itself is O(1); the original tested
            # against a keys *list*, making this loop accidentally O(n^2).
            if word in nodeWordFreqDist:
                indexDict[word] = (log(nodeWordFreqDist[word] + 1.0)) * (1.0 + averageEntropyDict[word])
            else:
                indexDict[word] = 0
        return indexDict

    def _getAverageEntropyDict(self, dataNodeDict):
        """Return {word: sum_j(p_ij * log p_ij) / log N} over the N nodes,
        where p_ij = f_ij / (global frequency of word).  Values are <= 0;
        indexByEntropyWeight uses (1 + value) as the global weight.

        NOTE(review): reading dataNode.wordsFreqDist[word] for absent
        words relies on FreqDist's 0 default -- would raise KeyError on a
        plain dict; confirm node dists are FreqDist-like.
        """
        averageEntropyDict = {}
        N = len(dataNodeDict)
        if N <= 1:
            # log(N) would be 0 (division by zero) or N is empty.
            raise Exception("!!! Unachievable EntropyWeight: Too less files !!!")
        log_N = log(N)
        dataNodes = list(dataNodeDict.values())  # hoisted out of the word loop
        for word in self._globalWordFreqDist.keys():
            ni = float(self._globalWordFreqDist[word])  # loop-invariant per word
            total = 0.0  # renamed: the original shadowed the builtin 'sum'
            for dataNode in dataNodes:
                fij = float(dataNode.wordsFreqDist[word])
                if fij != 0:
                    f_n = fij / ni
                    total += f_n * log(f_n)
            averageEntropyDict[word] = total / log_N
        print("--- _getAverageEntropyDict has been built, with %d items..." % len(averageEntropyDict))
        return averageEntropyDict

    def indexByWordFrequency(self, dataNode, attachment):
        """Global word frequency for words present in this node, else 0.
        ``attachment`` is the dict from _getWordFrequencyDict()."""
        wordFrequencyDict = attachment
        nodeWordFreqDist = dataNode.wordsFreqDist
        indexDict = {}
        for word in self.getSortedGlobalWordList():
            indexDict[word] = wordFrequencyDict[word] if word in nodeWordFreqDist else 0
        return indexDict

    def _getWordFrequencyDict(self, dataNodeDict):
        """Return a copy of the global frequency distribution; this is
        the attachment for WordFrequency and BooleanWeight indexing."""
        return self._globalWordFreqDist.copy()

    def indexByBooleanWeight(self, dataNode, attachment):
        """1 for global words present in this node, else 0.
        ``attachment`` is unused but kept for the shared indexBy* signature."""
        nodeWordFreqDist = dataNode.wordsFreqDist
        return dict((word, 1 if word in nodeWordFreqDist else 0)
                    for word in self.getSortedGlobalWordList())
