#!/local/usr/bin/python

from nltk import FreqDist
import instanceFactory
import os,sys

class Console:
    """Drive the text-classification pipeline.

    Reads training data nodes from MySQL, preprocesses them (sectioning,
    tokenization, stopword removal, stemming) and reports class histograms.
    The indexing / dim-reduction / vector-file steps exist as methods but
    are currently disabled in main().

    All collaborators (XML configurator, MySQL agent, sectioners, indexers,
    dim reducers, file generators) come from the project's InstanceFactory.
    """

    def __init__(self):
        # Singleton factory that builds every collaborator used below.
        self.instFact = instanceFactory.InstanceFactory.getInstance()
        # Class codes to focus on; populated by readTrainingDataNodeDict()
        # when a focused-classes file exists, otherwise stays None.
        self.focusedCodes = None

    def config(self, configFile='../config/config.xml'):
        """Load the XML configuration and prepare sectioners plus the two
        frequency distributions updated during preprocessing.

        configFile -- path to the XML configuration file.
        """
        self.instFact.setXmlConfigurator(configFile)
        self.xmlConf = self.instFact.getXmlConfigurator()
        sectionKeys = self.xmlConf.getBlockedSectionKeys()
        self.sectioners = self.instFact.getSectioners(sectionKeys)
        self.globalClassFreqDist = FreqDist()
        self.quantityOfNodeOfCodeFreqDist = FreqDist()

    def readTrainingDataNodeDict(self, sqlConfigDict):
        """Fetch a randomized dict of training data nodes via the MySQL
        agent, honoring the focused-classes file when it exists.

        sqlConfigDict -- connection settings passed to getMysqlAgent().
        Returns the data-node dict from getRandomDataNodeDict().
        """
        ma = self.instFact.getMysqlAgent(sqlConfigDict)
        focusedClassFilename = self.xmlConf.getFocusedClassesFilename()
        if os.path.exists(focusedClassFilename):
            # Fixed: close the file instead of leaking the handle.
            with open(focusedClassFilename, 'rU') as focusedFile:
                self.focusedCodes = focusedFile.read().split()
        # Randomization settings forwarded as a plain dict.
        randomConfigDict = {
            'needRerandom': self.xmlConf.getNeedRerandom(),
            'subsetFile': self.xmlConf.getSubsetFile(),
            'baseFile': self.xmlConf.getBaseFile(),
            'randomAmount': self.xmlConf.getRandomAmount(),
            'randomRange': self.xmlConf.getRandomRange(),
        }
        return ma.getRandomDataNodeDict(focusedClassFilename, randomConfigDict)

    def preprocess(self, dataNodeDict):
        """Tokenize, clean, stem and tally every data node in place.

        Mutates each node and updates globalClassFreqDist and
        quantityOfNodeOfCodeFreqDist. Returns the same dict for chaining.
        """
        nForGram = self.xmlConf.getNForGram()
        needConceptId = self.xmlConf.getNeedConceptId()
        separateSection = self.xmlConf.getSeparateSection()
        replaceStrWithId = self.xmlConf.getReplaceStrWithId()
        keepIdOnly = self.xmlConf.getKeepIdOnly()
        # Hoisted out of the loop: the stem scale is loop-invariant.
        stemScale = self.xmlConf.getStemScale()
        for dataNode in dataNodeDict.values():
            # Blank out blocked sections before tokenization.
            for sectioner in self.sectioners:
                dataNode.textStr = sectioner.blockSection(dataNode.textStr)
            dataNode.tokenizeTextString(separateSection, needConceptId,
                                        replaceStrWithId, keepIdOnly, nForGram)
            dataNode.removeStopwords()
            dataNode.lexicalVerify()
            dataNode.stem(stemScale)
            # Tally class membership and per-code node counts.
            dataNode.checkInClasses(self.globalClassFreqDist)
            dataNode.checkInNodeOfCode(self.quantityOfNodeOfCodeFreqDist,
                                       self.focusedCodes)
        return dataNodeDict

    def getIndexAttachment(self, indexer, dataNodeDict):
        """Return the indexer's method attachment for the given nodes."""
        return indexer.getMethodAttachment(dataNodeDict)

    def indexInFile(self, indexFilename, dataNodeDict, indexer, indexingAttachment = None):
        """Write the feature matrix file and cache the global word
        frequency distribution on self.

        Returns the indexing attachment actually used (fetched from the
        indexer when the caller supplies none).
        """
        outputDir = os.path.dirname(indexFilename)
        if not os.path.exists(outputDir):
            os.makedirs(outputDir)
        print("indexing...\n")
        sortedGlobalWords = indexer.getSortedGlobalWordList()
        # Fixed: identity test against None instead of '=='.
        if indexingAttachment is None:
            indexingAttachment = indexer.getMethodAttachment()
        indexer.generateFeatureMatrixFile(indexFilename, dataNodeDict,
                                          sortedGlobalWords, indexingAttachment)
        print("\tindexing:globalWordFreqDist...")
        self.globalWordFreqDist = indexer.getGlobalWordFreqDist(dataNodeDict)
        return indexingAttachment

    def reduceDim(self, dataNodeDict, binaryClassList, reduceType, reduceMethod, freqThreshold, IgThreshold):
        """Drop low-frequency words, then reduce dimensionality with the
        configured method and return the surviving word list."""
        dimReducer = self.instFact.getDimReducer(reduceType, reduceMethod)
        highFreqWordList = dimReducer.filterByLowFrequency(self.globalWordFreqDist, freqThreshold)
        return dimReducer.reduceDim(dataNodeDict, highFreqWordList, binaryClassList, IgThreshold)

    def generateVectorFile(self, dataNodeDict, fromMatrixFilename, reducedDimWordList, toFileType, \
                           binaryClassList, outputPath, filename, attachment, isFast=False):
        """Generate one vector file of the requested type under outputPath,
        creating the output directory when necessary."""
        fileGenerator = self.instFact.getFileGenerator(toFileType)
        if not os.path.exists(outputPath):
            os.makedirs(outputPath)
        fileGenerator.generateFile(outputPath, fromMatrixFilename, reducedDimWordList, \
                                   binaryClassList, dataNodeDict, filename, attachment, isFast, self)

    def envaluateResultsInPath(self, learningMachineName, dataNodeDict, outputPath, logName):
        """Placeholder evaluation step: currently only logs its inputs."""
        print('||'.join((learningMachineName, str(id(dataNodeDict)), outputPath, logName)))
        return

    def main(self):
        """Run the active pipeline: load, preprocess, report histograms.

        NOTE(review): the indexing / dim-reduction / file-generation code
        below is disabled (wrapped in triple-quoted strings). The second
        disabled block references featureMatrixFileName, which is defined
        only inside the first disabled block -- re-enabling it as-is would
        raise NameError. Confirm intent before re-enabling.
        """
        sqlConfigDict = self.xmlConf.getSqlConfig()
        dataNodeDict = self.readTrainingDataNodeDict(sqlConfigDict)
        self.preprocess(dataNodeDict)
        '''
        featureMatrixFileName = self.xmlConf.getFeatureMatrixFileName()
        indexMethod = self.xmlConf.getIndexMethod()
        indexer = self.instFact.getIndexer(indexMethod)
        attachment = self.getIndexAttachment(indexer, dataNodeDict)
        self.indexInFile(featureMatrixFileName, dataNodeDict, indexer, attachment)
        '''
        # Kept for the disabled block below, which reads it.
        focusedClassesFilename = self.xmlConf.getFocusedClassesFilename()

        # <exp> experimental reporting of the two frequency distributions
        import histogram
        globalClassFreqDistReportName = "./experiments/globalClassFreqDist.txt"
        histogram.reportFreqDistHistogram(self.globalClassFreqDist, globalClassFreqDistReportName)
        quantityOfNodeOfCodeReportName = "./experiments/nodeQuatityOfCode.txt"
        histogram.reportFreqDistHistogram(self.quantityOfNodeOfCodeFreqDist, quantityOfNodeOfCodeReportName)

        '''
        print "wordAndClassHistogram...\n"
        focusedClassList = open(focusedClassesFilename, 'rU').read().split()
        trainingDataNodesIter = dataNodeDict.itervalues()
        histogram.wordAndClassHistogram(self.globalWordFreqDist, focusedClassList, trainingDataNodesIter)
        # </exp>


        if os.path.exists(focusedClassesFilename):
            focusedClassList = open(focusedClassesFilename, 'rU').read().split()
        else:
            focusedClassList = self.globalClassFreqDist.keys()
        totalFileNumber = len(focusedClassList)
        printedFileNumber = 0
        print "totalFileNumber: %d"%totalFileNumber

        toFileType = self.xmlConf.getGenerateFileType()
        outputPath = self.xmlConf.getGenerateFilePath()
        reduceDimMethodType = self.xmlConf.getReduceDimMethodType()
        reduceDimMethodName = self.xmlConf.getReduceDimMethodName()
        freqThreshold = self.xmlConf.getFrequencyThreshold()
        IgThreshold = self.xmlConf.getInfoGainThreshold()

        for pickedClass in focusedClassList:
            binaryClassList = [pickedClass, '-1']
            reducedDimWordList = self.reduceDim(dataNodeDict, binaryClassList, reduceDimMethodType, reduceDimMethodName, freqThreshold, IgThreshold)
            filename = pickedClass
            self.generateVectorFile(dataNodeDict, featureMatrixFileName, reducedDimWordList, toFileType, binaryClassList, outputPath, filename)
            printedFileNumber += 1
            print " %d file(s) in process: now is %d, -- %d file(s) left.\n" \
                    %(totalFileNumber, printedFileNumber, totalFileNumber - printedFileNumber)

        learningMachineName = self.xmlConf.getEvaluationMethod()
        logName = self.xmlConf.getEnvaluationLogName()
        self.envaluateResultsInPath(learningMachineName, dataNodeDict, outputPath, logName)
        '''
    
if __name__ == '__main__':
    conf = Console()
    # Fixed: running without an argument used to raise IndexError; fall
    # back to the same default path Console.config() declares.
    configFile = sys.argv[1] if len(sys.argv) > 1 else '../config/config.xml'
    conf.config(configFile)
    conf.main()