import os
import nltk
import re
class WordPacker:
    def packDirFile(self, dataDirName, globalWordFreqDist, tokenizer):
        """
        Pack up all words found under a directory tree.

        Each immediate subdirectory of ``dataDirName`` is treated as one
        class; every file inside it is tokenized and its word frequencies
        recorded.

        Parameters
        ----------
        dataDirName : str
            Root directory of the data set; each subdirectory name
            indicates a class name.
        globalWordFreqDist : nltk.FreqDist
            Not used inside this method; kept for interface compatibility
            with existing callers.
        tokenizer
            Not used inside this method (a regex scan is used instead, see
            the commented-out line in the original); kept for interface
            compatibility with existing callers.

        Returns
        -------
        tuple
            ``(dataSubDirDict, dataWordFreqDist)`` where ``dataSubDirDict``
            maps subdirectory name -> {file path -> per-file FreqDist} and
            ``dataWordFreqDist`` accumulates word frequencies over all
            files in the whole data set.
        """
        filenames = os.listdir(dataDirName)
        # Keep only the directories: each directory name indicates a class name.
        dataSubDirList = [fname for fname in filenames
                          if os.path.isdir(os.path.join(dataDirName, fname))]

        dataWordFreqDist = nltk.FreqDist()  # word repository for the whole data set
        dataSubDirDict = {}                 # directory index

        ## <extract files and words>
        for subDirName in dataSubDirList:
            classDir = os.path.join(dataDirName, subDirName)
            classFiles = [os.path.join(classDir, fname) for fname in os.listdir(classDir)]
            subDirFileDict = {}
            for filePath in classFiles:  # renamed: 'file' shadowed the builtin
                # Close the handle deterministically; the original
                # open(...).read() leaked the file object.
                with open(filePath) as fh:
                    fileContentStr = fh.read()
                # Lower-cased alphabetic tokens only.
                words = re.findall(r"[a-z]+", fileContentStr.lower())
                # FreqDist.inc() was removed in NLTK 3.0; FreqDist is a
                # collections.Counter subclass, so construct/update directly.
                # This also replaces the side-effecting list comprehensions.
                fileWordFreqDist = nltk.FreqDist(words)   # per-file frequencies
                subDirFileDict[filePath] = fileWordFreqDist  # hash by file path
                dataWordFreqDist.update(words)            # corpus-wide frequencies
            dataSubDirDict[subDirName] = subDirFileDict   # categorize per class
        ## </extract files and words>
        return (dataSubDirDict, dataWordFreqDist)
    
        
            