# Ofri Keidar Inbal Wiesel 302933981 305331878

class Parser:
    """Parses data-set files for language-model training.

    Provides event counting, development-set splitting and construction of
    observed unigram/bigram vocabularies.  In every method, lines starting
    with the configured header prefix and blank lines are treated as
    metadata and skipped; each remaining line is one article.
    """

    def __init__(self, headerPrefix, wordDelim):
        """Create a new Parser.

        headerPrefix -- prefix identifying header lines to skip
        wordDelim    -- delimiter between words in the data files
        """
        self.headerPrefix = headerPrefix
        self.wordDelim = wordDelim
        # Virtual event marking an article's beginning (used as the
        # conditioning word of each article's first bigram).
        self.BEGIN_ARTICLE = "begin-article-event"

    def _isDataLine(self, line):
        """Return True if line carries article data (not header / blank)."""
        stripped = line.strip()
        return stripped != "" and not stripped.startswith(self.headerPrefix)

    def _tokenize(self, line):
        """Split a data line into its non-empty, whitespace-trimmed words.

        Filtering empty tokens keeps the event counts consistent across
        all methods even when delimiters are repeated in the input.
        """
        return [w.strip() for w in line.strip().split(self.wordDelim)
                if w.strip()]

    def splitDataSet(self, develFileName, trainFileName, validFileName,
                     splitProp):
        """Split the development file into a training and a validation file.

        The first round(splitProp * |S|) words go to trainFileName and the
        remaining words to validFileName, where |S| is the number of
        unigram events in develFileName.  Words are written separated by
        spaces; each source line's terminating newline follows the file
        that received that line's last word.
        """
        # int(...) because round() returns a float on Python 2.
        numWordsTrain = int(round(splitProp *
                                  self.countUniEvents(develFileName)))
        wordCtr = 0  # words written so far (to either file)
        with open(develFileName, "r") as srcFile, \
             open(trainFileName, "w") as train, \
             open(validFileName, "w") as validation:
            for line in srcFile:
                if not self._isDataLine(line):
                    continue
                for word in self._tokenize(line):
                    dest = train if wordCtr < numWordsTrain else validation
                    dest.write(word + " ")
                    wordCtr += 1
                # Terminate the line in the file that received its last
                # word (last word's index was wordCtr - 1).
                dest = train if wordCtr <= numWordsTrain else validation
                dest.write("\n")

    def countUniEvents(self, fileName):
        """Return the number of words (unigram events) in fileName."""
        numEvents = 0
        with open(fileName, "r") as inputFile:
            for line in inputFile:
                if self._isDataLine(line):
                    numEvents += len(self._tokenize(line))
        return numEvents

    def countArticles(self, fileName):
        """Return the number of articles (data lines) in fileName."""
        with open(fileName, "r") as inputFile:
            return sum(1 for line in inputFile if self._isDataLine(line))

    def genUniObsVoc(self, fileName):
        """Return the observed unigram vocabulary of fileName.

        The result maps each observed word to its number of instances.
        """
        uniObsVoc = {}
        with open(fileName, "r") as inputFile:
            for line in inputFile:
                if not self._isDataLine(line):
                    continue
                for word in self._tokenize(line):
                    uniObsVoc[word] = uniObsVoc.get(word, 0) + 1
        return uniObsVoc

    def genBiObsVoc(self, fileName):
        """Return the observed bigram vocabulary of fileName.

        The result maps each observed bigram (w1, w2) to its number of
        instances.  Each article contributes one virtual
        (BEGIN_ARTICLE, first-word) bigram.
        """
        biObsVoc = {}
        with open(fileName, "r") as inputFile:
            for line in inputFile:
                if not self._isDataLine(line):
                    continue
                words = self._tokenize(line)
                if not words:  # delimiter-only line: no events
                    continue
                currWord = self.BEGIN_ARTICLE
                for nextWord in words:
                    bigram = (currWord, nextWord)
                    biObsVoc[bigram] = biObsVoc.get(bigram, 0) + 1
                    currWord = nextWord
        return biObsVoc

    def getCondWords(self, fileName):
        """Return a map from conditioning words to their followers.

        For each conditioning word c the value is the list of all words w
        such that (c, w) is an observed bigram in fileName, in first-seen
        order and without duplicates.  Every observed word appears as a
        key (possibly with an empty list); BEGIN_ARTICLE conditions each
        article's first word.
        """
        condWords = {self.BEGIN_ARTICLE: []}
        with open(fileName, "r") as inputFile:
            for line in inputFile:
                if not self._isDataLine(line):
                    continue
                words = self._tokenize(line)
                if not words:
                    continue
                currWord = self.BEGIN_ARTICLE
                for nextWord in words:
                    followers = condWords.setdefault(currWord, [])
                    if nextWord not in followers:
                        followers.append(nextWord)
                    currWord = nextWord
                # Ensure the line's last word is present as a key even if
                # it never conditions anything.
                condWords.setdefault(currWord, [])
        return condWords