from feature_extractor import FeatureExtractor
import re
from numpy.ma.core import log

class OurBagOfWords(FeatureExtractor):

    '''
    Extracts a bag of words representation with TFIDF scores from raw text.
    '''

    # Crude approximation of Porter-stemmer step 1, applied in order by
    # processWord(). Each entry is (compiled suffix pattern, replacement
    # suffix appended to group 1). All patterns are anchored with $ so only
    # true word suffixes are stripped (an unanchored match would truncate
    # e.g. 'basket' at the embedded 's'). The 'eed' rule is tested before
    # the generic 'ed|ing|s' rule so 'agreed' -> 'agree', not 'agre'.
    _STEM_RULES = [
        (re.compile(r'([a-z]+)sses$'), 'ss'),
        (re.compile(r'([a-z]+)ies$'), 'i'),
        (re.compile(r'([a-z]+)eed$'), 'ee'),
        (re.compile(r'([a-z]+)(?:ed|ing|s)$'), ''),
        (re.compile(r'([a-z]+)at$'), 'ate'),
        (re.compile(r'([a-z]+)bl$'), 'ble'),
        (re.compile(r'([a-z]+)iz$'), 'ize'),
    ]

    # A term is an alphabetical string of at least three characters.
    _TERM_PATTERN = re.compile(r'[a-z]{3,}')
    # Leading alphabetical run of a raw word (used to strip punctuation/digits).
    _WORD_PREFIX = re.compile(r'(^[a-zA-Z]+).*')

    def __init__(self, num_features, stemming = False, stopWords = False,
                  lBound=0,hBound=1):
        '''
        Constructor.

        @param num_features: The number of features to extract.
        @param stemming: Indicator whether to use stemming or not.
        @param stopWords: Indicator whether to use the stop words filter.
        @param lBound: lower percent of the threshold - between 0 and 1
                       (currently unused).
        @param hBound: higher percent of the threshold - between 0 and 1
                       (currently unused).
        '''
        self.stemming = stemming
        self.stopWords = stopWords
        self.num_features = num_features
        # Frequent words (stop words plus common review vocabulary) that are
        # dropped by processWord() when the stopWords filter is enabled.
        self.badWords = {'the':1,'a':1,'i':1,'to':1,'and':1,'it':1,'is':1,'in':1,'you':1,'of':1,
            'for':1,'but':1,'that':1,'on':1,'was':1,'my':1,'this':1,'than':1,'with':1,'have':1,
            'not':1,'or':1,'more':1,'can':1,'if':1,'so':1,'one':1,'are':1,'very':1,'at':1,'when':1,
            'as':1,'get':1,'good':1,'about':1,'me':1,'up':1,'we':1,'there':1,'better':1,'like':1,
            'has':1,'an':1,'other':1,'they':1,'after':1,'great':1,'no':1,'be':1,'don':1,'out':1,
            'because':1,'now':1,'size':1,'only':1,'will':1,'well':1,'would':1,'buy':1,'which':1,
            'do':1,'long':1,'first':1,'its':1,'from':1,'back':1,'think':1,'some':1,'really':1,
            'way':1,'what':1,'last':1,'work':1,'hold':1,'all':1,'time':1,'own':1,'still':1,'buying':1,
            'had':1,'off':1,'didn':1,'using':1,'got':1,'free':1,'any':1,'used':1,'them':1,'these':1,
            'color':1,'since':1,'enough':1,'many':1,'does':1,'need':1,'two':1,'comfortable':1,
            'return':1,'take':1,}

    def extract(self, raw_instance):
        '''
        Creates a new instance in the feature-space from the given raw instance.

        Requires setup() to have been called first (it builds self.order
        and self.idf).

        @param raw_instance: A string.
        @return: A tuple of numerical features, each feature representing a word
                 and its TFIDF score (0 for feature words absent from the text).
        '''
        tf = self._countTermFrequency(raw_instance)
        return tuple(tf[word] * self.idf[word] if word in tf else 0
                     for word in self.order)

    def setup(self, examples, extraction_time_limit, setup_time_limit):
        '''
        Prepares a dictionary of inverse-document-frequencies (IDFs) for each word,
        and selects as features the terms appearing in the most documents
        (i.e. the terms with the highest document frequency / lowest IDF).

        @param examples: A list of raw data examples.
        @param extraction_time_limit: The time that will be allocated for each example.
        @param setup_time_limit: The time limit for setting up this agent.
        '''
        self.extraction_time_limit = extraction_time_limit

        doc_count = float(len(examples))
        tf_examples = [self._countTermFrequency(raw) for raw in examples]
        doc_freq = self._countInverseDocumentFrequency(tf_examples)

        # Keep the num_features terms with the highest document counts.
        # A key-based sort replaces the Python-2-only cmp comparator.
        top_terms = sorted(doc_freq.items(), key=lambda item: item[1],
                           reverse=True)[:self.num_features]
        self.order = [word for word, _ in top_terms]
        # IDF(word) = log(N / number of documents containing word).
        self.idf = dict((word, log(doc_count / count))
                        for word, count in top_terms)

    def _countTermFrequency(self, raw_example):
        '''
        Counts the relative frequency of each word in a document.

        @param raw_example: A raw example (string).
        @return: A term frequency dictionary mapping word -> count / total count.
                 Empty if the document contains no terms (avoids a
                 ZeroDivisionError on empty or fully-filtered input).
        '''
        counts = {}
        total = 0
        for word in self._getTerms(raw_example):
            counts[word] = counts.get(word, 0.0) + 1.0
            total += 1
        if total == 0:
            return counts
        for word in counts:
            counts[word] = counts[word] / total
        return counts

    def _countInverseDocumentFrequency(self, tf_examples):
        '''
        Counts the number of documents containing each word.

        @param tf_examples: A list of processed examples, each one represented by a term frequency
                            (word count) dictionary.
        @return: A dictionary of each word and the number of different documents it appears in.
        '''
        doc_freq = {}
        for example in tf_examples:
            for word in example:
                doc_freq[word] = doc_freq.get(word, 0) + 1
        return doc_freq

    def _getTerms(self, text):
        '''
        Retrieves all the terms (keywords) from the given text.
        This method defines a term to be an alphabetical string of at least three characters.

        @param text: A string.
        @return: A list of terms.
        '''
        # Fast path: no per-word processing requested.
        if not (self.stemming or self.stopWords):
            return self._TERM_PATTERN.findall(text.lower())

        terms = []
        for raw_word in text.split():
            processed = self.processWord(raw_word)
            if processed is not None:
                terms.append(processed)
        return terms

    def processWord(self, rawWord):
        '''
        Filters and/or stems a single raw word.

        @param rawWord: A whitespace-delimited token from the text.
        @return: None if the word is filtered out (stop word, or no leading
                 alphabetical characters when stemming); otherwise the
                 (possibly stemmed, lowercased) word. When stemming is off
                 the word is returned unchanged.
        '''
        if self.stopWords and rawWord in self.badWords:
            return None
        if not self.stemming:
            return rawWord

        # Keep only the leading alphabetical run, lowercased.
        match = self._WORD_PREFIX.match(rawWord)
        if match is None:
            return None
        word = match.group(1).lower()

        # Apply the first matching suffix rule, if any.
        for pattern, replacement in self._STEM_RULES:
            suffix_match = pattern.match(word)
            if suffix_match is not None:
                return suffix_match.group(1) + replacement
        return word







