'''
Words.py
'''

import nltk
import input
import math

class Words:
    """TF-IDF style word weighting over a collection of training documents.

    Each element of ``train_docs`` is expected to be an indexable whose first
    item (``doc[0]``) is the raw text handed to ``input.get_words`` —
    NOTE(review): confirm the exact tuple shape against the caller.
    """

    def __init__(self, train_docs):
        # word -> best (max over documents) tf-idf weight
        self.frequent_words = nltk.FreqDist()
        # doc -> FreqDist of the stemmed words occurring in that document
        self.frequency_dict = dict()
        # word -> number of documents containing the word (document frequency)
        self.allwords = dict()
        # filled by create_most_frequent_words()
        self.most_frequent_words = list()
        # Cache the stopword set and the stemmer once: loading the corpus and
        # constructing PorterStemmer per call/per word is needlessly slow.
        # A set also gives O(1) membership tests in stem().
        self.stop_words = set(nltk.corpus.stopwords.words('english'))
        self.stemmer = nltk.PorterStemmer()

        self.set_train_docs(train_docs)

        self.create_dicts()
        self.create_frequent_words()

    def set_train_docs(self, train_docs):
        """Remember the training documents used by the other builders."""
        self.train_docs = train_docs

    def create_dicts(self):
        """Build per-document term frequencies and global document frequencies.

        Populates ``self.frequency_dict[doc]`` with a FreqDist of the stemmed
        words of ``doc``, and counts in ``self.allwords`` how many documents
        each word appears in.
        """
        for doc in self.train_docs:
            freq = nltk.FreqDist()
            for word in self.stem(input.get_words(doc[0])):
                # FreqDist.inc() was removed in NLTK 3; item assignment
                # works in both old and new NLTK versions.
                freq[word] += 1
            self.frequency_dict[doc] = freq
            for word in freq:
                self.allwords[word] = self.allwords.get(word, 0) + 1

    def create_frequent_words(self):
        """Assign each word its maximal per-document tf-idf weight.

        weight(word) = max over docs of
            tf(word, doc) * log10(N / df(word) + 1.0)
        where N is the number of training documents and df the document
        frequency from ``self.allwords``.
        """
        num_docs = len(self.train_docs)
        for word, doc_freq in self.allwords.items():
            # The idf factor is constant per word; hoist it out of the
            # document loop instead of recomputing it for every document.
            idf = math.log10(num_docs / doc_freq + 1.0)
            best = 0
            for doc in self.train_docs:
                dist = self.frequency_dict[doc]
                if word in dist:
                    best = max(best, dist.freq(word) * idf)
            self.frequent_words[word] = best

    def create_most_frequent_words(self, how_many=2000):
        """Fill and return the list of the ``how_many`` highest-weighted words.

        The list is rebuilt on every call; the previous implementation only
        appended, so repeated calls accumulated duplicates.
        """
        self.most_frequent_words = [
            word for word, _weight in self.frequent_words.most_common(how_many)
        ]
        return self.most_frequent_words

    def to_features(self, words):
        """Map a document's words to a boolean feature dict for classifiers.

        Returns {'contains(word)': True/False} for every word in
        ``self.most_frequent_words``, keyed on whether the stemmed document
        contains that word.
        """
        stemmed = set(self.stem(words))
        return {'contains(%s)' % word: (word in stemmed)
                for word in self.most_frequent_words}

    def stem(self, words_list):
        """Return Porter-stemmed lowercase words, English stopwords removed.

        words_list -- an iterable of word strings.
        """
        stems = []
        for word in words_list:
            lowered = word.lower()
            if lowered in self.stop_words:
                continue
            stems.append(self.stemmer.stem(lowered))
        return stems
   

