'''
Created on May 11, 2013

@author: cdac
'''
import csv
import string
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.classify.naivebayes import NaiveBayesClassifier
import nltk
from nltk.probability import ProbDistI, DictionaryProbDist


class Preprocess():
    """Load messages from a tab-separated file and apply basic NLP
    preprocessing: punctuation/special-char removal, lowercasing,
    stop-word removal, lemmatization, and bag-of-words feature extraction.

    Fixes applied in review:
      * remove_stopwords_from_msg(s) and the single-message lemmatizer were
        accidentally indented inside convert_msgs_to_lowercase (after its
        return), making them unreachable dead code — re-indented to class
        level.
      * The single-message lemmatizer was misnamed ``lemmatize_msgs`` while
        ``lemmatize_msgs`` (plural, batch) called the nonexistent
        ``lemmatize_msg`` — renamed to ``lemmatize_msg``.
      * Python-2-only ``string.strip(msg, ...)`` / ``string.lower(msg)``
        replaced with the ``str`` methods (the module functions were removed
        in Python 3).
      * ``None`` fallbacks referenced ``self.__msg_list``, which is never
        assigned anywhere — they now fall back to ``self.all_msgs``.
      * The source file handle is closed via a context manager.
    """

    # NOTE(review): class-level mutable attributes are shared by every
    # instance; the methods below rebind them on the instance (shadowing
    # these). Kept at class level for backward compatibility.
    training_set = {}
    test_set = {}
    all_msgs = list()

    def __init__(self, file_source, dataset_size):
        """
        :param file_source: path to a tab-separated file whose SECOND
            column holds the message text
        :param dataset_size: number of messages returned by get_first_msgs()
        """
        self.__file_source = file_source
        self.__dataset_size = dataset_size
        self.__debug_on = False

    def get_msgs(self):
        """Read every message (second TSV column) from the source file.

        :returns: list of message strings; also stored on self.all_msgs
        :raises IndexError: if a row has fewer than two columns
        """
        self.all_msgs = list()
        # Context manager guarantees the descriptor is closed (the
        # original leaked it).
        with open(self.__file_source, 'r') as msgfd:
            msgcsv = csv.reader(msgfd, delimiter='\t')
            for msg in msgcsv:
                self.all_msgs.append(msg[1])
        return self.all_msgs

    def get_first_msgs(self):
        """Return only the first ``dataset_size`` messages from the file."""
        self.get_msgs()
        return self.all_msgs[:self.__dataset_size]

    def remove_puncts_and_special_chars_from_msg(self, msg):
        """Strip leading/trailing punctuation, then delete special chars.

        :param msg: a single message string
        :returns: cleaned copy of ``msg``
        """
        # str.strip replaces the Python-2-only string.strip(msg, chars).
        msg = msg.strip(string.punctuation)

        special_chars = "!@#$%^&*()_+=-~`<>?,./;'{}|[]\""

        # Replacing a char that is absent is a no-op, so iterating the
        # special-char set is equivalent to scanning the message.
        for char in special_chars:
            msg = msg.replace(char, '')
        return msg

    def remove_puncts_and_special_chars_from_msgs(self, msgs):
        """Clean every message in ``msgs`` (``None`` -> ``self.all_msgs``)."""
        if msgs is None:
            # Original referenced the never-assigned self.__msg_list here.
            msgs = self.all_msgs
        return [self.remove_puncts_and_special_chars_from_msg(msg)
                for msg in msgs]

    def convert_msg_to_lowercase(self, msg):
        """Lowercase one message (str.lower replaces string.lower)."""
        return msg.lower()

    def convert_msgs_to_lowercase(self, msgs, update=False):
        """Lowercase every message in ``msgs`` (``None`` -> ``self.all_msgs``).

        :param update: unused; kept for interface compatibility
        """
        if msgs is None:
            msgs = self.all_msgs
        return [self.convert_msg_to_lowercase(msg) for msg in msgs]

    def remove_stopwords_from_msg(self, msg):
        """Drop English stop words from one message.

        Words are re-joined with a trailing space, preserving the original
        output format (result ends with ' ' when any word survives).
        """
        english_stops = set(stopwords.words('english'))

        clean_msg = ""
        for word in msg.split(" "):
            if word not in english_stops:
                clean_msg += word + " "
        return clean_msg

    def remove_stopwords_from_msgs(self, msgs):
        """Apply remove_stopwords_from_msg to every message in ``msgs``."""
        return [self.remove_stopwords_from_msg(msg) for msg in msgs]

    def lemmatize_msg(self, msg):
        """Lemmatize each word of one message, noun-first then verb.

        Renamed from the duplicate ``lemmatize_msgs`` so the batch method
        below can actually find it.
        """
        lemmatizer = WordNetLemmatizer()

        clean_msg = ""
        for word in msg.split(" "):
            lem_word = lemmatizer.lemmatize(word)
            if lem_word == word:  # noun lemma unchanged: retry as a verb
                lem_word = lemmatizer.lemmatize(word, 'v')
            clean_msg += lem_word + " "
        return clean_msg

    def lemmatize_msgs(self, msgs):
        """Lemmatize every message via lemmatize_msg."""
        return [self.lemmatize_msg(msg) for msg in msgs]

    def get_word_list_from_msg(self, msg):
        """Split a message on single spaces into a word list."""
        return msg.split(" ")

    def get_word_feats(self, msgs):
        """Build a word-presence feature dict over all messages.

        :returns: ``(word_feats, word_fd)`` where word_feats maps each word
            to True and word_fd wraps it in an nltk DictionaryProbDist.
        """
        word_feats = {}
        for msg in msgs:
            for word in self.get_word_list_from_msg(msg):
                word_feats[word] = True

        word_fd = DictionaryProbDist(word_feats)
        return word_feats, word_fd
    
if __name__ == '__main__':
    # Demo driver: load the first 10 ham messages and build word features.
    pp = Preprocess('../Data/Ham.csv', 10)
    msgs = pp.get_first_msgs() 
    word_feats, word_fd = pp.get_word_feats(msgs)
    
    # NOTE(review): nltk's NaiveBayesClassifier constructor expects
    # (label_probdist, feature_probdist) — a ProbDist over labels and a
    # mapping of (label, feature-name) pairs to ProbDistI objects. Passing
    # the raw word_feats dict and a single DictionaryProbDist does not
    # match that contract and likely fails at classify time — verify
    # against the nltk.classify.naivebayes documentation.
    nbc = NaiveBayesClassifier(word_feats, word_fd)
    # NOTE(review): classify() expects a featureset dict ({feature: value}),
    # not a bare string; ("hI") is just the string "hI", not a tuple.
    nbc.classify(("hI"))
    
# stop_set = set(stopwords.words('english'))
# stops_filter = lambda w: len(w) < 3 or w in stop_set