'''
Created on 26-Apr-2013

@author: cdac
'''
import csv
import string
from nltk.corpus import stopwords
from collections import Counter
import numpy
from scipy import stats
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from nltk.probability import CrossValidationProbDist, ProbDistI,\
    DictionaryProbDist
import random
from Bio.kNN import train
from sympy.mpmath.calculus.extrapolation import izip
from nltk.misc import sort
from matplotlib import pyplot


class OptiPreProcess():
    '''
    Pre-processing pipeline for a tab-separated SMS corpus.

    Reads messages from ``file_source`` (TSV, message text in column 1)
    and offers cleaning steps (punctuation stripping, lowercasing,
    stopword removal, stemming, lemmatisation) plus simple corpus
    statistics.  Cleaning methods accept ``msgs=None`` to operate on the
    internally cached list, and an ``update`` flag that writes the
    result back into that cache.
    '''

    def __init__(self, file_source, dataset_size):
        '''
        Remember the corpus path and the number of messages to use.
        '''
        self.__file_source = file_source
        self.__dataset_size = dataset_size
        self.__debug_on = False  # print_stats() is silent unless enabled

    def get_msgs(self):
        '''Read every message (column 1 of the TSV source) into a list.

        Caches the list on the instance and returns it.
        '''
        self.__msg_list = list()
        # 'with' closes the handle; the original version leaked it.
        with open(self.__file_source, 'r') as msgfd:
            for row in csv.reader(msgfd, delimiter='\t'):
                self.__msg_list.append(row[1])
        return self.__msg_list

    def get_first_msgs(self, how_many, update=True):
        '''Return at most the first ``how_many`` messages from the source.

        Fixed edge case: ``how_many <= 0`` now yields an empty list
        (the original always included the first message).
        '''
        self.get_msgs()
        first_msgs = self.__msg_list[:max(how_many, 0)]
        if update:
            self.__msg_list = first_msgs
        return first_msgs

    def remove_puncts_and_special_chars(self, msgs, update=False):
        '''Strip surrounding punctuation, then delete special characters
        from inside each message.

        NOTE(review): ':' and '\\' are absent from the special set --
        kept from the original behaviour; confirm this is intended.
        '''
        if msgs is None:
            msgs = self.__msg_list

        special_chars = "!@#$%^&*()_+=-~`<>?,./;'{}|[]\""
        clean_msgs = list()

        for msg in msgs:
            msg = msg.strip(string.punctuation)
            # Single filtering pass instead of one .replace() per char.
            msg = ''.join(c for c in msg if c not in special_chars)
            clean_msgs.append(msg)

        if update:
            self.__msg_list = clean_msgs
        return clean_msgs

    def convert_to_lowercase(self, msgs, update=False):
        '''Lowercase every message.

        Uses str.lower(); string.lower() was removed in Python 3.
        '''
        if msgs is None:
            msgs = self.__msg_list

        lowercase_msgs = [msg.lower() for msg in msgs]

        if update:
            self.__msg_list = lowercase_msgs
        return lowercase_msgs

    def remove_stopwords(self, msgs, update=False):
        '''Drop English stopwords from each message.

        Every kept word is followed by one space (original output
        format preserved).
        '''
        if msgs is None:
            msgs = self.__msg_list

        english_stops = set(stopwords.words('english'))
        clean_msgs = list()

        for msg in msgs:
            kept = [w for w in msg.split(" ") if w not in english_stops]
            # join is linear; the original per-word += was quadratic.
            clean_msgs.append("".join(w + " " for w in kept))

        if update:
            self.__msg_list = clean_msgs
        return clean_msgs

    def stem_msgs(self, msgs, update=False):
        '''Porter-stem every word; trailing-space format preserved.'''
        if msgs is None:
            msgs = self.__msg_list

        stemmer = PorterStemmer()
        clean_msgs = list()

        for msg in msgs:
            stems = [stemmer.stem(w) for w in msg.split(" ")]
            clean_msgs.append("".join(w + " " for w in stems))

        if update:
            self.__msg_list = clean_msgs
        return clean_msgs

    def lemmatize_msgs(self, msgs, update=False):
        '''Lemmatise each word as a noun, falling back to the verb lemma
        when the noun lemma left the word unchanged.

        Trailing-space output format preserved.
        '''
        if msgs is None:
            msgs = self.__msg_list

        lemmatizer = WordNetLemmatizer()
        clean_msgs = list()

        for msg in msgs:
            lemmas = list()
            for word in msg.split(" "):
                lem_word = lemmatizer.lemmatize(word)
                if lem_word == word:  # noun lemma unchanged -> try verb
                    lem_word = lemmatizer.lemmatize(word, 'v')
                lemmas.append(lem_word)
            clean_msgs.append("".join(w + " " for w in lemmas))

        if update:
            self.__msg_list = clean_msgs
        return clean_msgs

    def get_tf_idf(self):
        '''Placeholder -- TF-IDF extraction is not implemented.'''
        pass

    def process_all(self, update=False):
        '''Run punctuation removal, lowercasing and stopword removal on
        the cached messages (stemming/lemmatising are NOT included).'''
        msgs = self.remove_puncts_and_special_chars(self.__msg_list)
        msgs = self.convert_to_lowercase(msgs)
        msgs = self.remove_stopwords(msgs)

        if update:
            self.__msg_list = msgs
        return msgs

    def extract_stats(self, msgs):
        '''Gather corpus statistics (message/word counts, per-message
        sizes, flat word list, vocabulary) onto the instance for later
        use by print_stats().  Pass None to use the cached messages.'''
        if msgs is None:
            msgs = self.__msg_list

        self.__num_msgs = len(msgs)
        self.__num_words = 0
        self.__size_of_all_msgs = list()
        self.__all_words_list = list()
        word_parts = list()  # join once; += on a str was quadratic

        for msg in msgs:
            words_in_msg = msg.split(" ")
            self.__size_of_all_msgs.append(len(words_in_msg))

            for word in words_in_msg:
                if word != '':
                    self.__num_words += 1
                    word_parts.append(word + " ")
                    self.__all_words_list.append(word)

        self.__all_words_str = "".join(word_parts)
        self.__unique_words = set(self.__all_words_list)
        self.__num_unique_words = len(self.__unique_words)

    def get_word_freq(self, all_words_list):
        '''Return a Counter mapping word -> frequency.  Pass None to use
        the word list gathered by extract_stats().'''
        if all_words_list is None:
            all_words_list = self.__all_words_list
        return Counter(all_words_list)

    def msgs_to_words_list(self, msgs):
        '''Flatten messages into one list of space-separated tokens
        (empty tokens from repeated spaces are kept, matching the
        original behaviour).'''
        if msgs is None:
            msgs = self.__msg_list

        all_words_list = list()
        for msg in msgs:
            all_words_list.extend(msg.split(" "))
        return all_words_list

    def print_stats(self):
        '''Print the statistics gathered by extract_stats(), but only
        when debugging is enabled.'''
        if not self.__debug_on:
            return
        # '//' keeps the integer average the original '/' produced
        # under Python 2.
        print('-' * 80)
        print('Number of Messages,\t\t\t %s' % self.__num_msgs)
        print('Number of Words,\t\t\t %s' % self.__num_words)
        print('Number of Unique Words,\t\t\t %s' % self.__num_unique_words)
        print('Min size of Messages,\t\t\t %s' % min(self.__size_of_all_msgs))
        print('Max size of Messages,\t\t\t %s' % max(self.__size_of_all_msgs))
        print('Average size of Messages,\t\t %s' % (sum(self.__size_of_all_msgs) // self.__num_msgs))
        print('Median of Message sizes,\t\t %s' % numpy.median(self.__size_of_all_msgs))
        print('Mode of Message sizes,\t\t\t %s' % stats.mode(self.__size_of_all_msgs)[0][0])
        print('Most frequent top 10 words,\t\t %s' % self.get_word_freq(None).most_common(10))
        print('-' * 80)

    def store_msgs(self, label):
        '''Write every cached message as "<msg>,<label>" lines to
        ../Data/Processed<label>.txt.'''
        with open("../Data/Processed" + label + ".txt", "w+") as storage_file_fd:
            for msg in self.__msg_list:
                # write(), not writelines(): a single string is written
                storage_file_fd.write(msg + "," + label + "\n")

    def exp(self, process, name):
        '''Run the full cleaning pipeline on ``process``, printing stats
        after each stage, then store the result under ``name``.

        NOTE(review): operates on the ``process`` argument rather than
        ``self`` -- kept from the original design.
        '''
        orig_msgs = process.get_first_msgs(self.__dataset_size, True)

        process.extract_stats(orig_msgs)
        print("Original")
        process.print_stats()

        print("Original + No puncts & special chars")
        process.remove_puncts_and_special_chars(None, True)
        process.extract_stats(None)
        process.print_stats()

        print("Original + No puncts & special chars + Lowercased")
        process.convert_to_lowercase(None, True)
        process.extract_stats(None)
        process.print_stats()

        process.get_tf_idf()
        print("Original + No puncts & special chars + Lowercased + No Stopwords")
        process.remove_stopwords(None, True)
        process.extract_stats(None)
        process.print_stats()

        # The original ran the stemming stage twice (copy-paste bug);
        # it is applied once here.
        print("Original + No puncts & special chars + Lowercased + No Stopwords + Stemming")
        process.stem_msgs(None, True)
        process.extract_stats(None)
        process.print_stats()

        print("Original + No puncts & special chars + Lowercased + No Stopwords + Stemming + Lemmatize")
        process.lemmatize_msgs(None, True)
        process.extract_stats(None)
        process.print_stats()

        process.store_msgs(name)

    def _read_labelled_csv(self, path):
        '''Read (text, label) pairs from a comma-separated file.'''
        pairs = list()
        with open(path, 'r') as msgfd:
            for row in csv.reader(msgfd, delimiter=','):
                pairs.append((row[0], row[1]))
        return pairs

    def load_datasets(self):
        '''Load the processed ham and spam files into the ham, spam and
        combined training lists; return the combined list.'''
        self.__training_msgs_hams = self._read_labelled_csv("../Data/ProcessedHam.txt")
        self.__training_msgs_spams = self._read_labelled_csv("../Data/ProcessedSpam.txt")
        self.__training_msgs = self.__training_msgs_hams + self.__training_msgs_spams
        return self.__training_msgs

    def generate_features_set(self):
        '''Placeholder -- feature extraction is not implemented.'''
        self.__feature_set = {}

    def get_msg_list(self):
        '''Return the cached (possibly processed) message list.'''
        return self.__msg_list

    def get_training_set_hams(self):
        '''Return the (text, label) pairs loaded from the ham file.'''
        return self.__training_msgs_hams

    def get_training_set_spams(self):
        '''Return the (text, label) pairs loaded from the spam file.'''
        return self.__training_msgs_spams

    def plot_dict(self, dict2plot):
        '''Show a bar chart of a {label: value} dictionary.'''
        pyplot.bar(range(len(dict2plot)), dict2plot.values(), align='center')
        pyplot.xticks(range(len(dict2plot)), dict2plot.keys())
        pyplot.show()

    def get_prob_dist(self, msgs):
        '''Return a DictionaryProbDist over word frequencies in ``msgs``.

        Fixed: the original built two independent frequency tables and
        zipped the keys of one with the values of the other; the table
        is built exactly once here.
        '''
        word_freq = self.get_word_freq(self.msgs_to_words_list(msgs))
        return DictionaryProbDist(dict(word_freq))

if __name__ == '__main__':

    DATASET_SIZE = 747  # messages taken from each corpus

    def build_model(name):
        '''Run the full pipeline for one corpus; return (process, dist).'''
        proc = OptiPreProcess("../Data/" + name + ".csv", DATASET_SIZE)
        proc.exp(proc, name)
        proc.extract_stats(None)
        return proc, proc.get_prob_dist(proc.get_msg_list())

    # The Ham and Spam pipelines were duplicated verbatim in the
    # original; build_model() runs each once.
    ham_process, pdham = build_model("Ham")
    training_set = ham_process.load_datasets()
    random.shuffle(training_set)

    spam_process, pdspam = build_model("Spam")
    training_set = spam_process.load_datasets()
    random.shuffle(training_set)

    ham_count = 0
    spam_count = 0

    for msg in spam_process.get_training_set_spams():
        hamscore = 0
        spamscore = 0

        # Fixed: the original iterated over msg[0] directly, scoring
        # individual *characters*; the distributions are over words.
        for word in msg[0].split(" "):
            hamscore += pdham.prob(word)
            spamscore += pdspam.prob(word)

        # Ties count as spam, matching the original else-branch.
        if hamscore > spamscore:
            ham_count += 1
        else:
            spam_count += 1

    # %-formatting keeps the "ham spam" output identical on Py2 and Py3.
    print('%d %d' % (ham_count, spam_count))