# !/python/TextResuming/util/preprocess.py
# stemming
# Author : @ismailsunni

from stemming.porter2 import stem
import nltk
import unicodedata
import string
import util as util


#default value
# Path (relative to the working directory) of the extra-stopwords file
# consumed by removeStopWord() via load_stopwords().
stop_word_file_path = 'stopwords.txt'

def load_stopwords(file_path):
    '''Return a list of stopwords read from an external file.

		Lines are stripped of surrounding whitespace (the original kept the
		trailing newline, so loaded stopwords could never match tokenized
		words) and blank lines are skipped.

		@parameters
		@file_path : path of stopwords file.
		@return : list of stopwords, or an empty list if the file cannot
		be read (the original returned None, which crashed callers that
		extend() the result).
		'''

    try:
        # `with` guarantees the handle is closed even if reading fails.
        with open(file_path) as file_stopwords:
            return [line.strip() for line in file_stopwords if line.strip()]
    except Exception as e:  # Py2/Py3-compatible (was `except Exception, e`)
        util.debug('load_stopwords error ' + str(e))
        return []

def stem_list_of_sentence(listOfSentence):
    '''Stem every sentence in a list using the stemming module.

		@parameters
		@listOfSentence = list of sentence
        @return : a list of stemmed sentence.'''

    return [stem_sentence(sentence) for sentence in listOfSentence]

def stem_sentence(sentence):
    '''Stem a sentence using the stemming module.

        @parameters
		@sentence = sentence
        @return : stemmed sentence (stemmed tokens joined by single spaces).'''

    # Tokenize with nltk rather than a bare str.split(), so punctuation
    # is separated into its own tokens.
    tokens = nltk.word_tokenize(sentence)
    return ' '.join(stem(token) for token in tokens)

def stem_list_of_word(list_of_word):
    '''Stem every word in a list using the stemming module.

        @parameters
		@list_of_word = list of word
        @return : a list of stemmed word.'''

    return [stem(word) for word in list_of_word]

def normalizeString(stringAbsurd):
    '''Normalize a string from unicode character.

		@parameters
		@stringAbsurd = string that has unicode character
        @return : string without unicode character.'''

    # Decode as latin-1, decompose accents (NFKD), then drop anything
    # that does not fit into plain ASCII.
    decoded = stringAbsurd.decode('latin-1')
    decomposed = unicodedata.normalize('NFKD', decoded)
    return decomposed.encode('ascii', 'ignore')

def removePunctuation(stringAbsurd):
    '''Removing punctuation from a string.

		@parameters
		@stringAbsurd = string that has punctuation
        @return : the string with every character in string.punctuation
        removed.'''

    # Single O(n) pass with a set membership test, instead of one full
    # .replace() scan of the string per punctuation character (32 passes
    # in the original).
    punctuation = set(string.punctuation)
    return ''.join(ch for ch in stringAbsurd if ch not in punctuation)

def removeStopWord(listWord):
    '''Removing stop word from a list of word.

		@parameters
		@listWord = list of word that has stopword
        @return : new list of word with all stop words removed.'''

    # Combine nltk's built-in English list with the extra file-based one.
    stopWords = set(nltk.corpus.stopwords.words('english'))
    # load_stopwords may fail and return a falsy value; also strip each
    # entry, because lines read from file can carry a trailing newline
    # that would otherwise never match a tokenized word.
    extraStopWords = load_stopwords(stop_word_file_path) or []
    stopWords.update(word.strip() for word in extraStopWords)
    # One O(n) pass with set lookups, instead of rebuilding the list once
    # per stop word found (the original was O(n * m)).
    return [word for word in listWord if word not in stopWords]

def remove_values_from_list(the_list, val):
    '''Return a copy of the_list with every occurrence of val removed.'''

    kept = []
    for item in the_list:
        if item != val:
            kept.append(item)
    return kept

def foldCase(stringAbsurd):
    '''Lower-casinging the string. Obvious.'''

    lowered = stringAbsurd.lower()
    return lowered

def normalize_list_string(list_string):
    '''Normalize a list of string from unicode character.

		@parameters
		@list_string = list of string that has unicode character
        @return : list of string without unicode character.'''

    # NOTE: the original loop variable was named `string`, shadowing the
    # imported `string` module inside this function; renamed to avoid it.
    return [normalizeString(item) for item in list_string]

def remove_quotation(stringAbsurd):
    '''Removing quotation mark like \'sunni' or \"sunni" '''

    if '"' in stringAbsurd or "'" in stringAbsurd:
        # Strip a single leading / trailing quote character first.
        if stringAbsurd[0] in ('"', "'"):
            stringAbsurd = stringAbsurd[1:]
        if stringAbsurd[-1] in ('"', "'"):
            stringAbsurd = stringAbsurd[:-1]
        # Then drop quotes that sit next to a space, a period or a
        # newline, in the same order as the original replace chain.
        replacements = (
            (' "', ' '), ('" ', ' '),
            (" '", ' '), ("' ", ' '),
            ("'.", '.'), ('".', '.'),
            ("'\n", '\n'), ('"\n', '\n'),
        )
        for old, new in replacements:
            stringAbsurd = stringAbsurd.replace(old, new)

    return stringAbsurd

def main():
    '''Demo: stem three example sentences and print them before and after.'''

    sentence_one = "Seattle-based Aviation Partners (API), patent-holder of the blended winglet, is seeking the dismissal of an Airbus lawsuit aiming to clarify the intellectual property rights of the European airframer's alleged use of API's winglet on the Airbus A320's sharklets."
    sentence_two = "Air New Zealand currently serves as launch customer for the sharklets, and anticipates the delivery of its first A320 with winglets later this year."
    sentence_three = "The implications of the lawsuit filed by Airbus could nullify any royalty claims by API, if Airbus's sharklet design is found to use API's intellectual property."

    sentences = [sentence_one, sentence_two, sentence_three]
    util.print_index_list_dict(stem_list_of_sentence(sentences))
    util.print_index_list_dict(sentences)

if __name__ == '__main__':
    # Warm-up call; the returned stopword list is discarded here.
    load_stopwords('stopwords.txt')
    main()
