
from nltk.stem import WordNetLemmatizer

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sentence_util import get_tokens
from nltk.stem.lancaster import LancasterStemmer

class Nltk_util():
    """Text-normalization helpers built on NLTK.

    Wraps stop-word filtering, Lancaster stemming and WordNet
    lemmatization behind simple string-in / string-out methods.
    Relies on the module-level imports ``stopwords``, ``word_tokenize``,
    ``WordNetLemmatizer``, ``LancasterStemmer`` and the project helper
    ``get_tokens`` (presumably strips punctuation — confirm in
    sentence_util).
    """

    def __init__(self):
        # Build the stop-word set once so membership tests are O(1).
        self.stop_words = set(stopwords.words('english'))
        self.wordnet_lemmatizer = WordNetLemmatizer()
        self.lancaster_stemmer = LancasterStemmer()

    def unicode_2_str(self, unicode):
        """Convert a unicode object to an escaped byte string (Python 2).

        Bug fix: on Python 3 the ``string_escape`` codec no longer
        exists, so the original body always raised ``LookupError``.
        There ``str`` is already unicode, so the input is returned
        unchanged instead of crashing.

        NOTE: the parameter name shadows the Python 2 builtin
        ``unicode``; kept as-is for backward compatibility with any
        keyword-argument callers.
        """
        try:
            return unicode.encode('unicode-escape').decode('string_escape')
        except LookupError:
            # Python 3: codec removed; the text is already a str.
            return unicode

    def filter_stop_words(self, title):
        """Return *title* with English stop words removed.

        Tokenizes with ``word_tokenize`` and re-joins with single
        spaces. Matching is case-sensitive — callers that want "The"
        filtered must lower-case first (the *_title / *_process
        methods below do).
        """
        word_tokens = word_tokenize(title)
        filtered_sentence = [w for w in word_tokens if w not in self.stop_words]
        return ' '.join(filtered_sentence)

    def _stem_filtered_title(self, title):
        """Lower-case, drop stop words, then Lancaster-stem each token.

        Shared implementation for :meth:`stem_process` and
        :meth:`stem_process_title`, which were duplicate code.
        """
        _title = self.filter_stop_words(title.lower())
        tokens = get_tokens(_title)
        return ' '.join(self.lancaster_stemmer.stem(token) for token in tokens)

    def stem_process(self, title):
        """Stop-word-filter and Lancaster-stem *title* (lower-cased first)."""
        return self._stem_filtered_title(title)

    def stem_process_title(self, title):
        """Alias of :meth:`stem_process`; kept for API compatibility."""
        return self._stem_filtered_title(title)

    def stem_process_venue(self, venue):
        """Stop-word-filter and Lancaster-stem *venue*.

        Unlike the title variants, the input is deliberately NOT
        lower-cased and the re-tokenization uses a plain ``split(' ')``
        instead of ``get_tokens`` (original author's note: "venue
        doesn't fit stem").
        """
        word_tokens = word_tokenize(venue)
        filtered_sentence = [w for w in word_tokens if w not in self.stop_words]
        tokens = ' '.join(filtered_sentence).split(' ')
        return ' '.join(self.lancaster_stemmer.stem(token) for token in tokens)

    def lemma_process(self, title):
        """Stop-word-filter *title* and WordNet-lemmatize each token.

        Each token is lemmatized successively as noun, verb, adjective,
        adverb and satellite adjective so whichever POS applies gets
        reduced. Input is NOT lower-cased here.
        """
        word_tokens = word_tokenize(title)
        filtered_sentence = [w for w in word_tokens if w not in self.stop_words]
        tokens = get_tokens(' '.join(filtered_sentence))
        token_list = []
        for token in tokens:
            # First call uses the default pos='n'; chain the rest.
            for pos in ('n', 'v', 'a', 'r', 's'):
                token = self.wordnet_lemmatizer.lemmatize(token, pos=pos)
            token_list.append(token)
        return ' '.join(token_list)

    def lemma_process_title(self, title):
        """Lower-case, stop-word-filter and lemmatize *title*.

        Only noun and verb lemmatization are applied (adjective/adverb
        passes were intentionally disabled in the original).
        """
        _title = self.filter_stop_words(title.lower())
        token_list = []
        for token in get_tokens(_title):
            token = self.wordnet_lemmatizer.lemmatize(token, pos='n')
            token = self.wordnet_lemmatizer.lemmatize(token, pos='v')
            token_list.append(token)
        return ' '.join(token_list)


