from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import wordnet as wn
from nltk.metrics import edit_distance
import enchant
import re
__author__ = 'panagiotis'


class ColumnSelector(BaseEstimator, TransformerMixin):
    """Pipeline helper that picks one positional field out of every row.

    Parameters
    ----------
    p : int, default 0
        Index of the element to extract from each item of the input.
    """

    def __init__(self, p=0):
        self.p = p

    def fit(self, x, y=None, **fit_params):
        """No-op fit; present only for sklearn pipeline compatibility."""
        return self

    def transform(self, x, **transform_params):
        """Return the ``p``-th element of every row in ``x``."""
        index = self.p
        return [row[index] for row in x]


class NegationTransformer(BaseEstimator, TransformerMixin):
    """Tag tokens inside a negation scope with a ``_NEG`` suffix.

    A scope opens at a negation cue ("not", "n't", "never", ...) and closes
    at the next sentence-level punctuation mark.  Note that the cue token
    itself is tagged as well (the flag is set before the token is emitted),
    which matches the original behaviour.
    """

    def __init__(self, tokenize=True, output="text", tokenizer=word_tokenize):
        # If tokenize is True the input documents are raw strings that are
        # split with ``tokenizer``; otherwise they are token lists already.
        self.tokenize = tokenize
        self.tokenizer = tokenizer
        # output: "text" -> space-joined string, "tokens" -> token list,
        # anything else -> None.
        self.output = output
        self.negations = {'never', 'no', 'nothing', 'nowhere', 'noone', 'none', 'not',
                          'havent', 'hasnt', 'cant', 'couldnt', 'shouldnt', 'wont',
                          'wouldnt', 'dont', 'doesnt', 'didnt', 'isnt', 'arent', 'aint', "n't"}
        self.punctuation = {".", ":", ";", "!", "?"}

    def fit(self, X, y=None):
        """No-op fit; present only for sklearn pipeline compatibility."""
        return self

    def tokenize_negations(self, text):
        """Lowercase every token and append ``_NEG`` while inside a scope."""
        tokens = self.tokenizer(text) if self.tokenize else text
        result = []
        neg_flag = False
        # FIX: the original looped with ``xrange(0, len(tokens))`` and
        # indexed in — ``xrange`` does not exist on Python 3 and the index
        # was only used to fetch the token.  Iterating directly is
        # equivalent and portable.
        for raw_token in tokens:
            token = raw_token.lower()
            if token in self.negations:
                neg_flag = True
            if token in self.punctuation:
                neg_flag = False
            result.append(token + "_NEG" if neg_flag else token)
        if self.output == "text":
            return " ".join(result)
        elif self.output == "tokens":
            return result
        else:
            return None

    def transform(self, documents, y=None):
        """Apply ``tokenize_negations`` to every document."""
        return [self.tokenize_negations(document) for document in documents]


class RemoveContractions(BaseEstimator, TransformerMixin):
    """Normalize noisy text before tokenization.

    Expands contractions ("can't" -> "cannot"), replaces numbers and special
    character runs with marker tokens (<NUMBER>, <TRAIL>, ...), collapses
    repeated characters ("looove" -> "love") and optionally spell-corrects
    tokens with enchant.
    """

    # replace punctuation so that sentence splitting can be done correctly
    def __init__(self, spell_check=False, output="text"):
        # output: "text" -> space-joined string per document,
        # "tokens" -> token list per document, anything else -> None.
        self.output = output
        # Ordered substitution rules — order matters: specific forms such as
        # "won't"/"can't" must fire before the generic "(\w+)n't" rule.
        patterns = [(r'(\d+)', " <NUMBER> "), (r'\'\'', "'"), (r'ma\'am', 'madam'), (r'o\'clock', 'of the clock'),
                    # FIX: dots are now escaped — the original r'a.m.' /
                    # r'p.m.' treated '.' as "any character" and over-matched.
                    (r'a\.m\.', 'in the morning'), (r'p\.m\.', 'in the evening'), (r'\'ow\'s\'at', 'how is that'),
                    (r'`', '\''), (r'won\'t', 'will not'), (r'can\'t', 'cannot'), (r'i\'m', 'i am'),
                    (r'ain\'t', 'is not'), (r'shan\'t', 'shall not'), (r'y\'all', 'you all'),
                    (r'(\w+)\'ll', r'\g<1> will'), (r'(\w+)\'ve', r'\g<1> have'), (r'(\w+)n\'t', r'\g<1> not'),
                    (r'(\w+)\'s', r'\g<1> is'), (r'(\w+)\'re', r'\g<1> are'), (r'(\w+)\'d', r'\g<1> would'),
                    (r'havent', 'have not'), (r'hasnt', 'has not'), (r'cant', 'can not'), (r'couldnt', 'could not'),
                    (r'shouldnt', 'should not'), (r'wont', 'will not'), (r'wouldnt', 'would not'), (r'dont', 'do not'),
                    (r'doesnt', 'does not'), (r'didnt', 'did not'), (r'isnt', 'is not'), (r'arent', 'are not'),
                    # FIX: the original r'\!{2,' was missing its closing brace,
                    # so the regex matched the literal text "!{2," instead of
                    # two or more exclamation marks.
                    (r'\'', ' <SLASH> '), (r'!{2,}', ' <EXCLAMATION> !'), (r'\*{2,}', ' <INTENSE> '),
                    (r'\.{2,}', ' <TRAIL> .'), (r'\.', ' . '), (r',', ' , '), (r'\/', ' <OR> '), (r'-', ' <AND> ')]
        self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]
        # Drops one character of a doubled pair per pass; applied repeatedly
        # in replace_repeating_chars until a fixed point is reached.
        self.repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
        self.repl = r'\1\2\3'
        self.spell_dict = enchant.Dict('en')
        self.max_dist = 2  # max edit distance accepted for a spelling fix
        self.spell_check = spell_check

    def fit(self, x, y=None):
        """No-op fit; present only for sklearn pipeline compatibility."""
        return self

    def replace_contractions(self, text):
        """Run every substitution rule over ``text``, in order."""
        s = text
        for (pattern, repl) in self.patterns:
            # call the compiled pattern directly rather than re.subn(pattern, ...)
            (s, _count) = pattern.subn(repl, s)
        return s

    def replace_repeating_chars(self, word):
        """Collapse doubled characters recursively until the word exists in
        WordNet or no doubled character remains ("looove" -> "love")."""
        if wn.synsets(word):
            return word
        repl_word = self.repeat_regexp.sub(self.repl, word)
        if repl_word != word:
            return self.replace_repeating_chars(repl_word)
        return repl_word

    def replace_misspelled(self, word):
        """Return enchant's top suggestion when ``word`` is misspelled and
        the suggestion is within ``max_dist`` edits; otherwise the word."""
        if self.spell_dict.check(word) or not word.isalpha():
            return word
        suggestions = self.spell_dict.suggest(word)
        if suggestions and edit_distance(word, suggestions[0]) <= self.max_dist:
            return suggestions[0]
        return word

    def transform(self, documents, y=None):
        """Lowercase, expand, sentence/word tokenize and clean each document."""
        result = [[self.replace_repeating_chars(word)
                  for sentence in sent_tokenize(self.replace_contractions(document.lower()))
                  for word in word_tokenize(sentence)]
                  for document in documents]
        if self.spell_check:
            result = [[self.replace_misspelled(token) for token in document] for document in result]
        if self.output == "text":
            return [" ".join(document) for document in result]
        elif self.output == "tokens":
            return result
        else:
            return None


class SpellingTransformer(BaseEstimator, TransformerMixin):
    """Spell-correct tokens with enchant, caching one correction per token.

    Parameters
    ----------
    tokenize : bool
        If True, raw text documents are word-tokenized first; otherwise the
        input is assumed to be pre-tokenized.
    output : str
        "text" -> space-joined string per document, "tokens" -> token list.
    max_dist : int
        Maximum edit distance accepted for a suggested correction.
    """

    def __init__(self, tokenize=True, output="text", max_dist=2):
        self.tokenize = tokenize
        self.output = output
        self.dictionary = dict()  # token -> correction cache, filled by fit
        self.spell_dict = enchant.Dict('en')
        self.max_dist = max_dist

    def replace_misspelled(self, word):
        """Return enchant's top suggestion when ``word`` is misspelled and
        the suggestion is within ``max_dist`` edits; otherwise the word."""
        if self.spell_dict.check(word) or not word.isalpha():
            return word
        suggestions = self.spell_dict.suggest(word)
        if suggestions and edit_distance(word, suggestions[0]) <= self.max_dist:
            return suggestions[0]
        return word

    def fit(self, documents, y=None):
        """Build the correction cache from every distinct token seen."""
        if self.tokenize:
            documents = [word_tokenize(document) for document in documents]
        # set comprehension — no intermediate list as in the original
        vocabulary = {term for document in documents for term in document}
        self.dictionary = {token: self.replace_misspelled(token) for token in vocabulary}
        return self

    def _fit(self, documents, y=None):
        """Alternative fit that derives the vocabulary via CountVectorizer."""
        if self.tokenize:
            vect = CountVectorizer(tokenizer=word_tokenize)
        else:
            vect = CountVectorizer(tokenizer=lambda x: x, lowercase=False)
        vect.fit(documents)
        self.dictionary = {token: self.replace_misspelled(token) for token in vect.get_feature_names()}
        return self

    def transform(self, documents):
        """Correct every token, consulting the cache first.

        Tokens unseen during fit are spell-checked on the fly but are NOT
        added to the cache (unlike fit_transform).
        """
        if self.tokenize:
            documents = [word_tokenize(document) for document in documents]
        # FIX: membership test on the dict itself instead of
        # ``word in self.dictionary.keys()`` — on Python 2 .keys() builds a
        # list, turning every lookup into an O(n) scan.
        result = [[self.dictionary[word] if word in self.dictionary else self.replace_misspelled(word)
                   for word in document] for document in documents]
        if self.output == "text":
            return [" ".join(document) for document in result]
        elif self.output == "tokens":
            return result
        else:
            return None

    def fit_transform(self, documents, y=None, **fit_params):
        """Correct documents while growing the cache with any new tokens."""
        result = []
        for document in documents:
            doc = []
            tokens = word_tokenize(document) if self.tokenize else document
            for word in tokens:
                # FIX: dict membership instead of the O(n) ``.keys()`` scan.
                if word not in self.dictionary:
                    self.dictionary[word] = self.replace_misspelled(word)
                doc.append(self.dictionary[word])
            if self.output == "text":
                result.append(" ".join(doc))
            elif self.output == "tokens":
                result.append(doc)
            # NOTE(review): an unrecognized ``output`` silently drops the
            # document here, while transform() returns None — kept as-is
            # for backward compatibility.
        return result
