from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import normalize as sk_normalize
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import scale
from nltk import sent_tokenize, word_tokenize, pos_tag
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import sentiwordnet as swn
from nltk.corpus import wordnet as wn
from collections import defaultdict, Counter
import numpy
from scipy import sparse
from gensim.models import Word2Vec
__author__ = 'panagiotis'


class old_SentimentTransformer(BaseEstimator, TransformerMixin):
    """Maps documents to five sentiment features using SentiWordNet scores.

    Each vocabulary feature learned by the wrapped vectorizer is assigned a
    [positive, objective, negative, unknown] score vector; transformed
    documents are the vectorizer output projected onto those vectors, plus
    a positive-minus-negative "difference" column.
    """

    def __init__(self, vectorizer=None):
        # Default to TF-IDF weighting when no vectorizer is supplied.
        self.vectorizer = vectorizer if vectorizer else TfidfVectorizer()
        self.sentiments = None  # (n_features, 4) score matrix, built in fit()

    @staticmethod
    def _strip_neg(feature):
        # Remove a literal "_neg" suffix. The original used
        # feature.strip("_neg"), which strips any of the characters
        # '_', 'n', 'e', 'g' from BOTH ends (e.g. "negate_neg" -> "at").
        return feature[:-len("_neg")] if feature.endswith("_neg") else feature

    def fit(self, X, y=None):
        """Fit the vectorizer and build the per-feature sentiment matrix."""
        self.vectorizer.fit(X)
        # Words with no SentiWordNet entry get [0, 0, 0, 1]
        # (all mass on the "unknown" slot).
        synsets = defaultdict(lambda: numpy.array([0.0, 0.0, 0.0, 1.0]),
                              {w.synset.name().split(".")[0]:
                                   numpy.array([w.pos_score(), w.obj_score(), w.neg_score(), 0.0])
                               for w in swn.all_senti_synsets()})
        # Negated tokens ("word_neg") swap the positive and negative scores.
        self.sentiments = numpy.array([synsets[feature] if not feature.endswith("_neg")
                                       else synsets[self._strip_neg(feature)][[2, 1, 0, 3]]
                                       for feature in self.vectorizer.get_feature_names()])
        return self

    def transform(self, X):
        """Project vectorized documents onto the sentiment score matrix."""
        if not len(self.vectorizer.get_feature_names()) == self.sentiments.shape[0]:
            raise ValueError('SentimentTransformer: vectorizer and sentiments Dimension Mismatch')
        t = self.vectorizer.transform(X).dot(self.sentiments)
        # Append positive-minus-negative as a fifth "difference" column.
        return numpy.c_[t, t[:, 0]-t[:, 2]]

    @staticmethod
    def get_feature_names():
        # Fixed: was declared as a staticmethod taking `self`, so calling
        # instance.get_feature_names() raised TypeError.
        return ["positive", "neutral", "negative", "unknown", "difference"]


class SentimentFeatures(BaseEstimator, TransformerMixin):
    """Per-document sentiment and surface statistics via SentiWordNet.

    Features per document (see get_feature_names): sentence count, tokens
    per sentence, noun count, mean positive score, mean negative score,
    mean sentiment, mean adjective sentiment, mean adverb sentiment.
    """

    def __init__(self, norm=None):
        # Penn Treebank POS tag -> WordNet POS constant; unknown tags map
        # to None (and are then skipped by the senti_synsets lookup).
        self.tr = defaultdict(lambda: None, {"VB": wn.VERB, "VBD": wn.VERB, "VBG": wn.VERB, "VBN": wn.VERB,
                                             "VBP": wn.VERB, "VBZ": wn.VERB, "JJ": wn.ADJ, "JJR": wn.ADJ, "JJS": wn.ADJ,
                                             "NN": wn.NOUN, "NNS": wn.NOUN, "NNP": wn.NOUN, "NNPS": wn.NOUN,
                                             "RB": wn.ADV, "RBR": wn.ADV, "RBS": wn.ADV})
        self.norm = norm

    def normalize(self, X):
        """Row-normalise X when a norm is configured; otherwise pass through."""
        if self.norm:
            return sk_normalize(X, norm=self.norm)
        return X

    @staticmethod
    def get_feature_names():
        # Fixed: was declared as a staticmethod taking `self`, so calling
        # instance.get_feature_names() raised TypeError.
        return numpy.array(['#sentences', '#toks_per_sent', '#nouns', 'positive', 'negative', 'mean', 'mean_adj_sent', 'mean_adv_sent'])

    def fit(self, x, y=None, **attrib):
        """Stateless fit; optionally updates the `norm` parameter."""
        if 'norm' in attrib:
            self.norm = attrib['norm']
        return self

    def transform(self, documents, **attrib):
        """Return an (n_documents, 8) matrix matching get_feature_names()."""
        results = defaultdict(list)
        for document in documents:
            counter = Counter()
            sentences = sent_tokenize(document)
            counter['sentences'] = len(sentences)
            for sentence in sentences:
                tokens = [(word.lower(), self.tr[pos]) for (word, pos) in pos_tag(word_tokenize(sentence))]
                counter['tokens'] += len(tokens)
                counter['number_of_nouns'] += sum(1 for (w, pos) in tokens if pos == wn.NOUN)
                counter['number_of_adjectives'] += sum(1 for (w, pos) in tokens if pos == wn.ADJ)
                counter['number_of_adverbs'] += sum(1 for (w, pos) in tokens if pos == wn.ADV)
                for (word, pos) in tokens:
                    # list() guards against senti_synsets returning a
                    # generator in newer NLTK releases (len() would fail).
                    synsets = list(swn.senti_synsets(word, pos))
                    if synsets:
                        # Score by the first (most common) sense only.
                        positive = synsets[0].pos_score()
                        negative = synsets[0].neg_score()
                        counter['total_positive'] += positive
                        counter['total_negative'] += negative
                        if pos == wn.ADJ:
                            counter['adj_positive'] += positive
                            counter['adj_negative'] += negative
                        elif pos == wn.ADV:
                            counter['adv_positive'] += positive
                            counter['adv_negative'] += negative
            results['sentences'].append(counter['sentences'])
            if counter['sentences'] > 0:
                results['toks_sent'].append(float(counter['tokens'])/counter['sentences'])
            else:
                results['toks_sent'].append(0)
            results['nouns'].append(counter['number_of_nouns'])
            if counter['tokens'] > 0:
                results['positive'].append(counter['total_positive']/counter['tokens'])
                results['negative'].append(counter['total_negative']/counter['tokens'])
                results['sentiment'].append((counter['total_positive']-counter['total_negative'])/counter['tokens'])
            else:
                results['positive'].append(0)
                results['negative'].append(0)
                results['sentiment'].append(0)
            if counter['number_of_adjectives'] > 0:
                results['adj_sent'].append((counter['adj_positive']-counter['adj_negative'])/counter['number_of_adjectives'])
            else:
                results['adj_sent'].append(0)
            if counter['number_of_adverbs'] > 0:
                results['adv_sent'].append((counter['adv_positive']-counter['adv_negative'])/counter['number_of_adverbs'])
            else:
                results['adv_sent'].append(0)
        # Fixed: the '#nouns' column was computed but omitted from the
        # output, leaving 7 columns against the 8 names declared by
        # get_feature_names().
        return self.normalize(numpy.array([results['sentences'], results['toks_sent'], results['nouns'],
                                           results['positive'], results['negative'], results['sentiment'],
                                           results['adj_sent'], results['adv_sent']]).T)


class Word2VecExtractor(BaseEstimator, TransformerMixin):
    """Represents each document as the mean of its word2vec token vectors.

    The stacked document vectors are column-scaled (zero mean, unit
    variance) via sklearn's scale() before being returned.
    """

    def __init__(self, norm=None, size=300):
        self.size = size  # embedding dimensionality
        self.norm = norm  # optional sklearn norm ('l1'/'l2'/'max') or None
        self.w2v = None   # trained gensim model, set in fit()

    def normalize(self, X):
        """Row-normalise X when a norm is configured; otherwise pass through."""
        if self.norm:
            return sk_normalize(X, norm=self.norm)
        return X

    def fit(self, x, y=None, size=None):
        """Train a word2vec model on the tokenised input documents."""
        if size:
            self.size = size
        self.w2v = Word2Vec([word_tokenize(s) for s in x], size=self.size)
        return self

    def transform(self, documents, **attrib):
        """Average the word vectors of each document's in-vocabulary tokens."""
        result = []
        for doc in documents:
            vec = numpy.zeros((1, self.size))
            count = 0
            for word in word_tokenize(doc):
                try:
                    vec += self.w2v[word].reshape((1, self.size))
                    count += 1
                except KeyError:
                    # Out-of-vocabulary token: skip it.
                    continue
            if count != 0:
                vec /= count
            result.append(vec)
        return scale(numpy.vstack(result))

    def get_feature_names(self):
        # range() instead of the Python-2-only xrange() so the class also
        # works under Python 3; behaviour is identical here.
        return ["Topic"+str(i) for i in range(self.size)]


class DocVectorizer(BaseEstimator, TransformerMixin):
    """Document embedding: TF-weighted combination of word2vec vectors.

    Fits a word2vec model on (optionally tokenised, lemmatised, stemmed)
    sentences, then represents each document as its TF vector projected
    onto the row-normalised, non-negatively shifted word-vector matrix.
    """

    def __init__(self, tokenizer=word_tokenize, tokenize=True, lemmatize=False, stem=False,
                 n_features=100, min_df=1, window=10, sample=1e-3, negative=5,
                 training="skip-gram", hierarchical=True, lowercase=False):
        par_sg = lambda x: 0 if x == "cbow" else 1
        par_hs = lambda x: 1 if x else 0
        self.lowercase = lowercase
        self.tokenize = tokenize
        self.n_features = n_features
        self.tokenizer = tokenizer
        self.lemma = WordNetLemmatizer()
        if not lemmatize:
            # Disable lemmatisation by monkey-patching it to the identity.
            self.lemma.lemmatize = lambda x: x
        self.stemmer = PorterStemmer()
        if not stem:
            self.stemmer.stem = lambda x: x
        self.w2v = Word2Vec(size=n_features, min_count=1, window=window, sample=sample, negative=negative, iter=1)
                            # , sg=par_sg(training), hs=par_hs(hierarchical))
        self.tfidf = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False, min_df=min_df, use_idf=False, norm='l2')
        self.features = None

    @staticmethod
    def _apply_preserving_neg(func, token):
        """Apply func to token, keeping a literal "_NEG" suffix intact.

        Fixes the original token.strip("_NEG"), which removed any of the
        characters '_', 'N', 'E', 'G' from BOTH ends of the token
        (e.g. "NEXT_NEG".strip("_NEG") -> "XT"), not just the suffix.
        """
        if token.endswith("_NEG"):
            return func(token[:-len("_NEG")]) + "_NEG"
        return func(token)

    def fit(self, documents, y=None):
        """Train the TF vectorizer and word2vec model; build the projection matrix."""
        if self.tokenize:
            sentences = [sentence.lower() if self.lowercase else sentence
                         for document in documents for sentence in sent_tokenize(document)]
            docs = [self.tokenizer(sentence) for sentence in sentences]
        else:
            docs = documents
        docs = [[self._apply_preserving_neg(self.lemma.lemmatize, token) for token in sentence]
                for sentence in docs]
        docs = [[self._apply_preserving_neg(self.stemmer.stem, token) for token in sentence]
                for sentence in docs]
        self.tfidf.fit(docs)
        # Seed the w2v vocabulary with every TF feature, repeated so each
        # one clears the count threshold before training.
        self.w2v.build_vocab([self.tfidf.get_feature_names() for i in range(0, 5)])
        self.w2v.train(docs)
        # Use the top-of-file sk_normalize alias instead of relying on the
        # bare `normalize` imported mid-file (it is the same sklearn
        # function; the local alias is unambiguous).
        a = sk_normalize(numpy.array([self.w2v[feat] if feat in self.w2v.vocab else numpy.zeros(self.n_features)
                                      for feat in self.tfidf.get_feature_names()]))
        # Shift so all projection-matrix entries are non-negative.
        self.features = a + abs(numpy.min(a))
        return self

    def transform(self, documents, y=None):
        """Project TF-transformed documents onto the word-vector matrix."""
        return self.tfidf.transform(documents).dot(self.features)

    def get_feature_names(self):
        # range() instead of the Python-2-only xrange() so the class also
        # works under Python 3.
        return ["Topic"+str(i) for i in range(self.n_features)]


class veryold_SentimentTransformer(BaseEstimator, TransformerMixin):
    """Sentence-averaged SentiWordNet projection of documents.

    Like old_SentimentTransformer, but transform() averages the sentiment
    projection over each document's sentences instead of projecting the
    whole document at once.
    """

    def __init__(self, vectorizer=None, tokenize=True):
        self.tokenize = tokenize
        # Default to TF-IDF weighting when no vectorizer is supplied.
        self.vectorizer = vectorizer if vectorizer else TfidfVectorizer()
        self.sentiments = None  # (n_features, 4) score matrix, built in fit()

    @staticmethod
    def _strip_neg(feature):
        # Remove a literal "_neg" suffix. The original used
        # feature.strip("_neg"), which strips any of the characters
        # '_', 'n', 'e', 'g' from BOTH ends (e.g. "negate_neg" -> "at").
        return feature[:-len("_neg")] if feature.endswith("_neg") else feature

    def fit(self, X, y=None):
        """Fit the vectorizer and build the per-feature sentiment matrix."""
        self.vectorizer.fit(X)
        # Words with no SentiWordNet entry get [0, 0, 0, 1]
        # (all mass on the "unknown" slot).
        synsets = defaultdict(lambda: numpy.array([0.0, 0.0, 0.0, 1.0]),
                              {w.synset.name().split(".")[0]:
                                   numpy.array([w.pos_score(), w.obj_score(), w.neg_score(), 0.0])
                               for w in swn.all_senti_synsets()})
        # Negated tokens ("word_neg") swap the positive and negative scores.
        self.sentiments = numpy.array([synsets[feature] if not feature.endswith("_neg")
                                       else synsets[self._strip_neg(feature)][[2, 1, 0, 3]]
                                       for feature in self.vectorizer.get_feature_names()])
        return self

    def transform(self, documents):
        """Average per-sentence sentiment projections for each document."""
        t = numpy.array([numpy.mean(self.vectorizer.transform(sent_tokenize(document))
                                    .dot(self.sentiments), axis=0) for document in documents])
        # Append positive-minus-negative as a fifth "difference" column.
        return numpy.c_[t, t[:, 0]-t[:, 2]]

    @staticmethod
    def get_feature_names():
        # Fixed: was declared as a staticmethod taking `self`, so calling
        # instance.get_feature_names() raised TypeError.
        return ["positive", "neutral", "negative", "unknown", "difference"]


class MultiDocVectorizer(BaseEstimator, TransformerMixin):
    """Concatenates DocVectorizer embeddings of several dimensionalities."""

    def __init__(self, n_features=(100,), **params):
        # One DocVectorizer per requested embedding size; all share params.
        self.vectorizers = [DocVectorizer(n_features=num, **params) for num in n_features]

    def fit(self, documents, y=None):
        """Fit every underlying vectorizer on the same documents."""
        for vectorizer in self.vectorizers:
            vectorizer.fit(documents, y)
        return self

    def transform(self, documents, y=None):
        """Horizontally stack each vectorizer's transform output."""
        parts = [vectorizer.transform(documents, y) for vectorizer in self.vectorizers]
        return numpy.hstack(parts)


from sklearn.preprocessing import normalize, MinMaxScaler


class SentimentTransformer(BaseEstimator, TransformerMixin):
    """Configurable SentiWordNet sentiment projection of documents.

    Documents are tokenised (optionally per sentence, lowercased,
    lemmatised, stemmed), vectorized with a count/tf/tfidf model, and
    projected onto per-feature [positive, objective, negative, unknown]
    SentiWordNet score vectors; two extra columns split the signed
    positive-minus-negative difference.
    """

    def __init__(self, vectorizer="count", tokenize=True, sentences=True, lemmatize=False, stem=False, lowercase=False):
        self.params = dict()
        self.params["vectorizer"] = vectorizer
        self.params["tokenize"] = tokenize
        self.params["sentences"] = sentences
        self.params["lemmatize"] = lemmatize
        self.params["stem"] = stem
        self.params["lowercase"] = lowercase
        self.lemma = WordNetLemmatizer()
        self.stemmer = PorterStemmer()
        self.sentiments = None   # (n_features, 4) score matrix, built in fit()
        self.vectorizer = None   # constructed in fit() from params["vectorizer"]
        # self.scaler = MinMaxScaler()

    def _process_token(self, token):
        """Lemmatize then stem a token, preserving a literal "_NEG" suffix.

        Fixes the original token.strip("_NEG"), which removed any of the
        characters '_', 'N', 'E', 'G' from BOTH ends of the token
        (e.g. "NEXT_NEG".strip("_NEG") -> "XT"), not just the suffix.
        """
        if token.endswith("_NEG"):
            return self.stemmer.stem(self.lemma.lemmatize(token[:-len("_NEG")])) + "_NEG"
        return self.stemmer.stem(self.lemma.lemmatize(token))

    def _preprocess(self, documents):
        """Turn raw documents into per-document lists of token lists.

        Returns one entry per document; each entry holds one token list per
        sentence when params["sentences"] is set, otherwise a single token
        list for the whole document. Shared by fit() and transform() so the
        two stay consistent (the original duplicated this pipeline).
        """
        if self.params["tokenize"]:
            docs = []
            for document in documents:
                units = sent_tokenize(document) if self.params["sentences"] else [document]
                processed = []
                for sentence in units:
                    text = sentence.lower() if self.params["lowercase"] else sentence
                    processed.append([self._process_token(token) for token in word_tokenize(text)])
                docs.append(processed)
            return docs
        # Pre-tokenised input: each document is already a token sequence.
        return [[[self._process_token(token) for token in document]] for document in documents]

    def fit(self, documents, y=None, **fit_params):
        """Fit the chosen vectorizer and build the SentiWordNet score matrix.

        Raises TypeError if params["vectorizer"] is not one of
        'count', 'tf' or 'tfidf'.
        """
        for param in fit_params:
            self.params[param] = fit_params[param]
        if not self.params["lemmatize"]:
            # Disable lemmatisation/stemming by patching to the identity.
            self.lemma.lemmatize = lambda x: x
        if not self.params["stem"]:
            self.stemmer.stem = lambda x: x

        if self.params["vectorizer"] == "count":
            self.vectorizer = CountVectorizer(tokenizer=lambda x: x, lowercase=False, min_df=1)
        elif self.params["vectorizer"] == "tf":
            self.vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False, min_df=1, use_idf=False, norm='l2')
        elif self.params["vectorizer"] == "tfidf":
            self.vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False, min_df=1, use_idf=True, norm='l2')
        else:
            raise TypeError("Wrong vectorizer parameter. Valid options: 'count', 'tf' or 'tfidf'")

        docs = self._preprocess(documents)
        # Flatten to one token list per sentence for vocabulary fitting.
        self.vectorizer.fit([tokens for doc in docs for tokens in doc])

        # Words with no SentiWordNet entry get [0, 0, 0, 1]
        # (all mass on the "unknown" slot).
        synsets = defaultdict(lambda: numpy.array([0.0, 0.0, 0.0, 1.0]),
                              {w.synset.name().split(".")[0]:
                                   numpy.array([w.pos_score(), w.obj_score(), w.neg_score(), 0.0])
                               for w in swn.all_senti_synsets()})

        # Negated tokens ("word_NEG") swap the positive and negative scores.
        self.sentiments = numpy.array([synsets[feature] if not feature.endswith("_NEG")
                                       else synsets[feature[:-len("_NEG")]][[2, 1, 0, 3]]
                                       for feature in self.vectorizer.get_feature_names()])
        return self

    def transform(self, documents, y=None):
        """Return (n_docs, 6): positive, neutral, negative, unknown, pos_diff, neg_diff."""
        docs = self._preprocess(documents)
        # Mean sentiment projection over a document's sentence units.
        t = numpy.vstack([self.vectorizer.transform(doc).dot(self.sentiments).mean(axis=0) for doc in docs])
        # Split the signed positive-negative difference into two
        # non-negative columns (pos_diff, neg_diff).
        s = numpy.zeros((t.shape[0], 2))
        diff = t[:, 0] - t[:, 2]
        s[diff > 0, 0] = diff[diff > 0]
        s[diff < 0, 1] = -diff[diff < 0]
        return numpy.c_[t, s]

    @staticmethod
    def get_feature_names():
        # Fixed: was declared as a staticmethod taking `self`, so calling
        # instance.get_feature_names() raised TypeError.
        return ["positive", "neutral", "negative", "unknown", "pos_diff", "neg_diff"]
