from sklearn.base import ClusterMixin, ClassifierMixin
from sklearn.dummy import DummyClassifier
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.utils import safe_asarray as safe
from sklearn.utils import atleast2d_or_csr as csr
from sklearn.base import clone
from collections import defaultdict, Counter
from scipy import sparse
import numpy
import uuid
__author__ = 'panagiotis'


def random_name():
    """Yield an endless stream of random 32-character hex identifiers.

    Used to label samples when the caller supplies no names.
    """
    make_id = uuid.uuid4
    while True:
        yield make_id().hex


class AdaptiveClustering(ClusterMixin):
    def __init__(self, threshold=1.0):
        self.Clusters = defaultdict(lambda: {"centroid": 0, "population": 0.0})
        self.Map = defaultdict(str)
        self.shrink_threshold = threshold

    def fit(self, vectors, names=None, threshold=None, debug=False):
        # Fits clustering algorithm to given vectors.
        if threshold:
            self.shrink_threshold = threshold
        # if there are no names given for the samples, create random ones
        if names:
            mix = zip(names, vectors)
        else:
            mix = zip(random_name(), vectors)
        # for every sample to cluster,
        for (vector_id, vector_) in mix:
            # make sure vector is of the right form
            vector = safe(vector_)
            if sparse.issparse(vector):
                vector = vector.todense()
            similarities = Counter()
            cluster_ids = self.Clusters.keys()
            # iterate through formed clusters
            # and measure cosine similarity with centroids
            for cluster_id in cluster_ids:
                similarities[cluster_id] = cosine_similarity(vector, self.Clusters[cluster_id]['centroid'])
            # if there were clusters to begin with get best similarity
            try:
                best_sim = similarities.most_common(1)[0][1]
            # unless there are no clusters yet, so make sure best similarity is below threshold
            except IndexError:
                best_sim = -1
            # if best similarity is above threshold
            if best_sim > self.shrink_threshold:
                # get the id of the cluster this sample is going to be added
                cluster_id = similarities.most_common(1)[0][0]
                centroid = self.Clusters[cluster_id]['centroid']
                # multiply centroid with cluster population
                centroid *= self.Clusters[cluster_id]['population']
                # adjust centroid by sample
                centroid += vector
                # increase population of cluster
                self.Clusters[cluster_id]['population'] += 1
                # divide centroid with population
                centroid /= self.Clusters[cluster_id]['population']
                # store new centroid
                self.Clusters[cluster_id]['centroid'] = centroid
            else:
                # create a new cluster with 1 item and sample vector as its centroid
                cluster_id = uuid.uuid4().hex
                self.Clusters[cluster_id]['items'] = 1
                self.Clusters[cluster_id]['centroid'] = vector
            # inform map that sample with id was added to cluster with id
            self.Map[vector_id] = cluster_id
            if debug:
                print "fitting progress", 100.0 * float(len(self.Map)) / len(mix)

    def transform(self, X, debug=False):
    # Returns the vector of the closest centroid.
        # make sure X is of the right form
        if not hasattr(X, "__iter__"):
            return 0
        # if not X.shape[1] == self.Clusters[self.Clusters.keys()[0]].shape[0]:
        #     pass  # throw exception
        y = []
        # for every sample to transform
        if debug:
            i = 0
            try:
                size_of_X = X.shape[0]
            except AttributeError:
                size_of_X = len(X)
            print "transforming X vectors to cluster vectors"
        for vector_ in X.__iter__():
            # make sure vector is of the right form
            vector = safe(vector_)
            similarities = Counter()
            cluster_ids = self.Clusters.keys()
            # iterate through formed clusters
            # and measure cosine similarity with centroids
            for cluster_id in cluster_ids:
                similarities[cluster_id] = cosine_similarity(vector, self.Clusters[cluster_id]['centroid'])
            # append to results the centroid of the cluster with best similarity
            cluster_id, best_sim = similarities.most_common(1)[0]
            y.append(self.Clusters[cluster_id]['centroid'])
            if debug:
                i += 1
                print "transform progress", 100.0 * i / size_of_X
        return y

    def predict(self, X, debug=False):
    # Predict the closest cluster each sample in X belongs to.
        # make sure X is of the right form
        if not hasattr(X, "__iter__"):
            return 0
        # if not X.shape[1] == self.Clusters[self.Clusters.keys()[0]].shape[0]:
        #     pass  # throw exception
        y = []
        # for every sample to transform
        if debug:
            i = 0
            try:
                size_of_X = X.shape[0]
            except AttributeError:
                size_of_X = len(X)
            print "predicting cluster ids from X"
        for vector_ in X.__iter__():
            # make sure vector is of the right form
            vector = safe(vector_)
            similarities = Counter()
            cluster_ids = self.Clusters.keys()
            # iterate through formed clusters
            # and measure cosine similarity with centroids
            for cluster_id in cluster_ids:
                similarities[cluster_id] = cosine_similarity(vector, self.Clusters[cluster_id]['centroid'])
            # append to results the id of the cluster with best similarity
            cluster_id, best_sim = similarities.most_common(1)[0]
            y.append(cluster_id)
            if debug:
                i += 1
                print "prediction progress", 100.0 * i / size_of_X
        return y

    def number_of_clusters(self):
        return len(self.Clusters)

    def training_samples(self):
        return len(self.Map)


class ArtMAP(ClassifierMixin):
    def __init__(self, classifier=None, choices=list):
        self.X_cluster = AdaptiveClustering()
        self.fitted_X = False
        self.Y_cluster = AdaptiveClustering()
        self.fitted_Y = False
        self.choices = choices
        if classifier:
            self.classifier_womb = classifier
        else:
            self.classifier_womb = DummyClassifier(strategy="stratified")
        self.Targets = []
        self.classifiers = dict()

    def _prefit_X(self, X, names=None, threshold=None):
    # Fit clustering algorithm of X beforehand
        self.X_cluster.fit(vectors=X, names=names, threshold=threshold)
        self.fitted_X = True

    def _prefit_Y(self, X, names=None, threshold=None):
    # Fit clustering algorithm of Y beforehand
        self.Y_cluster.fit(vectors=X, names=names, threshold=threshold)
        self.fitted_Y = True

    def fit(self, X, Y, z, threshold_x=None, threshold_y=None, debug=False):
        if self.choices == []:
            self.choices = sorted(set(z))
    # Fit the model using X and Y as training data and z as target values
        # check if X, Y, z are arrays or lists! (usage has lists!)
        # X = safe(X)
        # Y = safe(Y)
        # z = safe(z)
        if debug:
            from datetime import datetime
            t0 = datetime.now()
            print "starting ArtMAP fit process"
        # if prefit hasn't been called, fit clustering algorithm of X with data
        if not self.fitted_X:
            self.X_cluster.fit(vectors=X, threshold=threshold_x)
        # if prefit hasn't been called, fit clustering algorithm of Y with data
        if not self.fitted_Y:
            self.Y_cluster.fit(vectors=Y, threshold=threshold_y)
        ratings = defaultdict(list)
        # for every target to be learned, predict the X cluster it belongs to
        # and append the centroid of the Y cluster it belongs to
        if debug:
            print "creating cluster to cluster MAP"
        # for row in xrange(0, len(z)):
        #     cid = self.X_cluster.predict(X[row])[0]
        #     # perhaps group them and find ratings mean !!!
        #     ratings[cid].append(row)  # (self.Y_cluster.transform(Y[row])[0], z[row])
        #     if debug:
        #         print "progress", 100.0 * row / len(z)
        row_to_cluster = self.X_cluster.predict(X, debug=debug)
        cluster_ids = set(row_to_cluster)
        for cid in cluster_ids:
            ratings[cid] = [ind for ind, val in enumerate(row_to_cluster) if val == cid]
            if debug:
                print "mapping progress", 100.0 * len(ratings) / len(cluster_ids)
        # for every cluster in the X dimension(users), create a classifier of the given type
        if debug:
            print "fitting cluster classifiers"
        for cluster in ratings.keys():
            this_y = self.Y_cluster.transform([Y[row] for row in ratings[cluster]], debug=debug)
            this_z = [z[row] for row in ratings[cluster]]
            try:
                # fit cluster's classifier with ratings gathered
                clf = clone(self.classifier_womb)
                clf.fit(this_y, this_z)
            except ValueError:
                # because some classifiers (if not all) demand that at least 2 classes exist,
                # a value error exception might appear. If this happens, a dummy classifier
                # replaces the predefined one, returning only one prediction
                print "ValueError exception in classifier fitting for cluster:", cluster
                clf = DummyClassifier(strategy="stratified")
                clf.fit(this_y, this_z)
            # trained classifier is stored
            self.classifiers[cluster] = clf
            if debug:
                print "cluster fitting progress", 100.0 * len(self.classifiers) / len(ratings)
        if debug:
            print "fitting completed in", str(datetime.now() - t0)

    def predict(self, X, Y, debug=False):
        # Predict the rating for every x,y
        z = []
        nX = self.X_cluster.predict(X=X, debug=debug)
        nY = self.Y_cluster.transform(X=Y, debug=debug)
        if sparse.issparse(nY):
            nY = nY.todense()
        for row in xrange(0, len(nX)):
            try:
                z.append(self.classifiers[nX[row]].predict(nY[row])[0])
            except:
                print "exception in classifier", nX[row]
                z.append(self.choices[0])
            if debug:
                if hasattr(X, "shape"):
                    X_len = X.shape[0]
                else:
                    X_len = len(nX)
                print "model prediction progress", 100.0 * row / X_len
        return z

    # def score(self, X, Y, z, pos_label="P"):
    #     if isinstance(self.classifier_womb, ClassifierMixin):
    #         from sklearn.metrics import f1_score
    #         return f1_score(y_true=z, y_pred=numpy.array(self.predict(X, Y)), pos_label=pos_label)
    #     else:
    #         from sklearn.metrics import r2_score
    #         return r2_score(y_true=z, y_pred=self.predict(X, Y))


