
import numpy as np
import scipy.special as ss

from sklearn.preprocessing import FunctionTransformer


def expit(x, lb=0, ub=255):
    """Inverse of `logit`: map reals in (-oo, +oo) to integers in [lb, ub].

    Bug fix: clip before the uint8 cast. Previously, for large x,
    round((ub-lb+1)*expit(x) + lb - 0.5) could reach ub+1 (e.g. 256),
    which wraps to 0 under np.uint8.
    """
    y = np.round((ub - lb + 1) * ss.expit(x) + lb - 0.5)
    return np.uint8(np.clip(y, lb, ub))

def logit(x, lb=0, ub=255):
    """Map integer values in [lb, ub] onto the real line (-oo, +oo).

    The half-unit shift centers each integer bin before applying the
    logistic inverse, so the companion `expit` can round back exactly.
    """
    span = ub - lb + 1
    centered = x - lb + 0.5
    return ss.logit(centered / span)


def normalize(X, axis=1):
    """Scale a matrix so each row (axis=1) or column (axis=0) sums to 1.

    Raises ValueError for any other axis value.
    """
    if axis == 0:
        return X / X.sum(axis=0)
    if axis == 1:
        return X / X.sum(axis=1)[:, None]
    raise ValueError('`axis` must be 0 or 1!')

def exp_normalize(X, axis=1):
    """Softmax along `axis`: exponentiate then normalize to sum to 1.

    Bug fix: subtract the per-slice maximum before exponentiating so
    large entries do not overflow np.exp. The result is mathematically
    unchanged because the constant factor cancels during normalization.
    """
    X = np.asarray(X)
    shift = X.max(axis=axis, keepdims=True)
    return normalize(np.exp(X - shift), axis=axis)


class LogitTransformer(FunctionTransformer):
    """FunctionTransformer mapping integer data in [lb, ub] to the real
    line via `logit`, with `expit` as the inverse.

    Bug fixes: the original referenced undefined names `_logit`/`_expit`
    (NameError on instantiation), never forwarded `lb`/`ub` to the
    functions, and defaulted `ub` to 256 while the paired module-level
    functions default to 255.
    """

    def __init__(self, lb=0, ub=255, *args, **kwargs):
        # store constructor params per sklearn convention
        self.lb = lb
        self.ub = ub
        super().__init__(
            func=logit, inverse_func=expit,
            kw_args={'lb': lb, 'ub': ub},
            inv_kw_args={'lb': lb, 'ub': ub},
            *args, **kwargs)


def compare(models: dict, X_train, X_test, y_train, y_test, timeit=False, predict=False):
    """Compare the performance of models.

    models: mapping name -> estimator with fit/score (and predict) methods
    timeit: if True, also record the wall-clock fit time of each model
    predict: if True, also record each model's prediction for the first
        test sample (column named after the true label)

    Prints the results as a markdown table and returns the DataFrame.
    """

    from collections import defaultdict
    import time

    import pandas as pd  # bug fix: `pd` was used below but never imported

    _result = defaultdict(list)

    for name, model in models.items():
        time1 = time.perf_counter()
        model.fit(X_train, y_train)
        _result['models'].append(name)
        if timeit:
            time2 = time.perf_counter()
            _result['times'].append(time2 - time1)
        _result['train scores'].append(model.score(X_train, y_train))
        _result['test scores'].append(model.score(X_test, y_test))
        if predict:
            _result[f'predict={y_test[0]}'].append(model.predict(X_test[:1])[0])
    result = pd.DataFrame(_result)
    print(result.round(decimals=4).to_markdown())
    return result


def visualize(model, X, y, axes, x1lim=None, x2lim=None, 
    markers=('o', '+', 'x', 's', 'v', '>', '<', '^', 'd'), 
    colors=('r', 'b', 'g', 'y', 'm', 'c', 'k'),
    backgroud_kw=None, scatter_kw=None):
    """To visualize the result of classifying.

    Fits `model` on (X, y), shades a 100x100 grid by predicted class
    (the background), then overlays the training points.

    number of classes <= 7 (limited by the `colors` palette)

    Returns the scatter handles of the training points (for legends).
    """

    # bug fix: dict defaults were mutable arguments shared across calls
    if backgroud_kw is None:
        backgroud_kw = {'alpha': 0.1, 'marker': 'x'}
    if scatter_kw is None:
        scatter_kw = {}

    model.fit(X, y)

    xlabel, ylabel = 'component 1', 'component 2'

    X = np.asarray(X)
    if x1lim is None:
        l, u = X[:, 0].min(), X[:, 0].max()
        d = (u - l) * 0.02
        x1lim = l - d, u + d
    if x2lim is None:
        # bug fix: second-axis limits come from column 1 (was column 0)
        l, u = X[:, 1].min(), X[:, 1].max()
        d = (u - l) * 0.02
        x2lim = l - d, u + d
    x1 = np.linspace(x1lim[0], x1lim[1], 100)
    x2 = np.linspace(x2lim[0], x2lim[1], 100)
    x2, x1 = np.meshgrid(x2, x1)
    X_ = np.column_stack((x1.ravel(), x2.ravel()))
    y_ = model.predict(X_)
    for k, m, c in zip(model.classes_, markers, colors):
        axes.scatter(X_[y_ == k, 0], X_[y_ == k, 1], c=c, **backgroud_kw)
    handles = [axes.scatter(X[y == k, 0], X[y == k, 1], c=c, marker=m, **scatter_kw)
            for k, m, c in zip(model.classes_, markers, colors)]
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    return handles

from sklearn.base import BaseEstimator, TransformerMixin

class BaseEncoder(TransformerMixin, BaseEstimator):
    """Mixin giving sklearn transformers encode/decode aliases."""

    def encode(self, X):
        """Alias for `transform`."""
        return self.transform(X)

    def decode(self, X):
        """Alias for `inverse_transform`."""
        return self.inverse_transform(X)

from sklearn.neural_network import MLPRegressor
from sklearn.utils.extmath import safe_sparse_dot

def inplace_relu(X):
    """Apply ReLU to the array `X` in place (clamp negatives to zero).

    Returns None; `X` itself is modified.
    """
    np.clip(X, 0, None, out=X)

class MLPEncoder(BaseEncoder, MLPRegressor):
    """Autoencoder built on a single-hidden-layer MLPRegressor.

    The network is trained to reproduce its input (`fit(X)` uses X as the
    target); `transform` projects onto the `n_components`-dimensional
    hidden layer and `inverse_transform` maps codes back to input space.

    Bug fixes vs. the original: `__init__` lacked `self` (so calling
    `MLPEncoder()` bound the instance to `n_components`), and both
    transform methods referenced bare `intercepts_` instead of
    `self.intercepts_` (NameError at call time).
    """

    def __init__(self, n_components=2, *args, **kwargs):
        super().__init__(hidden_layer_sizes=(n_components,), *args, **kwargs)

    def transform(self, X):
        """Encoder half: project X onto the hidden layer."""
        # NOTE(review): ReLU is hard-coded here; only correct when the
        # model was fit with activation='relu' (the MLPRegressor
        # default) -- confirm callers never override `activation`.
        activation = safe_sparse_dot(X, self.coefs_[0]) + self.intercepts_[0]
        inplace_relu(activation)
        return activation

    def inverse_transform(self, Y):
        """Decoder half: map hidden-layer codes back to input space."""
        activation = safe_sparse_dot(Y, self.coefs_[1]) + self.intercepts_[1]
        # MLPRegressor's output activation is the identity, so no
        # nonlinearity is applied here.
        return activation

    def fit(self, X, y=None):
        """Fit as an autoencoder: the target is the input itself.

        `y` is accepted for sklearn pipeline compatibility and ignored.
        """
        return super().fit(X, X)

