from dataclasses import dataclass, field
from typing import Any, List

import numpy as np
import pandas as pd
from minepy import MINE  # type: ignore

from . import base


@dataclass
class MIC(base.BaseTransformer, base.CopyMixin):
    """Score features by their Maximal Information Coefficient (MIC) vs. a target.

    ``fit`` computes one MIC score per column of ``X``; ``transform`` is a
    pass-through (optionally copying ``X`` via ``CopyMixin``).
    """
    alpha: float = 0.6   # MINE 'alpha' parameter
    c: int = 15          # MINE 'c' parameter
    new_col: str = 'y'   # column name of the scores frame returned by get_df()
    feat_cols: List[str] = field(default_factory=list)  # feature names for get_df()'s index

    res: List[float] = field(default_factory=list, init=False)  # per-column MIC scores

    def fit(self, X, y):
        """Compute the MIC of each column of X against y and store it in ``res``.

        X may be a 2-D ndarray or a DataFrame (converted via ``np.asarray``).
        """
        # Reset scores so repeated fit() calls do not accumulate results
        # from previous runs (the old code appended forever).
        self.res = []
        X = np.asarray(X)
        mine = MINE(alpha=self.alpha, c=self.c)
        for i in range(X.shape[1]):
            mine.compute_score(X[:, i], y)
            self.res.append(mine.mic())
        return self

    def get_df(self) -> pd.DataFrame:
        """Return the scores as a one-column DataFrame indexed by feature name."""
        # Fall back to a default RangeIndex when no feature names were given,
        # instead of failing on a length-mismatched empty index.
        index = self.feat_cols if self.feat_cols else None
        return pd.Series(self.res, index=index).to_frame(self.new_col)

    @classmethod
    def plot_bar(cls, x: pd.DataFrame, col: str = 'y', *args, **kwargs):
        """Bar-plot a scores frame, sorted descending by ``col`` (unsorted if falsy)."""
        xx = x.sort_values(col, ascending=False) if col else x
        return xx.plot.bar(*args, **kwargs)

    def transform(self, X: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
        """Pass-through; returns X (or a copy, per CopyMixin settings)."""
        return self.copy_or(X)
    
    

@dataclass
class PermutationImportance(base.BaseTransformer, base.CopyMixin):
    """Feature importance via sklearn's ``permutation_importance``.

    ``fit`` stores the raw result plus a Series of mean importances sorted
    descending; ``transform`` is a pass-through.
    """
    feature_names: List[str] = field(default_factory=list)
    # default_factory avoids one shared mutable instance being reused by
    # every PermutationImportance object (class-level default pitfall).
    model: base.BaseEstimator = field(default_factory=base.BaseEstimator)
    result: Any = None              # raw permutation_importance Bunch
    n_repeats: int = 10
    random_state: int = 42
    n_jobs: int = 4
    importances: pd.Series = field(default_factory=pd.Series)

    def fit(self, X, y):
        """Compute permutation importances of ``self.model`` on (X, y)."""
        import time
        from sklearn.inspection import permutation_importance

        start_time = time.time()
        self.result = permutation_importance(
            self.model, X, y,
            n_repeats=self.n_repeats,
            random_state=self.random_state,
            n_jobs=self.n_jobs
        )
        elapsed_time = time.time() - start_time
        print(f"Elapsed time to compute the importances: "
              f"{elapsed_time:.3f} seconds")
        self.importances = pd.Series(self.result.importances_mean)
        # Truthiness check: the default is [], and assigning an empty index
        # to a non-empty Series would raise.
        if self.feature_names:
            self.importances.index = self.feature_names  # type: ignore
        self.importances = self.importances.sort_values(ascending=False)
        return self

    def transform(self, X: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
        """Pass-through; returns X (or a copy, per CopyMixin settings)."""
        return self.copy_or(X)

    def plot_bar(self, figsize=(12, 4)):
        """Bar-plot the sorted importances with std error bars."""
        import matplotlib.pyplot as plt

        # importances is sorted, but result.importances_std is still in the
        # original feature order -- align the error bars before plotting.
        err = pd.Series(self.result.importances_std)
        if self.feature_names:
            err.index = self.feature_names
        err = err.reindex(self.importances.index)
        fig, ax = plt.subplots(figsize=figsize)
        self.importances.plot.bar(yerr=err, ax=ax)
        ax.set_title("Feature importances using permutation on full model")
        ax.set_ylabel("Mean accuracy decrease")
        fig.tight_layout()
        plt.show()
        
        
@dataclass
class MDI(base.BaseEstimator):
    """Feature importance via Mean Decrease in Impurity of a fitted ensemble.

    Reads ``feature_importances_`` from ``self.model`` (assumed already
    fitted) and the per-tree spread from ``model.estimators_``.
    """
    # default_factory avoids sharing one mutable object across instances
    # (class-level mutable default pitfall).
    model: base.BaseEstimator = field(default_factory=base.BaseEstimator)
    feature_names: List[str] = field(default_factory=list)
    std: np.ndarray = field(default_factory=lambda: np.array([]))
    importances: pd.Series = field(default_factory=pd.Series)

    def fit(self, X=None, y=None):
        """Collect MDI importances from the pre-fitted model; X and y are unused."""
        importances = self.model.feature_importances_  # type: ignore
        # Spread of the per-tree importances, in the model's feature order.
        self.std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0)  # type: ignore
        self.importances = pd.Series(importances)
        # Truthiness check: the default is [], and assigning an empty index
        # to a non-empty Series would raise.
        if self.feature_names:
            self.importances.index = self.feature_names
        self.importances = self.importances.sort_values(ascending=False)
        return self

    def plot_bar(self, figsize=(12, 4)):
        """Bar-plot the sorted importances with per-tree std error bars."""
        import matplotlib.pyplot as plt

        # importances is sorted but self.std is still in model feature
        # order -- align the error bars before plotting.
        err = pd.Series(self.std)
        if self.feature_names:
            err.index = self.feature_names
        err = err.reindex(self.importances.index)
        fig, ax = plt.subplots(figsize=figsize)
        self.importances.plot.bar(yerr=err, ax=ax)
        ax.set_title("Feature importances using MDI")
        ax.set_ylabel("Mean decrease in impurity")
        # Dashed reference line at the uniform-importance level 1/n_features;
        # skip it when there is nothing fitted yet (avoids division by zero).
        n = len(self.feature_names) if self.feature_names else len(self.importances)
        if n:
            plt.axhline(y=1 / n, ls=":", c="red")
        fig.tight_layout()
        
        
def featImpMDI(fit, featNames):
    """In-sample feature importance via Mean Decrease in Impurity (MDI).

    Collects ``feature_importances_`` from every tree of the bagged
    ensemble ``fit``, masks exact zeros (an artifact of max_features=1),
    and returns a frame with per-feature 'mean' and 'std' (standard
    error) columns, normalized so the means sum to one.
    """
    per_tree = pd.DataFrame(
        [tree.feature_importances_ for tree in fit.estimators_],
        columns=featNames,
    )
    # A zero means "feature never used by this tree" when max_features=1,
    # so treat it as missing rather than as a measured zero importance.
    per_tree = per_tree.replace(0, np.nan)
    n_trees = per_tree.shape[0]
    imp = pd.concat(
        {'mean': per_tree.mean(), 'std': per_tree.std() * n_trees ** -.5},
        axis=1,
    )
    return imp / imp['mean'].sum()


def featImpMDA(clf, X, y, cv, sample_weight, t1, pctEmbargo, scoring='neg_log_loss'):
    """Mean Decrease Accuracy feature importance via purged cross-validation.

    For each purged-CV fold, fits ``clf`` on the train split, scores the
    test split once unshuffled (``scr0``) and once per feature with that
    single column permuted (``scr1``); importance is the normalized score
    drop.  Returns ``(imp, mean_score)`` where ``imp`` has per-feature
    'mean' and 'std' (standard error) columns and ``mean_score`` is the
    average unshuffled OOS score.
    """
    # feat importance based on OOS score reduction
    if scoring not in ['neg_log_loss', 'accuracy']:
        raise Exception('wrong scoring method.')

    from sklearn.metrics import log_loss, accuracy_score
    from quant_pipeline.model_selection.cv import PurgedKFold

    cvGen = PurgedKFold(n_splits=cv, t1=t1,
                        pct_embargo=pctEmbargo)  # purged cv
    # scr0: baseline score per fold; scr1: score per (fold, shuffled feature).
    scr0, scr1 = pd.Series(), pd.DataFrame(columns=X.columns)
    for i, (train, test) in enumerate(cvGen.split(X=X)):
        X0, y0, w0 = X.iloc[train, :], y.iloc[train], sample_weight.iloc[train]
        X1, y1, w1 = X.iloc[test, :], y.iloc[test], sample_weight.iloc[test]
        fit = clf.fit(X=X0, y=y0, sample_weight=w0.values)
        if scoring == 'neg_log_loss':
            prob = fit.predict_proba(X1)
            scr0.loc[i] = -log_loss(y1, prob,
                                    sample_weight=w1.values, labels=clf.classes_)  # type: ignore
        else:
            pred = fit.predict(X1)
            scr0.loc[i] = accuracy_score(y1, pred, sample_weight=w1.values)
        for j in X.columns:
            # Deep copy so the in-place shuffle touches only this trial.
            X1_ = X1.copy(deep=True)
            np.random.shuffle(X1_[j].values)  # permutation of a single column
            if scoring == 'neg_log_loss':
                prob = fit.predict_proba(X1_)
                scr1.loc[i, j] = -log_loss(y1, prob, sample_weight=w1.values,
                                           labels=clf.classes_)  # type: ignore
            else:
                pred = fit.predict(X1_)
                scr1.loc[i, j] = accuracy_score(
                    y1, pred, sample_weight=w1.values)
    # Score drop per feature (scr0 - scr1), normalized per scoring metric.
    imp = (-scr1).add(scr0, axis=0)
    if scoring == 'neg_log_loss':
        imp = imp/-scr1
    else:
        imp = imp/(1. - scr1) # type: ignore
    # Mean across folds plus standard error (std * n_folds**-0.5).
    imp = pd.concat({'mean': imp.mean(), 'std': imp.std()
                     * imp.shape[0]**-.5}, axis=1)
    return imp, scr0.mean()


def auxFeatImpSFI(featNames, clf, trnsX, cont, scoring, cvGen):
    """Single-Feature Importance: cross-validated score of each feature alone.

    Fits/scores ``clf`` on one feature column at a time via ``cvScore``,
    returning a frame with per-feature 'mean' and 'std' (standard error).
    """
    from quant_pipeline.model_selection.cv import cvScore

    imp = pd.DataFrame(columns=['mean', 'std'])
    for name in featNames:
        scores = cvScore(clf, X=trnsX[[name]], y=cont['bin'],
                         sample_weight=cont['w'], scoring=scoring, cvGen=cvGen)
        imp.loc[name, 'mean'] = scores.mean()
        # Standard error of the mean across folds.
        imp.loc[name, 'std'] = scores.std() * scores.shape[0] ** -.5
    return imp


def get_eVec(dot, varThres):
    """Eigen-decompose a dot-product matrix and keep the leading components.

    Returns ``(eVal, eVec)`` restricted to the smallest number of
    principal components whose cumulative variance ratio reaches
    ``varThres``.  Components are labelled PC_1, PC_2, ...
    """
    # 1) eigh returns eigenvalues in ascending order; flip to descending.
    eVal, eVec = np.linalg.eigh(dot)
    order = np.argsort(eVal)[::-1]
    eVal = eVal[order]
    eVec = eVec[:, order]
    # 2) wrap in labelled pandas containers.
    labels = ['PC_' + str(i + 1) for i in range(eVal.shape[0])]
    eVal = pd.Series(eVal, index=labels)
    eVec = pd.DataFrame(eVec, index=dot.index, columns=labels)
    eVec = eVec.loc[:, eVal.index]
    # 3) keep just enough components to reach the variance threshold.
    cumVar = eVal.cumsum() / eVal.sum()
    dim = cumVar.values.searchsorted(varThres)
    return eVal.iloc[:dim + 1], eVec.iloc[:, :dim + 1]


def orthoFeats(dfX, varThres=.95):
    """Project features onto their principal components (orthogonalization).

    Standardizes ``dfX`` column-wise, eigen-decomposes the resulting
    dot-product matrix via ``get_eVec``, and returns the data projected
    onto the retained components as an ndarray.
    """
    # Column-wise standardization (Series arithmetic aligns on columns).
    dfZ = (dfX - dfX.mean()) / dfX.std()
    gram = pd.DataFrame(np.dot(dfZ.T, dfZ),
                        index=dfX.columns, columns=dfX.columns)
    _, eVec = get_eVec(gram, varThres)
    return np.dot(dfZ, eVec)


def get_weighted_tau(feat_imp, pc_rank):
    """Weighted Kendall's tau between feature importances and inverse PC ranks."""
    from scipy.stats import weightedtau

    # Inverting the ranks gives higher weight to the leading components.
    tau, _p = weightedtau(feat_imp, pc_rank ** -1.)
    return tau


def featImportance(trnsX, cont, n_estimators=1000, cv=10, max_samples=1.,
                   pctEmbargo=0, scoring='accuracy', method='SFI', minWLeaf=0., n_jobs=-1):
    """Feature importance from a bagged decision-tree forest.

    ``method`` selects the estimator: 'MDI' (in-sample impurity), 'MDA'
    (OOS score drop under permutation) or 'SFI' (single-feature CV).
    ``trnsX`` is the feature frame; ``cont`` holds 'bin' (labels), 'w'
    (sample weights) and 't1' (label end-times, used for purging).

    Returns ``(imp, oob, oos)``: the importance frame, the bagging OOB
    score, and the purged-CV OOS score.

    Raises ValueError for an unknown ``method``.
    """
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.ensemble import BaggingClassifier
    from quant_pipeline.model_selection.cv import PurgedKFold, cvScore

    #1) prepare classifier,cv. max_features=1, to prevent masking
    clf = DecisionTreeClassifier(criterion='entropy', max_features=1,
                                 class_weight='balanced', min_weight_fraction_leaf=minWLeaf)
    # NOTE(review): `base_estimator` was renamed `estimator` in sklearn 1.2
    # and removed in 1.4 -- confirm the pinned sklearn version.
    clf = BaggingClassifier(base_estimator=clf, n_estimators=n_estimators,
                            max_features=1., max_samples=max_samples, oob_score=True, n_jobs=n_jobs)
    fit = clf.fit(X=trnsX, y=cont['bin'], sample_weight=cont['w'].values)
    oob = fit.oob_score_
    if method == 'MDI':
        imp = featImpMDI(fit, featNames=trnsX.columns)
        oos = cvScore(clf, X=trnsX, y=cont['bin'], cv=cv, sample_weight=cont['w'],
                      t1=cont['t1'], pctEmbargo=pctEmbargo, scoring=scoring).mean()
    elif method == 'MDA':
        imp, oos = featImpMDA(clf, X=trnsX, y=cont['bin'], cv=cv, sample_weight=cont['w'],
                              t1=cont['t1'], pctEmbargo=pctEmbargo, scoring=scoring)
    elif method == 'SFI':
        cvGen = PurgedKFold(n_splits=cv, t1=cont['t1'], pct_embargo=pctEmbargo)
        oos = cvScore(clf, X=trnsX, y=cont['bin'], sample_weight=cont['w'], scoring=scoring,
                      cvGen=cvGen).mean()
        clf.n_jobs = 1  # paralellize auxFeatImpSFI rather than clf
        imp = auxFeatImpSFI(featNames=trnsX.columns,
                            clf=clf, trnsX=trnsX, cont=cont, scoring=scoring, cvGen=cvGen)
    else:
        raise ValueError("invalid method %s" %method)
    return imp, oob, oos
