import imp
from typing import Optional
import typing as tp
import numpy as np
from scipy.stats import rv_continuous, kstest
import pandas as pd
from sklearn.model_selection._split import _BaseKFold
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import BaggingClassifier


def getTrainTimes(t1: pd.Series,
                  testTimes: pd.Series):
    """Purge observations from the training set that overlap test-label intervals.

    An observation is dropped from training when its label interval
    (start = index, end = value) overlaps any test interval in any of the
    three possible ways: it starts inside the test window, it ends inside
    the test window, or it completely envelops the test window.

    :param t1: label start/end timestamps (triple-barrier style)
        * t1.index: Time when the observation started.
        * t1.value: Time when the observation ended.
    :param testTimes: test-set time intervals, same format as t1
    :return: purged copy of ``t1`` usable as the training set
    """
    trn = t1.copy(deep=True)
    # NOTE: Series.iteritems() was removed in pandas 2.0; .items() is equivalent.
    for i, j in testTimes.items():
        df0 = trn[(i <= trn.index) & (trn.index <= j)].index  # train starts within test
        df1 = trn[(i <= trn) & (trn <= j)].index  # train ends within test
        df2 = trn[(trn.index <= i) & (j <= trn)].index  # train envelops test
        trn = trn.drop(df0.union(df1).union(df2))
    return trn


def getEmbargoTimes(times: pd.DatetimeIndex,
                    pctEmbargo: float = .01):
    """Get the embargo end time for each bar.

    Maps each bar's timestamp to the timestamp ``step`` bars later, where
    ``step = len(times) * pctEmbargo``; the last ``step`` bars map to the
    final timestamp.
    ! include embargo before purge

    :param times: timestamps of the full dataset
    :param pctEmbargo: embargo fraction; 0.01 is usually enough to remove
        information leakage
    :return: pd.Series indexed by bar time, valued at the embargo end time
    """
    step = int(times.shape[0] * pctEmbargo)
    if step == 0:
        # No embargo: each bar maps to itself.
        mbrg = pd.Series(times, index=times)
    else:
        mbrg = pd.Series(times[step:], index=times[:-step])
        # Series.append was removed in pandas 2.0; pd.concat gives the same result.
        mbrg = pd.concat([mbrg, pd.Series(times[-1], index=times[-step:])])
    return mbrg


class PurgedKFold(_BaseKFold):
    """
    Extend KFold class to work with labels that span intervals.
    The train is purged of observations overlapping test-label intervals.
    Test set is assumed contiguous (shuffle=False), w/o training samples in between.

    Tips:
        * Purging alone is sufficient in most cases.
        * An embargo fraction of 0.01 is usually enough to remove leakage.

    :param n_splits: number of folds
    :param t1: label start/end timestamps (triple-barrier style)
        * t1.index: Time when the observation started.
        * t1.value: Time when the observation ended.
    :param pct_embargo: embargo fraction; 0.01 is usually enough
    """

    def __init__(self,
                 n_splits: int = 3,
                 t1: Optional[pd.Series] = None,
                 pct_embargo: float = 0.):
        if not isinstance(t1, pd.Series):
            raise ValueError("Label Through Dates must be pd.Series")
        super().__init__(n_splits=n_splits, shuffle=False, random_state=None)
        self.t1 = t1
        self.pct_embargo = pct_embargo

    def split(self, X: pd.DataFrame, y: pd.Series = None, groups=None):
        """Yield (train_indices, test_indices) pairs, purged and embargoed."""
        # Index.equals avoids the length-mismatch broadcast failure of
        # (X.index == self.t1.index).all(); both paths raise ValueError.
        if not X.index.equals(self.t1.index):
            raise ValueError("X and ThruDateValues must have the same index")
        indices = np.arange(X.shape[0])
        mbrg = int(X.shape[0] * self.pct_embargo)  # embargo width in bars
        test_starts = [(i[0], i[-1] + 1) for i in
                       np.array_split(np.arange(X.shape[0]), self.n_splits)]
        for i, j in test_starts:
            t0 = self.t1.index[i]  # start of test set
            test_indices = indices[i:j]
            # .iloc is required: positional int indexing via Series.__getitem__
            # was deprecated in pandas 2.1 and removed in 3.0.
            max_t1_idx = self.t1.index.searchsorted(self.t1.iloc[test_indices].max())
            # Left-side train data: observations whose labels end before the test starts.
            train_indices = self.t1.index.searchsorted(self.t1[self.t1 <= t0].index)
            if max_t1_idx < X.shape[0]:
                # Right-side train data, skipping the embargo window after the test set.
                train_indices = np.concatenate((train_indices, indices[max_t1_idx + mbrg:]))
            yield train_indices, test_indices


def cvScore(clf, X, y, sample_weight, scoring='neg_log_loss',
            t1=None, cv=None, cvGen=None, pctEmbargo=None):
    """Cross-validation score using purged k-fold (AFML snippet 7.4).

    Use this instead of sklearn's ``cross_val_score``, which neither passes
    ``sample_weight`` to the scorer nor supports purging/embargo.

    :param clf: classifier exposing fit / predict / predict_proba
    :param X: pd.DataFrame of features
    :param y: pd.Series of labels
    :param sample_weight: pd.Series of sample weights aligned with X
    :param scoring: 'neg_log_loss' or 'accuracy'
    :param t1: label end times, used when cvGen is None
    :param cv: number of folds, used when cvGen is None
    :param cvGen: pre-built CV generator; when given, t1/cv/pctEmbargo are ignored
    :param pctEmbargo: embargo fraction, used when cvGen is None
    :return: np.ndarray of per-fold scores
    """
    if scoring not in ['neg_log_loss', 'accuracy']:
        # ValueError is more precise than bare Exception and is still caught
        # by any existing `except Exception` handlers.
        raise ValueError('wrong scoring method.')

    from sklearn.metrics import log_loss, accuracy_score

    if cvGen is None:
        cvGen = PurgedKFold(n_splits=cv, t1=t1,
                            pct_embargo=pctEmbargo)  # purged
    score = []
    for train, test in cvGen.split(X=X):
        fit = clf.fit(X=X.iloc[train, :], y=y.iloc[train],
                      sample_weight=sample_weight.iloc[train].values)
        if scoring == 'neg_log_loss':
            prob = fit.predict_proba(X.iloc[test, :])
            score_ = -log_loss(y.iloc[test], prob,
                               sample_weight=sample_weight.iloc[test].values,
                               labels=clf.classes_)  # type: ignore
        else:
            pred = fit.predict(X.iloc[test, :])
            score_ = accuracy_score(y.iloc[test], pred,
                                    sample_weight=sample_weight.iloc[test].values)
        score.append(score_)
    return np.array(score)


def clfHyperFit(feat, lbl, t1, pipe_clf, param_grid, cv=3, bagging=(0, 0, 1.),
                rndSearchIter=0, n_jobs=-1, pctEmbargo=0, **fit_params):
    """Grid search with purged k-fold cross-validation (AFML snippet 9.3).

    :param feat: features
    :param lbl: labels; a {0, 1} label set triggers f1 scoring (meta-labeling)
    :param t1: label end times for PurgedKFold
    :param pipe_clf: sklearn Pipeline to tune
    :param param_grid: grid values (or distributions when rndSearchIter > 0)
    :param cv: number of folds
    :param bagging: (n_estimators, max_samples, max_features); when
        max_samples > 0 the best estimator is bagged on the full data
    :param rndSearchIter: 0 -> GridSearchCV, otherwise RandomizedSearchCV
        with this many iterations
    :param n_jobs: parallel jobs
    :param pctEmbargo: embargo fraction
    :param fit_params: forwarded to fit; can be used to pass sample_weight
    """
    if set(lbl.values) == {0, 1}:
        scoring = 'f1'  # f1 for meta-labeling
    else:
        scoring = 'neg_log_loss'  # symmetric towards all cases
    # 1) hyperparameter search, on train data
    inner_cv = PurgedKFold(
        n_splits=cv, t1=t1, pct_embargo=pctEmbargo)  # purged
    # NOTE: the `iid` argument was removed from the search estimators in
    # scikit-learn 0.24 and must no longer be passed.
    if rndSearchIter == 0:
        gs = GridSearchCV(estimator=pipe_clf, param_grid=param_grid,
                          scoring=scoring, cv=inner_cv, n_jobs=n_jobs)
    else:
        gs = RandomizedSearchCV(estimator=pipe_clf, param_distributions=param_grid,
                                scoring=scoring, cv=inner_cv, n_jobs=n_jobs,
                                n_iter=rndSearchIter)
    gs = gs.fit(feat, lbl, **fit_params).best_estimator_  # pipeline
    gs = tp.cast(Pipeline, gs)
    # 2) fit validated model on the entirety of the data
    if bagging[1] > 0:
        # `base_estimator` was renamed to `estimator` in scikit-learn 1.2
        # and removed in 1.4.
        gs = BaggingClassifier(estimator=MyPipeline(gs.steps),
                               n_estimators=int(bagging[0]), max_samples=float(bagging[1]),
                               max_features=float(bagging[2]), n_jobs=n_jobs)
        gs = gs.fit(
            feat, lbl, sample_weight=fit_params[gs.estimator.steps[-1][0] + '__sample_weight'])
        gs = Pipeline([('bag', gs)])
    return gs


class MyPipeline(Pipeline):
    """Pipeline that forwards ``sample_weight`` to its final estimator's fit."""

    def fit(self, X, y, sample_weight=None, **fit_params):
        # Route the weight to the last step via sklearn's '<step>__<param>' routing.
        if sample_weight is not None:
            last_step_name = self.steps[-1][0]
            fit_params[last_step_name + '__sample_weight'] = sample_weight
        return super().fit(X, y, **fit_params)


class logUniform_gen(rv_continuous):
    """Log-uniform distribution: random numbers log-uniformly distributed
    between the support bounds ``a`` and ``b`` set at construction."""

    def _cdf(self, x):
        # Uniform in log-space over [log a, log b].
        rel = x / self.a
        span = self.b / self.a  # type: ignore
        return np.log(rel) / np.log(span)


def logUniform(a=1, b=np.exp(1)):
    """Build a log-uniform distribution object with support [a, b]."""
    dist = logUniform_gen(a=a, b=b, name='logUniform')
    return dist
