import numpy as np
import pandas as pd
from pandas.core.groupby import DataFrameGroupBy
from scipy.sparse import csc_matrix


def get_group_by_bar(bars: pd.Series, cap: float) -> np.ndarray:
    """Assign each bar to a group so each group's cumulative bar size reaches ``cap``.

    :param bars: per-bar sizes (e.g. volume or dollar value)
    :param cap: target cumulative size per group
    :return: group label for each bar (ceil of running total / cap)
    """
    running_total = bars.cumsum()
    return np.ceil(running_total / cap)  # type: ignore


def group_by_bar_df(df: pd.DataFrame, bar_col: str, cap: float) -> DataFrameGroupBy:
    """Group the rows of ``df`` by the capped cumulative sum of column ``bar_col``.

    :param df: input frame
    :param bar_col: column holding per-bar sizes
    :param cap: target cumulative size per group
    :return: a pandas groupby object keyed by the computed group labels
    """
    group_labels = get_group_by_bar(df[bar_col], cap)
    return df.groupby(group_labels)  # type: ignore


def getTEvents(gRaw: pd.Series, h: float) -> pd.DatetimeIndex:
    """Symmetric CUSUM filter.

    Returns the timestamps at which the cumulative upward or downward
    change of the series exceeds the threshold ``h``; the corresponding
    accumulator is reset after each event.

    :param gRaw: target time series, indexed by datetime
    :param h: positive threshold on the accumulated change
    """
    events = []
    pos_cum = 0.0
    neg_cum = 0.0
    deltas = gRaw.diff().iloc[1:]  # first diff is NaN, skip it
    for ts, delta in deltas.items():
        pos_cum = max(0.0, pos_cum + delta)
        neg_cum = min(0.0, neg_cum + delta)
        if neg_cum < -h:
            neg_cum = 0.0
            events.append(ts)
        elif pos_cum > h:
            pos_cum = 0.0
            events.append(ts)
    return pd.DatetimeIndex(events)


def mpNumCoEvents(closeIdx, t1, molecule):
    """
    Compute the number of concurrent events per bar.
    +molecule[0] is the date of the first event on which the weight will be computed
    +molecule[-1] is the date of the last event on which the weight will be computed
    Any event that starts before t1[molecule].max() impacts the count.

    :param closeIdx: index of the close-price series
    :param t1: first touch times of the triple barriers (events' t1)
    :param molecule: index chunk for parallel processing (e.g. events' t1 index)
    :return: Series of concurrent-event counts, indexed by bar timestamp
    """
    # 1) find events that span the period [molecule[0], molecule[-1]]
    # unclosed events still must impact other weights
    t1 = t1.fillna(closeIdx[-1])
    t1 = t1[t1 >= molecule[0]]  # events that end at or after molecule[0]
    # events that start at or before t1[molecule].max()
    t1 = t1.loc[: t1[molecule].max()]
    # 2) count events spanning a bar
    iloc = closeIdx.searchsorted(np.array([t1.index[0], t1.max()]))
    # float init: avoids the deprecated int->float upcast on `+= 1.0` below
    count = pd.Series(0.0, index=closeIdx[iloc[0] : iloc[1] + 1])
    # Series.iteritems was removed in pandas 2.0; items() is the replacement
    for tIn, tOut in t1.items():
        count.loc[tIn:tOut] += 1.0
    return count.loc[molecule[0] : t1[molecule].max()]


def mpSampleTW(t1, numCoEvents, molecule):
    """Derive average uniqueness over each event's lifespan.

    The uniqueness of an event at a bar is 1 / (number of concurrent
    events at that bar); the event's weight is the mean over its lifespan.

    :param t1: first touch times of the triple barriers (events' t1)
    :param numCoEvents: concurrency per bar (output of mpNumCoEvents)
    :param molecule: index chunk for parallel processing (e.g. events' t1 index)
    :return: Series of average uniqueness, indexed by event start time
    """
    # explicit float dtype: an empty Series would otherwise default to object in pandas 2.0
    wght = pd.Series(index=molecule, dtype=float)
    # Series.iteritems was removed in pandas 2.0; items() is the replacement
    for tIn, tOut in t1.loc[wght.index].items():
        wght.loc[tIn] = (1.0 / numCoEvents.loc[tIn:tOut]).mean()
    return wght


# def getIndMatrix(barIx, t1):
#     # Get indicator matrix
#     indM = pd.DataFrame(0, index=barIx, columns=range(t1.shape[0]))
#     for i, (t0, t1) in enumerate(t1.iteritems()):
#         indM.loc[t0:t1, i] = 1.
#     return indM


def getIndMatrix(barIdx, t1):
    """Build the indicator matrix as a sparse matrix.

    Column ``j`` corresponds to the j-th event in ``t1``; entry ``(i, j)``
    is 1 when bar ``i`` falls inside the lifespan of event ``j``.

    :param barIdx: index of the close-price series
    :param t1: first touch times of the triple barriers (events' t1)
    :return: indicator matrix as a ``scipy.sparse.csc_matrix`` (bars x events)
    """
    # map each timestamp to its positional index within barIdx
    bar_iloc = pd.Series(range(barIdx.shape[0]), index=barIdx).to_frame("iloc")
    # positions of the event start times (t1.index) within barIdx
    t0_iloc = bar_iloc.join(
        pd.Series(t1.index, index=t1.index, name="t0"), how="inner"
    )["iloc"]
    # positions of the event end times (t1.values) within barIdx
    t1_iloc = bar_iloc.join(
        pd.Series(t1.values, index=t1.values, name="t1"), how="inner"
    )["iloc"]
    # Get indicator matrix: COO-style triplets, every bar spanned by event i
    # contributes a 1 in column i
    row_idx = []
    col_idx = []
    data = []
    for i, (start, end) in enumerate(zip(t0_iloc, t1_iloc)):
        span = list(range(start, end + 1))
        row_idx.extend(span)
        col_idx.extend([i] * len(span))
        data.extend([1] * len(span))
    # np.int was removed in NumPy 1.24; use an explicit fixed-width dtype
    indM = csc_matrix(
        (data, (row_idx, col_idx)), shape=(barIdx.shape[0], t1.shape[0]), dtype=np.int64
    )
    return indM


def getAvgUniqueness(indM):
    """Compute the overall average uniqueness from an indicator matrix.

    Each non-zero entry is weighted by 1/concurrency of its bar; the result
    is the mean of those weights over all non-zero entries.

    :param indM: sparse indicator matrix (bars x events)
    :return: scalar average uniqueness
    """
    c = indM.sum(axis=1)  # concurrency: number of events active at each bar
    c = np.array(c).reshape(-1)
    c = c.astype("float")
    # bars with no active events carry NaN; their indicator rows are empty,
    # so they never enter the sparse product below
    c[c == 0] = np.nan
    diag_ci = csc_matrix((1 / c, (np.arange(c.shape[0]), np.arange(c.shape[0]))))
    n = (indM > 0).sum(dtype=np.double)  # total count of non-zero entries
    # np.dot is not sparse-aware; use the @ operator for sparse matmul
    avgU = (diag_ci @ indM).sum(dtype=np.double) / n
    return avgU


def getAvgUniqueness_every_label(indM):
    """Compute the average uniqueness of each label (column) separately.

    :param indM: sparse indicator matrix (bars x events)
    :return: 1 x events matrix of per-label average uniqueness
    """
    c = indM.sum(axis=1)  # concurrency: number of events active at each bar
    c = np.array(c).reshape(-1)
    c = c.astype("float")
    # bars with no active events carry NaN; their indicator rows are empty,
    # so they never enter the sparse product below
    c[c == 0] = np.nan
    diag_ci = csc_matrix((1 / c, (np.arange(c.shape[0]), np.arange(c.shape[0]))))
    n = (indM > 0).sum(axis=0)  # non-zero count per column
    # np.dot is not sparse-aware; use the @ operator for sparse matmul
    avgU = (diag_ci @ indM).sum(axis=0) / n
    return avgU


def seqBootstrap(indM, sLength=None):
    """Sequential bootstrap sampling.

    Column indices are drawn one at a time; each candidate's draw
    probability is proportional to the average uniqueness it would have
    given the columns already drawn.

    :param indM: indicator matrix
    :param sLength: sample size; defaults to the number of events in ``indM``
    :rtype: `list`
    """
    n_events = indM.shape[1]
    candidates = list(range(n_events))
    if sLength is None:
        sLength = n_events
    drawn = []
    while len(drawn) < sLength:
        uniqueness = np.zeros(len(candidates))
        for col in candidates:
            trial = indM[:, drawn + [col]]  # reduce indM
            uniqueness[col] = getAvgUniqueness(trial)
        uniqueness[np.isnan(uniqueness)] = 0
        draw_prob = uniqueness / uniqueness.sum()  # draw prob
        drawn.append(np.random.choice(candidates, p=draw_prob))
    return drawn


def seqBootstrap2(indM, sLength=None):
    """Sequential bootstrap sampling (slightly optimized).

    Reuses a preallocated index buffer instead of growing a Python list.

    :param indM: indicator matrix
    :param sLength: sample size; defaults to the number of events in ``indM``
    :return: sampled column indices
    :rtype: `numpy.ndarray`
    """
    columns = list(range(indM.shape[1]))
    if sLength is None:
        sLength = indM.shape[1]
    # size by sLength (not the column count) so a shorter sample does not
    # return spurious trailing zeros; np.int was removed in NumPy 1.24,
    # use an explicit fixed-width dtype
    phi = np.zeros(sLength, dtype=np.int64)
    avgU = np.zeros(len(columns), dtype=np.double)
    for j in range(sLength):
        avgU[:] = 0.0
        for i in columns:
            phi[j] = i  # tentatively place candidate i in slot j
            indM_ = indM[:, phi[: j + 1]]  # reduce indM
            avgU[i] = getAvgUniqueness(indM_)
        avgU[np.isnan(avgU)] = 0
        prob = avgU / avgU.sum()  # draw prob
        phi[j] = np.random.choice(columns, p=prob)
    return phi


def mpSampleW(t1, numCoEvents, close, molecule):
    """Derive sample weight by return attribution.

    Each event's weight is the absolute sum, over its lifespan, of the
    log-return of each bar divided by that bar's concurrency.

    :param t1: first touch times of the triple barriers (events' t1)
    :param numCoEvents: concurrency per bar (output of mpNumCoEvents)
    :param close: close-price series
    :param molecule: index chunk for parallel processing (e.g. events' t1 index)
    :return: Series of non-negative sample weights, indexed by event start time
    """
    ret = np.log(close).diff()  # log-returns, so that they are additive
    # explicit float dtype: an empty Series would otherwise default to object in pandas 2.0
    wght = pd.Series(index=molecule, dtype=float)
    # Series.iteritems was removed in pandas 2.0; items() is the replacement
    for tIn, tOut in t1.loc[wght.index].items():
        wght.loc[tIn] = (ret.loc[tIn:tOut] / numCoEvents.loc[tIn:tOut]).sum()
    return wght.abs()


def getTimeDecay(tW, clfLastW=1.0):
    """Apply piecewise-linear time decay to observed uniqueness ``tW``.

    The newest observation gets weight 1 and the oldest gets ``clfLastW``.
    A negative ``clfLastW`` zeroes out the oldest portion of the sample;
    negative intermediate values are clipped to 0.

    :param tW: observed uniqueness per event, indexed by time
    :param clfLastW: weight assigned to the oldest observation
    :return: Series of decayed weights aligned with ``tW``
    """
    decayed = tW.sort_index().cumsum()
    total = decayed.iloc[-1]
    if clfLastW >= 0:
        slope = (1.0 - clfLastW) / total
    else:
        slope = 1.0 / ((clfLastW + 1) * total)
    intercept = 1.0 - slope * total
    decayed = intercept + slope * decayed
    decayed[decayed < 0] = 0
    return decayed
