
import typing as tp
import pandas as pd
import numpy as np


def get_bsadf(logP: pd.Series,
              minSL: int,
              constant: str,
              lags: int) -> dict:
    """SADF's inner loop: backward supremum ADF statistic.

    Runs an ADF regression on every expanding-start subsample of ``logP``
    and returns the supremum of the resulting t-statistics.

    :param logP: log-price series
    :param minSL: minimum sample length used for the final regression
    :param constant: deterministic component of the regression:
        * 'nc': no constant, no time trend
        * 'c': constant only
        * 'ct': constant + linear time trend
        * 'ctt': constant + quadratic time trend
    :param lags: number of lags used in the ADF specification
    :return: dict with the last timestamp of ``logP`` ('Time') and the
        supremum ADF statistic ('gsadf')
    """
    y, x = getYX(logP, constant=constant, lags=lags)
    # NOTE(review): the '+lags' term can push the latest start points past
    # the point where minSL observations remain in the subsample — confirm
    # against the intended sample-length convention.
    startPoints = range(0, y.shape[0] + lags - minSL + 1)
    allADF = []
    for start in startPoints:
        y_, x_ = y[start:], x[start:]
        bMean_, bStd_ = getBetas(y_, x_)
        bMean_ = tp.cast(np.ndarray, bMean_)
        # t-statistic of the lagged-level coefficient (first regressor).
        allADF.append(bMean_[0, 0] / bStd_[0, 0] ** .5)
    # BUG FIX: the running supremum used to start at 0, which silently
    # floored the statistic at 0 even though ADF t-stats are typically
    # negative. Take the true supremum (or -inf for an empty range).
    bsadf = max(allADF, default=-np.inf)
    return {'Time': logP.index[-1], 'gsadf': bsadf}


def getYX(series, constant, lags):
    """Prepare the (y, X) numpy arrays for the recursive ADF regressions.

    The design matrix contains the lagged level of ``series`` in its first
    column, followed by ``lags`` lagged first differences, plus the
    requested deterministic terms.

    :param series: log-price data as a pd.Series or single-column DataFrame
    :param constant: 'nc', 'c', 'ct' or 'ctt' (see get_bsadf)
    :param lags: number of lagged differences to include
    :return: tuple ``(y, x)`` of numpy arrays ready for OLS
    """
    # Generalization: accept a Series, as get_bsadf's signature declares;
    # lagDF and the 2-D .values indexing below require a DataFrame.
    if isinstance(series, pd.Series):
        series = series.to_frame()
    series_ = series.diff().dropna()
    x = lagDF(series_, lags).dropna()
    # Replace the 0-lag diff column with the lagged level — the regressor
    # whose t-statistic is the ADF test statistic.
    x.iloc[:, 0] = series.values[-x.shape[0] - 1:-1, 0]
    y = series_.iloc[-x.shape[0]:].values
    if constant != 'nc':
        x = np.append(x, np.ones((x.shape[0], 1)), axis=1)  # constant term
        x = tp.cast(np.ndarray, x)
        trend = np.arange(x.shape[0]).reshape(-1, 1)
        if constant[:2] == 'ct':
            x = np.append(x, trend, axis=1)  # linear time trend
        if constant == 'ctt':
            x = np.append(x, trend ** 2, axis=1)  # quadratic time trend
    return y, x


def lagDF(df0: tp.Union[pd.DataFrame, pd.Series],
          lags: tp.Union[int, tp.Sequence[int]]) -> pd.DataFrame:
    """Apply lags to a DataFrame.

    Returns one column per (original column, lag) pair, named
    ``'<col>_<lag>'``. An int ``lags`` expands to ``range(lags + 1)``,
    i.e. lag 0 up to and including ``lags``.

    :param df0: data to lag (a Series is treated as a one-column frame)
    :param lags: a single maximum lag, or an explicit sequence of lags
    :return: DataFrame of lagged copies, outer-joined on the index
    """
    # Generalization: a Series has no assignable .columns, so promote it.
    if isinstance(df0, pd.Series):
        df0 = df0.to_frame()
    if isinstance(lags, int):
        lags = range(lags + 1)
    else:
        lags = [int(lag) for lag in lags]
    shifted = []
    for lag in lags:
        # shift() already allocates a new frame, so no explicit copy needed.
        df_ = df0.shift(lag)
        df_.columns = [str(col) + '_' + str(lag) for col in df_.columns]
        shifted.append(df_)
    # Single concat instead of repeated outer joins (same result, one pass).
    return pd.concat(shifted, axis=1) if shifted else pd.DataFrame()


def getBetas(y: np.ndarray, x: np.ndarray):
    """Fit the ADF regression by ordinary least squares.

    :param y: dependent variable, shape (n, 1)
    :param x: design matrix, shape (n, k)
    :return: tuple ``(bMean, bVar)`` — coefficient estimates (k, 1) and
        their covariance matrix (k, k)
    """
    xy = np.dot(x.T, y)
    xx = np.dot(x.T, x)
    # Robustness: pinv instead of inv — x'x can be singular or
    # near-singular for the shortest subsamples the SADF loop generates,
    # and inv would raise LinAlgError there. For well-conditioned inputs
    # the results are identical.
    xxinv = np.linalg.pinv(xx)
    bMean = np.dot(xxinv, xy)
    err = y - np.dot(x, bMean)
    # OLS covariance: s^2 (x'x)^-1 with n - k degrees of freedom.
    bVar = np.dot(err.T, err) / (x.shape[0] - x.shape[1]) * xxinv
    return bMean, bVar
