import numpy as np
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
from parameters import get_wiener_params


def format_data(x, y, n_lag):
    """
    Reshape arrays into lagged form for Wiener filter fitting.

    Parameters
        x: the input data for Wiener filter fitting, an ndarray of shape
           (n_samples, n_features)
        y: the output data for Wiener filter fitting, an ndarray
        n_lag: the number of time lags, an int number
    Returns:
        out1: array of sliding windows over x, each flattened to
              n_lag * n_features values, an ndarray
        out2: y trimmed so its rows align with the lagged windows, an ndarray
    """
    n_windows = x.shape[0] - n_lag + 1
    stacked = []
    for start in range(n_windows):
        # Flatten the window of n_lag consecutive rows into one feature vector.
        window = x[start:start + n_lag, :]
        stacked.append(window.reshape(-1))
    # The first valid prediction target is the one at index n_lag - 1.
    return np.asarray(stacked), y[n_lag - 1:, :]


def format_data_from_list(x, y, n_lag):
    """
    Reshape a list of trials into one big lagged array pair for Wiener
    filter fitting.

    Parameters
        x: a list of input trials (or a single ndarray, which is wrapped)
        y: a list of output trials (or a single ndarray, which is wrapped)
        n_lag: the number of time lags, an int number
    Returns
        out1: the concatenated lagged inputs across trials, an ndarray
        out2: the concatenated trimmed outputs across trials, an ndarray
    """
    # isinstance (not type() ==) also accepts ndarray subclasses.
    if isinstance(x, np.ndarray):
        x = [x]
    if isinstance(y, np.ndarray):
        y = [y]
    x_, y_ = [], []
    for xi, yi in zip(x, y):
        xf, yf = format_data(xi, yi, n_lag)
        x_.append(xf)
        y_.append(yf)
    return np.concatenate(x_), np.concatenate(y_)


def format_data_from_trials(x, y, n_lag):
    """
    To reshape lists containing multiple trials into a big array so as to form
    the training data for Wiener filter fitting
    Parameters
        x: a list containing multiple trials, as the inputs for Wiener filter
           fitting (a single ndarray is accepted and wrapped in a list)
        y: a list containing multiple trials, as the outputs for Wiener filter
           fitting (a single ndarray is accepted and wrapped in a list)
        n_lag: the number of time lags, an int number
    Returns
        out1: the reshaped data for the input list x, an ndarray
        out2: the reshaped data for the input list y, an ndarray
    """
    # isinstance (not type() ==) also accepts ndarray subclasses.
    if isinstance(x, np.ndarray):
        x = [x]
    if isinstance(y, np.ndarray):
        y = [y]
    x_, y_ = [], []
    for xi, yi in zip(x, y):
        # Lag each trial independently so windows never span trial boundaries.
        xf, yf = format_data(xi, yi, n_lag)
        x_.append(xf)
        y_.append(yf)
    return np.concatenate(x_), np.concatenate(y_)


def parameter_fit(x, y, c):
    """
    Solve for Wiener filter weights with optional ridge (L2) regularization.

    Parameters
        x: input data, an ndarray of shape (n_samples, n_features)
        y: output data, an ndarray of shape (n_samples, n_outputs)
        c: L2 regularization coefficient; 0 gives plain linear least squares
    Returns
        H: weight matrix of shape (n_features + 1, n_outputs); row 0 holds
           the bias weights

    Linear least squares:  H = (X^T X)^-1 X^T Y
    Ridge regression:      H = (X^T X + R)^-1 X^T Y, with R = c * I
    The bias term is not penalized, so R[0, 0] = 0.
    """
    x_plus_bias = np.c_[np.ones((np.size(x, 0), 1)), x]
    R = c * np.eye(x_plus_bias.shape[1])
    R[0, 0] = 0  # do not penalize the bias weight
    # Solve the normal equations directly; np.linalg.solve is more
    # numerically stable than forming the explicit inverse.
    H = np.linalg.solve(np.dot(x_plus_bias.T, x_plus_bias) + R,
                        np.dot(x_plus_bias.T, y))
    return H


def parameter_fit_with_sweep(x, y, C, kf):
    """
    Choose the best ridge coefficient by cross-validated decoding on the
    training data.

    Parameters
        x: input data, an ndarray of shape (n_samples, n_features)
        y: output data, an ndarray of shape (n_samples, n_outputs)
        C: iterable of candidate ridge coefficients to sweep
        kf: a cross-validation splitter exposing split(x)
    Returns
        best_c: the candidate from C with the highest mean CV r2 score
    """
    print('Sweeping ridge regularization using CV decoding on train data')
    per_c_scores = []
    for c in C:
        print('Testing c= ' + str(c))
        fold_scores = []
        for train_idx, test_idx in kf.split(x):
            # Fit on the training folds, score on the held-out fold.
            H = parameter_fit(x[train_idx, :], y[train_idx, :], c)
            preds = test_wiener_filter(x[test_idx, :], H)
            fold_scores.append(
                r2_score(y[test_idx, :], preds, multioutput='raw_values'))
        # Average the per-output scores over folds for this candidate c.
        per_c_scores.append(np.mean(np.asarray(fold_scores), axis=0))

    # Collapse over outputs, then pick the candidate with the best mean score.
    mean_scores = np.mean(np.asarray(per_c_scores), axis=1)
    return C[np.argmax(mean_scores)]


def train_wiener_filter(x, y, l2=0, n_l2=20, log_c_low=1, log_c_high=5, kfolds=4):
    """
    To train a linear decoder.

    Parameters
        x: input data, e.g. neural firing rates
        y: expected results, e.g. true EMG values
        l2: 0 or 1, switch for turning L2 regularization on or off
        n_l2: number of ridge coefficients to sweep when l2 is on
        log_c_low: log10 of the smallest ridge coefficient in the sweep
        log_c_high: log10 of the largest ridge coefficient in the sweep
        kfolds: number of cross-validation folds used during the sweep
    Returns
        H_reg: the fitted weight matrix (row 0 is the bias)
    """
    if l2 == 1:
        # Sweep candidate ridge coefficients with k-fold CV on the train data.
        C = np.logspace(log_c_low, log_c_high, n_l2)
        kf = KFold(n_splits=kfolds)
        best_c = parameter_fit_with_sweep(x, y, C, kf)
        print(best_c)
    else:
        # No regularization: plain least squares.
        best_c = 0
    H_reg = parameter_fit(x, y, best_c)
    return H_reg


def test_wiener_filter(x, H):
    """
    To get predictions from input data x with linear decoder
    x: input data
    H: parameter vector obtained by training
    """
    x_plus_bias = np.c_[np.ones((np.size(x, 0), 1)), x]
    y_pred = np.dot(x_plus_bias, H)
    return y_pred


def train_wiener(train_data, train_labels, test_data, test_labels, params):
    """
    Fit a Wiener-filter decoder on the training trials and report test R^2.

    :param train_data: training inputs, a list of arrays each shaped
                       (data_length, num_neurons)
    :param train_labels: training targets, a list of arrays each shaped
                         (data_length, num_dimension)
    :param test_data: test inputs, a list of arrays each shaped
                      (data_length, num_neurons)
    :param test_labels: test targets, a list of arrays each shaped
                        (data_length, num_dimension)
    :param params: dict of hyperparameters; must provide 'n_lags' and
                   'save_model'
    """
    # Unpack hyperparameters.
    n_lags = params['n_lags']
    save_model = params['save_model']

    # Reshape the trial lists into lagged arrays suitable for fitting.
    x_train, y_train = format_data_from_trials(train_data, train_labels, n_lags)
    x_test, y_test = format_data_from_trials(test_data, test_labels, n_lags)

    # Train the Wiener filter with ridge regularization enabled.
    H = train_wiener_filter(x_train, y_train, l2=1)

    # Decode the held-out test set and score it.
    y_pred = test_wiener_filter(x_test, H)
    r2 = r2_score(y_test, y_pred, multioutput='variance_weighted')
    print(f'Test r2_score: {r2: .4f}')

    # Optionally persist the trained weights to disk.
    if save_model:
        np.save('wiener_decoder.npy', H)


if __name__ == '__main__':
    # NOTE(review): these are placeholder strings, not real data — replace
    # with actual loaded arrays before running.
    # data should be a list whose elements are shaped (data_length, num_neurons)
    train_data = 'train_data'
    test_data = 'test_data'
    # labels should be a list whose elements are shaped (data_length, num_dimension)
    train_labels = 'train_labels'
    test_labels = 'test_labels'

    # Fetch the hyperparameters required by the Wiener decoder.
    params = get_wiener_params()

    # Train the Wiener model and report the test score.
    train_wiener(train_data, train_labels, test_data, test_labels, params)
