import numpy as np
import pickle
from sklearn.metrics import r2_score
from scipy.linalg import inv
from parameters import get_kalman_params


class KalmanFilterRegression(object):
    """
    Kalman filter decoder mapping neural activity to kinematic state.

    Parameters
    ----------
    C : float, optional, default 1
        Scales the noise covariance of the kinematic state transition,
        effectively re-weighting how much the new neural evidence counts
        in each update.
    dt : float, optional, default 0.01
        Sampling interval of the decoder.
    h : float, optional, default 1000
        Required half-life used by the exponentially-weighted fit.

    Notes
    -----
    Follows the Kalman filter for neural decoding of Wu et al 2003
    (https://papers.nips.cc/paper/2178-neural-decoding-of-cursor-motion-using-a-kalman-filter.pdf)
    with the addition of the parameter C. A Matlab version was previously
    written by Dan Morris (http://dmorris.net/projects/neural_decoding.html#code).
    """

    def __init__(self, C=1, dt=0.01, h=1000):
        self.C = C
        self.dt = dt
        self.h = h

    def fit(self, X_kf_train, y_train):
        """
        Train the Kalman filter decoder by least squares.

        Parameters
        ----------
        X_kf_train : numpy 2d array, shape [n_samples (timebins), n_neurons]
            Neural data in Kalman filter format (see example files).
        y_train : numpy 2d array, shape [n_samples (timebins), n_outputs]
            Outputs being predicted.
        """
        # Standard Kalman nomenclature (Wu et al 2003):
        # the state x is what we predict (y_train); the observation z is
        # the neural data (X_kf_train). Both are kept as column-per-timebin.
        state_mat = np.matrix(y_train.T)
        obs_mat = np.matrix(X_kf_train.T)

        n_bins = state_mat.shape[1]

        # Transition model x_{t+1} = A x_t fit by least squares over
        # consecutive kinematic states.
        nxt = state_mat[:, 1:]
        cur = state_mat[:, 0:n_bins - 1]
        A = nxt * cur.T * inv(cur * cur.T)  # Transition matrix
        # Transition noise covariance: divide by n_bins-1 since only that
        # many state pairs enter the fit; C rescales the covariance.
        trans_resid = nxt - A * cur
        W = trans_resid * trans_resid.T / (n_bins - 1) / self.C

        # Measurement model z_t = H x_t (kinematics -> spikes), also by
        # least squares, with its residual covariance Q.
        H = obs_mat * state_mat.T * (inv(state_mat * state_mat.T))  # Measurement matrix
        meas_resid = obs_mat - H * state_mat
        Q = (meas_resid * (meas_resid.T)) / n_bins
        self.model = [A, W, H, Q]

    def fit_with_lambda(self, X_kf_train, y_train):
        """
        Train the decoder with an exponentially-weighted measurement fit.

        The transition model (A, W) is the ordinary least-squares fit, while
        the measurement model (H, Q) is accumulated with a forgetting factor
        derived from the half-life ``h`` and sampling interval ``dt``, so
        recent timebins weigh more than old ones.
        """
        # Forgetting factor giving weight 0.5 after one half-life.
        decay = 0.5 ** (self.dt / self.h)

        state_mat = np.matrix(y_train.T)
        obs_mat = np.matrix(X_kf_train.T)

        n_bins = state_mat.shape[1]

        nxt = state_mat[:, 1:]
        cur = state_mat[:, 0:n_bins - 1]

        A = nxt * cur.T * inv(cur * cur.T)
        trans_resid = nxt - A * cur
        W = trans_resid * trans_resid.T / (n_bins - 1) / self.C

        # Exponentially-weighted second-moment accumulators:
        # R ~ sum x x', S ~ sum z x', T ~ sum z z', EBS ~ effective sample size.
        dim_x = state_mat.shape[0]
        dim_z = obs_mat.shape[0]
        R = np.matrix(np.zeros([dim_x, dim_x]))
        S = np.matrix(np.zeros([dim_z, dim_x]))
        T = np.matrix(np.zeros([dim_z, dim_z]))
        EBS = 0

        for i in range(n_bins):
            x_i = state_mat[:, i:i + 1]
            z_i = obs_mat[:, i:i + 1]
            R = decay * R + x_i * x_i.T
            S = decay * S + z_i * x_i.T
            T = decay * T + z_i * z_i.T
            EBS = decay * EBS + 1

        H = S * (inv(R))
        Q = (T - H * S.T) / EBS

        self.model = [A, W, H, Q]

    def predict(self, X_kf_test, y_test):
        """
        Predict outcomes using the trained Kalman filter decoder.

        Parameters
        ----------
        X_kf_test : numpy 2d array, shape [n_samples (timebins), n_neurons]
            Neural data in Kalman filter format.
        y_test : numpy 2d array, shape [n_samples (timebins), n_outputs]
            The actual outputs. Needed (unlike other decoders) because the
            first value initializes the filter state.

        Returns
        -------
        y_test_predicted : numpy 2d array, shape [n_samples (timebins), n_outputs]
            The predicted outputs.
        """
        A, W, H, Q = self.model

        # Same nomenclature as fit(): x = state (kinematics), z = observation
        # (neural data), stored column-per-timebin.
        state_mat = np.matrix(y_test.T)
        obs_mat = np.matrix(X_kf_test.T)

        n_states = state_mat.shape[0]  # dimensionality of the state
        # traj holds the decoded state per timebin and becomes the return value.
        traj = np.empty(state_mat.shape)
        P_prior = np.matrix(np.zeros([n_states, n_states]))
        P = np.matrix(np.zeros([n_states, n_states]))
        est = state_mat[:, 0]  # initialize from the first true state
        traj[:, 0] = np.copy(np.squeeze(est))

        for t in range(state_mat.shape[1] - 1):
            # Predict step: propagate state and covariance through A.
            P_prior = A * P * A.T + W
            est_prior = A * est

            # Update step: fold in the neural observation via the Kalman gain.
            K = P_prior * H.T * inv(H * P_prior * H.T + Q)
            P = (np.matrix(np.eye(n_states)) - K * H) * P_prior
            est = est_prior + K * (obs_mat[:, t + 1] - H * est_prior)
            traj[:, t + 1] = np.squeeze(est)

        y_test_predicted = traj.T
        return y_test_predicted


def format_data(x, y, n_lag):
    """
    Reshape arrays into lagged form for Wiener filter fitting.

    Each output row concatenates n_lag consecutive timebins of x; y is
    trimmed so its rows align with the last timebin of each window.

    Parameters
        x: the input data for Wiener filter fitting, an ndarray
        y: the output data for Wiener filter fitting, an ndarray
        n_lag: the number of time lags, an int number
    Returns:
        out1: the reshaped array for x, an ndarray
        out2: the trimmed array for y, an ndarray
    """
    n_windows = x.shape[0] - n_lag + 1
    row_width = n_lag * x.shape[1]
    lagged = [x[start:start + n_lag, :].reshape(row_width) for start in range(n_windows)]
    return np.asarray(lagged), y[n_lag - 1:, :]


def format_data_from_trials(x, y, n_lag):
    """
    Reshape lists containing multiple trials into one big array to form
    the training data for Wiener filter fitting.

    Parameters
        x: a list containing multiple trials, as the inputs for Wiener filter fitting
        y: a list containing multiple trials, as the outputs for Wiener filter fitting
        n_lag: the number of time lags, an int number
    Returns
        out1: the reshaped data for the input list x, an ndarray
        out2: the reshaped data for the input list y, an ndarray
    """
    # Accept a bare ndarray as a single-trial convenience; isinstance (rather
    # than type comparison) also covers ndarray subclasses such as np.matrix.
    if isinstance(x, np.ndarray):
        x = [x]
    if isinstance(y, np.ndarray):
        y = [y]
    x_, y_ = [], []
    # Lag each trial independently so windows never straddle trial boundaries.
    for trial_x, trial_y in zip(x, y):
        lagged_x, trimmed_y = format_data(trial_x, trial_y, n_lag)
        x_.append(lagged_x)
        y_.append(trimmed_y)
    return np.concatenate(x_), np.concatenate(y_)


def train_kalman(train_data, train_labels, test_data, test_labels, params):
    """
    Train a Kalman filter decoder and report its test-set R^2 score.

    :param train_data: training data; a list whose elements have shape (data_length, num_neurons)
    :param train_labels: training labels; a list whose elements have shape (data_length, num_dimension)
    :param test_data: test data; a list whose elements have shape (data_length, num_neurons)
    :param test_labels: test labels; a list whose elements have shape (data_length, num_dimension)
    :param params: dict of hyperparameters used during training
    """
    # Read all hyperparameters up front so a missing key fails early.
    C = params['C']
    dt = params['dt']
    h = params['h']
    n_lags = params['n_lags']
    save_model = params['save_model']

    # Flatten the trial lists into the lagged arrays used for Wiener-style fitting.
    train_data, train_labels = format_data_from_trials(train_data, train_labels, n_lags)
    test_data, test_labels = format_data_from_trials(test_data, test_labels, n_lags)

    # Build and fit the Kalman decoder.
    model = KalmanFilterRegression(C=C, dt=dt, h=h)
    model.fit(train_data, train_labels)

    # Decode the held-out test set.
    pred_test_labels = model.predict(test_data, test_labels)

    # Variance-weighted R^2 across all output dimensions.
    r2 = r2_score(test_labels, pred_test_labels, multioutput='variance_weighted')
    print(f'Test r2_score: {r2: .4f}')

    # Optionally persist the fitted decoder to disk.
    if save_model:
        with open('Kalman_decoder.pkl', 'wb') as f:
            pickle.dump(model, f)


if __name__ == '__main__':
    # NOTE(review): these are placeholder strings, not real data — replace them
    # with actual loaded arrays/lists before running this script.
    # data is a list; each element has shape (data_length, num_neurons)
    train_data = 'train_data'
    test_data = 'test_data'
    # labels is a list; each element has shape (data_length, num_dimension)
    train_labels = 'train_labels'
    test_labels = 'test_labels'

    # Fetch the hyperparameters required by the Kalman decoder
    params = get_kalman_params()

    # Train the Kalman model
    train_kalman(train_data, train_labels, test_data, test_labels, params)
