import numpy as np
from numpy.linalg import inv as inv
from numpy.random import normal as normrnd
from scipy.linalg import khatri_rao as kr_prod
from scipy.stats import wishart
from scipy.stats import invwishart
from numpy.linalg import solve as solve
from numpy.linalg import cholesky as cholesky_lower
from scipy.linalg import cholesky as cholesky_upper
from scipy.linalg import solve_triangular as solve_ut
from numpy.random import multivariate_normal as mvnrnd
from scipy.stats import wishart
from scipy.stats import invwishart
from numpy.linalg import inv as inv
from osgeo import gdal, osr
import matplotlib.pylab as plt
import pandas as pd
from sklearn import metrics
from utils import *
from tqdm import tqdm
from sklearn import metrics
from time import time



def nrmse(y_pred, y_true):
    """Normalized RMSE: root-mean-square error divided by the mean |y_true|."""
    n = np.size(y_true)
    mean_sq_err = np.linalg.norm(y_pred - y_true) ** 2 / n
    mean_abs_true = np.sum(abs(y_true)) / n
    return np.sqrt(mean_sq_err) / mean_abs_true


def mvnrnd_pre(mu, Lambda):
    """Draw one sample from N(mu, Lambda^{-1}) given the precision matrix Lambda.

    Factors Lambda = R^T R (upper Cholesky) and solves R x = z with
    z ~ N(0, I), so that x ~ N(0, Lambda^{-1}).
    NOTE: overwrite_a=True means the caller's Lambda may be destroyed when
    it is a contiguous float array (matches the original behavior).
    """
    z = normrnd(size=(mu.shape[0], ))
    upper = cholesky_upper(Lambda, overwrite_a=True, check_finite=False)
    sample = solve_ut(upper, z, lower=False,
                      check_finite=False, overwrite_b=True)
    return mu + sample


def cov_mat(mat, mat_bar):
    """Return the scatter matrix (X - x_bar)^T (X - x_bar) of the rows of `mat`."""
    centered = mat - mat_bar
    return centered.T @ centered


def ten2mat(tensor, mode):
    """Unfold `tensor` along axis `mode` into a matrix (Fortran-order flattening)."""
    rolled = np.moveaxis(tensor, mode, 0)
    return rolled.reshape((tensor.shape[mode], -1), order='F')


def sample_factor_u(tau_sparse_tensor, tau_ind, U, V, X, beta0=1):
    """Sampling M-by-R factor matrix U and its hyperparameters (mu_u, Lambda_u).

    One Gibbs step: first draws the Gaussian-Wishart hyperparameters
    (mu_u, Lambda_u) from their posterior given the current U, then draws
    each row U[i, :] from its Gaussian conditional posterior. U is updated
    in place and also returned.

    Parameters
    ----------
    tau_sparse_tensor : observed tensor already multiplied by the precision tau.
    tau_ind : observation indicator already multiplied by tau.
    U, V, X : current factor matrices (M-by-R, N-by-R, T-by-R).
    beta0 : strength of the Gaussian-Wishart hyperprior (default 1).
    """

    dim1, rank = U.shape
    U_bar = np.mean(U, axis=0)
    temp = dim1 / (dim1 + beta0)
    var_mu_hyper = temp * U_bar
    # Posterior scale matrix for the Wishart over Lambda_u (identity prior scale).
    var_U_hyper = inv(np.eye(rank) + cov_mat(U, U_bar) + temp * beta0 * np.outer(U_bar, U_bar))
    var_Lambda_hyper = wishart.rvs(df=dim1 + rank, scale=var_U_hyper)
    # Draw mu_u | Lambda_u; its precision is (dim1 + beta0) * Lambda_u.
    var_mu_hyper = mvnrnd_pre(var_mu_hyper, (dim1 + beta0) * var_Lambda_hyper)

    # Per-row sufficient statistics: var3[:, :, i] is the posterior precision
    # of U[i, :], var4[:, i] the corresponding weighted-mean term.
    var1 = kr_prod(X, V).T
    var2 = kr_prod(var1, var1)
    var3 = (var2 @ ten2mat(tau_ind, 0).T).reshape([rank, rank, dim1]) + var_Lambda_hyper[:, :, None]
    var4 = var1 @ ten2mat(tau_sparse_tensor, 0).T + (var_Lambda_hyper @ var_mu_hyper)[:, None]

    for i in range(dim1):
        # Row posterior: N(precision^{-1} @ var4[:, i], precision^{-1}).
        U[i, :] = mvnrnd_pre(solve(var3[:, :, i], var4[:, i]), var3[:, :, i])

    return U


def sample_factor_v(tau_sparse_tensor, tau_ind, U, V, X, beta0=1):
    """Sampling N-by-R factor matrix V and its hyperparameters (mu_v, Lambda_v).

    Mirror image of the U update: draws the Gaussian-Wishart hyperparameters
    from their posterior given the current V, then draws each row V[j, :]
    from its Gaussian conditional posterior. V is updated in place and
    also returned.
    """

    dim2, rank = V.shape
    V_bar = np.mean(V, axis=0)
    temp = dim2 / (dim2 + beta0)
    var_mu_hyper = temp * V_bar
    # Posterior scale matrix for the Wishart over Lambda_v (identity prior scale).
    var_V_hyper = inv(np.eye(rank) + cov_mat(V, V_bar) + temp * beta0 * np.outer(V_bar, V_bar))
    var_Lambda_hyper = wishart.rvs(df=dim2 + rank, scale=var_V_hyper)
    # Draw mu_v | Lambda_v; its precision is (dim2 + beta0) * Lambda_v.
    var_mu_hyper = mvnrnd_pre(var_mu_hyper, (dim2 + beta0) * var_Lambda_hyper)

    # Per-row sufficient statistics over mode-1 unfoldings of the tensor.
    var1 = kr_prod(X, U).T
    var2 = kr_prod(var1, var1)
    var3 = (var2 @ ten2mat(tau_ind, 1).T).reshape([rank, rank, dim2]) + var_Lambda_hyper[:, :, None]
    var4 = var1 @ ten2mat(tau_sparse_tensor, 1).T + (var_Lambda_hyper @ var_mu_hyper)[:, None]
    for j in range(dim2):
        # Row posterior: N(precision^{-1} @ var4[:, j], precision^{-1}).
        V[j, :] = mvnrnd_pre(solve(var3[:, :, j], var4[:, j]), var3[:, :, j])
    return V


def mnrnd(M, U, V):
    """Draw one sample from the matrix normal distribution MN(M, U, V).

    M is the m-by-n mean, U the m-by-m row covariance, and V the n-by-n
    column covariance; the draw is M + chol(U) Z chol(V)^T with Z ~ N(0, I).
    """
    n_rows, n_cols = M.shape
    noise = np.random.randn(n_rows, n_cols)
    row_factor = cholesky_lower(U)
    col_factor = cholesky_lower(V)
    return M + row_factor @ noise @ col_factor.T


def sample_var_coefficient(X, time_lags):
    """Sample the VAR coefficient matrix A and noise covariance Sigma.

    Fits a vector autoregression X[t] ~ sum_k A_k X[t - time_lags[k]] on
    the temporal factors using its conjugate matrix-normal-inverse-Wishart
    posterior, and returns one joint draw (A, Sigma). A is the stacked
    (rank * d)-by-rank coefficient matrix, Sigma the rank-by-rank noise
    covariance.
    """
    dim, rank = X.shape
    d = time_lags.shape[0]
    tmax = np.max(time_lags)

    # Regression targets (rows with a full lag history) and lagged design matrix.
    Z_mat = X[tmax:dim, :]
    Q_mat = np.zeros((dim - tmax, rank * d))
    for k in range(d):
        Q_mat[:, k * rank:(k + 1) * rank] = X[tmax - time_lags[k]:dim - time_lags[k], :]
    var_Psi0 = np.eye(rank * d) + Q_mat.T @ Q_mat  # posterior row precision of A
    var_Psi = inv(var_Psi0)
    var_M = var_Psi @ Q_mat.T @ Z_mat  # posterior mean of A (ridge solution)
    # Posterior scale of the inverse-Wishart over Sigma (identity prior scale).
    var_S = np.eye(rank) + Z_mat.T @ Z_mat - var_M.T @ var_Psi0 @ var_M
    Sigma = invwishart.rvs(df=rank + dim - tmax, scale=var_S)

    # A | Sigma ~ MN(var_M, var_Psi, Sigma).
    return mnrnd(var_M, var_Psi, Sigma), Sigma


def sample_factor_x(tau_sparse_tensor, tau_ind, time_lags, U, V, X, A, Lambda_x):
    """Sampling T-by-R factor matrix X.

    Draws each temporal factor X[t, :] from its Gaussian conditional
    posterior under the VAR prior with stacked coefficients A and noise
    precision Lambda_x, combining the data likelihood with the VAR terms
    in which time t appears both as the target and as a lagged regressor.
    X is updated in place and also returned.
    """

    dim3, rank = X.shape
    tmax = np.max(time_lags)
    tmin = np.min(time_lags)
    d = time_lags.shape[0]
    # A0[:, :, k] is a copy of A with its k-th lag block zeroed; it is used
    # below to form the VAR residual excluding the term that involves X[t, :].
    A0 = np.dstack([A] * d)
    for k in range(d):
        A0[k * rank:(k + 1) * rank, :, k] = 0
    mat0 = Lambda_x @ A.T
    # With A_k = A[k*rank:(k+1)*rank, :]: mat1[k] = A_k @ Lambda_x and
    # mat2 = sum_k A_k @ Lambda_x @ A_k^T.
    mat1 = np.einsum('kij, jt -> kit', A.reshape([d, rank, rank]), Lambda_x)
    mat2 = np.einsum('kit, kjt -> ij', mat1, A.reshape([d, rank, rank]))

    # Data-likelihood sufficient statistics per time slice (mode-2 unfolding).
    var1 = kr_prod(V, U).T
    var2 = kr_prod(var1, var1)
    var3 = (var2 @ ten2mat(tau_ind, 2).T).reshape([rank, rank, dim3]) + Lambda_x[:, :, None]
    var4 = var1 @ ten2mat(tau_sparse_tensor, 2).T
    for t in range(dim3):
        Mt = np.zeros((rank, rank))
        Nt = np.zeros(rank)
        # Qt: VAR term where X[t, :] is the prediction target. For t < tmax
        # the negative indices wrap around; that case is handled by zeroing
        # Qt in the elif branch below.
        Qt = mat0 @ X[t - time_lags, :].reshape(rank * d)
        index = list(range(0, d))
        if t >= dim3 - tmax and t < dim3 - tmin:
            # Right edge: keep only lags whose target t + lag still exists.
            index = list(np.where(t + time_lags < dim3))[0]
        elif t < tmax:
            # Left edge: no complete lag history, so drop the target term.
            Qt = np.zeros(rank)
            index = list(np.where(t + time_lags >= tmax))[0]
        if t < dim3 - tmin:
            # Terms where X[t, :] acts as a lagged regressor of X[t+lag, :].
            Mt = mat2.copy()
            temp = np.zeros((rank * d, len(index)))
            n = 0
            for k in index:
                temp[:, n] = X[t + time_lags[k] - time_lags, :].reshape(rank * d)
                n += 1
            # Residual of each affected target, excluding X[t, :]'s own block.
            temp0 = X[t + time_lags[index], :].T - np.einsum('ijk, ik -> jk', A0[:, :, index], temp)
            Nt = np.einsum('kij, jk -> i', mat1[index, :, :], temp0)

        var3[:, :, t] = var3[:, :, t] + Mt
        if t < tmax:
            # Swap the VAR prior precision for an identity prior on the
            # initial time steps that lack a full lag history.
            var3[:, :, t] = var3[:, :, t] - Lambda_x + np.eye(rank)
        X[t, :] = mvnrnd_pre(solve(var3[:, :, t], var4[:, t] + Nt + Qt), var3[:, :, t])

    return X


def sample_precision_tau(sparse_tensor, tensor_hat, ind):
    """Gibbs update for the (dim1, dim2) precision matrix tau.

    Conjugate Gamma posterior with near-flat priors (shape = rate = 1e-6):
    one precision per (i, j) series, pooled over the time axis (axis 2).
    """
    shape = 1e-6 + 0.5 * np.sum(ind, axis=2)
    residual_sq = ((sparse_tensor - tensor_hat) ** 2) * ind
    rate = 1e-6 + 0.5 * np.sum(residual_sq, axis=2)
    return np.random.gamma(shape, 1 / rate)


def compute_mape(var, var_hat):
    """Mean absolute percentage error of a 1-D vector of observations.

    Note: divides by var.shape[0], so it is only a mean for 1-D inputs.
    """
    abs_pct_err = np.abs(var - var_hat) / var
    return np.sum(abs_pct_err) / var.shape[0]


def compute_rmse(var, var_hat):
    """Root mean squared error of a 1-D vector of observations.

    Note: divides by var.shape[0], so it is only a mean for 1-D inputs.
    """
    sq_err = (var - var_hat) ** 2
    return np.sqrt(np.sum(sq_err) / var.shape[0])


def BTTF(sparse_tensor, init, rank, time_lags, burn_iter, gibbs_iter, early_stop, decline_rate, multi_steps=1, vargin=0):
    """Bayesian Temporal Tensor Factorization, BTTF.

    Imputes a partially observed (dim1, dim2, dim3) tensor as a CP
    factorization U x V x X with a VAR prior on the temporal factor X,
    using Gibbs sampling (burn-in followed by sample averaging).

    Parameters
    ----------
    sparse_tensor : observed tensor; missing entries are 0 or NaN.
        NOTE: NaN entries are zeroed in place (the caller's array is mutated).
    init : dict with initial factor matrices "U", "V", "X".
    rank : factorization rank R.
    time_lags : 1-D int array of VAR lags.
    burn_iter, gibbs_iter : maximum burn-in iterations / samples to collect.
    early_stop : if truthy, end burn-in once the RMSE improvement over the
        last 100 iterations drops below `decline_rate`.
    decline_rate : RMSE-decline threshold for early stopping.
    multi_steps : number of forecast steps appended to the temporal factor.
    vargin : 0 -> scalar precision tau; 1 -> (dim1, dim2) matrix tau.

    Returns
    -------
    (tensor_hat, burn, U, V, X_new, A): averaged reconstruction with observed
    entries restored, actual burn-in count, last sampled factors, last
    temporal factor extended by `multi_steps` forecasts, last VAR coefficients.
    """

    dim1, dim2, dim3 = sparse_tensor.shape
    d = time_lags.shape[0]
    U = init["U"]
    V = init["V"]
    X = init["X"]
    # Observation mask: entries equal to 0 (or NaN) are treated as missing.
    if not np.isnan(sparse_tensor).any():
        ind = sparse_tensor != 0
    else:
        ind = ~np.isnan(sparse_tensor)
        sparse_tensor[np.isnan(sparse_tensor)] = 0  # in-place NaN -> 0

    # Accumulators over collected Gibbs samples (factor sums are kept for
    # parity with the original flow even though only their last draws are
    # returned).
    U_plus = np.zeros((dim1, rank))
    V_plus = np.zeros((dim2, rank))
    X_new_plus = np.zeros((dim3 + multi_steps, rank))
    A_plus = np.zeros((rank * d, rank))
    temp_hat = np.zeros(sparse_tensor.shape)
    show_iter = 20
    if vargin == 0:  # scalar precision tau
        tau = 1
    elif vargin == 1:  # matrix precision tau, one entry per (i, j) series
        tau = np.ones((dim1, dim2))
    tensor_hat_plus = np.zeros(sparse_tensor.shape)
    gibbs_sample = False
    gibbs = 0  # number of Gibbs samples actually collected
    burn = burn_iter  # actual number of burn-in iterations performed
    result_rmse = []  # RMSE on observed entries after each iteration
    for it in range(burn_iter + gibbs_iter):
        # One full Gibbs sweep: factors, VAR coefficients, then precision tau.
        if vargin == 0:  # scalar tau
            tau_ind = tau * ind
            tau_sparse_tensor = tau * sparse_tensor
            U = sample_factor_u(tau_sparse_tensor, tau_ind, U, V, X)
            V = sample_factor_v(tau_sparse_tensor, tau_ind, U, V, X)
            A, Sigma = sample_var_coefficient(X, time_lags)
            X = sample_factor_x(tau_sparse_tensor, tau_ind, time_lags, U, V, X, A, inv(Sigma))
            tensor_hat = np.einsum('is, js, ts -> ijt', U, V, X)
            tau = np.random.gamma(1e-6 + 0.5 * np.sum(ind), 1 / (1e-6 + 0.5 * np.sum(((sparse_tensor - tensor_hat)**2) * ind)))
        elif vargin == 1:  # matrix tau
            tau_ind = tau[:, :, None] * ind
            tau_sparse_tensor = tau[:, :, None] * sparse_tensor
            U = sample_factor_u(tau_sparse_tensor, tau_ind, U, V, X)
            V = sample_factor_v(tau_sparse_tensor, tau_ind, U, V, X)
            A, Sigma = sample_var_coefficient(X, time_lags)
            X = sample_factor_x(tau_sparse_tensor, tau_ind, time_lags, U, V, X, A, inv(Sigma))
            tensor_hat = np.einsum('is, js, ts -> ijt', U, V, X)
            tau = sample_precision_tau(sparse_tensor, tensor_hat, ind)
        rmse = np.sqrt(metrics.mean_squared_error(tensor_hat[ind], sparse_tensor[ind]))
        print("\nIter ：{} rmse:{}".format(it, round(rmse, 3)), end=' ')
        result_rmse.append(rmse)
        # Early stopping: once the RMSE gain over the last `early_iters`
        # iterations falls below decline_rate, end burn-in and start sampling.
        early_iters = 100
        if not gibbs_sample:
            if it >= early_iters and early_stop:
                print("比{}次前的rmse下降{}".format(early_iters, round(result_rmse[it - early_iters] - rmse, 3)), end=' ')
                if result_rmse[it - early_iters] - rmse < decline_rate:
                    burn = it
                    print('执行early_stop!', end=' ')
                    gibbs_sample = True
            else:
                if it + 1 > burn_iter:
                    gibbs_sample = True
        temp_hat += tensor_hat
        if (it + 1) % show_iter == 0 and it < burn_iter:
            # NOTE(review): this running average is computed and immediately
            # discarded; kept for fidelity with the original control flow.
            temp_hat = temp_hat / show_iter
            temp_hat = np.zeros(sparse_tensor.shape)

        X_new = np.zeros((dim3 + multi_steps, rank))

        if gibbs_sample:
            U_plus += U
            V_plus += V
            A_plus += A
            X_new[:dim3, :] = X.copy()
            # Roll the VAR forward to forecast `multi_steps` future factors.
            for t0 in range(multi_steps):
                X_new[dim3 + t0, :] = A.T @ X_new[dim3 + t0 - time_lags, :].reshape(rank * d)
            X_new_plus += X_new
            tensor_hat_plus += tensor_hat
            gibbs += 1
            if gibbs >= gibbs_iter:
                break
    # BUG FIX: average over the number of samples actually collected; the
    # original divided by gibbs_iter even when early stopping yielded fewer.
    tensor_hat = tensor_hat_plus / max(gibbs, 1)
    tensor_hat[ind] = sparse_tensor[ind]

    # BUG FIX: return the tracked burn-in count `burn` (the original returned
    # the constant `burn_iter`, hiding the early-stop bookkeeping).
    return tensor_hat, burn, U, V, X_new, A


def train_model(sparse_tensor, burn_iter, gibbs_iter, rank, scale, early_stop, decline_rate):
    """Scale and transpose the input tensor, run BTTF, and time the run.

    Returns (tensor_hat, burn, elapsed_seconds), where `burn` is the second
    value returned by BTTF.
    """
    scaled = sparse_tensor * scale
    scaled = scaled.transpose()  # reverse the axis order for BTTF
    dim1, dim2, dim3 = scaled.shape
    time_lags = np.array([1, 2])
    # Small random initialization for all three factor matrices.
    init = {
        "U": 0.1 * np.random.randn(dim1, rank),
        "V": 0.1 * np.random.randn(dim2, rank),
        "X": 0.1 * np.random.randn(dim3, rank),
    }
    tic = time()
    tensor_hat, burn, _, _, _, _ = BTTF(
        scaled, init, rank, time_lags, burn_iter, gibbs_iter, early_stop, decline_rate)
    return tensor_hat, burn, time() - tic
