#include "common.cuh"
#include "utilities.cuh"
#include "inversion.cuh"
#include "blocklu.cuh"
#include "lutrtri.cuh"
#include "cpu_blas.h"
#include "gpu_blas.cuh"

namespace CPU {
//perform inversion of LU factorization in-place:
// after getrf, A holds L (unit-diagonal lower) and U (upper) packed in the
// same storage; the two trtri calls overwrite them with iL and iU packed the
// same way.
// NOTE(review): the asserts require getrf to have produced the identity
// permutation (ipiv[i] == i+1, LAPACK 1-based). assert() compiles out under
// NDEBUG, so a matrix that actually needs row pivoting would be inverted
// incorrectly with no diagnostic -- confirm callers only supply matrices
// whose LU needs no pivoting.
void inplace_trtri(double* A, lapack_int * ipiv, int n, int lda) {
    getrf(A, ipiv, n, n, lda);   // in-place LU factorization of the n x n block
    for(int i = 0; i < n; i++) {
        assert(ipiv[i] == i + 1);   // no row interchanges expected
    }
    trtri('L', 'U', A, n, lda);   // invert unit-diagonal lower factor L -> iL
    trtri('U', 'N', A, n, lda);   // invert non-unit upper factor U -> iU
}

// Grow a packed inverse-LU factorization from m x m to n x n.
// On entry the leading m x m block of `inverse` holds iL11/iU11 (packed, as
// produced by inplace_trtri) for A[:m, :m]; on exit `inverse` holds the
// packed iL/iU of the full A[:n, :n]. Uses the 2x2 block identities:
//   U12  = iL11 @ A12              L21  = A21 @ iU11
//   S    = A22 - L21 @ U12         (Schur complement; LU'd + inverted in place)
//   iL21 = -iL22 @ L21 @ iL11      iU12 = -iU11 @ U12 @ iU22
// Preconditions implied by the pointer arithmetic below:
//   `workplace` holds at least 2*(n - m)*m doubles (two (n-m) x m panels);
//   `ipiv` holds at least n - m entries.
// Matrices are column-major with leading dimensions lda (A) and ldi (inverse).
void update_lutrtri(double* A, double* inverse, lapack_int * ipiv, double* workplace, int m, int n, int lda, int ldi) {
    // scratch panels, named after the math symbols they will hold
    double* L21 = workplace;
    double* U12 = workplace + 1*(n - m)*m;

    // U12 = iL11 @ A[:M, M:]
    memcpy_submatrix(&A[m * lda], U12, m, n - m, lda, m);
    trmm('L', 'L', 'U', inverse, U12, m, n - m, ldi, m, 1.0);

    // L21 = A[M:, :M] @ iU11
    memcpy_submatrix(&A[m], L21, n - m, m, lda, n - m);
    trmm('R', 'U', 'N', inverse, L21, n - m, m, ldi, n - m, 1.0);

    // compute \hat{A22} = A22 - L21 @ U12 (Schur Complement), accumulated
    // directly into the trailing (n-m) x (n-m) block of `inverse`
    memcpy_submatrix(&A[m + m * lda], &inverse[m + m * ldi], n - m, n - m, lda, ldi);
    gemm(L21, U12, &inverse[m + m * ldi], n - m, n - m, m, n - m, m, ldi, -1.0, 1.0);

    // P22, L22, U22 = scipy.linalg.lu(h_A22)
    // iL22 = np.linalg.inv(L22)
    // iU22 = np.linalg.inv(U22)
    // (inplace_trtri asserts P22 == I, i.e. the Schur complement itself must
    // not require pivoting)
    inplace_trtri(&inverse[m + m * ldi], ipiv, n - m, ldi);

    // iL21 = -1*iL22 @ L21 @ iL11  (sign folded into the first trmm),
    // then copied into the lower-left block of `inverse`
    trmm('L', 'L', 'U', &inverse[m + m * ldi], L21, n - m, m, ldi, n - m, -1.0);
    trmm('R', 'L', 'U', inverse, L21, n - m, m, ldi, n - m, 1.0);
    memcpy_submatrix(L21, &inverse[m], n - m, m, n - m, ldi);

    // iU12 = -1*iU11 @ U12 @ iU22  (sign folded into the first trmm),
    // then copied into the upper-right block of `inverse`
    trmm('R', 'U', 'N', &inverse[m + m * ldi], U12, m, n - m, ldi, m, -1.0);
    trmm('L', 'U', 'N', inverse, U12, m, n - m, ldi, m, 1.0);
    memcpy_submatrix(U12, &inverse[m * ldi], m, n - m, m, ldi);
    
}

// Apply the packed inverse factors to a vector: rhs := iU @ (iL @ rhs),
// which equals A^{-1} @ rhs when the factorization needed no pivoting
// (the asserts in inplace_trtri enforce exactly that).
void solve_trmv(double* inverse, double* rhs, int n, int lda) {
    trmv('L', 'U', inverse, rhs, n, lda);   // rhs = iL @ rhs (unit lower part)
    trmv('U', 'N', inverse, rhs, n, lda);   // rhs = iU @ rhs (non-unit upper part)
}

};

namespace DEVICE{

// Device-side counterpart of CPU::update_lutrtri: grow the packed inverse-LU
// held in device matrix `inverse` (leading m x m block = iL11/iU11) to cover
// the full n x n device matrix A. The Schur complement is staged to the
// handle's pinned host buffer, factorized and inverted via the CPU LAPACK
// path, then copied back; all other products run through cuBLAS.
// Buffer sizes implied by the pointer arithmetic below:
//   handle.workspace  >= 4*(n - m)*m doubles on device (the out-of-place
//                        trmm wrappers here need separate in/out panels,
//                        unlike the in-place CPU version which needs only 2);
//   handle.pinned_mem >= (n - m)*(n - m) doubles, immediately followed by
//                        room for (n - m) lapack_int pivot entries.
// NOTE(review): the cast at `host_ipiv` assumes double alignment satisfies
// lapack_int alignment -- true on common ABIs, but worth confirming.
void update_lutrtri(SOLVER_HANDLE handle, double* A, double* inverse, int m, int n, int lda, int ldi) {
    // device scratch panels, named after the math symbols they will hold
    double* L21 = handle.workspace;
    double* U12 = handle.workspace + 1*(n - m)*m;
    double* iL21 = handle.workspace + 2*(n - m)*m;
    double* iU12 = handle.workspace + 3*(n - m)*m;

    // U12 = iL11 @ A[:M, M:]
    trmm(handle.cublasH, 'L', 'L', 'U', inverse, &A[m * lda], U12, m, n - m, ldi, lda, m, 1.0);

    // L21 = A[M:, :M] @ iU11
    trmm(handle.cublasH, 'R', 'U', 'N', inverse, &A[m], L21, n - m, m, ldi, lda, n - m, 1.0);

    // compute \hat{A22} = A22 - L21 @ U12 (Schur Complement), accumulated
    // directly into the trailing (n-m) x (n-m) block of `inverse`
    memcpy_submatrix(&A[m + m * lda], &inverse[m + m * ldi], n - m, n - m, lda, ldi);
    gemm(handle.cublasH, L21, U12, &inverse[m + m * ldi], n - m, n - m, m, n - m, m, ldi, -1.0, 1.0);

    // P22, L22, U22 = scipy.linalg.lu(h_A22)
    // iL22 = np.linalg.inv(L22)
    // iU22 = np.linalg.inv(U22)
    // round-trip through the pinned host buffer for the CPU factorize+invert;
    // CPU::inplace_trtri asserts that no pivoting occurred (asserts are
    // no-ops under NDEBUG -- see the note on that function)
    getMatrix(&inverse[m + m * ldi], handle.pinned_mem, n - m, n - m, ldi, n - m);
    lapack_int* host_ipiv = (lapack_int*)(handle.pinned_mem + (n - m)*(n - m));
    CPU::inplace_trtri(handle.pinned_mem, host_ipiv, n - m, n - m);
    setMatrix(handle.pinned_mem, &inverse[m + m * ldi], n - m, n - m, n - m, ldi);

    // iL21 = -1*iL22 @ L21 @ iL11  (sign folded into the first trmm); the
    // second trmm writes straight into the lower-left block of `inverse`
    trmm(handle.cublasH, 'L', 'L', 'U', &inverse[m + m * ldi], L21, iL21, n - m, m, ldi, n - m, n - m, -1.0);
    trmm(handle.cublasH, 'R', 'L', 'U', inverse, iL21, &inverse[m], n - m, m, ldi, n - m, ldi, 1.0);

    // iU12 = -1*iU11 @ U12 @ iU22  (sign folded into the first trmm); the
    // second trmm writes straight into the upper-right block of `inverse`
    trmm(handle.cublasH, 'R', 'U', 'N', &inverse[m + m * ldi], U12, iU12, m, n - m, ldi, m, m, -1.0);
    trmm(handle.cublasH, 'L', 'U', 'N', inverse, iU12, &inverse[m * ldi], m, n - m, ldi, m, ldi, 1.0);
    // block until all device work above has completed, surfacing any
    // asynchronous CUDA errors through CUDA_RT_CALL
    CUDA_RT_CALL(cudaDeviceSynchronize());
}

// Device-side solve: rhs := iU @ (iL @ rhs) = A^{-1} @ rhs, valid when the
// factorization required no pivoting (mirrors CPU::solve_trmv).
void solve_trmv(SOLVER_HANDLE handle, double* inverse, double* rhs, int n, int lda) {
    trmv(handle.cublasH, 'L', 'U', inverse, rhs, n, lda);   // rhs = iL @ rhs
    trmv(handle.cublasH, 'U', 'N', inverse, rhs, n, lda);   // rhs = iU @ rhs
}

};