#include "common.cuh"
#include "utilities.cuh"
#include "inversion.cuh"
#include "blocklu.cuh"
#include "lutrtri.cuh"
#include "cpu_blas.h"
#include "gpu_blas.cuh"

namespace CPU {

// Invert the n x n column-major matrix A in place (leading dimension lda).
// ipiv receives the pivot indices produced by the LU factorization.
void inplace_inversion(double* A, lapack_int * ipiv, int n, int lda) {
    //call getrf then use getri to inverse the matrix A
    getrf(A, ipiv, n, n, lda);
    getri(A, ipiv, n, lda);
}

// Blocked Schur-complement update of a matrix inverse (CPU path).
//
// A is n x n column-major (leading dimension lda); on entry `inverse`
// (leading dimension ldi) holds the inverse of the leading m x m block
// A11 in its top-left corner.  On exit `inverse` holds the inverse of
// the full n x n matrix.  With X = A[:m, m:], Y = A[m:, :m],
// R = A[m:, m:], the code applies the block-inverse identities around
// the Schur complement S = R - Y A11^{-1} X.
//
// `workplace` must provide at least 2*(n-m)*m + (n-m)*(n-m) doubles:
// m*(n-m) for the A12 copy, (n-m)*m for A21, then (n-m)*(n-m) for A22
// (filled by the same wide copy as A21 -- see below).  `ipiv` is pivot
// storage for inverting the (n-m) x (n-m) Schur block.
void update_inversion(double* A, double* inverse, lapack_int * ipiv, double* workplace, int m, int n, int lda, int ldi) {
    double* A12 = workplace;
    double* A21 = workplace + (n - m) * m;
    double* A22 = workplace + 2 * (n - m) * m;
    memcpy_submatrix(&A[m * lda], A12, m, n - m, lda, m);
    // Deliberately copies ALL n columns of the bottom (n - m) rows: the
    // first m columns land in A21 and the remaining n - m columns land
    // exactly at A22 (A21 + (n-m)*m == A22), filling both blocks at once.
    memcpy_submatrix(&A[m], A21, n - m, n, lda, n - m);

    // iA[:M, M:N] = iA11 @ A[:M, M:N], compute A^{-1}X on (1, 2)
    gemm (inverse, A12, &inverse[m * ldi], m, n - m, m, ldi, m, ldi, 1.0, 0.0);

    // iA[M:, :M] = A[M:, :M] @ iA[:M, :M], compute  YA^{-1} on (2, 1)
    gemm (A21, inverse, &inverse[m], n - m, m, m, n - m, ldi, ldi, 1.0, 0.0);

    // iA[M:N, M:N] = A[M:N, M:N] - A[M:N, :M] @ iA[:M, M:N] compute R - YA^{-1}X on (2, 2)
    memcpy_submatrix(A22, &inverse[m + m * ldi], n - m, n - m, n - m, ldi);
    gemm (A21, &inverse[m * ldi], &inverse[m + m * ldi], n - m, n - m, m, n - m, ldi, ldi, -1.0, 1.0);

    // iA[M:N, M:N] = np.linalg.inv(iA[M:N, M:N]), compute inverse of R - YA^{-1}X
    inplace_inversion(&inverse[m + m * ldi], ipiv, n - m, ldi);

    // iA[:M, M:N] = (-1) * iA[:M, M:N] @ iA[M:N, M:N], compute A^{-1}X(YA^{-1}X - R)^{-1}
    // (computed into the A12 scratch first, then copied back, because the
    // product overwrites one of its own inputs otherwise)
    gemm (&inverse[m * ldi], &inverse[m + m * ldi], A12, m, n - m, n - m, ldi, ldi, m, -1.0, 0.0);
    memcpy_submatrix(A12, &inverse[m * ldi], m, n - m, m, ldi);

    // iA[:M, :M] = iA[:M, :M] - iA[:M, M:N] @ iA[M:, :M], A^{-1} - (A^{-1}X(YA^{-1}X - R)^{-1})(YA^{-1})
    gemm (&inverse[m * ldi], &inverse[m], inverse, m, m, n - m, ldi, ldi, ldi, -1.0, 1.0);

    // iA[M:, :M] = (-1) * iA[M:N, M:N] @ iA[M:, :M], compute (YA^{-1}X - R)^{-1} YA^{-1}
    gemm (&inverse[m + m * ldi], &inverse[m], A21, n - m, m, n - m, ldi, ldi, n - m, -1.0, 0.0);
    memcpy_submatrix(A21, &inverse[m], n - m, m, n - m, ldi);
}


};

namespace DEVICE {
// Invert a 2 x 2 column-major matrix A (leading dimension lda) in place
// using the closed-form adjugate formula.  Intended for a <<<1, 1>>>
// launch: a single thread handles all four entries.
__global__ 
void inplace_inverse_2x2_kernel(double* A, int n, int lda) {
    const double a11 = A[0];
    const double a21 = A[1];
    const double a12 = A[lda];
    const double a22 = A[lda + 1];
    const double inv_det = 1.0 / (a11 * a22 - a21 * a12);
    A[0]       = inv_det *  a22;
    A[1]       = inv_det * -a21;
    A[lda]     = inv_det * -a12;
    A[lda + 1] = inv_det *  a11;
}

// Host-side launcher for the single-thread 2 x 2 in-place inversion kernel.
void inplace_inverse_2x2(double* A, int n, int lda) {
    const dim3 grid(1);
    const dim3 block(1);
    inplace_inverse_2x2_kernel<<<grid, block>>>(A, n, lda);
}

// Write an m x n identity pattern into `eye` (column-major, leading
// dimension lda): ones on the diagonal, zeros elsewhere.
// Launch with a 2-D grid/block covering at least m x n threads.
__global__ void set_eye_kernel(double* eye, int m, int n, int lda) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= m || col >= n)
        return;
    eye[row + col * lda] = (row == col) ? 1.0 : 0.0;
}

// Fill the n x n matrix `eye` (leading dimension lda) with the identity.
void set_eye(double* eye, int n, int lda) {
    constexpr int kTile = 32;                         // 32 x 32 = 1024 threads/block
    const int blocks = (n + kTile - 1) / kTile;       // ceil-div coverage of n
    const dim3 block(kTile, kTile, 1);
    const dim3 grid(blocks, blocks, 1);
    set_eye_kernel<<<grid, block>>>(eye, n, n, lda);
}

// Invert the n x n matrix A in place on the device.
// n == 2 is special-cased with the closed-form 2x2 kernel; otherwise
// LU-factorize A (getrf), solve A X = I into `workspace` (getrs against
// an identity built by set_eye), and copy the result back over A.
// `workspace` must hold at least n*n doubles (used here with leading
// dimension n); `devInfo` receives the solver status.
// NOTE(review): `workspace` is handed to getrf as scratch and then
// overwritten by set_eye -- confirm getrf is done with it by then
// (e.g. both run on the same stream).
void inplace_inverse(cusolverDnHandle_t handle, double* workspace, double* A, int* ipiv, int n, int lda, int* devInfo) {
    if(n == 2) {
        inplace_inverse_2x2(A, n, lda);
    } else {
        getrf(handle, workspace, A, n, n, lda, devInfo);
        set_eye(workspace, n, n);
        getrs(handle, A, ipiv, workspace, n, n, lda, n, devInfo);
        // getri(solverH.cublasH, A, solverH.workspace, n, lda, n, solverH.devInfo);
        memcpy_submatrix(workspace, A, n, n, n, lda);
    }
}

// Sum `sum` across the 32 lanes of the calling warp with a shfl-down
// tree (offsets 16, 8, 4, 2, 1); lane 0 ends up with the warp total,
// other lanes hold partial sums.  All 32 lanes must participate
// (full 0xffffffff mask).
template<typename T>
__device__ __forceinline__ T warpReduceSum(T sum){
    #pragma unroll
    for (int offset = 16; offset > 0; offset >>= 1) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }
    return sum;
}

// Register <-> memory movers.  Each copies one 16-/8-/4-byte package
// through a single vector-typed access; `source` and `destination` must
// satisfy the alignment of the vector type (16 bytes for float4/double2,
// 8 for float2/double, 4 for float).
// Loads now take a const source pointer (const-correct; the old
// load_double2 const_cast is gone), and the scalar variants use a plain
// assignment instead of a no-op reinterpret_cast.
__device__ __forceinline__ void load_float4(float* destination, const float* source) {
    reinterpret_cast<float4*>(destination)[0] = reinterpret_cast<const float4*>(source)[0];
}
__device__ __forceinline__ void load_float2(float* destination, const float* source) {
    reinterpret_cast<float2*>(destination)[0] = reinterpret_cast<const float2*>(source)[0];
}
__device__ __forceinline__ void load_float(float* destination, const float* source) {
    destination[0] = source[0];
}
__device__ __forceinline__ void load_double2(double* destination, const double* source) {
    reinterpret_cast<double2*>(destination)[0] = reinterpret_cast<const double2*>(source)[0];
}
__device__ __forceinline__ void load_double(double* destination, const double* source) {
    destination[0] = source[0];
}
// Stores mirror the loads; the source registers are read-only.
__device__ __forceinline__ void store_float4(float* destination, const float* source) {
    reinterpret_cast<float4*>(destination)[0] = reinterpret_cast<const float4*>(source)[0];
}
__device__ __forceinline__ void store_float2(float* destination, const float* source) {
    reinterpret_cast<float2*>(destination)[0] = reinterpret_cast<const float2*>(source)[0];
}
__device__ __forceinline__ void store_float(float* destination, const float* source) {
    destination[0] = source[0];
}
__device__ __forceinline__ void store_double2(double* destination, const double* source) {
    reinterpret_cast<double2*>(destination)[0] = reinterpret_cast<const double2*>(source)[0];
}
__device__ __forceinline__ void store_double(double* destination, const double* source) {
    destination[0] = source[0];
}


// Skinny GEMM: C = A * B where A is 2 x n (ldA fixed at 2), B is n x n
// (leading dimension ldB), C is 2 x n (ldC fixed at 2); all column-major.
// Work split: each warp owns two consecutive columns of C; the 32 lanes
// partition the inner dimension in 2-column chunks of A (2-row chunks of
// B), accumulate private 2x2 tiles, and merge them with a warp shuffle
// reduction.  Launch with a 1-D grid; blockDim.x must be a multiple of
// 32 (dgemm_KNN uses 512 threads = 16 warps -> 32 columns per block).
__global__ void dgemm_2NN_kernel(double *A, double *B, double *C, int n, int ldB) {
  constexpr int ldA = 2;
  constexpr int ldC = 2;
  int warp_id     = threadIdx.x / 32;
  int thread_lane = threadIdx.x % 32;
  int num_warps   = blockDim.x  / 32;
  // tileA 2 * 2
  // tileB 2 * 2
  // tileC 2 * 2
  double dA_reg[4] = {};
  double dB_reg[4] = {};
  double dC_reg[4] = {};

  dC_reg[0] = 0;
  dC_reg[1] = 0;
  dC_reg[2] = 0;
  dC_reg[3] = 0;

  int col_B_start = blockIdx.x * (num_warps * 2) + warp_id * 2;
  
  // col_B_start is warp-uniform, so this early exit keeps whole warps
  // together (required by the full-mask shuffles in warpReduceSum).
  if (col_B_start >= n) {
    return;
  }
  for (int col_A_start = thread_lane * 2; col_A_start < n; col_A_start += warpSize * 2) {
    // load tile_A; zero-pad the second column so the FMA loop below
    // contributes nothing for a missing (odd-n) column
    load_double2(&dA_reg[0], &A[0 + col_A_start * ldA]);
    if (col_A_start + 1 < n) {
      load_double2(&dA_reg[2], &A[0 + (col_A_start + 1) * ldA]);
    }
    else {
      dA_reg[2] = 0;
      dA_reg[3] = 0;
    }

    // load tile_B
    // NOTE(review): these double2 loads read B[col_A_start + 1] even when
    // col_A_start + 1 >= n; the value is multiplied by a zeroed dA entry,
    // but the read itself can run past the column end for odd n --
    // confirm B's allocation covers it.
    load_double2(&dB_reg[0], &B[col_A_start + col_B_start * ldB]);
    if (col_B_start + 1 < n)
      load_double2(&dB_reg[2], &B[col_A_start + (col_B_start + 1) * ldB]);
    else {
      dB_reg[2] = 0;
      dB_reg[3] = 0;
    }

    // accumulate the private 2x2 += 2x2 * 2x2 tile product
    #pragma unroll
    for (int i = 0; i < 2; i ++) {
      for (int j = 0; j < 2; j ++) {
        for (int k = 0; k < 2; k ++) {
          dC_reg[i + j * 2] += dA_reg[i + k * 2] * dB_reg[k + j * 2];
        }
      }
    }
  }
  // warp level reduce dC_reg[0-4]
  #pragma unroll(4)
  for (int i = 0; i < 4; i ++) {
    dC_reg[i] = warpReduceSum(dC_reg[i]);
  }
  // thead_lane 0 store reduce dC to global
  if (thread_lane == 0) {
    store_double2(&C[0 + col_B_start * ldC], &dC_reg[0]);
    if (col_B_start + 1 < n)
      store_double2(&C[0 + (col_B_start + 1) * ldC], &dC_reg[2]);
  }
}



// Skinny GEMM: C = A * B where A is 4 x n (ldA fixed at 4), B is n x n
// (leading dimension ldB), C is 4 x n (ldC fixed at 4); all column-major.
// Each warp produces two consecutive columns of C; the 32 lanes partition
// the inner dimension in 2-column chunks of A (2-row chunks of B),
// accumulate private 4x2 tiles and merge them with a warp shuffle
// reduction.  Launch with a 1-D grid; blockDim.x must be a multiple of 32
// (dgemm_KNN uses 256 threads = 8 warps -> 16 columns per block).
__global__ void dgemm_4NN_kernel(double *A, double *B, double *C, int n, int ldB) {
  constexpr int ldA = 4;
  constexpr int ldC = 4;
  int warp_id     = threadIdx.x / 32;
  int thread_lane = threadIdx.x % 32;
  int num_warps   = blockDim.x  / 32;
  // tileA 4 * 2
  // tileB 2 * 2
  // tileC 4 * 2
  double dA_reg[8] = {};
  double dB_reg[4] = {};
  double dC_reg[8] = {};   // zero-initialized accumulators

  int col_B_start = blockIdx.x * (num_warps * 2) + warp_id * 2;

  // col_B_start is warp-uniform, so this early exit keeps whole warps
  // together (required by the full-mask shuffles in warpReduceSum).
  if (col_B_start >= n) {
    return;
  }
  for (int col_A_start = thread_lane * 2; col_A_start < n; col_A_start += warpSize * 2) {
    // load tile_A (4 rows x 2 cols); zero-pad the second column for odd n
    load_double2(&dA_reg[0], &A[0 + col_A_start * ldA]);
    load_double2(&dA_reg[2], &A[2 + col_A_start * ldA]);
    if (col_A_start + 1 < n) {
      load_double2(&dA_reg[4], &A[0 + (col_A_start + 1) * ldA]);
      load_double2(&dA_reg[6], &A[2 + (col_A_start + 1) * ldA]);
    }
    else {
      dA_reg[4] = 0;
      dA_reg[5] = 0;
      dA_reg[6] = 0;
      dA_reg[7] = 0;
    }

    // load tile_B (2 rows x 2 cols)
    // NOTE(review): the double2 loads read B[col_A_start + 1] even when
    // col_A_start + 1 >= n; the value is multiplied by zeroed dA entries,
    // but the read itself can run past the column end for odd n.
    load_double2(&dB_reg[0], &B[col_A_start + col_B_start * ldB]);
    if (col_B_start + 1 < n)
      load_double2(&dB_reg[2], &B[col_A_start + (col_B_start + 1) * ldB]);
    else {
      dB_reg[2] = 0;
      dB_reg[3] = 0;
    }

    // accumulate the private 4x2 += 4x2 * 2x2 tile product
    #pragma unroll
    for (int i = 0; i < 4; i ++) {
      for (int j = 0; j < 2; j ++) {
        for (int k = 0; k < 2; k ++) {
          dC_reg[i + j * 4] += dA_reg[i + k * 4] * dB_reg[k + j * 2];
        }
      }
    }
  }
  // warp level reduction of the eight accumulators
  #pragma unroll
  for (int i = 0; i < 8; i ++) {
    dC_reg[i] = warpReduceSum(dC_reg[i]);
  }
  // lane 0 stores the reduced 4x2 tile to global memory
  if (thread_lane == 0) {
    store_double2(&C[0 + col_B_start * ldC], &dC_reg[0]);
    store_double2(&C[2 + col_B_start * ldC], &dC_reg[2]);
    // BUG FIX: the second store below was outside the if (missing braces),
    // so rows 2..3 of column col_B_start + 1 were written unconditionally,
    // past the end of C when that column does not exist (odd n).
    if (col_B_start + 1 < n) {
      store_double2(&C[0 + (col_B_start + 1) * ldC], &dC_reg[4]);
      store_double2(&C[2 + (col_B_start + 1) * ldC], &dC_reg[6]);
    }
  }
}


// Skinny GEMM: C(n x 2) = A(n x n) * B(n x 2), all column-major with
// leading dimensions ldA/ldB/ldC.
// Work split: each warp owns a 2-row strip of A/C; the 32 lanes
// partition the inner dimension in 2-column chunks of A, accumulate
// private 2x2 tiles, and merge them with a warp shuffle reduction.
// Launch with a 1-D grid; blockDim.x must be a multiple of 32.
__global__ void dgemm_N2N_kernel(double *A, double *B, double *C, int n, int ldA, int ldB, int ldC) {
  int warp_id     = threadIdx.x / 32;
  int thread_lane = threadIdx.x % 32;
  int num_warps   = blockDim.x  / 32;
  // tileA 2 * 2
  // tileB 2 * 2
  // tileC 2 * 2
  double dA_reg[4] = {};
  double dB_reg[4] = {};
  double dC_reg[4] = {};

  dC_reg[0] = 0;
  dC_reg[1] = 0;
  dC_reg[2] = 0;
  dC_reg[3] = 0;

  int row_A_start = blockIdx.x * (num_warps * 2) + warp_id * 2;
  
  // row_A_start is warp-uniform, so this early exit keeps whole warps
  // together (required by the full-mask shuffles in warpReduceSum).
  if (row_A_start >= n) {
    return;
  }
  for (int col_A_start = thread_lane * 2; col_A_start < n; col_A_start += warpSize * 2) {
    // load tile_A; zero-pad the second column for odd n.
    // NOTE(review): each double2 also reads row row_A_start + 1, which
    // can be one element past the column end when n is odd -- confirm
    // the allocation covers it.
    load_double2(&dA_reg[0], &A[row_A_start + 0 + col_A_start * ldA]);
    if (col_A_start + 1 < n) {
      load_double2(&dA_reg[2], &A[row_A_start + 0 + (col_A_start + 1) * ldA]);
    }
    else {
      dA_reg[2] = 0;
      dA_reg[3] = 0;
    }

    // load tile_B (both columns)
    // NOTE(review): these reads touch B[col_A_start + 1] even when
    // col_A_start + 1 >= n; the value multiplies zeroed dA entries but
    // the read itself may be out of bounds for odd n.
    load_double2(&dB_reg[0], &B[col_A_start + 0 * ldB]);
    load_double2(&dB_reg[2], &B[col_A_start + 1 * ldB]);

    // accumulate the private 2x2 += 2x2 * 2x2 tile product
    #pragma unroll
    for (int i = 0; i < 2; i ++) {
      for (int j = 0; j < 2; j ++) {
        for (int k = 0; k < 2; k ++) {
          dC_reg[i + j * 2] += dA_reg[i + k * 2] * dB_reg[k + j * 2];
        }
      }
    }
  }
  // warp level reduce dC_reg[0-4]
  #pragma unroll(4)
  for (int i = 0; i < 4; i ++) {
    dC_reg[i] = warpReduceSum(dC_reg[i]);
  }
  // thead_lane 0 store reduce dC to global
  // NOTE(review): these double2 stores write rows row_A_start and
  // row_A_start + 1 unconditionally; for odd n the last strip writes one
  // row past C -- confirm callers only use even n here.
  if (thread_lane == 0) {
    store_double2(&C[row_A_start + 0 * ldC], &dC_reg[0]);
    store_double2(&C[row_A_start + 1 * ldC], &dC_reg[2]);
  }
}


// Skinny GEMM (v3): C(n x 2) = A(n x n) * B(n x 2), column-major with
// leading dimensions ldA/ldB/ldC.
// Work split: the 32 lanes of every warp own the same 2-row strips of
// A/C (row_A_start, row_A_start + 1); warps partition the inner
// dimension, and the per-warp partials are combined through a
// shared-memory block reduction with atomicAdd (double atomicAdd
// requires SM60+).  Launch: 1-D grid/block, blockDim.x a multiple of 32.
__global__ void dgemm_N2N_kernelv3(double *A, double *B, double *C, int n, int ldA, int ldB, int ldC) {
  int warp_id     = threadIdx.x / 32;
  int thread_lane = threadIdx.x % 32;
  int num_warps   = blockDim.x  / 32;
  // tileA 2 * 2, tileB 2 * 2, tileC 2 * 2
  double dA_reg[4] = {};
  double dB_reg[4] = {};
  double dC_reg[4] = {};   // zero-initialized accumulators

  // dC_shared[t][lane]: block-wide accumulator for element t (0..3) of
  // the 2x2 C tile owned by lane `lane`.
  __shared__ double dC_shared[4][32];
  if (warp_id < 4) {
    dC_shared[warp_id][thread_lane] = 0;
  }
  // BUG FIX: without this barrier, warps >= 4 could atomicAdd into
  // dC_shared before warps 0..3 finished zeroing it.
  __syncthreads();

  int row_A_start = blockIdx.x * (warpSize * 2) + thread_lane * 2;
  // BUG FIX: out-of-range threads previously returned here and skipped
  // the __syncthreads() below (undefined behavior); keep them resident
  // and just mask their work instead.
  const bool in_range = row_A_start < n;

  if (in_range) {
    for (int col_A_start = warp_id * 2; col_A_start < n; col_A_start += num_warps * 2) {
      // load tile_A (2 rows x 2 cols); zero-pad the second column when
      // the inner dimension is odd.
      // NOTE(review): each double2 also reads row row_A_start + 1, which
      // can be one element past the column end for odd n -- confirm the
      // allocation covers it.
      load_double2(&dA_reg[0], &A[row_A_start + 0 + col_A_start * ldA]);
      if (col_A_start + 1 < n) {
        load_double2(&dA_reg[2], &A[row_A_start + 0 + (col_A_start + 1) * ldA]);
      } else {
        dA_reg[2] = 0;
        dA_reg[3] = 0;
      }

      // load tile_B (2 rows x 2 cols); scalar loads at the odd-n tail so
      // we never read past the end of a column of B.
      if (col_A_start + 1 < n) {
        load_double2(&dB_reg[0], &B[col_A_start + 0 * ldB]);
        load_double2(&dB_reg[2], &B[col_A_start + 1 * ldB]);
      } else {
        dB_reg[0] = B[col_A_start + 0 * ldB];
        dB_reg[1] = 0;
        dB_reg[2] = B[col_A_start + 1 * ldB];
        dB_reg[3] = 0;
      }

      // accumulate the private 2x2 += 2x2 * 2x2 tile product
      #pragma unroll
      for (int i = 0; i < 2; i ++) {
        for (int j = 0; j < 2; j ++) {
          for (int k = 0; k < 2; k ++) {
            dC_reg[i + j * 2] += dA_reg[i + k * 2] * dB_reg[k + j * 2];
          }
        }
      }
    }
    // fold this thread's partial tile into the block-wide accumulators
    #pragma unroll(4)
    for (int i = 0; i < 4; i ++) {
      atomicAdd(&(dC_shared[i][thread_lane]), dC_reg[i]);
    }
  }

  __syncthreads();
  // warp 0 writes the reduced tiles to global memory
  if (warp_id == 0 && in_range) {
    C[row_A_start + 0 + 0 * ldC] = dC_shared[0][thread_lane];
    C[row_A_start + 0 + 1 * ldC] = dC_shared[2][thread_lane];
    // BUG FIX: guard the second row so an odd n does not write past C.
    if (row_A_start + 1 < n) {
      C[row_A_start + 1 + 0 * ldC] = dC_shared[1][thread_lane];
      C[row_A_start + 1 + 1 * ldC] = dC_shared[3][thread_lane];
    }
  }
}

// Skinny GEMM (v3): C(n x 4) = A(n x n) * B(n x 4), column-major with
// leading dimensions ldA/ldB/ldC.
// Work split: the 32 lanes of every warp own the same 2-row strips of
// A/C; warps partition the inner dimension, and the per-warp partials
// are combined through a shared-memory block reduction with atomicAdd
// (double atomicAdd requires SM60+).
// Launch: 1-D grid/block, blockDim.x a multiple of 32.
__global__ void dgemm_N4N_kernelv3(double *A, double *B, double *C, int n, int ldA, int ldB, int ldC) {
  int warp_id     = threadIdx.x / 32;
  int thread_lane = threadIdx.x % 32;
  int num_warps   = blockDim.x  / 32;
  // tileA 2 * 2, tileB 2 * 4, tileC 2 * 4
  double dA_reg[4] = {};
  double dB_reg[8] = {};
  double dC_reg[8] = {};   // zero-initialized accumulators

  // dC_shared[t][lane]: block-wide accumulator for element t (0..7) of
  // the 2x4 C tile owned by lane `lane`.
  __shared__ double dC_shared[8][32];
  if (warp_id < 8) {
    dC_shared[warp_id][thread_lane] = 0;
  }
  // BUG FIX: without this barrier, warps >= 8 could atomicAdd into
  // dC_shared before warps 0..7 finished zeroing it.
  __syncthreads();

  int row_A_start = blockIdx.x * (warpSize * 2) + thread_lane * 2;
  // BUG FIX: out-of-range threads previously returned here and skipped
  // the __syncthreads() below (undefined behavior); keep them resident
  // and just mask their work instead.
  const bool in_range = row_A_start < n;

  if (in_range) {
    for (int col_A_start = warp_id * 2; col_A_start < n; col_A_start += num_warps * 2) {
      // load tile_A (2 rows x 2 cols); zero-pad the second column when
      // the inner dimension is odd.
      // NOTE(review): each double2 also reads row row_A_start + 1, which
      // can be one element past the column end for odd n -- confirm the
      // allocation covers it.
      load_double2(&dA_reg[0], &A[row_A_start + 0 + col_A_start * ldA]);
      if (col_A_start + 1 < n) {
        load_double2(&dA_reg[2], &A[row_A_start + 0 + (col_A_start + 1) * ldA]);
      } else {
        dA_reg[2] = 0;
        dA_reg[3] = 0;
      }

      // load tile_B (2 rows x 4 cols); scalar loads at the odd-n tail so
      // we never read past the end of a column of B.
      if (col_A_start + 1 < n) {
        load_double2(&dB_reg[0], &B[col_A_start + 0 * ldB]);
        load_double2(&dB_reg[2], &B[col_A_start + 1 * ldB]);
        load_double2(&dB_reg[4], &B[col_A_start + 2 * ldB]);
        load_double2(&dB_reg[6], &B[col_A_start + 3 * ldB]);
      } else {
        dB_reg[0] = B[col_A_start + 0 * ldB];
        dB_reg[1] = 0;
        dB_reg[2] = B[col_A_start + 1 * ldB];
        dB_reg[3] = 0;
        dB_reg[4] = B[col_A_start + 2 * ldB];
        dB_reg[5] = 0;
        dB_reg[6] = B[col_A_start + 3 * ldB];
        dB_reg[7] = 0;
      }

      // accumulate the private 2x4 += 2x2 * 2x4 tile product
      #pragma unroll
      for (int i = 0; i < 2; i ++) {
        for (int j = 0; j < 4; j ++) {
          for (int k = 0; k < 2; k ++) {
            dC_reg[i + j * 2] += dA_reg[i + k * 2] * dB_reg[k + j * 2];
          }
        }
      }
    }
    // fold this thread's partial tile into the block-wide accumulators
    #pragma unroll(8)
    for (int i = 0; i < 8; i ++) {
      atomicAdd(&(dC_shared[i][thread_lane]), dC_reg[i]);
    }
  }

  __syncthreads();
  // warp 0 writes the reduced tiles to global memory
  if (warp_id == 0 && in_range) {
    C[row_A_start + 0 + 0 * ldC] = dC_shared[0][thread_lane];
    C[row_A_start + 0 + 1 * ldC] = dC_shared[2][thread_lane];
    C[row_A_start + 0 + 2 * ldC] = dC_shared[4][thread_lane];
    C[row_A_start + 0 + 3 * ldC] = dC_shared[6][thread_lane];
    // BUG FIX: guard the second row so an odd n does not write past C.
    if (row_A_start + 1 < n) {
      C[row_A_start + 1 + 0 * ldC] = dC_shared[1][thread_lane];
      C[row_A_start + 1 + 1 * ldC] = dC_shared[3][thread_lane];
      C[row_A_start + 1 + 2 * ldC] = dC_shared[5][thread_lane];
      C[row_A_start + 1 + 3 * ldC] = dC_shared[7][thread_lane];
    }
  }
}

// Dispatch C(k x n) = A(k x n) * B(n x n) for tiny row counts k.
// Only k == 2 and k == 4 have specialized kernels.
// BUG FIX: the previous assert accepted any k in [1, 4], but k == 1 or 3
// fell through the switch and silently left C untouched; the assert now
// matches the dispatch table exactly.
void dgemm_KNN(double *A, double *B, double *C, int k, int n, int ldB) {
  assert(k == 2 || k == 4);
  switch (k) {
    case 2: {
      dim3 dimBlock(512);              // 16 warps -> 32 columns of C per block
      dim3 dimGrid(((n + 31) / 32));
      dgemm_2NN_kernel<<<dimGrid, dimBlock>>>(A, B, C, n, ldB);
      break;
    }
    case 4: {
      dim3 dimBlock(256);              // 8 warps -> 16 columns of C per block
      dim3 dimGrid(((n + 15) / 16));
      dgemm_4NN_kernel<<<dimGrid, dimBlock>>>(A, B, C, n, ldB);
      break;
    }
  }
}

// Dispatch C(n x k) = A(n x n) * B(n x k) for tiny column counts k.
// Parameter order: k is the small dimension, n the large one.
// Only k == 2 and k == 4 have specialized kernels.
// BUG FIX: the previous assert accepted any k in [1, 4], but k == 1 or 3
// fell through the switch and silently left C untouched; the assert now
// matches the dispatch table exactly.
void dgemm_NKN(double *A, double *B, double *C, int k, int n, int ldA, int ldB, int ldC) {
  assert(k == 2 || k == 4);
  switch (k) {
    case 2: {
      dim3 dimBlock(512);
      dim3 dimGrid(((n + 31) / 32));
      dgemm_N2N_kernelv3<<<dimGrid, dimBlock>>>(A, B, C, n, ldA, ldB, ldC);
      break;
    }
    case 4: {
      dim3 dimBlock(512);
      dim3 dimGrid(((n + 31) / 32));
      dgemm_N4N_kernelv3<<<dimGrid, dimBlock>>>(A, B, C, n, ldA, ldB, ldC);
      break;
    }
  }
}

// C = A * B with a fast path: when the output has only 2 or 4 rows, use
// the hand-written skinny dgemm_KNN kernels; otherwise fall back to the
// cuBLAS gemm wrapper with alpha = 1, beta = 0.
void local_gemm_KNN(cublasHandle_t handle, double* A, double*B, double* C, int m, int n, int k, int lda, int ldb, int ldc) {
    const bool skinny_rows = (m == 2) || (m == 4);
    if (!skinny_rows) {
        gemm(handle, A, B, C, m, n, k, lda, ldb, ldc, 1.0, 0.0);
        return;
    }
    dgemm_KNN(A, B, C, m, n, ldb);
}


// C = A * B with a fast path: when the output has only 2 or 4 columns,
// use the hand-written skinny dgemm_NKN kernels (note its argument
// order: the small dimension n comes before the large dimension k);
// otherwise fall back to the cuBLAS gemm wrapper with alpha = 1, beta = 0.
void local_gemm_NKN(cublasHandle_t handle, double* A, double*B, double* C, int m, int n, int k, int lda, int ldb, int ldc) {
    const bool skinny_cols = (n == 2) || (n == 4);
    if (!skinny_cols) {
        gemm(handle, A, B, C, m, n, k, lda, ldb, ldc, 1.0, 0.0);
        return;
    }
    dgemm_NKN(A, B, C, n, k, lda, ldb, ldc);
}




// Device-side blocked Schur-complement update of a matrix inverse (same
// scheme as CPU::update_inversion).  On entry `inverse` holds the
// inverse of the leading m x m block of A; on exit it holds the full
// n x n inverse (both column-major, leading dimensions lda / ldi).
//
// solverH.workspace layout (in doubles):
//   A12  at 0                          size m*(n-m)
//   A21  at (n-m)*m                    size (n-m)*m
//   A22  at 2*(n-m)*m                  size (n-m)*(n-m)
//   iA12 at 2*(n-m)*m + (n-m)^2        size m*(n-m)
//   iA21 next                          size (n-m)*m
//   remainder: scratch for inplace_inverse.
void update_inversion(SOLVER_HANDLE solverH, double* A, double* inverse, int* ipiv, int m, int n, int lda, int ldi) {
    double* A12 = solverH.workspace;
    double* A21 = solverH.workspace + (n - m) * m;
    double* A22 = solverH.workspace + 2 * (n - m) * m;
    double* iA12 = A22 + (n - m)*(n - m);
    double* iA21 = A22 + (n - m)*(n - m) + (n - m) * m;
    double* workspace = solverH.workspace + 4 * (n - m) * m + (n - m)*(n - m);
    memcpy_submatrix(&A[m * lda], A12, m, n - m, lda, m);
    // Deliberately copies ALL n columns of the bottom (n - m) rows: the
    // first m columns land in A21 and the remaining n - m columns land
    // exactly at A22 (A21 + (n-m)*m == A22), filling both blocks at once.
    memcpy_submatrix(&A[m], A21, n - m, n, lda, n - m);

    // iA[:M, M:N] = iA11 @ A[:M, M:N], compute A^{-1}X on (1, 2)
    local_gemm_NKN (solverH.cublasH, inverse, A12, iA12, m, n - m, m, ldi, m, m);

    // iA[M:, :M] = A[M:, :M] @ iA[:M, :M], compute  YA^{-1} on (2, 1)
    local_gemm_KNN (solverH.cublasH, A21, inverse, iA21, n - m, m, m, n - m, ldi, n - m);

    // iA[M:N, M:N] = A[M:N, M:N] - A[M:N, :M] @ iA[:M, M:N] compute R - YA^{-1}X on (2, 2)
    // (A22 already holds the original A22 thanks to the wide copy above,
    // so beta = 1 accumulates into it)
    // memcpy_submatrix(A22, &inverse[m + m * ldi], n - m, n - m, n - m, ldi);
    gemm (solverH.cublasH, A21, iA12, A22, n - m, n - m, m, n - m, m, n - m, -1.0, 1.0);

    // iA[M:N, M:N] = np.linalg.inv(iA[M:N, M:N]), compute inverse of R - YA^{-1}X
    inplace_inverse(solverH.cudenseH, workspace, A22, ipiv, n - m, n - m, solverH.devInfo);

    // iA[:M, M:N] = (-1) * iA[:M, M:N] @ iA[M:N, M:N], compute A^{-1}X(YA^{-1}X - R)^{-1}
    gemm (solverH.cublasH, iA12, A22, A12, m, n - m, n - m, m, n - m, m, -1.0, 0.0);

    // iA[:M, :M] = iA[:M, :M] - iA[:M, M:N] @ iA[M:, :M], A^{-1} - (A^{-1}X(YA^{-1}X - R)^{-1})(YA^{-1})
    gemm (solverH.cublasH, A12, iA21, inverse, m, m, n - m, m, n - m, ldi, -1.0, 1.0);

    // iA[M:, :M] = (-1) * iA[M:N, M:N] @ iA[M:, :M], compute (YA^{-1}X - R)^{-1} YA^{-1}
    gemm (solverH.cublasH, A22, iA21, A21, n - m, m, n - m, n - m, n - m, n - m, -1.0, 0.0);

    // scatter the three computed blocks from workspace into `inverse`
    memcpy_submatrix(A12, &inverse[m * ldi], m, n - m, m, ldi);
    memcpy_submatrix(A21, &inverse[m], n - m, m, n - m, ldi);
    memcpy_submatrix(A22, &inverse[m + m * ldi], n - m, n - m, n - m, ldi);
    CUDA_RT_CALL(cudaDeviceSynchronize());
}


// Matrix-vector product: C[:,0] = A * B[:,0] for an n x n column-major
// matrix A (leading dimension ldA) and an n-vector in the first column
// of B.  ldB/ldC are accepted for signature symmetry but only column 0
// of B and C is accessed.
// Work split: lanes own 2-row strips of A/C, warps partition the inner
// dimension in 4-element chunks, and partials are block-reduced in
// shared memory via atomicAdd (double atomicAdd requires SM60+).
// Launch: 1-D grid/block, blockDim.x a multiple of 32.
__global__ void dgemv_kernelv3(const double *A,const double *B, double *C, int n, int ldA, int ldB, int ldC) {
  int warp_id     = threadIdx.x / 32;
  int thread_lane = threadIdx.x % 32;
  int num_warps   = blockDim.x  / 32;
  // tileA 2 * 4, tileB 4 * 1, tileC 2 * 1
  double dA_reg[8] = {};
  double dB_reg[4] = {};
  double dC_reg[2] = {};   // zero-initialized accumulators

  // dC_shared[t][lane]: block-wide accumulator for row row_A_start + t
  // of the strip owned by lane `lane`.
  __shared__ double dC_shared[2][32];
  if (warp_id < 2) {
    dC_shared[warp_id][thread_lane] = 0;
  }
  // BUG FIX: without this barrier, warps >= 2 could atomicAdd into
  // dC_shared before warps 0..1 finished zeroing it.
  __syncthreads();

  int row_A_start = blockIdx.x * (warpSize * 2) + thread_lane * 2;
  // BUG FIX: out-of-range threads previously returned here and skipped
  // the __syncthreads() below (undefined behavior); keep them resident
  // and just mask their work instead.
  const bool in_range = row_A_start < n;

  if (in_range) {
    for (int col_A_start = warp_id * 4; col_A_start < n; col_A_start += num_warps * 4) {
      // load tile_A: 2 rows x 4 cols, zero-padding columns past n.
      // NOTE(review): each double2 also reads row row_A_start + 1, which
      // can be one element past the column end for odd n -- confirm the
      // allocation covers it.
      load_double2(&dA_reg[0], &A[row_A_start + 0 + col_A_start * ldA]);
      if (col_A_start + 1 < n) {
        load_double2(&dA_reg[2], &A[row_A_start + 0 + (col_A_start + 1) * ldA]);
      } else {
        dA_reg[2] = 0;
        dA_reg[3] = 0;
      }
      if (col_A_start + 2 < n) {
        load_double2(&dA_reg[4], &A[row_A_start + 0 + (col_A_start + 2) * ldA]);
      } else {
        dA_reg[4] = 0;
        dA_reg[5] = 0;
      }
      if (col_A_start + 3 < n) {
        load_double2(&dA_reg[6], &A[row_A_start + 0 + (col_A_start + 3) * ldA]);
      } else {
        dA_reg[6] = 0;
        dA_reg[7] = 0;
      }

      // load tile_B: 4 vector elements, guarded per element.
      // BUG FIX: the old code tested col_A_start + 1 < n but then read
      // B[col_A_start + 2] and B[col_A_start + 3], overrunning the
      // vector at the tail.
      #pragma unroll
      for (int t = 0; t < 4; t ++) {
        dB_reg[t] = (col_A_start + t < n) ? B[col_A_start + t] : 0.0;
      }

      // accumulate the private 2x1 += 2x4 * 4x1 tile product
      #pragma unroll
      for (int i = 0; i < 2; i ++) {
        for (int k = 0; k < 4; k ++) {
          dC_reg[i] += dA_reg[i + k * 2] * dB_reg[k];
        }
      }
    }
    // fold this thread's 2-row partial into the block accumulators
    #pragma unroll(2)
    for (int i = 0; i < 2; i ++) {
      atomicAdd(&(dC_shared[i][thread_lane]), dC_reg[i]);
    }
  }

  __syncthreads();
  // warp 0 writes the reduced strip to global memory
  if (warp_id == 0 && in_range) {
    C[row_A_start + 0 + 0 * ldC] = dC_shared[0][thread_lane];
    if (row_A_start + 1 < n)
      C[row_A_start + 1 + 0 * ldC] = dC_shared[1][thread_lane];
  }
}

// Host-side launcher for dgemv_kernelv3: C[:,0] = A * B[:,0] for an
// n x n matrix A (leading dimension ldA).
void new_dgemv(const double *A,const double *B, double *C, int n, int ldA, int ldB, int ldC) {
  const int threads = 512;                 // 16 warps per block
  const dim3 block(threads);
  const dim3 grid((n + 31) / 32);
  dgemv_kernelv3<<<grid, block>>>(A, B, C, n, ldA, ldB, ldC);
}

};