#ifndef __GPU_TEMPLATES_CUH__
#define __GPU_TEMPLATES_CUH__

#include "gpu_helper.cuh"

extern float * dc_f32;
extern double* dc_f64;

#define FULL_MASK 0xffffffff
template<typename data_t> __inline__ __device__
data_t warpReduceSum(data_t val) {
    // Tree reduction across the lanes of one warp using shuffle-down.
    // After the loop, lane 0 holds the sum of all lanes' inputs; other
    // lanes hold partial sums. All 32 lanes must participate (FULL_MASK).
    int offset = warpSize / 2;
    while (offset > 0) {
        val += __shfl_down_sync(FULL_MASK, val, offset);
        offset /= 2;
    }
    return val;
}

template<typename data_t> __inline__ __device__
data_t blockReduceSum(data_t val) {
    // Block-wide sum reduction. The total is returned in thread 0 of the
    // block; other threads receive partial/undefined values (usual idiom).
    static __shared__ data_t shared[32]; // one slot per warp; max 1024 threads = 32 warps
    int lane = threadIdx.x % warpSize;   // lane index within the warp
    int wid  = threadIdx.x / warpSize;   // warp index within the block
    // Ceiling division so a trailing partial warp is counted. The previous
    // code used blockDim.x / warpSize, which truncates and silently dropped
    // the last warp's partial sum whenever blockDim.x was not a multiple of
    // warpSize. (Partial warps still require all lanes named in FULL_MASK to
    // be active inside warpReduceSum — prefer blockDim.x % 32 == 0.)
    int num_warps = (blockDim.x + warpSize - 1) / warpSize;

    val = warpReduceSum(val);            // each warp reduces its own values
    if (lane == 0) shared[wid] = val;    // lane 0 publishes the warp's partial sum
    __syncthreads();                     // wait until every warp has published

    // First warp gathers the per-warp partials (0 for non-existent warps)
    // and reduces them to the final answer.
    val = (threadIdx.x < num_warps) ? shared[lane] : 0;
    if (wid == 0) val = warpReduceSum(val); // final reduce within the first warp
    return val;
}

template<typename data_t> __inline__ __device__
void blockAxpy(int N, data_t alpha, const data_t * X, data_t * Y)
{
    // Grid-stride AXPY: Y[i] += alpha * X[i] for every i in [0, N).
    // Valid for any launch configuration; each thread handles the indices
    // congruent to its global id modulo the total thread count.
    const int first  = threadIdx.x + blockIdx.x * blockDim.x;
    const int stride = blockDim.x * gridDim.x;
    for (int idx = first; idx < N; idx += stride) {
        Y[idx] = Y[idx] + alpha * X[idx];
    }
}


template<typename data_t1, typename data_t2> __global__
static void naive_copy(const int nrows, const int rank, const data_t1* L_1001, int L_1001_lda, const data_t1* U_0110, int U_0110_lda,
	data_t2* P_rowM, data_t2* Q_colM)
{
	// Single-threaded reference kernel (debug/validation only).
	// Packs the negated transpose of the lda-strided L_1001 panel into the
	// row-major P_rowM buffer, and copies the U_0110 panel vector-by-vector
	// into Q_colM. Launch with one thread.
	if (threadIdx.x != 0) return;
	for (int r = 0; r < nrows; r++) {
		for (int c = 0; c < rank; c++)
			P_rowM[r * rank + c] = -L_1001[c * L_1001_lda + r];
	}
	for (int c = 0; c < nrows; c++) {
		for (int r = 0; r < rank; r++)
			Q_colM[c * rank + r] = U_0110[c * U_0110_lda + r];
	}
}

template<typename data_t> __global__
static void naive(const int i, const int nrows,
	data_t* P_rowM, data_t* Q_colM, int rank, data_t* LU_old, int LU_lda)
{
	// Single-threaded reference kernel for pivot step `i` of the low-rank LU
	// update. P_rowM holds one rank-vector per row and Q_colM one per column;
	// LU_old is the packed LU factor with leading dimension LU_lda.
	// Launch with one thread — validation only, no parallelism.
	// Fix: removed the unconditional device printf that dumped the pivot
	// reciprocal every step (debug leftover; launched once per pivot it
	// serialized execution and flooded stdout).
	if (threadIdx.x == 0) {
		data_t* Pi = P_rowM + i * rank;
		data_t* Qi = Q_colM + i * rank;
		{// diagonal update: LU(i,i) += Pi . Qi, then scale Qi by 1/new_pivot
			data_t res = 0.0;
			for (int k = 0; k < rank; k++)
				res += Pi[k] * Qi[k];

			data_t val = LU_old[i * LU_lda + i];
			LU_old[i * LU_lda + i] += res;

			res = 1.0 / (val + res);
			for (int k = 0; k < rank; k++)
				Qi[k] *= res;
		}
		for (int j = i + 1; j < nrows; j++) {// L update: fold pivot row into trailing P vectors
			data_t* Pj = P_rowM + j * rank;

			data_t val = LU_old[i * LU_lda + j];// current L(j, i)
			data_t res = 0.0;
			for (int k = 0; k < rank; k++) {
				Pj[k] -= val * Pi[k];
				res += Pj[k] * Qi[k];
			}

			LU_old[i * LU_lda + j] += res;
		}
		for (int j = i + 1; j < nrows; j++) {// U update: fold pivot column into trailing Q vectors
			data_t* Qj = Q_colM + j * rank;

			data_t val = LU_old[j * LU_lda + i];// current U(i, j)
			data_t res = 0.0;
			for (int k = 0; k < rank; k++)
				res += Pi[k] * Qj[k];

			LU_old[j * LU_lda + i] += res;

			res = -(val + res);
			for (int k = 0; k < rank; k++)
				Qj[k] += res * Qi[k];
		}
	}
}

// Parallel low-rank LU update: one warp ("vector") per row/column of the
// trailing matrix. Cross-block ordering of the pivot steps is enforced with
// the global counter `flag` (one atomicAdd per finished step) plus a
// spin-wait, so ALL blocks must be simultaneously resident on the GPU or the
// spin can deadlock — NOTE(review): relies on the launch configuration chosen
// in LU_update; confirm occupancy guarantees this.
// Dynamic shared memory layout: [block's P | block's Q | other_P | other_Q],
// i.e. (vectors_per_block*2 + 2) * rank elements of data_t.
// NOTE(review): warps with vectorId >= nrows skip the __syncthreads() calls
// below while sibling warps in the same (tail) block execute them — barrier
// divergence when nrows is not a multiple of blockDim.x/warpSize; verify the
// caller's sizes, or hoist the barriers out of the guard.
template<typename data_t> __global__
static void update_kernel_static(const int nrows, volatile data_t* P_rowM, volatile data_t* Q_colM, int rank,
							data_t* LU_old, int LU_lda, int* flag)
{
	assert(blockDim.y == 1 && blockDim.z == 1);
	assert(gridDim.y == 1 && gridDim.z == 1);
	assert(warpSize == 32);
	uint32_t glb_tid = threadIdx.x + blockDim.x * blockIdx.x;// global thread id
	uint32_t vectorId = glb_tid / warpSize;// which row/column this warp is responsible for
	uint32_t laneId = threadIdx.x & (warpSize - 1);
	assert(blockDim.x % warpSize == 0);// each block must own a whole number of vectors
	uint32_t tbeg = blockDim.x * blockIdx.x / warpSize;// first vector id owned by this block
	uint32_t tend = tbeg + blockDim.x / warpSize;// one past the last vector id owned

	extern __shared__ data_t s[];// P/Q slices owned by this block
	data_t* shared_P = s;
	data_t* shared_Q = s 		+ (tend - tbeg) * rank;
	data_t* other_P  = shared_Q + (tend - tbeg) * rank;
	data_t* other_Q  = other_P  + rank;

	if (vectorId < nrows) {
		data_t* myP = shared_P + (vectorId - tbeg) * rank;
		data_t* myQ = shared_Q + (vectorId - tbeg) * rank;
		{// stage this warp's P and Q vectors into shared memory
			for (int k = laneId; k < rank; k += warpSize) {
				myP[k] = P_rowM[vectorId * rank + k];
				myQ[k] = Q_colM[vectorId * rank + k];
			}
			__syncthreads();// whole block must finish staging before step 0
		}
		
		for (int i = 0; i < nrows; i++) {
			if (vectorId == i) {// the warp owning row/column i only updates the diagonal
				data_t res = 0.0;
				data_t val = LU_old[i * LU_lda + i];
				for (int k = laneId; k < rank; k += warpSize)// stride by the warp width over the rank-vector
					res += myP[k] * myQ[k];
				for (int k = warpSize >> 1; k > 0; k >>= 1)
					res += __shfl_down_sync(FULL_MASK, res, k);
				res = __shfl_sync(FULL_MASK, res, 0);// broadcast the reduced dot product to all lanes
				if (laneId == 0) {// lane 0 commits the diagonal correction
					LU_old[i * LU_lda + i] += res;
				}
				res = 1.0 / (val + res);// reciprocal of the updated pivot
				for (int k = laneId; k < rank; k += warpSize)
					myQ[k] *= res;
			}
			__syncthreads();// makes the pivot warp's shared-memory writes visible to the whole block
			// after intra-block visibility, publish to global memory so other blocks can see it
			if (vectorId == i) {// write the updated vectors back, fence, then bump the progress counter
				for (int k = laneId; k < rank; k += warpSize) {
					P_rowM[i * rank + k] = myP[k];// myP unchanged this step, but earlier steps' updates must land
					Q_colM[i * rank + k] = myQ[k];
				}
				__threadfence();
				if (laneId == 0) {
					int ret = atomicAdd(flag, 1);// announce: pivot step i is complete
					assert(ret == i);
				}
			}
			else if (vectorId > i && tbeg > i) {// pivot warp lives in another block: fetch fresh Pi/Qi from global
				if (vectorId == tbeg) {// the block's first warp performs the fetch for everyone
					if (laneId == 0) while (atomicCAS(flag, -1, -1) < i+1) {  }// spin until step i is published
					__syncwarp(FULL_MASK);// whole warp waits behind lane 0's spin
					// read the Pi/Qi that the (remote) pivot warp just wrote to global memory
					for (int k = laneId; k < rank; k += warpSize) {
						other_P[k] = P_rowM[i * rank + k];
						other_Q[k] = Q_colM[i * rank + k];
					}
				}// otherwise the updated Pi/Qi already live in this block's shared_P/Q
			}
			__syncthreads();// make the other_P/other_Q load visible to every thread in the block

			if (vectorId > i) {// apply step i to this warp's own P/Q, kept in shared memory
				data_t* Pi = (tbeg > i) ? other_P : (shared_P + (i - tbeg)*rank);
				data_t* Qi = (tbeg > i) ? other_Q : (shared_Q + (i - tbeg)*rank);
				{// L-update first
					data_t res = 0.0;
					data_t val = LU_old[i * LU_lda + vectorId];
					for (int k = laneId; k < rank; k += warpSize) {
						myP[k] -= val * Pi[k];// Pj[k] -= val * Pi[k];
						res += myP[k] * Qi[k];// res += Pj[k] * Qi[k];
					}
					for (int k = warpSize >> 1; k > 0; k >>= 1)
						res += __shfl_down_sync(FULL_MASK, res, k);
					if (laneId == 0) {
						LU_old[i * LU_lda + vectorId] += res;
					}
				}
				{// then the U-update
					data_t res = 0.0;
					data_t val = LU_old[vectorId * LU_lda + i];
					for (int k = laneId; k < rank; k += warpSize)
						res += Pi[k] * myQ[k];// res += Pi[k] * Qj[k];
					for (int k = warpSize >> 1; k > 0; k >>= 1)
						res += __shfl_down_sync(FULL_MASK, res, k);
					res = __shfl_sync(FULL_MASK, res, 0);// broadcast so every lane can scale Qj below
					if (laneId == 0) {
						LU_old[vectorId * LU_lda + i] += res;
					}
					res = - (val + res);
					for (int k = laneId; k < rank; k += warpSize)
						myQ[k] += res * Qi[k];// Qj[k] += res * Qi[k];
				}
			}
		}
	}
}

#define TPV 32
// Applies a rank-`rank` update to the already-factorized block LU_old
// (nrows x nrows, leading dimension LU_lda):
//   P_rowM := -transpose(L_1001)  (row-major: one rank-vector per row)
//   Q_colM :=  U_0110             (one rank-vector per column)
// then launches update_kernel_static to fold P*Q into LU_old in-place.
// `d_flag` is device scratch used as the kernel's cross-block progress
// counter; it is zeroed here before the launch.
// NOTE(review): blkSize_mat, device_copy_mat*, blkSize_vec and
// check_device_workspace_to_enlarge come from gpu_helper.cuh (not visible in
// this file); `handle` is currently unused by this function.
template<typename data_t>
static void LU_update(cublasHandle_t handle, int nrows, data_t* LU_old, int LU_lda,
		int rank, const data_t* L_1001, int L_1001_lda, const data_t* U_0110, int U_0110_lda, int* d_flag)
{
	const dim3 scalar_Block(1, 1, 1), scalar_Grid(1, 1, 1);
	// Workspace: P occupies nrows*rank elements, Q starts at offset
	// 2*nrows*rank, and 4x is allocated — presumably deliberate headroom or
	// double-buffering; TODO confirm the intended layout.
	data_t* d_work = (data_t*) check_device_workspace_to_enlarge(nrows * rank * sizeof(data_t) * 4);
	data_t* P_rowM = d_work;
	data_t* Q_colM = d_work + (rank * nrows * 2);
	dim3 grdSize_mat(1, 1, 1);
    grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
    grdSize_mat.y = (rank  - 1 + blkSize_mat.y) / blkSize_mat.y;
	// note the sign flip here: P = -L^T
	device_copy_mat_neg_transpose<<<grdSize_mat, blkSize_mat>>>(L_1001, L_1001_lda, P_rowM, rank, rank, nrows);
	// the grid dimensions must be swapped for the (rank x nrows) copy below
    grdSize_mat.x = (rank  - 1 + blkSize_mat.x) / blkSize_mat.x;
    grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
    device_copy_mat<<<grdSize_mat, blkSize_mat>>>(U_0110, U_0110_lda, Q_colM, rank , nrows, rank );
#ifdef PROFILE
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    float tms;
    CUDA_CHECK(cudaEventRecord(start));
#endif
	// Serial reference path, kept for debugging:
	// for (int i = 0; i < nrows; i++) {
	// 	naive<data_t><<<scalar_Grid, scalar_Block>>>(i, nrows, P_rowM, Q_colM, rank, LU_old, LU_lda);
	// }
	
	CUDA_CHECK(cudaMemset(d_flag, 0, sizeof(*d_flag)));
	uint32_t nt_per_blk = 512;
	uint32_t nt_per_vec = 32;// one warp per row/column vector
	uint32_t nvec_per_blk = nt_per_blk / nt_per_vec;
	uint32_t grd = (nrows - 1 + nvec_per_blk) / nvec_per_blk;
	// dynamic shared memory: (vectors_per_block*2 + 2) rank-vectors
	// (block's P and Q slices plus the fetched other_P/other_Q)
	uint32_t shmem_bytes = (nt_per_blk/32*2 + 2)*rank*sizeof(data_t);
	// printf("nrows %d nt_per_blk %u nt_per_vec %u nvec_per_blk %u grd %u shmem_bytes %u\n",
	// 	nrows, nt_per_blk, nt_per_vec, nvec_per_blk, grd, shmem_bytes);
	update_kernel_static<data_t><<<grd, nt_per_blk, shmem_bytes>>>(nrows, P_rowM, Q_colM, rank, LU_old, LU_lda, d_flag);
#ifdef PROFILE
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop));
	CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    printf("my LU update Kernel: %.6f s\n", tms / 1000);
#endif
}

// Extends an existing LU factorization last_M (last_nrows x last_nrows,
// in-place L\U storage) to this_M (this_nrows x this_nrows) using a
// half/half block split (hf = this_nrows/2, last_hf = last_nrows/2) and the
// low-rank LU_update for the trailing diagonal block.
// NOTE(review): `dc` appears to hold device-resident scalar constants with
// dc[1] == 1 and dc[2] == -1 (inferred from the gemm/trsm usage and the
// commented-out ptr_minus_one/zero/one names) — confirm against the
// definition of dc_f32/dc_f64.
// NOTE(review): the cuBLAS pointer mode is switched to DEVICE here but never
// restored to the saved `mode` on exit — callers that expected HOST mode will
// observe the change; verify this is intentional.
template<typename data_t>
void gpu_extend_full(cublasHandle_t blas_handle, cusolverDnHandle_t sol_handle,
		const data_t* last_M, int last_nrows, data_t* this_M, int this_nrows, int* ipiv, int* d_info)
{
    // ensure device pointer mode: alpha/beta scalars live in device memory (dc)
    cublasPointerMode_t mode;
    CUBLAS_CHECK(cublasGetPointerMode(blas_handle, &mode));
    if (mode != CUBLAS_POINTER_MODE_DEVICE) 
        CUBLAS_CHECK(cublasSetPointerMode(blas_handle, CUBLAS_POINTER_MODE_DEVICE));

	int last_hf = last_nrows >> 1;
	int hf      = this_nrows >> 1;
	int nrows = this_nrows;
	dim3 grdSize_mat(1, 1, 1);
	    // function-pointer dispatch on the element size: float -> S-routines,
	    // double -> D-routines
	    cublasStatus_t(*trsm)(cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo,
        cublasOperation_t trans, cublasDiagType_t diag, int m, int n, const data_t * alpha,
        const data_t * A, int lda, data_t * B, int ldb) = nullptr;
    cublasStatus_t(*gemm)(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
        int m, int n, int k, const data_t * alpha, const data_t * A, int lda, const data_t * B, int ldb,
        const data_t * beta, data_t * C, int ldc) = nullptr;
    cusolverStatus_t(*getrf)(cusolverDnHandle_t handle, int m, int n, data_t * A, int lda,
        data_t * Workspace, int* devIpiv, int* devInfo) = nullptr;
	const data_t* dc = nullptr;
    if constexpr (sizeof(data_t) == 4) {
        trsm = cublasStrsm;
        gemm = cublasSgemm;
        getrf = cusolverDnSgetrf;
		dc = dc_f32;
    }
    else if constexpr (sizeof(data_t) == 8) {
        trsm = cublasDtrsm;
        gemm = cublasDgemm;
        getrf = cusolverDnDgetrf;
		dc = dc_f64;
    }

	{// batch the two triangular solves of each side into one trsm call
		data_t* rhs = (data_t*) check_device_workspace_to_enlarge(last_hf * (hf - last_hf) * 2 * sizeof(data_t));
		// solve U_0001 := L_0011^{-1}*B_00 and U_0101 := L_0011^{-1}*B_01 together
		data_t* B_00 = this_M + last_hf * nrows;
		data_t* B_01 = this_M + (hf + last_hf) * nrows;
		grdSize_mat.x = (	   last_hf   - 1 + blkSize_mat.x) / blkSize_mat.x;
    	grdSize_mat.y = ((hf - last_hf)  - 1 + blkSize_mat.y) / blkSize_mat.y;
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (B_00, nrows, rhs						 , last_hf, hf - last_hf, last_hf);
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (B_01, nrows, rhs + (hf - last_hf)*last_hf, last_hf, hf - last_hf, last_hf);
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
			last_hf, (hf - last_hf) * 2, &dc[1], last_M, last_nrows, rhs, last_hf));
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (rhs						 	, last_hf, B_00, nrows, hf - last_hf, last_hf);
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (rhs + (hf - last_hf)*last_hf, last_hf, B_01, nrows, hf - last_hf, last_hf);
        // solve L_0010 := C_00*U_0011^{-1} and L_1010 := C_10*U_0011^{-1} together
		data_t* C_00 = this_M + last_hf;
		data_t* C_10 = this_M + last_hf + hf;
		grdSize_mat.x = ((hf - last_hf)  - 1 + blkSize_mat.x) / blkSize_mat.x;
    	grdSize_mat.y = (	   last_hf   - 1 + blkSize_mat.y) / blkSize_mat.y;
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (C_00, nrows, rhs               , (hf - last_hf) * 2, last_hf, hf - last_hf);
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (C_10, nrows, rhs + hf - last_hf, (hf - last_hf) * 2, last_hf, hf - last_hf);
        CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
			(hf - last_hf) * 2, last_hf, &dc[1], last_M, last_nrows, rhs, (hf - last_hf) * 2));
        device_copy_mat <<<grdSize_mat, blkSize_mat>>> (rhs               , (hf - last_hf) * 2, C_00, nrows, last_hf, hf - last_hf);
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (rhs + hf - last_hf, (hf - last_hf) * 2, C_10, nrows, last_hf, hf - last_hf);
	}
    
	// every copy from here on moves a last_hf x last_hf block
	grdSize_mat.x = (last_hf - 1 + blkSize_mat.x) / blkSize_mat.x;
	grdSize_mat.y = (last_hf - 1 + blkSize_mat.y) / blkSize_mat.y;
	{// top-left block
		data_t* this_A00 = this_M;
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (last_M, last_nrows, this_A00, nrows, last_hf, last_hf);// L_0000, U_0000
		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, hf - last_hf, hf - last_hf, last_hf,
			&dc[2], this_A00 + last_hf, nrows, this_A00 + last_hf * nrows, nrows, &dc[1], this_A00 + last_hf * nrows + last_hf, nrows));
		// workspace query for the trailing factorization
		int lwork = 0;
		// the block to factorize is (hf - last_hf) x (hf - last_hf)
		if constexpr (sizeof(data_t) == 8) {
			CUSOLVER_CHECK(cusolverDnDgetrf_bufferSize(sol_handle, hf - last_hf, hf - last_hf, this_A00 + last_hf * nrows + last_hf, nrows, &lwork));
		} else if constexpr (sizeof(data_t) == 4) {
			CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf - last_hf, hf - last_hf, this_A00 + last_hf * nrows + last_hf, nrows, &lwork));
		}
		data_t* d_work = (data_t*) check_device_workspace_to_enlarge(lwork * sizeof(data_t));
		CUSOLVER_CHECK(getrf(sol_handle, hf - last_hf, hf - last_hf, this_A00 + last_hf * nrows + last_hf, nrows, d_work, nullptr, d_info));// no pivoting (devIpiv == nullptr)
	}

	{// top-right block
		data_t* this_A01 = this_M + hf * nrows;
		const data_t* L_0010 = this_M + last_hf;
		const data_t* L_0011 = this_M + last_hf + last_hf * nrows;
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (last_M + last_hf * last_nrows, last_nrows, this_A01, nrows, last_hf, last_hf);// U_0100
		data_t* CD_01 = this_A01 + last_hf;
		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, hf - last_hf, hf, last_hf,
			&dc[2], L_0010, nrows, this_A01, nrows, &dc[1], CD_01, nrows));// [C_01, D_01] - L_0010*[U_0100, U_0101]
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
			hf - last_hf, hf, &dc[1], L_0011, nrows, CD_01, nrows));// [U_0110, U_0111]
	}

	{// bottom-left block
		data_t* this_A10 = this_M + hf;
		const data_t* U_0001 = this_M + last_hf * nrows;
		const data_t* U_0011 = this_M + last_hf * nrows + last_hf;
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (last_M + last_hf, last_nrows, this_A10, nrows, last_hf, last_hf);// L_1000
		data_t* BD_10 = this_A10 + last_hf * nrows;
		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, hf, hf - last_hf, last_hf,
				&dc[2], this_A10, nrows, U_0001, nrows, &dc[1], BD_10, nrows));
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
			hf, hf - last_hf, &dc[1], U_0011, nrows, BD_10, nrows));// [L_1001, L_1011]
	}
    
	{// bottom-right block
		data_t* this_A11 = this_M + hf * nrows + hf;
		data_t* B_11 = this_A11 + last_hf * nrows;
		const data_t* L_100x = this_M + hf;
		const data_t* U_01x1 = this_M + (hf + last_hf) * nrows;
		data_t* C_11 = this_A11 + last_hf;
		const data_t* L_101x = this_M + hf + last_hf;
		const data_t* U_01x0 = this_M + hf * nrows;
		data_t* D_11 = this_A11 + last_hf * nrows + last_hf;
		device_copy_mat <<<grdSize_mat, blkSize_mat>>> (last_M + last_hf * last_nrows + last_hf, last_nrows, this_A11, nrows, last_hf, last_hf);// copy L_11_t, U_11_t directly
		// fold the low-rank correction into the copied factors
		// (d_info doubles as the progress-flag scratch for LU_update)
        LU_update(blas_handle, last_hf, this_A11, nrows, hf - last_hf, L_100x + last_hf * nrows, nrows, U_01x0 + last_hf, nrows, d_info);

		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, last_hf, hf - last_hf, hf,
			&dc[2], L_100x, nrows, U_01x1, nrows, &dc[1], B_11, nrows));
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
			last_hf, hf - last_hf, &dc[1], this_A11, nrows, B_11, nrows));

		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, hf - last_hf, last_hf, hf,
			&dc[2], L_101x, nrows, U_01x0, nrows, &dc[1], C_11, nrows));
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
			hf - last_hf, last_hf, &dc[1], this_A11, nrows, C_11, nrows));

		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, hf - last_hf, hf - last_hf, hf + last_hf,
			&dc[2], L_101x, nrows, U_01x1, nrows, &dc[1], D_11, nrows));
		// workspace query for the final trailing factorization
		int lwork = 0;
		// the block to factorize is (hf - last_hf) x (hf - last_hf)
		if constexpr (sizeof(data_t) == 8) {
			CUSOLVER_CHECK(cusolverDnDgetrf_bufferSize(sol_handle, hf - last_hf, hf - last_hf, D_11, nrows, &lwork));
		} else if constexpr (sizeof(data_t) == 4) {
			CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf - last_hf, hf - last_hf, D_11, nrows, &lwork));
		}
		data_t* d_work = (data_t*) check_device_workspace_to_enlarge(lwork * sizeof(data_t));
		CUSOLVER_CHECK(getrf(sol_handle, hf - last_hf, hf - last_hf, D_11, nrows, d_work, nullptr, d_info));// no pivoting (devIpiv == nullptr)
	}
}

// Extends an existing LU factorization last_M (same_len x same_len) to the
// larger matrix this_M (this_lda x this_lda, leading dimension this_lda)
// using the classic block scheme:
//   A01 <- L00^{-1} * A01,  A10 <- A10 * U00^{-1},
//   A11 <- A11 - A10*A01,   getrf(A11).
// When d_ipiv is supplied, getrf pivots the trailing block and device_incre
// shifts those pivot indices by same_len into global row numbering.
// Requires the cuBLAS handle to already be in device pointer mode (asserted).
// NOTE(review): `stream` is applied to the raw kernel launches only; the
// cuBLAS/cuSOLVER calls run on whatever streams their handles carry.
template<typename data_t>
void gpu_extend_block(cublasHandle_t blas_handle, cusolverDnHandle_t sol_handle,
    const data_t* last_M, int same_len, data_t* this_M, int this_lda, int* d_info, int* d_ipiv = nullptr, cudaStream_t stream = NULL)
{
    // function-pointer dispatch on element size: float -> S, double -> D
    cublasStatus_t(*trsm)(cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo,
        cublasOperation_t trans, cublasDiagType_t diag, int m, int n, const data_t * alpha,
        const data_t * A, int lda, data_t * B, int ldb) = nullptr;
    cublasStatus_t(*gemm)(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
        int m, int n, int k, const data_t * alpha, const data_t * A, int lda, const data_t * B, int ldb,
        const data_t * beta, data_t * C, int ldc) = nullptr;
    cusolverStatus_t(*getrf)(cusolverDnHandle_t handle, int m, int n, data_t * A, int lda,
        data_t * Workspace, int* devIpiv, int* devInfo) = nullptr;
    // const data_t* ptr_minus_one = nullptr, * ptr_zero = nullptr, * ptr_one = nullptr;
    // dc: device-resident scalars; dc[1]/dc[2] used as alpha/beta below
    // (apparently 1 and -1 — confirm against dc_f32/dc_f64 definitions)
    const data_t* dc = nullptr;
    if constexpr (sizeof(data_t) == 4) {
        trsm = cublasStrsm;
        gemm = cublasSgemm;
        getrf = cusolverDnSgetrf;
        dc = dc_f32;
    }
    else if constexpr (sizeof(data_t) == 8) {
        trsm = cublasDtrsm;
        gemm = cublasDgemm;
        getrf = cusolverDnDgetrf;
        dc = dc_f64;
    }

    cublasPointerMode_t mode;
    CUBLAS_CHECK(cublasGetPointerMode(blas_handle, &mode));
    assert(mode == CUBLAS_POINTER_MODE_DEVICE);

    dim3 grdSize_mat(1, 1, 1);
    grdSize_mat.x = (same_len - 1 + blkSize_mat.x) / blkSize_mat.x;
    grdSize_mat.y = (same_len - 1 + blkSize_mat.y) / blkSize_mat.y;
    device_copy_mat <<<grdSize_mat, blkSize_mat, 0, stream>>> (last_M, same_len, this_M, this_lda, same_len, same_len);

    data_t* A01 = this_M + same_len * this_lda;
    CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
        same_len, this_lda - same_len, &dc[1], last_M, same_len, A01, this_lda));

    data_t* A10 = this_M + same_len;
    CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
        this_lda - same_len, same_len, &dc[1], last_M, same_len, A10, this_lda));

    data_t* A11 = A01 + same_len;
    CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, this_lda - same_len, this_lda - same_len, same_len,
        &dc[2], A10, this_lda, A01, this_lda, &dc[1], A11, this_lda));
    // workspace query + allocation for the trailing factorization
    int lwork = 0;
    if constexpr (sizeof(data_t) == 8) {
        CUSOLVER_CHECK(cusolverDnDgetrf_bufferSize(sol_handle, this_lda - same_len, this_lda - same_len, A11, this_lda, &lwork));
    } else if constexpr (sizeof(data_t) == 4) {
        CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, this_lda - same_len, this_lda - same_len, A11, this_lda, &lwork));
    }
    data_t* d_work = (data_t*) check_device_workspace_to_enlarge(lwork * sizeof(data_t));
    if (d_ipiv == nullptr) {
        CUSOLVER_CHECK(getrf(sol_handle, this_lda - same_len, this_lda - same_len, A11, this_lda, d_work, nullptr, d_info));// no pivoting
    } else {
        CUSOLVER_CHECK(getrf(sol_handle, this_lda - same_len, this_lda - same_len, A11, this_lda, d_work, d_ipiv + same_len, d_info));
        dim3 grdSize_vec(1, 1, 1);
        // dim3 tmpB(1,1,1); device_scalar_print<<<grdSize_vec, tmpB>>>(d_ipiv + same_len, this_lda - same_len, this_lda - same_len);
        grdSize_vec.x = (this_lda - same_len - 1 + blkSize_vec.x) / blkSize_vec.x;
        // shift the new pivot indices into global (whole-matrix) numbering
        device_incre<<<grdSize_vec, blkSize_vec, 0, stream>>>(this_lda - same_len, d_ipiv + same_len, same_len);
    }
}

// In-place, lda-aware variant of gpu_extend_block: extends the factorization
// already stored in `memory` (leading last_nrows x last_nrows block, leading
// dimension lda) to nrows x nrows. The two triangular solves are overlapped
// on stream_1 / stream_0; stream_1 is joined before the gemm that consumes
// both results. When d_ipiv is supplied, the trailing getrf pivots and
// device_incre shifts its indices by last_nrows into global numbering.
// On exit both handles are reset to the default (NULL) stream.
// Requires the cuBLAS handle to already be in device pointer mode (asserted).
template<typename data_t>
void gpu_extend_block_lda(cublasHandle_t blas_handle, cusolverDnHandle_t sol_handle,
    data_t* memory, const int last_nrows, const int nrows, const int lda,
	int* d_info, int* d_ipiv = nullptr, cudaStream_t stream_0 = NULL, cudaStream_t stream_1 = NULL)
{
    // function-pointer dispatch on element size: float -> S, double -> D
    cublasStatus_t(*trsm)(cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo,
        cublasOperation_t trans, cublasDiagType_t diag, int m, int n, const data_t * alpha,
        const data_t * A, int lda, data_t * B, int ldb) = nullptr;
    cublasStatus_t(*gemm)(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
        int m, int n, int k, const data_t * alpha, const data_t * A, int lda, const data_t * B, int ldb,
        const data_t * beta, data_t * C, int ldc) = nullptr;
    cusolverStatus_t(*getrf)(cusolverDnHandle_t handle, int m, int n, data_t * A, int lda,
        data_t * Workspace, int* devIpiv, int* devInfo) = nullptr;
    // const data_t* ptr_minus_one = nullptr, * ptr_zero = nullptr, * ptr_one = nullptr;
    // dc: device-resident scalars; dc[1]/dc[2] used as alpha/beta below
    // (apparently 1 and -1 — confirm against dc_f32/dc_f64 definitions)
    const data_t* dc = nullptr;
    if constexpr (sizeof(data_t) == 4) {
        trsm = cublasStrsm;
        gemm = cublasSgemm;
        getrf = cusolverDnSgetrf;
        dc = dc_f32;
    }
    else if constexpr (sizeof(data_t) == 8) {
        trsm = cublasDtrsm;
        gemm = cublasDgemm;
        getrf = cusolverDnDgetrf;
        dc = dc_f64;
    }

    cublasPointerMode_t mode;
    CUBLAS_CHECK(cublasGetPointerMode(blas_handle, &mode));
    assert(mode == CUBLAS_POINTER_MODE_DEVICE);
	CUBLAS_CHECK(cublasSetStream(blas_handle, stream_0));
	CUSOLVER_CHECK(cusolverDnSetStream(sol_handle, stream_0));

	// the top-left factorized block is already in place in `memory`
	const int delta = nrows - last_nrows;

	data_t* A01 = memory + last_nrows * lda;
	data_t* A10 = memory + last_nrows;
	data_t* A11 = A01 + last_nrows;

	if (last_nrows > 0) {// extend from the previous factorization
		// A01 solve on stream_1, A10 solve on stream_0 (independent work)
		CUBLAS_CHECK(cublasSetStream(blas_handle, stream_1));
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
			last_nrows, delta, &dc[1], memory, lda, A01, lda));

		CUBLAS_CHECK(cublasSetStream(blas_handle, stream_0));
		CUBLAS_CHECK(trsm(blas_handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
			delta, last_nrows, &dc[1], memory, lda, A10, lda));

		// join stream_1: the gemm below needs both trsm results
		CUDA_CHECK(cudaStreamSynchronize(stream_1));
		CUBLAS_CHECK(gemm(blas_handle, CUBLAS_OP_N, CUBLAS_OP_N, delta, delta, last_nrows,
	        &dc[2], A10, lda, A01, lda, &dc[1], A11, lda));
	} else assert(last_nrows == 0); // otherwise factorize the trailing block directly
	
    // workspace query + allocation for the trailing factorization
    int lwork = 0;
    if constexpr (sizeof(data_t) == 8) {
        CUSOLVER_CHECK(cusolverDnDgetrf_bufferSize(sol_handle, delta, delta, A11, lda, &lwork));
    } else if constexpr (sizeof(data_t) == 4) {
        CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, delta, delta, A11, lda, &lwork));
    }
    data_t* d_work = (data_t*) check_device_workspace_to_enlarge(lwork * sizeof(data_t));
    if (d_ipiv == nullptr) {
        CUSOLVER_CHECK(getrf(sol_handle, delta, delta, A11, lda, d_work, nullptr, d_info));// no pivoting
    } else {
        CUSOLVER_CHECK(getrf(sol_handle, delta, delta, A11, lda, d_work, d_ipiv + last_nrows, d_info));
        dim3 grdSize_vec(1, 1, 1);
        // dim3 tmpB(1,1,1); device_scalar_print<<<grdSize_vec, tmpB>>>(d_ipiv + same_len, this_lda - same_len, this_lda - same_len);
        grdSize_vec.x = (delta - 1 + blkSize_vec.x) / blkSize_vec.x;
        // shift the new pivot indices into global (whole-matrix) numbering
        device_incre<<<grdSize_vec, blkSize_vec, 0, stream_0>>>(delta, d_ipiv + last_nrows, last_nrows);
    }

	// restore both handles to the default stream
	CUBLAS_CHECK(cublasSetStream(blas_handle, NULL));
	CUSOLVER_CHECK(cusolverDnSetStream(sol_handle, NULL));
}

#endif
