#include "gpu_helper.cuh"
#include "gpu_template.cuh"

// Enumerate all CUDA devices and print driver/runtime versions plus the key
// hardware properties of each device. Intended as a one-shot diagnostic at startup.
void init_GPU_query()
{
	int deviceCount;
	CUDA_CHECK(cudaGetDeviceCount(&deviceCount));

	for (int dev = 0; dev < deviceCount; dev++) {
		int driver_version = 0, runtime_version = 0;
		cudaDeviceProp deviceProp;
		CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev));
		if (dev == 0)
			// 9999.9999 is the sentinel compute capability reported when no real
			// CUDA device exists. BUG FIX: original wrote `minor = 9999` (assignment,
			// which due to precedence stored the boolean `9999 && major==9999`).
			if (deviceProp.minor == 9999 && deviceProp.major == 9999)
				printf("\n");
		printf("\nDevice%d:\"%s\"\n", dev, deviceProp.name);
		CUDA_CHECK(cudaDriverGetVersion(&driver_version));// BUG FIX: return code was ignored
		printf("CUDA Driver version:                                   %d.%d\n", driver_version / 1000, (driver_version % 1000) / 10);
		CUDA_CHECK(cudaRuntimeGetVersion(&runtime_version));// BUG FIX: return code was ignored
		printf("CUDA Runtime version:                                %d.%d\n", runtime_version / 1000, (runtime_version % 1000) / 10);
		printf("Device Compute capability:                                   %d.%d\n", deviceProp.major, deviceProp.minor);
		printf("Total amount of Global Memory:                  %zu bytes\n", deviceProp.totalGlobalMem);
		printf("Number of SMs:                                  %d\n", deviceProp.multiProcessorCount);
		printf("Total amount of Shared Memory per block:        %zu bytes\n", deviceProp.sharedMemPerBlock);
		printf("Total number of registers available per block:  %d\n", deviceProp.regsPerBlock);
		printf("Warp size:                                      %d\n", deviceProp.warpSize);
		printf("Maximum number of threads per SM:               %d\n", deviceProp.maxThreadsPerMultiProcessor);
		printf("Maximum number of threads per block:            %d\n", deviceProp.maxThreadsPerBlock);
		printf("Maximum size of each dimension of a block:      %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
		printf("Maximum size of each dimension of a grid:       %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
		printf("Concurrent Kernel Execution:                    %d\n", deviceProp.concurrentKernels);
		printf("can Map Host Memory:                            %d\n", deviceProp.canMapHostMemory);
		printf("can Use Host Pointer For Registered Mem:        %d\n", deviceProp.canUseHostPointerForRegisteredMem);
		printf("support integrated stream ordered memory allocator:    %d\n", deviceProp.memoryPoolsSupported);
		printf("\n");
	}
}

double* dc_f64 = nullptr;// device coefficients {0, 1, -1}
float * dc_f32 = nullptr;// device coefficients {0, 1, -1}
// Pool of CUDA streams shared by the helpers below; created in init_GPU_coeff().
#define MAX_NSTREAM 12
cudaStream_t streams[MAX_NSTREAM];
// Page-locked (pinned) host staging buffer used for H2D packing/copies; length in doubles.
static uint64_t pin_locked_len = 0;
static double* pin_locked_hbuf = nullptr;
// static uint64_t pin_locked_len_aux = 0;
// static double* pin_locked_hbuf_aux = nullptr;
// Reusable device scratch buffer, grown on demand by check_device_workspace_to_enlarge().
static uint64_t device_work_bytes = 0;
static void* device_work = nullptr;
int * d_info = nullptr;// single-int device buffer (presumably a status/info word — confirm at call sites)
extern int gnt;// OpenMP thread count used by the host-side packing loops (defined elsewhere)

__host__
// One-time device-side setup:
//  - upload the constant GEMM coefficients {0, +1, -1} in float and double,
//  - allocate the pinned host staging buffer and the device scratch workspace
//    (both grown on demand later if they turn out too small),
//  - allocate the 1-int device info buffer and create the stream pool.
// Must be paired with clear_GPU_coeff(); the asserts enforce single init.
void init_GPU_coeff()
{
	assert(dc_f32 == nullptr && dc_f64 == nullptr);
	printf("Init constant coefficients on the device\n");
	float tmp_f32[4] = {0.0, 1.0, -1.0, 0.0};
	CUDA_CHECK(cudaMalloc((void**)&dc_f32, sizeof(tmp_f32)));
	CUDA_CHECK(cudaMemcpy(dc_f32, tmp_f32, sizeof(tmp_f32), cudaMemcpyHostToDevice));

	double tmp_f64[4] = {0.0, 1.0, -1.0, 0.0};
	CUDA_CHECK(cudaMalloc((void**)&dc_f64, sizeof(tmp_f64)));
	CUDA_CHECK(cudaMemcpy(dc_f64, tmp_f64, sizeof(tmp_f64), cudaMemcpyHostToDevice));

	// Initial sizing assumes up to N = 4000 growing in steps of delta_N = 100.
	const int N = 4000, delta_N = 100;
	pin_locked_len = (uint64_t)N * N - (uint64_t)(N - delta_N) * (N - delta_N);
	assert(pin_locked_hbuf == nullptr);
	CUDA_CHECK(cudaHostAlloc((void**)&pin_locked_hbuf, pin_locked_len * sizeof(*pin_locked_hbuf), cudaHostAllocWriteCombined));
	// BUG FIX: %llu with a raw uint64_t is UB on LP64 platforms — cast explicitly.
	printf("Init pin locked buf %llu double\n", (unsigned long long) pin_locked_len);

	device_work_bytes = (uint64_t)N * N * sizeof(*pin_locked_hbuf);// sized for 8-byte elements
	assert(device_work == nullptr);
	CUDA_CHECK(cudaMalloc((void**)&device_work, device_work_bytes));
	printf("Init device workspace %llu bytes\n", (unsigned long long) device_work_bytes);

	assert(d_info == nullptr);
	CUDA_CHECK(cudaMalloc((void**)&d_info, sizeof(*d_info)));

	for (int i = 0; i < MAX_NSTREAM; i++) {
		CUDA_CHECK(cudaStreamCreate(&streams[i]));// BUG FIX: return code was ignored
	}
}

__host__
// Release everything created by init_GPU_coeff(). All freed pointers are reset
// to nullptr so the init/clear pair can be called again — the asserts in
// init_GPU_coeff() would otherwise fire, and stale pointers risk double-free.
void clear_GPU_coeff()
{
	assert(dc_f32 != nullptr && dc_f64 != nullptr);
	printf("Free constant coefficients on the device\n");
	CUDA_CHECK(cudaFree(dc_f32)); dc_f32 = nullptr;// BUG FIX: pointers were left dangling
	CUDA_CHECK(cudaFree(dc_f64)); dc_f64 = nullptr;
	CUDA_CHECK(cudaFreeHost(pin_locked_hbuf)); pin_locked_hbuf = nullptr; pin_locked_len = 0;
	CUDA_CHECK(cudaFree(device_work)); device_work = nullptr; device_work_bytes = 0;
	CUDA_CHECK(cudaFree(d_info)); d_info = nullptr;
	for (int i = 0; i < MAX_NSTREAM; i++) {
		CUDA_CHECK(cudaStreamDestroy(streams[i]));// BUG FIX: return code was ignored
	}
}

__host__
// Return the shared device scratch buffer, growing it (by doubling) when it is
// not strictly larger than required_bytes. Contents are NOT preserved across a
// regrow. Caller must not free the returned pointer.
void* check_device_workspace_to_enlarge(const uint64_t required_bytes)
{
	if (device_work_bytes <= required_bytes) {
		CUDA_CHECK(cudaFree(device_work));
		device_work = nullptr;// avoid a dangling pointer if cudaMalloc below fails
		// BUG FIX: if device_work_bytes is 0 (e.g. after clear_GPU_coeff), 0 << 1
		// stays 0 and the loop below never terminates. Seed with 1 first.
		if (device_work_bytes == 0) device_work_bytes = 1;
		while (device_work_bytes <= required_bytes) device_work_bytes <<= 1;// double until large enough
		printf("Realloc device workspace to %llu bytes\n", (unsigned long long) device_work_bytes);
		CUDA_CHECK(cudaMalloc((void**)&device_work, device_work_bytes));
	}
	return device_work;
}
__host__
// Accessor for the single-int device buffer allocated in init_GPU_coeff()
// (presumably a solver status/info word — confirm against call sites).
int* device_info()
{
	return d_info;
}

// Allocate a device buffer of `len` doubles and upload h_A into it.
// The wall-clock cost of the alloc + H2D copy is accumulated into
// record.t_trans (seconds). Caller owns the returned device pointer.
double* create_copy_to_device(const double* h_A, const int len, TEST_TIME & record)
{
	const auto t0 = std::chrono::high_resolution_clock::now();

	const size_t nbytes = sizeof(double) * (size_t)len;
	double* dev_ptr = nullptr;
	CUDA_CHECK(cudaMalloc((void**)&dev_ptr, nbytes));
	CUDA_CHECK(cudaMemcpy(dev_ptr, h_A, nbytes, cudaMemcpyHostToDevice));

	const auto t1 = std::chrono::high_resolution_clock::now();
	record.t_trans += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9;
	return dev_ptr;
}

__global__
// Debug helper: single-thread weighted checksum of a device array, printed from
// the device. Element i is weighted by (i % interval). Launch with one thread.
static void device_scalar_checksum(const int len, const double* data, const int interval)
{
	double acc = 0.0;
	for (int idx = 0; idx < len; idx++)
		acc += (double) (idx % interval) * data[idx];
	printf("Device Check sum %.20e len %d Inter %d\n", acc, len, interval);
}

// NOTE(review): PIN_LOCKED is not referenced anywhere in this visible chunk —
// possibly consumed by an included header, or dead; confirm before removing.
#define PIN_LOCKED

// Benchmark: run num_arr independent column-major DGEMMs C = A*B
// (M x N result from M x K times K x N) spread over nt host threads, each with
// its own cuBLAS handle, then print a weighted checksum of every result matrix.
// All operand matrices are filled with the same seeded random data per array.
void test_stream(int num_arr, int M, int N, int K, int nt)
{
	double** d_As = new double* [num_arr];
	double** d_Bs = new double* [num_arr];
	double** d_Cs = new double* [num_arr];
	double* h_A = new double [M * K];
	double* h_B = new double [K * N];
	double* h_C = new double [M * N];

	assert(nt <= omp_get_max_threads());

	srand(1003);
	for (int i = 0; i < num_arr; i++) {
		for (int j = 0; j < M * K; j++) h_A[j] = ((double) rand()) / RAND_MAX;
		for (int j = 0; j < K * N; j++) h_B[j] = ((double) rand()) / RAND_MAX;
		for (int j = 0; j < M * N; j++) h_C[j] = 0.0;

		CUDA_CHECK(cudaMalloc((void**)&d_As[i], sizeof(double) * M * K));
		CUDA_CHECK(cudaMalloc((void**)&d_Bs[i], sizeof(double) * K * N));
		CUDA_CHECK(cudaMalloc((void**)&d_Cs[i], sizeof(double) * M * N));
		CUDA_CHECK(cudaMemcpy(d_As[i], h_A, sizeof(*h_A) * M * K, cudaMemcpyHostToDevice));
		CUDA_CHECK(cudaMemcpy(d_Bs[i], h_B, sizeof(*h_B) * K * N, cudaMemcpyHostToDevice));
		CUDA_CHECK(cudaMemcpy(d_Cs[i], h_C, sizeof(*h_C) * M * N, cudaMemcpyHostToDevice));
	}

	// NOTE(review): these per-thread streams (shadowing the file-scope `streams`)
	// are created but never attached to the cuBLAS handles — cublasSetStream is
	// commented out below, so all GEMMs run on the default stream. Kept as-is.
	cudaStream_t* streams = new cudaStream_t[nt];
	for (int i = 0; i < nt; i++) {
		CUDA_CHECK(cudaStreamCreate(&streams[i]));
	}

	cudaEvent_t start, stop; float tms;
	CUDA_CHECK(cudaEventCreate(&start));
	CUDA_CHECK(cudaEventCreate(&stop));
	CUDA_CHECK(cudaEventRecord(start));

	// BUG FIX: the original `#pragma omp parallel` spawned the OpenMP default
	// thread count, while this block partitions [0, num_arr) assuming exactly nt
	// threads — any tid >= nt would index past num_arr. Force nt threads.
	#pragma omp parallel num_threads(nt)
	{
		int tid = omp_get_thread_num();
		// Block-partition the arrays as evenly as possible over nt threads.
		int num = num_arr / nt;
		int rem = num_arr - num * nt;
		int beg = tid * num;
		if (tid < rem) {
			beg += tid;
			num += 1;
		} else {
			beg += rem;
		}
		printf("T%d/%d: [%d,%d)\n", tid, nt, beg, beg + num);

		cublasHandle_t handle;
		CUBLAS_CHECK(cublasCreate(&handle));// per-thread cuBLAS handle
		// alpha/beta live on the device (dc_f64), hence device pointer mode.
		CUBLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
		// cublasSetStream(handle, streams[tid]);

		for (int k = beg; k < beg + num; k++) {
			CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K,
				&dc_f64[1], d_As[k], M, d_Bs[k], K, &dc_f64[0], d_Cs[k], M));
		}

		CUBLAS_CHECK(cublasDestroy(handle));
	}

	CUDA_CHECK(cudaEventRecord(stop));
	CUDA_CHECK(cudaEventSynchronize(stop));
	CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop));
	printf("Time %.6f s\n", tms / 1000);// BUG FIX: "TIme" typo

	for (int i = 0; i < num_arr; i++) {
		CUDA_CHECK(cudaMemcpy(h_C, d_Cs[i], sizeof(*h_C) * M * N, cudaMemcpyDeviceToHost));
		int interval = 10;
		double res = 0.0;
		for (int j = 0; j < M * N; j++) {
			res += (double) (j % interval) * h_C[j];
		}
		printf("Arr%d checksum %.20e\n", i, res);

		CUDA_CHECK(cudaFree(d_As[i]));
		CUDA_CHECK(cudaFree(d_Bs[i]));
		CUDA_CHECK(cudaFree(d_Cs[i]));
	}

	// BUG FIX: host buffers and the CUDA events were leaked in the original.
	CUDA_CHECK(cudaEventDestroy(start));
	CUDA_CHECK(cudaEventDestroy(stop));
	delete[] h_A;
	delete[] h_B;
	delete[] h_C;
	delete[] d_As;
	delete[] d_Bs;
	delete[] d_Cs;
	for (int i = 0; i < nt; i++) {
		CUDA_CHECK(cudaStreamDestroy(streams[i]));
	}
	delete[] streams;
}

// Grow a square device matrix: allocate a new new_lda x new_lda buffer, copy the
// existing last_nrows x last_nrows block into it via device_copy_mat on
// streams[sid], free the old buffer and return the new one through old_A.
// NOTE(review): the synchronous cudaFree(old_A) implicitly synchronizes with the
// device, so the in-flight copy kernel finishes before old_A is released.
void device_expand_mat(double* & old_A, const size_t old_lda, const int last_nrows, const size_t new_lda, const int sid)
{
	double* new_A = nullptr;
	// CUDA_CHECK(cudaMallocAsync((void**)&new_A, sizeof(double) * new_lda * new_lda, streams[sid]));
	CUDA_CHECK(cudaMalloc((void**)&new_A, sizeof(double) * new_lda * new_lda));
	dim3 grdSize_mat;
	grdSize_mat.x = (last_nrows - 1 + blkSize_mat.x) / blkSize_mat.x;// ceil-div
	grdSize_mat.y = (last_nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
	// Migrate the old contents into the enlarged matrix.
	device_copy_mat<<<grdSize_mat, blkSize_mat, 0, streams[sid]>>>
		(old_A, old_lda, new_A, new_lda, last_nrows, last_nrows);
	// CUDA_CHECK(cudaFreeAsync(old_A, streams[sid])); old_A = new_A;
	CUDA_CHECK(cudaFree(old_A)); old_A = new_A;
}

// Discard the old device vector and allocate a fresh one of new_len doubles.
// Contents are NOT preserved. `sid` is currently unused (the stream-ordered
// async variants are commented out).
void device_expand_vec(double* & vec, const size_t new_len, const int sid) {
	// CUDA_CHECK(cudaFreeAsync(vec, streams[sid]));
	// CUDA_CHECK(cudaMallocAsync((void**)&vec, sizeof(*vec) * new_len, streams[sid]));
	CUDA_CHECK(cudaFree(vec));
	vec = nullptr;
	CUDA_CHECK(cudaMalloc((void**)&vec, sizeof(double) * new_len));
}

// Scatter an incremental update into device matrix A (leading dim dst_lda).
// With delta = src_nrows - last_nrows, `patch` is packed as:
//   [0, last_nrows*delta)   : the delta new rows (i >= last_nrows) of each old column j < last_nrows,
//   [last_nrows*delta, end) : the entirely new columns j >= last_nrows, src_nrows tall.
// Elements with both i,j < last_nrows are left untouched.
// Launch: 2D grid covering src_nrows x src_nrows threads.
__global__
static void device_update_mat_kernel(double* A, const size_t dst_lda, const int last_nrows, const int src_nrows, 
	const double* patch)
{
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;// row index
	uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;// column index
	if (i < src_nrows && j < src_nrows) {
		const double* my_src = nullptr;
		if (j < last_nrows) {
			// Old column: only the newly-appended rows come from the patch.
			if (i >= last_nrows) my_src = patch + j * (src_nrows - last_nrows) + i - last_nrows;
		} else {// j >= last_nrows: entirely new column
			my_src = patch + last_nrows * (src_nrows - last_nrows) + (j - last_nrows) * src_nrows + i;
		}
		if (my_src != nullptr) {
			double val = *my_src;
			assert(val == val);// NaN guard
			A[j * dst_lda + i] = val;
		}
	}
}

// Upload the incremental part of host matrix src_A (leading dim src_lda, grown
// from last_nrows to src_nrows) into device matrix d_A (leading dim dst_lda).
// `hbuf` must be a pinned host buffer with room for src_nrows^2 - last_nrows^2
// doubles. Packing is parallelized over gnt OpenMP threads, the packed patch is
// copied H2D on streams[sid] and scattered by device_update_mat_kernel.
void device_update_mat(double* & d_A, const size_t dst_lda, const double* src_A, const size_t src_lda, 
    const int last_nrows, const int src_nrows, const int sid, double* hbuf)
{
    assert(src_nrows <= dst_lda);
	// BUG FIX: widen before multiplying — src_nrows * src_nrows overflowed
	// 32-bit int for large matrices before being assigned to size_t.
	const size_t buf_len = (size_t)src_nrows * src_nrows - (size_t)last_nrows * last_nrows;
	double* dbuf = nullptr;
	CUDA_CHECK(cudaMalloc((void**)&dbuf, buf_len * sizeof(double)));

	// Pack only the incremental part of src_A into the pinned buffer.
	const int delta = src_nrows - last_nrows;
	#pragma omp parallel num_threads(gnt)
	{
		// New rows of the old columns (j < last_nrows, i >= last_nrows).
		#pragma omp for schedule(static) nowait
        for (int j = 0; j < last_nrows; j++)
        for (int i = 0; i < delta; i++) {
            hbuf[(size_t)j * delta + i] = src_A[j * src_lda + last_nrows + i];
        }
		// Entirely new columns (j >= last_nrows), full height.
		double* dst = hbuf + (size_t)last_nrows * delta;
		#pragma omp for schedule(static)
		for (int j = last_nrows; j < src_nrows; j++)
        for (int i = 0; i < src_nrows; i++) {
            dst[(size_t)(j - last_nrows) * src_nrows + i] = src_A[j * src_lda + i];
        }
	}
	CUDA_CHECK(cudaMemcpyAsync(dbuf, hbuf, sizeof(*dbuf) * buf_len, cudaMemcpyHostToDevice, streams[sid]));

	dim3 grdSize_mat;
	grdSize_mat.x = (src_nrows - 1 + blkSize_mat.x) / blkSize_mat.x;// ceil-div
	grdSize_mat.y = (src_nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
	device_update_mat_kernel<<<grdSize_mat, blkSize_mat, 0, streams[sid]>>>(d_A, dst_lda, last_nrows, src_nrows, dbuf);

	// The synchronous cudaFree waits for the device, so the async copy and the
	// kernel above complete before dbuf is released.
	CUDA_CHECK(cudaFree(dbuf));
}

// Assemble an nrows x nrows matrix A by interleaving the previous
// last_nrows x last_nrows matrix last_A with new data from the packed `patch`.
// Rows/columns are split into halves (hf = nrows/2, last_hf = last_nrows/2):
// the four quadrants L0..L3 of last_A land at offsets built from {0, hf}, and
// the new rows/columns between them come from the patch strips P0..P5, where
// pat_half marks the start of the second half of the patch. Every output
// element is written by exactly one branch below.
// NOTE(review): pat_half uses 32-bit products (nrows*nrows) — could overflow
// for very large nrows; confirm the expected problem sizes.
__global__
static void device_restore_matrix(double* A, const int nrows, const double* last_A, const int last_nrows,
	const double* patch)
{
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;// row index
	uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;// column index
	const int hf = nrows >> 1;
	const int last_hf = last_nrows >> 1;
	const int delta_hf = hf - last_hf;
	if (i < nrows && j < nrows) {
		const double* my_src = nullptr;
		const int pat_half = ((nrows*nrows) - (last_nrows*last_nrows)) >> 1;
		if (j < last_hf) {
			if (i < last_hf) {
				const double* L0 = last_A;
				my_src = L0 + j * last_nrows + i;
			} else if (i < hf) {
				const double* P0 = patch;
				my_src = P0 + j * (nrows - last_nrows) + i - last_hf;
			} else if (i < hf + last_hf) {
				const double* L1 = last_A + last_hf;
				my_src = L1 + j * last_nrows + i - hf;
			} else {
				const double* P1 = patch + delta_hf;
				my_src = P1 + j * (nrows - last_nrows) + i - hf - last_hf;
			}
		}
		else if (j < hf) {
			const double* P2 = patch + (nrows - last_nrows) * last_hf;
			my_src = P2 + (j - last_hf) * nrows + i;
		}
		else if (j < hf + last_hf) {
			if (i < last_hf) {
				const double* L2 = last_A + last_hf * last_nrows;
				my_src = L2 + (j - hf) * last_nrows + i;
			} else if (i < hf) {
				const double* P3 = patch + pat_half;
				my_src = P3 + (j - hf) * (nrows - last_nrows) + i - last_hf;
			} else if (i < hf + last_hf) {
				const double* L3 = last_A + last_hf * last_nrows + last_hf;
				my_src = L3 + (j - hf) * last_nrows + i - hf;
			} else {
				const double* P4 = patch + pat_half + delta_hf;
				my_src = P4 + (j - hf) * (nrows - last_nrows) + i - hf - last_hf;
			}
		}
		else {
			const double* P5 = patch + pat_half + (nrows - last_nrows) * last_hf;
			my_src = P5 + (j - hf - last_hf) * nrows + i;
		}

		if (my_src != nullptr) {
			double val = *my_src;
			assert(val == val);// NaN guard
			A[j * nrows + i] = val;
		}
	}
}

// Rebuild an nrows x nrows device matrix from the previous last_nrows x last_nrows
// device matrix d_last_A plus the new parts of the host matrix h_A; d_last_A is
// freed and replaced by the new nrows*nrows allocation.
// The new host-side parts are packed (gnt OpenMP threads) into the pinned
// staging buffer in the half-block order expected by device_restore_matrix,
// copied H2D on streams[sid], and merged with d_last_A on the device.
void create_copyPart_to_device(const double* h_A, const int nrows, double* & d_last_A, const int last_nrows, const int sid)
{
	const int hf = nrows >> 1;
	const int last_hf = last_nrows >> 1;
	const int delta_hf = hf - last_hf;
	// BUG FIX: widen before multiplying — nrows * nrows overflowed 32-bit int.
	const uint64_t buf_len = (uint64_t)nrows * nrows - (uint64_t)last_nrows * last_nrows;

	if (pin_locked_len <= buf_len) {
		CUDA_CHECK(cudaFreeHost(pin_locked_hbuf));
		if (pin_locked_len == 0) pin_locked_len = 1;// BUG FIX: 0 << 1 stays 0 (infinite loop)
		while (pin_locked_len <= buf_len) pin_locked_len <<= 1;// grow by doubling
		printf("Realloc pin locked buf to %llu double\n", (unsigned long long) pin_locked_len);
		CUDA_CHECK(cudaHostAlloc((void**)&pin_locked_hbuf, sizeof(*pin_locked_hbuf) * pin_locked_len, cudaHostAllocWriteCombined));
	}
	double* hbuf = pin_locked_hbuf;
	double* dbuf = nullptr, * new_A = nullptr;
	CUDA_CHECK(cudaMalloc((void**)&dbuf , sizeof(*dbuf) * buf_len));
	CUDA_CHECK(cudaMalloc((void**)&new_A, sizeof(*new_A) * (uint64_t)nrows * nrows));

	#pragma omp parallel num_threads(gnt)
	{
		// Columns j < last_hf: copy the two new row strips [last_hf, hf) and [hf+last_hf, nrows).
		#pragma omp for schedule(static) nowait
		for (int j = 0; j < last_hf; j ++) {
			double* patch = hbuf + (size_t)j * (nrows - last_nrows);
			const double* src = h_A + (size_t)j * nrows + last_hf;
			for (int k = 0; k < delta_hf; k++)
				patch[k] = src[k];

			patch += delta_hf;
			src += hf;
			for (int k = 0; k < delta_hf; k++)
				patch[k] = src[k];
		}
		// Columns j in [last_hf, hf): entirely new, copied full height.
		#pragma omp for schedule(static) nowait
		for (int j = last_hf; j < hf; j++) {
			double* patch = hbuf + (size_t)last_hf * (nrows - last_nrows) + (size_t)(j - last_hf) * nrows;
			const double* src = h_A + (size_t)j * nrows;
			for (int i = 0; i < nrows; i++)
				patch[i] = src[i];
		}

		// Columns j in [hf, hf+last_hf): again only the two new row strips.
		#pragma omp for schedule(static) nowait
		for (int j = hf; j < hf + last_hf; j ++) {
			double* patch = hbuf + (size_t)last_hf * (nrows - last_nrows) + (size_t)delta_hf * nrows
						+ (size_t)(j - hf) * (nrows - last_nrows);
			const double* src = h_A + (size_t)j * nrows + last_hf;
			for (int k = 0; k < delta_hf; k++)
				patch[k] = src[k];

			patch += delta_hf;
			src += hf;
			for (int k = 0; k < delta_hf; k++)
				patch[k] = src[k];
		}
		// Columns j >= hf+last_hf: entirely new, copied full height.
		#pragma omp for schedule(static) nowait
		for (int j = hf + last_hf; j < nrows; j++) {
			double* patch = hbuf + (size_t)last_hf * (nrows - last_nrows) + (size_t)delta_hf * nrows
						+ (size_t)last_hf * (nrows - last_nrows) + (size_t)(j - hf - last_hf) * nrows;
			const double* src = h_A + (size_t)j * nrows;
			for (int i = 0; i < nrows; i++)
				patch[i] = src[i];
		}
	}
	CUDA_CHECK(cudaMemcpyAsync(dbuf, hbuf, sizeof(*dbuf) * buf_len, cudaMemcpyHostToDevice, streams[sid]));

	dim3 grdSize_mat;
	grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;// ceil-div
	grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
	device_restore_matrix<<<grdSize_mat, blkSize_mat, 0, streams[sid]>>>(new_A, nrows, d_last_A, last_nrows, dbuf);
	// The synchronous cudaFree waits for the device, so the async work above
	// completes before its operands are released.
	CUDA_CHECK(cudaFree(dbuf));
	CUDA_CHECK(cudaFree(d_last_A)); d_last_A = new_A;
}

// Assemble an nrows x nrows matrix A from the previous last_nrows x last_nrows
// matrix last_A (top-left block, already in the new ordering) plus a packed patch:
//   patch[0 .. last_nrows*(nrows-last_nrows))   : new rows of each old column,
//   patch[last_nrows*(nrows-last_nrows) .. end) : the full new columns.
// Unlike device_reorder_pad_matrix, every element of A is written.
// NOTE(review): the only call site in this file is commented out (see
// create_reorder_copyPart_to_device) — possibly dead code.
__global__
static void device_reorder_restore_matrix(double* A, const int nrows, const double* last_A, const int last_nrows,
	const double* patch)
{
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;// row index
	uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;// column index
	if (i < nrows && j < nrows) {
		const double* my_src = nullptr;
		if (j < last_nrows) {
			if (i < last_nrows) my_src = last_A + j * last_nrows + i;
			else // i >= last_nrows
				my_src = patch + j * (nrows - last_nrows) + i - last_nrows;
		} else {// j >= last_nrows
			my_src = patch + last_nrows * (nrows - last_nrows) + (j - last_nrows) * nrows + i;
		}
		double val = *my_src;
		assert(val == val);// NaN guard
		A[j * nrows + i] = val;
	}
}

// Pad an existing device matrix A (leading dim lda) with the new border blocks
// from the packed patch: new rows (i >= last_nrows) of old columns, then the
// full new columns (j >= last_nrows). Elements with both i,j < last_nrows are
// left untouched — A is assumed to already hold them.
__global__
static void device_reorder_pad_matrix(double* A, const int last_nrows, const int nrows, 
	const int lda, const double* patch)
{
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;// row index
	uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;// column index
	if (i < nrows && j < nrows) {
		const double* my_src = nullptr;
		if (j < last_nrows && i >= last_nrows) {
			my_src = patch + j * (nrows - last_nrows) + i - last_nrows;
		} else if (j >= last_nrows) {
			my_src = patch + last_nrows * (nrows - last_nrows) + (j - last_nrows) * nrows + i;
		}
		if (my_src != nullptr) {
			double val = *my_src;
			assert(val == val);// NaN guard
			A[j * lda + i] = val;
		}
	}
}

// Upload the incremental part of host matrix h_A into device matrix d_A
// (leading dim d_lda), reading h_A through the permutation map_n2o
// (new index -> old index): reordered entry (ni, nj) comes from
// h_A[map_n2o[nj] * nrows + map_n2o[ni]]. Only indices >= last_nrows are packed
// (gnt OpenMP threads), copied H2D on streams[sid], and padded into d_A by
// device_reorder_pad_matrix.
void create_reorder_copyPart_to_device(const double* h_A, const int nrows,
	double* & d_A, const int last_nrows, const int d_lda,
	const int* map_n2o, const int sid)
{
	// BUG FIX: widen before multiplying — nrows * nrows overflowed 32-bit int.
	uint64_t buf_len = (uint64_t)nrows * nrows - (uint64_t)last_nrows * last_nrows;
	if (pin_locked_len <= buf_len) {
		CUDA_CHECK(cudaFreeHost(pin_locked_hbuf));
		if (pin_locked_len == 0) pin_locked_len = 1;// BUG FIX: 0 << 1 stays 0 (infinite loop)
		while (pin_locked_len <= buf_len) pin_locked_len <<= 1;// grow by doubling
		printf("Realloc pin locked buf to %llu double\n", (unsigned long long) pin_locked_len);
		CUDA_CHECK(cudaHostAlloc((void**)&pin_locked_hbuf, sizeof(*pin_locked_hbuf) * pin_locked_len, cudaHostAllocWriteCombined));
	}
	double* hbuf = pin_locked_hbuf;
	double* dbuf = nullptr;
	CUDA_CHECK(cudaMalloc((void**)&dbuf , sizeof(*dbuf) * buf_len));

	#pragma omp parallel num_threads(gnt)
	{
		// Old columns nj < last_nrows: pack only the new rows ni >= last_nrows.
		#pragma omp for schedule(static) nowait
		for (int nj = 0; nj < last_nrows; nj++) {
			const int oj = map_n2o[nj];
			for (int ni = last_nrows; ni < nrows; ni++) {
				const int oi = map_n2o[ni];
				hbuf[(size_t)nj * (nrows - last_nrows) + ni - last_nrows] = h_A[(size_t)oj * nrows + oi];
			}
		}
		// New columns nj >= last_nrows: pack the full column.
		double* dst = hbuf + (size_t)last_nrows * (nrows - last_nrows);
		#pragma omp for schedule(static)
		for (int nj = last_nrows; nj < nrows; nj++) {
			const int oj = map_n2o[nj];
			for (int ni = 0; ni < nrows; ni++) {
				const int oi = map_n2o[ni];
				dst[(size_t)(nj - last_nrows) * nrows + ni] = h_A[(size_t)oj * nrows + oi];
			}
		}
	}
	CUDA_CHECK(cudaMemcpyAsync(dbuf, hbuf, sizeof(*dbuf) * buf_len, cudaMemcpyHostToDevice, streams[sid]));

	dim3 grdSize_mat;
	grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;// ceil-div
	grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
	device_reorder_pad_matrix<<<grdSize_mat, blkSize_mat, 0, streams[sid]>>>
		(d_A, last_nrows, nrows, d_lda, dbuf);
	// The synchronous cudaFree waits for the device, so the async work completes first.
	CUDA_CHECK(cudaFree(dbuf));
}

__global__
// Scatter by permutation: new_vec[map[i]] = old_vec[i] for i in [0, nrows).
static void device_reorder_vec_kernel(const int nrows, const double* old_vec, double* new_vec, const int* map)
{
	const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= nrows) return;// grid tail guard
	new_vec[map[idx]] = old_vec[idx];
}

// Launch the vector-permutation kernel on streams[sid]; the 1D grid is the
// ceil-division of nrows by blkSize_vec.x.
void device_reorder_vec(const int nrows, const double* old_vec, double* new_vec, const int* map, const int sid)
{
	dim3 grid((nrows + blkSize_vec.x - 1) / blkSize_vec.x);
	device_reorder_vec_kernel<<<grid, blkSize_vec, 0, streams[sid]>>>(nrows, old_vec, new_vec, map);
}

__global__
// Permute rows and columns of a square matrix (column-major):
// new_mat[map[j] * new_lda + map[i]] = old_mat[j * old_lda + i].
static void device_reorder_mat_kernel(const int nrows, const double* old_mat, const int old_lda, 
	double* new_mat, const int new_lda, const int* map)
{
	const uint32_t r = blockIdx.x * blockDim.x + threadIdx.x;// source row
	const uint32_t c = blockIdx.y * blockDim.y + threadIdx.y;// source column
	if (r >= nrows || c >= nrows) return;// grid tail guard
	new_mat[map[c] * new_lda + map[r]] = old_mat[c * old_lda + r];
}

// Launch the matrix-permutation kernel on streams[sid]; the 2D grid is the
// ceil-division of nrows by the block dimensions in each axis.
void device_reorder_mat(const int nrows, const double* old_mat, const int old_lda,
	double* new_mat, const int new_lda, const int* map, const int sid)
{
	dim3 grid((nrows + blkSize_mat.x - 1) / blkSize_mat.x,
	          (nrows + blkSize_mat.y - 1) / blkSize_mat.y);
	device_reorder_mat_kernel<<<grid, blkSize_mat, 0, streams[sid]>>>(nrows, old_mat, old_lda, new_mat, new_lda, map);
}

// Serial inversion of a unit lower-triangular matrix: `known` holds L
// (leading dim known_lda); the inverse is written into the lower triangle of
// `res` (leading dim res_lda). Column index is the first factor, so entry (row
// i, col j) is mat[j * lda + i]. Entries are filled sub-diagonal by
// sub-diagonal (d = i - j), so each value only depends on already-computed
// ones. Launch as a single thread (1x1 grid/block, asserted).
template<typename data_t> __global__ static 
void device_inverse_of_lower_unit(const data_t* known, const int known_lda, data_t* res, const int res_lda, const int nrows)
{
	assert(gridDim.x == 1 && blockDim.x == 1);
	for (int i = 0; i < nrows; i++) res[i * res_lda + i] = 1.0;// diagonal first: inverse of a unit diagonal is 1
	for (int d = 1; d < nrows; d++) {
		for (int i = d; i < nrows; i++) {
			int j = i - d;
			data_t tmp = - known[j * known_lda + i];
			for (int k = j + 1; k < i; k++) tmp -= known[k * known_lda + i] * res[j * res_lda + k];
			res[j * res_lda + i] = tmp;
		}
	}
}

// Serial inversion of a non-unit upper-triangular matrix: `known` holds U
// (leading dim known_lda); the inverse is written into the upper triangle of
// `res` (leading dim res_lda). Entries are filled super-diagonal by
// super-diagonal (d = j - i), so each value only depends on already-computed
// ones. Launch as a single thread (1x1 grid/block, asserted).
// NOTE(review): no check for zero diagonal entries — division by zero yields inf/NaN.
template<typename data_t> __global__ static 
void device_inverse_of_upper_nonunit(const data_t* known, const int known_lda, data_t* res, const int res_lda, const int nrows)
{
	assert(gridDim.x == 1 && blockDim.x == 1);
	for (int i = 0; i < nrows; i++) res[i * res_lda + i] = 1.0 / known[i * known_lda + i];// diagonal first
	for (int d = 1; d < nrows; d++) {
		for (int j = d; j < nrows; j++) {
			int i = j - d;
			data_t tmp = 0.0;
			for (int k = i + 1; k <= j; k++) tmp -= known[k * known_lda + i] * res[j * res_lda + k];
			res[j * res_lda + i] = tmp / known[i * known_lda + i];
		}
	}
}

// Unpack the combined triangular block src_K_and_P (leading dim src_lda) into
// two dense nrows x nrows matrices: the strictly-lower part goes to K (whose
// diagonal is forced to 1 — unit lower triangular), everything else (diagonal
// and upper part) goes to P. The untouched triangle of each output is zeroed.
// Launch: 2D grid covering nrows x nrows.
template<typename data_t> __global__ static 
void device_set_mat(const data_t* src_K_and_P, const int src_lda, data_t* P, data_t* K, const int nrows)
{
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;// row index
	uint32_t j = blockIdx.y * blockDim.y + threadIdx.y;// column index
	if (i < nrows && j < nrows) {
		P[j * nrows + i] = 0.0;
		K[j * nrows + i] = 0.0;
		if (j < i) K[j * nrows + i] = src_K_and_P[j * src_lda + i];// strictly lower -> K
		else       P[j * nrows + i] = src_K_and_P[j * src_lda + i];// diagonal + upper -> P
		
		if (j == i) K[i * nrows + i] = 1.0;// unit diagonal of the lower factor K
	}
}

// Incrementally extend two inverses after the LU factorization grows from
// last_nrows to new_nrows (delta = new_nrows - last_nrows):
//  - inv_LU: the packed inverse factors (L^{-1} strictly lower, U^{-1} upper),
//  - inv_A : the full matrix inverse (updated as a 2x2 block inverse).
// new_LU holds the new packed LU factors (leading dim LU_lda); both inverses
// use leading dim inv_lda (asserted >= new_nrows). Independent block updates
// are distributed over stream_0..stream_3 with explicit synchronization at the
// dependency points. `handle` must be in device pointer mode: alpha/beta come
// from the device-resident dc_f64 array (dc_f64[0]=0, [1]=+1, [2]=-1).
void device_extend_invLU_and_invA(
	double* & inv_LU, double* & inv_A, const int last_nrows, const int inv_lda,
	const double* new_LU, const int new_nrows, const int LU_lda,
	cudaStream_t stream_0, cudaStream_t stream_1, cudaStream_t stream_2, cudaStream_t stream_3,
	cublasHandle_t handle)
{
	assert(inv_lda >= new_nrows);
    const int delta = new_nrows - last_nrows;
    const double* E_and_F = new_LU + last_nrows *  LU_lda + last_nrows;// new bottom-right LU block
    double* K_and_P 	  = inv_LU + last_nrows * inv_lda + last_nrows;// its inverse is stored here
	CUBLAS_CHECK(cublasSetStream(handle, stream_0));

	if (last_nrows == 0) {// no previous round yet: invert everything from scratch
		dim3 grdSize(1, 1, 1);
		device_inverse_of_lower_unit   <<<grdSize, grdSize, 0, stream_0>>>
			(new_LU, LU_lda, inv_LU, inv_lda, new_nrows);
        device_inverse_of_upper_nonunit<<<grdSize, grdSize, 0, stream_0>>>
			(new_LU, LU_lda, inv_LU, inv_lda, new_nrows);

		// double* buf = nullptr;
		// CUDA_CHECK(cudaMalloc((void**)&buf, sizeof(*buf) * delta * delta * 2));
		double* buf = (double*) check_device_workspace_to_enlarge(sizeof(*buf) * delta * delta * 2);
		double* P = buf, * K = buf + delta * delta;
		grdSize.x = (delta - 1 + blkSize_mat.x) / blkSize_mat.x;
		grdSize.y = (delta - 1 + blkSize_mat.y) / blkSize_mat.y;
		device_set_mat<<<grdSize, blkSize_mat, 0, stream_0>>>(K_and_P, inv_lda, P, K, delta);
		// inv_A = P * K, i.e. U^{-1} * L^{-1}
		CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, delta, delta, delta,
			&dc_f64[1], P, delta, K, delta, &dc_f64[0], inv_A, inv_lda));
		// CUDA_CHECK(cudaFree(buf));
		CUBLAS_CHECK(cublasSetStream(handle, NULL));
		return ;
	}

	double* H = inv_LU + last_nrows * inv_lda,     * W = inv_LU + last_nrows;// top-right / bottom-left blocks
	// The top-left (Lt)^{-1} and (Ut)^{-1} would be copied here, but they are
	// already in place from the previous round.
	// dim3 grdSize(1, 1, 1);
	// grdSize.x = (last_nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
	// grdSize.y = (last_nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
    // device_copy_mat<<<grdSize, blkSize_mat, 0, stream>>>(last_inv_LU, last_nrows, new_inv_LU, new_nrows,
	// 													last_nrows, last_nrows);
	// Lower first, then upper, so the second kernel does not overwrite needed input.
	dim3 grdSize(1, 1, 1);
	device_inverse_of_lower_unit   <<<grdSize, grdSize, 0, stream_0>>>
		(E_and_F, LU_lda, K_and_P, inv_lda, delta);// lower-triangular inverse K
	device_inverse_of_upper_nonunit<<<grdSize, grdSize, 0, stream_0>>>
		(E_and_F, LU_lda, K_and_P, inv_lda, delta);// upper-triangular inverse P

	CUDA_CHECK(cudaStreamSynchronize(stream_0));// wait for stream 0 (K and P ready)

	// W <- K * C
	CUBLAS_CHECK(cublasSetStream(handle, stream_0));
	CUBLAS_CHECK(cublasDtrmm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
		delta, last_nrows, &dc_f64[1], K_and_P, inv_lda, new_LU + last_nrows			  , LU_lda, W, inv_lda));	
    // W * Lt = - K * C
	CUBLAS_CHECK(cublasDtrsm(handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
		delta, last_nrows, &dc_f64[2], new_LU, LU_lda, W, inv_lda));

	// buf <- B * P
	CUBLAS_CHECK(cublasSetStream(handle, stream_1));
	CUBLAS_CHECK(cublasDtrmm(handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
		last_nrows, delta, &dc_f64[1], K_and_P, inv_lda, new_LU + last_nrows * LU_lda, LU_lda, H, inv_lda));
    // Ut * H = - B * P
	CUBLAS_CHECK(cublasDtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
		last_nrows, delta, &dc_f64[2], new_LU, LU_lda, H, inv_lda));

	// inv_LU is now fully updated; start computing inv_A.
    double  * new_H = inv_A  + last_nrows * inv_lda, * new_W = inv_A  + last_nrows;
	// Top-left: (At)^{-1} + H*W — the (At)^{-1} part is already in place.
	// grdSize.x = (last_nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
	// grdSize.y = (last_nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
	// device_copy_mat<<<grdSize, blkSize_mat, 0, stream>>>(last_inv_A, last_nrows, new_inv_A, new_nrows,
	// 													last_nrows, last_nrows);

	CUDA_CHECK(cudaStreamSynchronize(stream_0));// wait for stream 0 to finish W
	CUDA_CHECK(cudaStreamSynchronize(stream_1));// wait for stream 1 to finish H
	// Stream 0 computes the top-left block.
	CUBLAS_CHECK(cublasSetStream(handle, stream_0));
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, last_nrows, last_nrows, delta,
			&dc_f64[1], H, inv_lda, W, inv_lda, &dc_f64[1], inv_A, inv_lda));
    // Top-right: H * K
	CUBLAS_CHECK(cublasSetStream(handle, stream_1));
	CUBLAS_CHECK(cublasDtrmm(handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
		last_nrows, delta, &dc_f64[1], K_and_P, inv_lda, H, inv_lda, new_H, inv_lda));	
    // Bottom-left: P * W
	CUBLAS_CHECK(cublasSetStream(handle, stream_2));
	CUBLAS_CHECK(cublasDtrmm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
		delta, last_nrows, &dc_f64[1], K_and_P, inv_lda, W, inv_lda, new_W, inv_lda));	
    // Bottom-right: P * K
	CUBLAS_CHECK(cublasSetStream(handle, stream_3));
	double* buf = (double*) check_device_workspace_to_enlarge(sizeof(*buf) * delta * delta * 2);
	// double* buf = nullptr;
	// CUDA_CHECK(cudaMalloc((void**)&buf, sizeof(*buf) * delta * delta * 2));
	double* P = buf, * K = buf + delta * delta;
	grdSize.x = (delta - 1 + blkSize_mat.x) / blkSize_mat.x;
	grdSize.y = (delta - 1 + blkSize_mat.y) / blkSize_mat.y;
	device_set_mat<<<grdSize, blkSize_mat, 0, stream_3>>>
		(K_and_P, inv_lda, P, K, delta);
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, delta, delta, delta,
			&dc_f64[1], P, delta, K, delta, &dc_f64[0], new_H + last_nrows, inv_lda));
	// CUDA_CHECK(cudaFree(buf));

	CUBLAS_CHECK(cublasSetStream(handle, NULL));
}

// Gauss-Jordan inversion of a small (nrows <= 32) matrix in place, one thread
// per element, staged in shared memory. Launch as a SINGLE block whose
// blockDim covers nrows x nrows (relies on __syncthreads()).
// NOTE(review): no pivoting — assumes every pivot A(i,i) is nonzero.
// BUG FIX: the original placed every __syncthreads() inside
// `if (ti < nrows && tj < nrows)`; when blockDim exceeds nrows the barrier is
// divergent — undefined behavior / possible hang. It also read the pivot while
// the pivot row was being scaled (read/write race). All barriers are now
// reached by every thread, and the pivot is read before the row is scaled.
template<typename idx_t, typename data_t> __global__ static 
void device_matinv_row(const int nrows, data_t * inout_A, const int lda) {
	assert(nrows <= 32);
	assert(gridDim.x == 1 && gridDim.y == 1);// single block only
	__shared__ data_t E[32 * 32], A[32 * 32];
	uint32_t ti = blockIdx.x * blockDim.x + threadIdx.x;
	uint32_t tj = blockIdx.y * blockDim.y + threadIdx.y;
	const bool active = (ti < nrows && tj < nrows);
	// Stage the input and build the identity matrix in shared memory.
	if (active) {
		A[tj * 32 + ti] = inout_A[tj * lda + ti];
		E[tj * 32 + ti] = (ti == tj) ? (data_t)1.0 : (data_t)0.0;
	}
	__syncthreads();
	// Gauss-Jordan elimination: after step i, column i of A equals e_i.
	for (idx_t i = 0; i < nrows; i++) {
		data_t piv = (data_t)0.0;
		if (active) piv = A[i * 32 + i];
		__syncthreads();// everyone holds the pivot before row i is scaled
		if (active && i == ti) {
			// Scale pivot row i so A(i,i) becomes 1 (each thread owns one column tj).
			A[tj * 32 + i] /= piv;
			E[tj * 32 + i] /= piv;
		}
		// Eliminate column i from every other row k.
		for (idx_t k = 0; k < nrows; k++) {
			__syncthreads();// row i's latest values must be visible before updating row k
			if (k == i) continue;
			if (active && k == ti) {
				data_t factor = A[i * 32 + k];
				A[tj * 32 + k] -= factor * A[tj * 32 + i];
				E[tj * 32 + k] -= factor * E[tj * 32 + i];
			}
		}
		__syncthreads();// all eliminations done before the next pivot is read
	}
	// Copy the accumulated inverse back out.
	if (active) inout_A[tj * lda + ti] = E[tj * 32 + ti];
}

// Copy the delta x delta block at (d_block, lda) to the host, invert it there
// with matinv_row, and copy the result back. Used when delta > 32, i.e. the
// block no longer fits the single-block shared-memory inversion kernel.
static void invert_block_on_host(double* d_block, const int lda, const int delta)
{
	double* cpu_buf = new double [delta * delta];
	CUBLAS_CHECK(cublasGetMatrix(delta, delta, sizeof(*d_block), d_block, lda, cpu_buf, delta));
	matinv_row<int, double>(delta, cpu_buf, delta);
	CUBLAS_CHECK(cublasSetMatrix(delta, delta, sizeof(*d_block), cpu_buf, delta, d_block, lda));
	delete[] cpu_buf;
}

// Extend the explicit inverse held in inv_A (leading dimension lda) from
// last_nrows x last_nrows to new_nrows x new_nrows via the block-inverse
// (Schur complement) formula. On entry the new border blocks A12, A21 and the
// new diagonal block live in inv_A at their natural positions; on exit the
// whole new_nrows x new_nrows leading block of inv_A holds the inverse.
// stream_2 / stream_3 are accepted for signature compatibility but unused.
// NOTE(review): dc_f64 appears to hold device-side gemm scalars
// {[0]=0.0, [1]=1.0, [2]=-1.0} -- inferred from usage, confirm at its definition.
//
// Fixes vs. the original: buf_A12/buf_A21 were cudaMalloc'd and never freed
// (leak per call); several cuBLAS calls were unchecked; the delta > 32 path
// copied K_and_P to the host without waiting for the stream_0 gemm.
void gpu_extend_invA(double* & inv_A, const int lda, const int last_nrows, const int new_nrows,
	cudaStream_t stream_0, cudaStream_t stream_1, cudaStream_t stream_2, cudaStream_t stream_3,
	cublasHandle_t handle)
{
	assert(lda >= new_nrows);
	const int delta = new_nrows - last_nrows;
	// New bottom-right delta x delta block: first the Schur complement, then its inverse.
	double* K_and_P = inv_A + last_nrows * lda + last_nrows;
	dim3 grdSize_mat(1, 1, 1);
	grdSize_mat.x = (delta - 1 + blkSize_mat.x) / blkSize_mat.x;
	grdSize_mat.y = (delta - 1 + blkSize_mat.y) / blkSize_mat.y;

	if (last_nrows == 0) {// nothing to extend: invert the whole block directly and return
		if (delta > 32) {// too large for the shared-memory kernel: invert on the CPU
			// NOTE(review): assumes the producer of K_and_P is already visible to
			// the NULL stream used by cublasGetMatrix -- confirm the caller's stream.
			invert_block_on_host(K_and_P, lda, delta);
		} else 
			device_matinv_row<int, double><<<grdSize_mat, blkSize_mat, 0, stream_0>>>(delta, K_and_P, lda);
		return ;
	}

	const double* A12 = inv_A + last_nrows * lda, * A21 = inv_A + last_nrows;
	double* buf_A12 = nullptr, * buf_A21 = nullptr;

	CUDA_CHECK(cudaMalloc((void**)&buf_A12, sizeof(*A12) * delta * last_nrows));
	CUDA_CHECK(cudaMalloc((void**)&buf_A21, sizeof(*A21) * delta * last_nrows));

	// Two independent products on separate streams.
	CUBLAS_CHECK(cublasSetStream(handle, stream_1));
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, delta, last_nrows, last_nrows,
		&dc_f64[1], A21, lda, inv_A, lda, &dc_f64[0], buf_A21, delta     ));// buf_A21 <- A21 * A11^{-1}

	CUBLAS_CHECK(cublasSetStream(handle, stream_0));
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, last_nrows, delta, last_nrows,
		&dc_f64[1], inv_A, lda, A12, lda, &dc_f64[0], buf_A12, last_nrows));// buf_A12 <- A11^{-1} * A12
	
	// Schur complement: K_and_P <- A22 - A21 * (A11^{-1} * A12), still on stream_0.
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, delta, delta, last_nrows, 
		&dc_f64[2], A21, lda, buf_A12, last_nrows, &dc_f64[1], K_and_P, lda));

	if (delta > 32) {
		// The host round-trip runs on the NULL stream; make sure the
		// Schur-complement gemm on stream_0 has finished first.
		CUDA_CHECK(cudaStreamSynchronize(stream_0));
		invert_block_on_host(K_and_P, lda, delta);
	}
	else 
		device_matinv_row<int, double><<<grdSize_mat, blkSize_mat, 0, stream_0>>>(delta, K_and_P, lda);
	
	CUDA_CHECK(cudaStreamSynchronize(stream_0));// wait for stream 0 to finish inverting K_and_P

	// Off-diagonal blocks of the extended inverse.
	CUBLAS_CHECK(cublasSetStream(handle, stream_1));
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, delta, last_nrows, delta,
		&dc_f64[2], K_and_P, lda, buf_A21, delta, &dc_f64[0], inv_A + last_nrows, lda));// PW

	CUBLAS_CHECK(cublasSetStream(handle, stream_0));
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, last_nrows, delta, delta,
		&dc_f64[2], buf_A12, last_nrows, K_and_P, lda, &dc_f64[0], inv_A + last_nrows * lda, lda));// HK
	
	CUDA_CHECK(cudaStreamSynchronize(stream_1));// wait for stream 1 to finish PW (it overwrites A21 read below? no -- A21 column is the PW target; ordering matters, keep as original)
	// Top-left block update: inv_A <- inv_A + (A11^{-1} A12) * (A21 A11^{-1})-like term, per block-inverse formula.
	CUBLAS_CHECK(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, last_nrows, last_nrows, delta,
		&dc_f64[2], buf_A12, last_nrows, A21, lda, &dc_f64[1], inv_A, lda));// invA + HW

	CUDA_CHECK(cudaStreamSynchronize(stream_0));
	// All gemms consuming the temporaries have completed -- release them
	// (the original leaked both buffers).
	CUDA_CHECK(cudaFree(buf_A12));
	CUDA_CHECK(cudaFree(buf_A21));
	CUBLAS_CHECK(cublasSetStream(handle, NULL));
}

// Build or refresh the GPU-side factorization / inverse used by the solver,
// dispatching on sol_type:
//   1000 : copy d_A (assumed already reordered) into a fresh device buffer
//          and LU-factorize the copy in place with cuSOLVER getrf, then
//          publish it through last_mem (the previous buffer is freed).
//   1022 : incrementally extend an existing block factorization in last_mem.
//   1033 : incrementally extend the explicit inverse in last_inv_A.
// Any other sol_type is a programming error (assert).
// NOTE(review): `streams`, `blkSize_mat`, `device_info()` and the workspace
// helper are project globals defined outside this chunk.
void device_setup(const int sid, const int sol_type, const int pc_type,
	cublasHandle_t blas_handle, cusolverDnHandle_t sol_handle,
	const int nrows, const double* d_A, const int last_nrows, double* & last_mem, const int lda,
	double* & last_inv_LU, double* & last_inv_A)
{
	if (sol_type == 1000) {
		double* d_tmp_mat = nullptr;
		CUDA_CHECK(cudaMalloc((void**)&d_tmp_mat, sizeof(*d_tmp_mat) * nrows * nrows));
		// getrf factorizes in place, so work on a copy and leave d_A intact.
		dim3 grdSize_mat(1, 1, 1);
		grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
		grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
		device_copy_mat<<<grdSize_mat, blkSize_mat, 0, streams[sid]>>>(d_A, nrows, d_tmp_mat, nrows, nrows, nrows);
		CUDA_CHECK(cudaGetLastError());// launch-config errors are only reported here, not by the <<<>>> itself

		int lwork = 0;// query, then grow the shared device workspace if needed
		CUSOLVER_CHECK(cusolverDnSetStream(sol_handle, streams[sid]));
		CUSOLVER_CHECK(cusolverDnDgetrf_bufferSize(sol_handle, nrows, nrows, d_tmp_mat, nrows, &lwork));
		double* dwork = (double*) check_device_workspace_to_enlarge(lwork * sizeof(*d_A));
		// ipiv == nullptr requests LU WITHOUT pivoting (numerically fragile for
		// general matrices), and devInfo is never inspected afterwards, so a
		// singular pivot would go unnoticed -- TODO confirm this is intended.
		CUSOLVER_CHECK(cusolverDnDgetrf(sol_handle, nrows, nrows, d_tmp_mat, nrows, dwork, nullptr, device_info()));
		CUDA_CHECK(cudaFree(last_mem)); last_mem = d_tmp_mat;// publish the new factors, drop the old buffer
		CUSOLVER_CHECK(cusolverDnSetStream(sol_handle, NULL));
	} else if (sol_type == 1022) {
		gpu_extend_block_lda(blas_handle, sol_handle, last_mem, last_nrows, nrows, lda, device_info(), nullptr, streams[sid], streams[sid + 1]);
		
		// if (pc_type == 0) {
		// 	device_extend_invLU_and_invA(last_inv_LU, last_inv_A, last_nrows, lda, last_mem, nrows, lda,
		// 		streams[sid], streams[sid+1], streams[sid+2], streams[sid+3], blas_handle);
		// 	CUDA_CHECK(cudaStreamSynchronize(streams[sid]));
		// 	CUDA_CHECK(cudaStreamSynchronize(streams[sid+1]));
		// 	CUDA_CHECK(cudaStreamSynchronize(streams[sid+2]));
		// 	CUDA_CHECK(cudaStreamSynchronize(streams[sid+3]));
		// }

		// Note: at this point d_A has already been reordered.
		// gpu_extend_block(blas_handle, sol_handle, last_mem, last_nrows, d_tmp_mat, nrows, device_info(), nullptr, streams[sid]);
	} else if (sol_type == 1033) {
		assert(last_inv_LU == nullptr);
		gpu_extend_invA(last_inv_A, lda, last_nrows, nrows,
			streams[sid], streams[sid+1], streams[sid+2], streams[sid+3], blas_handle);
	} 
	else assert(false);

	// if (type == 1022)
	// 	device_extend_invLU_and_invA(last_inv_LU, last_inv_A, last_nrows, d_tmp_mat, nrows, streams[sid], blas_handle);	
}

