#ifdef USE_GPU
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <cassert>
#include "cuda_common.hpp"

#define MAX_THREADS 1024 // maximum number of threads per block
#define WARP_SIZE 32 // number of threads in a warp
#define WARP_NUM ((MAX_THREADS + WARP_SIZE - 1) / WARP_SIZE) // maximum number of warps per block

// CUDA error check: prints the failing line and error string (does not abort).
// Wrapped in do { ... } while (0) so the macro expands to exactly one
// statement; the original brace-only form produced an extra empty statement
// and broke composition with if/else (`if (x) CHECK_CUDA(f); else ...`).
#define CHECK_CUDA(func)                                                       \
do {                                                                           \
    cudaError_t status = (func);                                               \
    if (status != cudaSuccess) {                                               \
        printf("CUDA API failed at line %d with error: %s (%d)\n",             \
               __LINE__, cudaGetErrorString(status), status);                  \
    }                                                                          \
} while (0)

// Copies n elements of data_t between host and device memory.
//   direction: 0 = host -> device, 1 = device -> host, 2 = device -> device.
// Reports CUDA errors via CHECK_CUDA; an unsupported direction code is
// diagnosed and the copy is skipped (assert(false) is a no-op under NDEBUG,
// so an explicit return is required to avoid copying with a garbage kind).
template<typename idx_t, typename data_t>
void cuda_memcpy(data_t * dst, const data_t * src, idx_t n, int direction) {
	cudaMemcpyKind kind;
	switch (direction) {
		case 0:
			kind = cudaMemcpyHostToDevice;
			break;
		case 1:
			kind = cudaMemcpyDeviceToHost;
			break;
		case 2:
			kind = cudaMemcpyDeviceToDevice;
			break;
		default:
			printf("unsupported direction: %d\n", direction);
			assert(false);
			return;
	}
	CHECK_CUDA(cudaMemcpy(dst, src, n * sizeof(data_t), kind));
}

// Allocates n_bytes of device memory into *data.
// Failures (e.g. out of device memory) are reported via CHECK_CUDA instead
// of being silently ignored as before.
template<typename idx_t>
void cuda_malloc(void **data, idx_t n_bytes) {
	CHECK_CUDA(cudaMalloc(data, n_bytes));
}

// Byte-wise fills n_bytes of device memory with val (cudaMemset semantics:
// suitable for zeroing, not for non-zero float fills).
// Failures are reported via CHECK_CUDA instead of being silently ignored.
template<typename idx_t>
void cuda_memset(void *data, int val, idx_t n_bytes) {
	CHECK_CUDA(cudaMemset(data, val, n_bytes));
}

// Frees device memory previously allocated with cuda_malloc.
// Failures (e.g. invalid pointer) are reported via CHECK_CUDA instead of
// being silently ignored.
void cuda_free(void *data) {
	CHECK_CUDA(cudaFree(data));
}

// Grid-stride dot product of two length-n device vectors.
// Each thread accumulates a partial sum; partials are reduced inside each
// warp with __shfl_xor_sync, per-warp totals are staged in dynamic shared
// memory, warp 0 reduces those, and lane 0 of warp 0 atomicAdds the block
// total into *ans_data.
// Preconditions (satisfied by the host wrapper cuda_vec_dot below):
//   - launched with blockDim.x == MAX_THREADS, so every one of the WARP_NUM
//     shared slots is written before warp 0 reads them;
//   - dynamic shared memory of WARP_NUM * sizeof(res_t) bytes;
//   - *ans_data zeroed before launch (the result is accumulated into it).
// NOTE: atomicAdd on a double requires SM60 or newer.
template<typename idx_t, typename data_t, typename res_t>
__global__ void kernel_vec_dot(const data_t * x_data, const data_t * y_data, res_t * ans_data, const idx_t n) {
	// Thread indices: position in block, owning warp, lane within the warp.
	int tid = threadIdx.x;
	int warp_id = tid / WARP_SIZE;
	int lane_id = tid % WARP_SIZE;
	idx_t idx = blockIdx.x * blockDim.x + tid;

	// Dynamic shared memory: one partial result slot per warp.
	extern __shared__ __align__(sizeof(res_t)) unsigned char my_smem[];
	res_t *warp_val = reinterpret_cast<res_t *>(my_smem);

	// Each thread accumulates a strided slice of the element-wise products.
	res_t val = 0;
    for (; idx < n; idx += blockDim.x * gridDim.x) {
		val += ((res_t) x_data[idx]) * ((res_t) y_data[idx]);
    }

	// Butterfly (XOR) reduction within each warp; all 32 lanes participate.
	val += __shfl_xor_sync(0xffffffff, val, 16);
    val += __shfl_xor_sync(0xffffffff, val, 8);
    val += __shfl_xor_sync(0xffffffff, val, 4);
    val += __shfl_xor_sync(0xffffffff, val, 2);
    val += __shfl_xor_sync(0xffffffff, val, 1);

	// Lane 0 of every warp publishes its warp's total to shared memory.
	if (lane_id == 0) warp_val[warp_id] = val;
	__syncthreads();

	// Warp 0 reduces the per-warp totals to the block total.
	if (warp_id == 0) {
		val = (lane_id < WARP_NUM) ? warp_val[lane_id] : 0; // lanes without a slot contribute 0
		val += __shfl_xor_sync(0xffffffff, val, 16);
		val += __shfl_xor_sync(0xffffffff, val, 8);
		val += __shfl_xor_sync(0xffffffff, val, 4);
		val += __shfl_xor_sync(0xffffffff, val, 2);
		val += __shfl_xor_sync(0xffffffff, val, 1);
 		if (lane_id == 0) {
			atomicAdd(ans_data, val); // one atomic per block into the global accumulator
		}
	}
}

// Fused scaled add: dst[i] = src1[i] + alpha * src2[i] for i in [0, n).
// One thread per element; launch with ceil(n / blockDim.x) blocks.
template<typename idx_t, typename data_t, typename scalar_t>
__global__ void kernel_vec_add(data_t * dst, const data_t * src1, const data_t * src2, const scalar_t alpha, const idx_t n) {
	// Widen to idx_t before multiplying so the flat index cannot be
	// truncated when idx_t is wider than int (e.g. long long n).
	idx_t idx = (idx_t) blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < n) {
		dst[idx] = src1[idx] + alpha * src2[idx];
	}
}

// Scales a vector: dst[i] = src[i] * coeff for i in [0, n).
// One thread per element; launch with ceil(n / blockDim.x) blocks.
template<typename idx_t, typename data_t, typename scalar_t>
__global__ void kernel_vec_scale(data_t * dst, const data_t * src, const scalar_t coeff, const idx_t n) {
	// Widen to idx_t before multiplying so the flat index cannot be
	// truncated when idx_t is wider than int.
	idx_t idx = (idx_t) blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < n) {
		dst[idx] = src[idx] * coeff;
	}
}

// Element-wise copy with implicit conversion between data_t2 and data_t1
// (used for precision changes, e.g. double -> float).
// One thread per element; launch with ceil(n / blockDim.x) blocks.
template<typename idx_t, typename data_t1, typename data_t2>
__global__ void kernel_vec_copy(data_t1 * dst, const data_t2 * src, const idx_t n) {
	// Widen to idx_t before multiplying so the flat index cannot be
	// truncated when idx_t is wider than int.
	idx_t idx = (idx_t) blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < n) {
		dst[idx] = src[idx];
	}
}

// Element-wise (Hadamard) product: dst[i] = src[i] * coeff[i] for i in [0, n).
// One thread per element; launch with ceil(n / blockDim.x) blocks.
template<typename idx_t, typename data_t, typename calc_t>
__global__ void kernel_vec_elem_mul(calc_t * dst, const calc_t * src, const data_t * coeff, const idx_t n) {
	// Widen to idx_t before multiplying so the flat index cannot be
	// truncated when idx_t is wider than int.
	idx_t idx = (idx_t) blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < n) {
		dst[idx] = src[idx] * coeff[idx];
	}
}

// Extracts the reciprocal of each diagonal entry of a CSR matrix into diag,
// so later sweeps can use multiplication instead of division.
// One thread per row; a row with no stored diagonal entry leaves diag[idx]
// untouched (NOTE(review): presumably every row has one — confirm upstream).
template<typename idx_t, typename data_t, typename setup_t>
__global__ void kernel_separate_invD(data_t * diag, const idx_t * row_ptr, const idx_t * col_idx, const setup_t * vals, const idx_t n) {
	// Widen to idx_t before multiplying so the flat row index cannot be
	// truncated when idx_t is wider than int.
	idx_t idx = (idx_t) blockIdx.x * blockDim.x + threadIdx.x;
	constexpr setup_t one = 1.0;
	if (idx < n) {
		idx_t begin = row_ptr[idx], end = row_ptr[idx + 1];
		for (idx_t j = begin; j < end; j++) {
			if (idx == col_idx[j]) {
				// Store 1 / a_ii, converted to the working precision.
				diag[idx] = (data_t) (one / vals[j]);
				return;
			}
		}
	}
}

// Naive CSR sparse matrix-vector product: y = alpha * A * x + beta * b.
// One thread per matrix row; each thread walks its row's nonzeros serially.
template<typename idx_t, typename data_t, typename calc_t>
__global__ void kernel_spmv(const idx_t * row_ptr, const idx_t * col_idx, const data_t * vals, const calc_t * x, const calc_t * b, calc_t * y, const calc_t alpha, const calc_t beta, const idx_t n) {
	// Widen to idx_t before multiplying so the flat row index cannot be
	// truncated when idx_t is wider than int.
	idx_t idx = (idx_t) blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < n) {
		calc_t tmp = 0.0;
		idx_t begin = row_ptr[idx], end = row_ptr[idx + 1];
		for (idx_t j = begin; j < end; j++) {
			tmp += vals[j] * x[col_idx[j]]; // gather: x accesses are irregular
		}
		y[idx] = alpha * tmp + beta * b[idx];
	}
}

// Device-global accumulators for cuda_vec_dot, one per result precision;
// zeroed via cudaMemcpyToSymbol before each reduction launch.
__device__ float d_dot_f32; 
__device__ double d_dot_f64;

// Computes the dot product of two length-n device vectors and returns it on
// the host. The kernel accumulates into a __device__ symbol selected by the
// width of res_t (d_dot_f32 or d_dot_f64), which is zeroed before launch;
// the blocking cudaMemcpyFromSymbol afterwards also synchronizes with the
// kernel. NOTE: atomicAdd on double (8-byte res_t) requires SM60+.
template<typename idx_t, typename data_t, typename res_t>
res_t cuda_vec_dot(const data_t * x_data, const data_t * y_data, const idx_t n) {
	// Previously an unsupported res_t silently returned 0.0; fail at compile
	// time instead.
	static_assert(sizeof(res_t) == 4 || sizeof(res_t) == 8,
	              "cuda_vec_dot supports only 4-byte and 8-byte result types");
	dim3 block_size(MAX_THREADS, 1, 1);
	// Fixed grid: enough blocks to fill the SMs while keeping the block count
	// low to reduce contention on the single atomicAdd target.
	// NOTE(review): 80 * 2 assumes an 80-SM device (e.g. V100) — confirm for
	// other GPUs.
	dim3 grid_size(80 * 2, 1, 1);
	res_t dot = 0.0;

	if constexpr (sizeof(res_t) == 4) {
		CHECK_CUDA(cudaMemcpyToSymbol(d_dot_f32, &dot, sizeof(res_t))); // zero the accumulator
		float *dp = nullptr;
		CHECK_CUDA(cudaGetSymbolAddress((void**)&dp, d_dot_f32));
		kernel_vec_dot<<<grid_size, block_size, WARP_NUM * sizeof(res_t)>>>(x_data, y_data, dp, n);
		CHECK_CUDA(cudaMemcpyFromSymbol(&dot, d_dot_f32, sizeof(res_t))); // blocking: syncs with kernel
	} else {
		CHECK_CUDA(cudaMemcpyToSymbol(d_dot_f64, &dot, sizeof(res_t))); // zero the accumulator
		double *dp = nullptr;
		CHECK_CUDA(cudaGetSymbolAddress((void**)&dp, d_dot_f64));
		kernel_vec_dot<<<grid_size, block_size, WARP_NUM * sizeof(res_t)>>>(x_data, y_data, dp, n);
		CHECK_CUDA(cudaMemcpyFromSymbol(&dot, d_dot_f64, sizeof(res_t))); // blocking: syncs with kernel
	}
	return dot;
}

// Launches kernel_vec_add: dst = src1 + alpha * src2 over n elements.
template<typename idx_t, typename data_t, typename scalar_t>
void cuda_vec_add(data_t * dst, const data_t * src1, const data_t * src2, const scalar_t alpha, const idx_t n) {
	const idx_t n_blocks = (n + MAX_THREADS - 1) / MAX_THREADS; // ceil(n / MAX_THREADS)
	kernel_vec_add<<<dim3(n_blocks, 1, 1), dim3(MAX_THREADS, 1, 1)>>>(dst, src1, src2, alpha, n);
}

// Launches kernel_vec_scale: dst[i] = src[i] * coeff over n elements.
template<typename idx_t, typename data_t, typename scalar_t>
void cuda_vec_scale(data_t * dst, const data_t * src, const scalar_t coeff, const idx_t n) {
	const idx_t n_blocks = (n + MAX_THREADS - 1) / MAX_THREADS; // ceil(n / MAX_THREADS)
	kernel_vec_scale<<<dim3(n_blocks, 1, 1), dim3(MAX_THREADS, 1, 1)>>>(dst, src, coeff, n);
}

// Launches kernel_vec_copy: element-wise copy (with conversion) of n elements.
template<typename idx_t, typename data_t1, typename data_t2>
void cuda_vec_copy(data_t1 * dst, const data_t2 * src, const idx_t n) {
	const idx_t n_blocks = (n + MAX_THREADS - 1) / MAX_THREADS; // ceil(n / MAX_THREADS)
	kernel_vec_copy<<<dim3(n_blocks, 1, 1), dim3(MAX_THREADS, 1, 1)>>>(dst, src, n);
}

// Launches kernel_vec_elem_mul: dst[i] = src[i] * coeff[i] over n elements.
template<typename idx_t, typename data_t, typename calc_t>
void cuda_vec_elem_mul(calc_t * dst, const calc_t * src, const data_t * coeff, const idx_t n) {
	const idx_t n_blocks = (n + MAX_THREADS - 1) / MAX_THREADS; // ceil(n / MAX_THREADS)
	kernel_vec_elem_mul<<<dim3(n_blocks, 1, 1), dim3(MAX_THREADS, 1, 1)>>>(dst, src, coeff, n);
}

// Launches kernel_separate_invD: stores 1 / a_ii of the CSR matrix into diag.
template<typename idx_t, typename data_t, typename setup_t>
void cuda_separate_invD(data_t * diag, const idx_t * row_ptr, const idx_t * col_idx, const setup_t * vals, const idx_t n) {
	const idx_t n_blocks = (n + MAX_THREADS - 1) / MAX_THREADS; // one thread per row
	kernel_separate_invD<<<dim3(n_blocks, 1, 1), dim3(MAX_THREADS, 1, 1)>>>(diag, row_ptr, col_idx, vals, n);
}

// Launches the naive CSR SpMV kernel: y = alpha * A * x + beta * b.
template<typename idx_t, typename data_t, typename calc_t>
void cuda_spmv(const idx_t * row_ptr, const idx_t * col_idx, const data_t * vals, const calc_t * x, const calc_t * b, calc_t * y, const calc_t alpha, const calc_t beta, const idx_t n) {
	const idx_t n_blocks = (n + MAX_THREADS - 1) / MAX_THREADS; // one thread per row
	kernel_spmv<<<dim3(n_blocks, 1, 1), dim3(MAX_THREADS, 1, 1)>>>(row_ptr, col_idx, vals, x, b, y, alpha, beta, n);
}

// Fused first half of a CG iteration (one launch instead of five):
//   x += alpha * p;
//   r -= alpha * s;
//   u  = diag * r;
//   tmp_glb[0] += r^T * u;   (via atomicAdd)
//   tmp_glb[1] += r^T * r;   (via atomicAdd)
// diag holds per-row scale factors (presumably the inverse diagonal produced
// by kernel_separate_invD — confirm at the call site).
// Preconditions: blockDim.x == MAX_THREADS; dynamic shared memory of
// 2 * WARP_NUM * sizeof(ksp_t) bytes; results are accumulated atomically, so
// the caller presumably zeroes tmp_glb[0..1] before launch — confirm.
// NOTE(review): idx is int — fine for the int instantiations below; widen if
// n may exceed INT_MAX.
template<typename idx_t, typename data_t, typename calc_t, typename ksp_t>
__global__ void kernel_CG_kernel_1(calc_t * x, const calc_t alpha, calc_t * p, calc_t * r, calc_t * s, calc_t * u, const data_t * diag, ksp_t * tmp_glb, const idx_t n) {
	int tid = threadIdx.x;
	int idx = blockIdx.x * blockDim.x + tid;
	int warp_id = tid / WARP_SIZE;
	int lane_id = tid % WARP_SIZE;
	// Dynamic shared memory: two per-warp partial arrays, one per dot product.
	extern __shared__ __align__(sizeof(ksp_t)) unsigned char my_smem[];
	ksp_t *warp_val_0 = reinterpret_cast<ksp_t *>(my_smem);
	ksp_t *warp_val_1 = warp_val_0 + WARP_NUM;

	ksp_t val_0 = 0, val_1 = 0;
	// Grid-stride loop: update x/r/u and accumulate both dot-product partials.
	for (; idx < n; idx += blockDim.x * gridDim.x) {
		x[idx] += alpha * p[idx];
		calc_t rr = r[idx] - alpha * s[idx];
		r[idx] = rr;
		calc_t uu = diag[idx] * rr;
		u[idx] = uu;

		// Per-thread partial dot products (accumulated in ksp_t precision).
		val_0 += ((ksp_t) rr) * ((ksp_t) uu);
		val_1 += ((ksp_t) rr) * ((ksp_t) rr);
	}

	// Butterfly (XOR) reduction of both partials within each warp.
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 16);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 16);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 8);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 8);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 4);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 4);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 2);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 2);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 1);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 1);

	// Lane 0 of every warp publishes its warp's totals.
	if (lane_id == 0) {
		warp_val_0[warp_id] = val_0;
		warp_val_1[warp_id] = val_1;
	}

	__syncthreads();

	// Warp 0 reduces the per-warp totals to the block totals.
	if (warp_id == 0) {
		val_0 = (lane_id < WARP_NUM) ? warp_val_0[lane_id] : 0;
		val_1 = (lane_id < WARP_NUM) ? warp_val_1[lane_id] : 0;

		val_0 += __shfl_xor_sync(0xffffffff, val_0, 16);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 16);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 8);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 8);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 4);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 4);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 2);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 2);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 1);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 1);

		// One atomic per block per accumulator.
		if (lane_id == 0) {
			atomicAdd(tmp_glb, val_0);
			atomicAdd(tmp_glb + 1, val_1);
		}
	}
}

// Fused dot products for a CG iteration; accumulates via atomicAdd:
//   tmp_glb[0] += r . u
//   tmp_glb[1] += s . p
//   tmp_glb[2] += b . b
//   tmp_glb[3] += r . r
// Preconditions: blockDim.x == MAX_THREADS; dynamic shared memory of
// 4 * WARP_NUM * sizeof(ksp_t) bytes; caller presumably zeroes tmp_glb[0..3]
// before launch — confirm at the call site.
template<typename idx_t, typename calc_t, typename ksp_t>
__global__ void kernel_CG_kernel_2(calc_t * u, calc_t * r, calc_t * s, calc_t * p, const calc_t * b, ksp_t * tmp_glb, const idx_t n) {
	int tid = threadIdx.x;
	int idx = blockIdx.x * blockDim.x + tid;
	int warp_id = tid / WARP_SIZE;
	int lane_id = tid % WARP_SIZE;
	// Dynamic shared memory: four per-warp partial arrays, one per dot product.
	extern __shared__ __align__(sizeof(ksp_t)) unsigned char my_smem[];
	ksp_t *warp_val_0 = reinterpret_cast<ksp_t *>(my_smem);
	ksp_t *warp_val_1 = warp_val_0 + WARP_NUM;
	ksp_t *warp_val_2 = warp_val_1 + WARP_NUM;
	ksp_t *warp_val_3 = warp_val_2 + WARP_NUM;

	ksp_t val_0 = 0, val_1 = 0, val_2 = 0, val_3 = 0;
	// Grid-stride loop: accumulate all four dot-product partials per thread.
	for (; idx < n; idx += blockDim.x * gridDim.x) {
		ksp_t rr = r[idx];
		ksp_t bb = b[idx];
		val_0 += rr * ((ksp_t) u[idx]);
		val_1 += ((ksp_t) s[idx]) * ((ksp_t) p[idx]);
		val_2 += bb * bb;
		val_3 += rr * rr;
	}

	// Butterfly (XOR) reduction of all four partials within each warp.
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 16);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 16);
	val_2 += __shfl_xor_sync(0xffffffff, val_2, 16);
	val_3 += __shfl_xor_sync(0xffffffff, val_3, 16);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 8);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 8);
	val_2 += __shfl_xor_sync(0xffffffff, val_2, 8);
	val_3 += __shfl_xor_sync(0xffffffff, val_3, 8);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 4);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 4);
	val_2 += __shfl_xor_sync(0xffffffff, val_2, 4);
	val_3 += __shfl_xor_sync(0xffffffff, val_3, 4);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 2);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 2);
	val_2 += __shfl_xor_sync(0xffffffff, val_2, 2);
	val_3 += __shfl_xor_sync(0xffffffff, val_3, 2);
	val_0 += __shfl_xor_sync(0xffffffff, val_0, 1);
	val_1 += __shfl_xor_sync(0xffffffff, val_1, 1);
	val_2 += __shfl_xor_sync(0xffffffff, val_2, 1);
	val_3 += __shfl_xor_sync(0xffffffff, val_3, 1);

	// Lane 0 of every warp publishes its warp's totals.
	if (lane_id == 0) {
		warp_val_0[warp_id] = val_0;
		warp_val_1[warp_id] = val_1;
		warp_val_2[warp_id] = val_2;
		warp_val_3[warp_id] = val_3;
	}

	__syncthreads();

	// Warp 0 reduces the per-warp totals to the block totals.
	if (warp_id == 0) {
		val_0 = (lane_id < WARP_NUM) ? warp_val_0[lane_id] : 0;
		val_1 = (lane_id < WARP_NUM) ? warp_val_1[lane_id] : 0;
		val_2 = (lane_id < WARP_NUM) ? warp_val_2[lane_id] : 0;
		val_3 = (lane_id < WARP_NUM) ? warp_val_3[lane_id] : 0;

		val_0 += __shfl_xor_sync(0xffffffff, val_0, 16);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 16);
		val_2 += __shfl_xor_sync(0xffffffff, val_2, 16);
		val_3 += __shfl_xor_sync(0xffffffff, val_3, 16);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 8);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 8);
		val_2 += __shfl_xor_sync(0xffffffff, val_2, 8);
		val_3 += __shfl_xor_sync(0xffffffff, val_3, 8);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 4);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 4);
		val_2 += __shfl_xor_sync(0xffffffff, val_2, 4);
		val_3 += __shfl_xor_sync(0xffffffff, val_3, 4);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 2);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 2);
		val_2 += __shfl_xor_sync(0xffffffff, val_2, 2);
		val_3 += __shfl_xor_sync(0xffffffff, val_3, 2);
		val_0 += __shfl_xor_sync(0xffffffff, val_0, 1);
		val_1 += __shfl_xor_sync(0xffffffff, val_1, 1);
		val_2 += __shfl_xor_sync(0xffffffff, val_2, 1);
		val_3 += __shfl_xor_sync(0xffffffff, val_3, 1);

		// One atomic per block per accumulator.
		if (lane_id == 0) {
			atomicAdd(tmp_glb, val_0);
			atomicAdd(tmp_glb + 1, val_1);
			atomicAdd(tmp_glb + 2, val_2);
			atomicAdd(tmp_glb + 3, val_3);
		}
	}
}

// Host launcher for the fused CG first-half kernel.
// tmp_glb is device memory; the kernel accumulates into it with atomicAdd,
// so the caller presumably zeroes tmp_glb[0..1] first — confirm at call site.
// Example call: CG_kernel_1(x.data, alpha, p.data, r.data, s.data, u.data, jacobi->diag, tmp_glb);
template<typename idx_t, typename data_t, typename calc_t, typename ksp_t>
void CG_kernel_1(calc_t * x, const calc_t alpha, calc_t * p, calc_t * r, calc_t * s, calc_t * u, const data_t * diag, ksp_t * tmp_glb, const idx_t n) {
	const size_t smem_bytes = (size_t) 2 * WARP_NUM * sizeof(ksp_t); // two per-warp partial arrays
	// Fixed grid: fill the SMs with few blocks to limit atomic contention
	// (the kernel uses a grid-stride loop, so any n is covered).
	kernel_CG_kernel_1<<<dim3(80 * 2, 1, 1), dim3(MAX_THREADS, 1, 1), smem_bytes>>>(x, alpha, p, r, s, u, diag, tmp_glb, n);
}

// Host launcher for the fused four-way dot-product kernel.
// tmp_glb is device memory; the kernel accumulates into it with atomicAdd,
// so the caller presumably zeroes tmp_glb[0..3] first — confirm at call site.
// Example call: CG_kernel_2(u.data, r.data, s.data, p.data, b.data, d_tmp_glb, loc_nrows);
template<typename idx_t, typename calc_t, typename ksp_t>
void CG_kernel_2(calc_t * u, calc_t * r, calc_t * s, calc_t * p, const calc_t * b, ksp_t * tmp_glb, const idx_t n) {
	const size_t smem_bytes = (size_t) 4 * WARP_NUM * sizeof(ksp_t); // four per-warp partial arrays
	// Fixed grid: fill the SMs with few blocks to limit atomic contention
	// (the kernel uses a grid-stride loop, so any n is covered).
	kernel_CG_kernel_2<<<dim3(80 * 2, 1, 1), dim3(MAX_THREADS, 1, 1), smem_bytes>>>(u, r, s, p, b, tmp_glb, n);
}


// Explicit instantiations of the host wrappers above, so callers in other
// translation units can link against them without seeing the definitions.
template void cuda_memcpy<int, int>(int * dst, const int * src, int n, int direction);
template void cuda_memcpy<int, float>(float * dst, const float * src, int n, int direction);
template void cuda_memcpy<int, double>(double * dst, const double * src, int n, int direction);
template void cuda_memcpy<long long, int>(int * dst, const int * src, long long n, int direction);
template void cuda_memcpy<long long, float>(float * dst, const float * src, long long n, int direction);
template void cuda_memcpy<long long, double>(double * dst, const double * src, long long n, int direction);

template void cuda_malloc<int>(void **data, int n_bytes);
template void cuda_malloc<long long>(void **data, long long n_bytes);
template void cuda_malloc<unsigned long>(void **data, unsigned long n_bytes);
template void cuda_malloc<unsigned long long>(void **data, unsigned long long n_bytes);

template void cuda_memset<int>(void *data, int val, int n_bytes);
template void cuda_memset<long long>(void *data, int val, long long n_bytes);
template void cuda_memset<unsigned long>(void *data, int val, unsigned long n_bytes);
template void cuda_memset<unsigned long long>(void *data, int val, unsigned long long n_bytes);

template float cuda_vec_dot<int, float, float>(const float * x_data, const float * y_data, const int n);
template double cuda_vec_dot<int, float, double>(const float * x_data, const float * y_data, const int n);
template double cuda_vec_dot<int, double, double>(const double * x_data, const double * y_data, const int n);

template void cuda_vec_add<int, float, float>(float * dst, const float * src1, const float * src2, const float alpha, const int n);
template void cuda_vec_add<int, float, double>(float * dst, const float * src1, const float * src2, const double alpha, const int n);
template void cuda_vec_add<int, double, double>(double * dst, const double * src1, const double * src2, const double alpha, const int n);

template void cuda_vec_scale<int, float, float>(float * dst, const float * src, const float coeff, const int n);
template void cuda_vec_scale<int, float, double>(float * dst, const float * src, const double coeff, const int n);
template void cuda_vec_scale<int, double, double>(double * dst, const double * src, const double coeff, const int n);

template void cuda_vec_copy<int, float, float>(float * dst, const float * src, const int n);
template void cuda_vec_copy<int, float, double>(float * dst, const double * src, const int n);
template void cuda_vec_copy<int, double, float>(double * dst, const float * src, const int n);
template void cuda_vec_copy<int, double, double>(double * dst, const double * src, const int n);

template void cuda_vec_elem_mul<int, float, float>(float * dst, const float * src, const float * coeff, const int n);
template void cuda_vec_elem_mul<int, float, double>(double * dst, const double * src, const float * coeff, const int n);
template void cuda_vec_elem_mul<int, double, double>(double * dst, const double * src, const double * coeff, const int n);

template void cuda_separate_invD<int, float, float>(float * diag, const int * row_ptr, const int * col_idx, const float * vals, const int n);
template void cuda_separate_invD<int, float, double>(float * diag, const int * row_ptr, const int * col_idx, const double * vals, const int n);
template void cuda_separate_invD<int, double, double>(double * diag, const int * row_ptr, const int * col_idx, const double * vals, const int n);

template void cuda_spmv<int, float, float>(const int * row_ptr, const int * col_idx, const float * vals, const float * x, const float * b, float * y, const float alpha, const float beta, const int n);
template void cuda_spmv<int, float, double>(const int * row_ptr, const int * col_idx, const float * vals, const double * x, const double * b, double * y, const double alpha, const double beta, const int n);
template void cuda_spmv<int, double, double>(const int * row_ptr, const int * col_idx, const double * vals, const double * x, const double * b, double * y, const double alpha, const double beta, const int n);

template void CG_kernel_1<int, float, float, float>(float * x, const float alpha, float * p, float * r, float * s, float * u, const float * diag, float * tmp_glb, const int n);
template void CG_kernel_1<int, double, double, double>(double * x, const double alpha, double * p, double * r, double * s, double * u, const double * diag, double * tmp_glb, const int n);

template void CG_kernel_2<int, float, float>(float * u, float * r, float * s, float * p, const float * b, float * tmp_glb, const int n);
template void CG_kernel_2<int, double, double>(double * u, double * r, double * s, double * p, const double * b, double * tmp_glb, const int n);

#endif