#ifndef __GPU_HELPER_CUH__
#define __GPU_HELPER_CUH__

#include <atomic>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <vector>
#include <omp.h>
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
#include "cublas_v2.h"
#include "cusolverDn.h"
#include "device_launch_parameters.h"
#include "thrust/device_vector.h"
#include "common.h"

// CUDA runtime API error checking.
// Evaluates `err` exactly once; on failure reports the numeric code plus the
// human-readable message from cudaGetErrorString on stderr (diagnostics must
// not pollute stdout) and throws std::runtime_error. Usable as one statement.
#define CUDA_CHECK(err)                                                                            \
    do {                                                                                           \
        cudaError_t err_ = (err);                                                                  \
        if (err_ != cudaSuccess) {                                                                 \
            fprintf(stderr, "CUDA error %d (%s) at %s:%d\n", err_,                                 \
                    cudaGetErrorString(err_), __FILE__, __LINE__);                                 \
            throw std::runtime_error("CUDA error");                                                \
        }                                                                                          \
    } while (0)

// cuSOLVER API error checking.
// Evaluates `err` exactly once; on failure reports the cusolverStatus_t code
// on stderr (not stdout, so program output stays clean) and throws
// std::runtime_error. Usable as a single statement.
#define CUSOLVER_CHECK(err)                                                                        \
    do {                                                                                           \
        cusolverStatus_t err_ = (err);                                                             \
        if (err_ != CUSOLVER_STATUS_SUCCESS) {                                                     \
            fprintf(stderr, "cusolver error %d at %s:%d\n", err_, __FILE__, __LINE__);             \
            throw std::runtime_error("cusolver error");                                            \
        }                                                                                          \
    } while (0)

// cuBLAS API error checking.
// Evaluates `err` exactly once; on failure reports the cublasStatus_t code
// on stderr (not stdout, so program output stays clean) and throws
// std::runtime_error. Usable as a single statement.
#define CUBLAS_CHECK(err)                                                                          \
    do {                                                                                           \
        cublasStatus_t err_ = (err);                                                               \
        if (err_ != CUBLAS_STATUS_SUCCESS) {                                                       \
            fprintf(stderr, "cublas error %d at %s:%d\n", err_, __FILE__, __LINE__);               \
            throw std::runtime_error("cublas error");                                              \
        }                                                                                          \
    } while (0)

// Default launch configurations: 1-D 1024-thread blocks for vector kernels,
// 32x32 blocks for matrix kernels.
const dim3 blkSize_vec(1024, 1, 1);
const dim3 blkSize_mat(32, 32, 1);
// One-time GPU setup / teardown hooks; bodies live in the matching .cu file.
void init_GPU_query();
void init_GPU_coeff();
void clear_GPU_coeff();

// Host<->device transfer helpers. TEST_TIME (declared in common.h, presumably)
// appears to accumulate timing info — confirm semantics in the .cu file.
double* create_copy_to_device(const double* h_A, const int len, TEST_TIME & record);

// NOTE(review): `prev_hfs` suggests a reordering map is derived from previous
// halo/freedom indices — verify against the definition before relying on it.
void create_reorder_copyPart_to_device(const double* h_A, const double* h_rhs, const int nrows,
	const double* d_last_A, const int last_nrows, TEST_TIME & record, const std::vector<int> & prev_hfs,
    double * & d_A, double * & d_rhs);
void copy_to_host(const double* d_A, double* h_A, const int len, TEST_TIME & record);
void device_free(void* d_A);
void* check_device_workspace_to_enlarge(const uint64_t required_bytes);
int* device_info();
// Incremental matrix/vector growth and update helpers. `sid` looks like a
// stream/solver id passed throughout — TODO confirm in the .cu file.
void device_expand_mat(double* & old_A, const size_t old_lda, const int last_nrows, const size_t new_lda, const int sid);
void device_expand_vec(double* & vec, const size_t new_len, const int sid);
void device_update_mat(double* & d_A, const size_t dst_lda, const double* src_A, const size_t src_lda, 
    const int last_nrows, const int src_nrows, const int sid, double* hbuf);
void device_update_mat_part(double* & d_A, const size_t dst_lda, const double* src_A, const size_t src_lda, 
    const int last_nrows, const int src_nrows, const int sid, double* hbuf);
void create_copyPart_to_device        (const double* h_A, const int nrows, double* & d_last_A, const int last_nrows, 
                        const int sid);
// Overload taking an explicit new-to-old index map (`map_n2o`) and device lda.
void create_reorder_copyPart_to_device(const double* h_A, const int nrows,
	double* & d_A, const int last_nrows, const int d_lda,
	const int* map_n2o, const int sid);
// Solver/preconditioner setup on device; reuses cuBLAS/cuSOLVER handles
// supplied by the caller (handles should be created once and reused).
void device_setup(const int sid, const int sol_type, const int pc_type, cublasHandle_t blas_handle, cusolverDnHandle_t sol_handle,
	const int nrows, const double* d_A, const int last_nrows, double* & last_mem, const int lda,
	double* & last_inv_LU, double* & last_inv_A);
// Extends cached inverse factors using four caller-owned streams, allowing
// the implementation to overlap independent updates.
void device_extend_invLU_and_invA(
	double* & inv_LU, double* & inv_A, const int last_nrows, const int inv_lda,
	const double* new_LU, const int new_nrows, const int LU_lda,
	cudaStream_t stream_0, cudaStream_t stream_1, cudaStream_t stream_2, cudaStream_t stream_3,
	cublasHandle_t handle);

// Permutation helpers: gather `old_*` into `new_*` through an index `map`.
void device_reorder_vec(const int nrows, const double* old_vec, double* new_vec, const int* map, const int sid);
void device_reorder_mat(const int nrows, const double* old_mat, const int old_lda,
	double* new_mat, const int new_lda, const int* map, const int sid);

// Ad-hoc benchmark of multi-stream execution (debug/profiling use).
void test_stream(int num_arr, int M, int N, int K, int nt);

// Computes the reciprocal of the scalar at `src` into `dst`.
// Intended for a single-thread launch (<<<1,1>>>) — every launched thread
// performs the same write. data_t(1) instead of the double literal 1.0 keeps
// float instantiations in single precision (no silent promotion to double).
template<typename data_t>
__global__ void device_scalar_inv(const data_t * src, data_t * dst) {
    *dst = data_t(1) / *src;
}

// Writes the negation of the scalar at `src` into `dst`.
// Intended for a single-thread launch (<<<1,1>>>).
template<typename data_t>
__global__ void device_scalar_minus(const data_t * src, data_t * dst) {
    const data_t value = *src;
    *dst = -value;
}

// In-place accumulation of one device scalar into another: *dst += *src.
// Intended for a single-thread launch (<<<1,1>>>).
template<typename data_t>
__global__ void device_scalar_self_add(const data_t * src, data_t * dst) {
    const data_t addend = *src;
    *dst = *dst + addend;
}

// Debug helper: prints `len` device-resident values to stdout from within a
// kernel. Every launched thread executes the full loop, so this should be
// launched as <<<1,1>>>. `newline` is currently unused (the column-wrapping
// line is commented out). Device printf is slow — debugging only.
template<typename data_t>
__global__ void device_scalar_print(const data_t * src, const int len, const int newline) {
    for (int idx = 0; idx < len; ++idx) {
        // if (idx % newline == 0) printf("\n");
        if constexpr (std::is_same<data_t, int>::value)
            printf("%d ", src[idx]);
        else
            printf("%.6e ", src[idx]);
    }
    printf("\n");
}

// Copies a len_i x len_j column-major submatrix from `src` (leading dim
// src_lda) to `dst` (leading dim dst_lda), converting element type. One
// thread per element: x covers rows (i, coalesced), y covers columns (j).
// Index arithmetic is widened to size_t before multiplying by the leading
// dimension — `j * lda + i` in int overflows for matrices >= 2^31 elements
// (the host-side helpers above already take size_t ldas).
template<typename data_t1, typename data_t2> __global__
void device_copy_mat(const data_t1* src, int src_lda, data_t2* dst, int dst_lda, int len_j, int len_i)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (j < len_j && i < len_i)
        dst[(size_t)j * dst_lda + i] = src[(size_t)j * src_lda + i];
}
// Same layout as device_copy_mat, but writes the negated elements.
// Index arithmetic widened to size_t to avoid 32-bit overflow on large
// matrices (see device_copy_mat).
template<typename data_t1, typename data_t2> __global__
void device_copy_mat_neg(const data_t1* src, int src_lda, data_t2* dst, int dst_lda, int len_j, int len_i)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (j < len_j && i < len_i)
        dst[(size_t)j * dst_lda + i] = - src[(size_t)j * src_lda + i];
}
// Writes the negated TRANSPOSE of the len_i x len_j source submatrix:
// dst(j, i) = -src(i, j). Reads are coalesced; writes are strided (the usual
// cost of an untiled transpose). Index arithmetic widened to size_t to avoid
// 32-bit overflow on large matrices (see device_copy_mat).
template<typename data_t1, typename data_t2> __global__
void device_copy_mat_neg_transpose(const data_t1* src, int src_lda, data_t2* dst, int dst_lda, int len_j, int len_i)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (j < len_j && i < len_i)
        dst[(size_t)i * dst_lda + j] = - src[(size_t)j * src_lda + i];
}


// Element-wise vector copy with implicit conversion from data_t1 to data_t2.
// One thread per element; out-of-range threads exit early.
template<typename data_t1, typename data_t2> __global__
void device_copy_vec(int nrows, const data_t1 * src, data_t2 * dst) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nrows)
        return;
    dst[idx] = src[idx];
}

// Zero-fills the first `nrows` elements of `vec`, one thread per element.
// data_t(0) instead of the double literal 0.0 so integral instantiations
// get an exact integer zero without a narrowing double->int conversion.
template<typename data_t> __global__
void device_zero_vec(int nrows, data_t* vec) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nrows)
        vec[i] = data_t(0);
}

// Adds `increment` to each of the first `len` entries of `arr`.
// The index is kept signed: the original compared an unsigned tid against the
// signed `len`, so a negative `len` would convert to a huge unsigned bound
// and every thread would write out of bounds. With int on both sides a
// non-positive `len` correctly disables all threads.
__global__ static
void device_incre(const int len, int* arr, const int increment) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        arr[tid] += increment;
    }
}

#endif // !__GPU_HELPER_CUH__
