#pragma once
namespace CPU {

template<typename IPIV_T>
inline void initalize_ipiv(IPIV_T* ipiv, int n) {
    // Fill ipiv with the 1-based identity permutation (LAPACK convention:
    // ipiv[i] = i+1 means "row i stays in place").
    for(int idx = 0; idx < n; ++idx)
        ipiv[idx] = static_cast<IPIV_T>(idx + 1);
}

// Reassemble A (2m x 2n, column-major, leading dim lda) from four m x n
// sub-blocks (leading dim ldm): element (i,j) of Am11/Am21 lands in the
// even/odd row of even column 2j; Am12/Am22 land in odd column 2j+1.
inline void merge_Amij_to_A(double* A, double* Am11, double* Am12, double* Am21, double* Am22, int m, int n, int lda, int ldm) {
    #pragma omp parallel for collapse(2)
    for(int col = 0; col < n; col++) {
        for(int row = 0; row < m; row++) {
            double* evenCol = A + 2*col*lda + 2*row;       // column 2*col, rows 2*row, 2*row+1
            double* oddCol  = A + (2*col + 1)*lda + 2*row; // column 2*col+1
            const int src = col*ldm + row;
            evenCol[0] = Am11[src];
            evenCol[1] = Am21[src];
            oddCol[0]  = Am12[src];
            oddCol[1]  = Am22[src];
        }
    }
}

// Interleave two length-m vectors: b = [b1[0], b2[0], b1[1], b2[1], ...].
inline void merge_bi_to_b(double* b, double* b1, double* b2, int m) {
    #pragma omp parallel for
    for(int k = 0; k < m; ++k) {
        double* pair = b + 2*k;
        pair[0] = b1[k];
        pair[1] = b2[k];
    }
}

// De-interleave x (length 2m) into its even entries (-> x1) and odd
// entries (-> x2); inverse of merge_bi_to_b.
// `x` is read-only, so it is const-qualified here to match the DEVICE
// overload's signature (backward-compatible for all callers).
inline void scatter_x_to_xi(double* x1, double* x2, const double* x, int m) {
    #pragma omp parallel for
    for(int i = 0; i < m; i++) {
        x1[i] = x[2*i];
        x2[i] = x[2*i + 1];
    }
}

// scale[i] = 1 / matrix(i,i) for the leading n x n part of a column-major
// matrix with leading dimension lda.
// NOTE(review): no guard against a zero diagonal entry -- caller is
// presumably responsible for that (confirm against call sites).
inline void extract_scale(double* matrix, double* scale, int n, int lda) {
    #pragma omp parallel for
    for(int k = 0; k < n; ++k)
        scale[k] = 1.0 / matrix[k * (lda + 1)]; // k*(lda+1) == k*lda + k, the diagonal
}

// Row-scale an m x n column-major matrix in place: matrix(i,j) *= scale[i].
// Loops run column-outer / row-inner so the inner loop walks unit-stride
// memory (the original i-outer/j-inner order strode by lda per iteration);
// iterations are independent, so the result is unchanged.
inline void scale_matrix(double* matrix, double* scale, int m, int n, int lda) {
    #pragma omp parallel for collapse(2)
    for(int j = 0; j < n; j++) {
        for(int i = 0; i < m; i++) {
            matrix[j * lda + i] *= scale[i];
        }
    }
}

// Copy an m x n column-major submatrix from a (leading dim lda) to b
// (leading dim ldb), converting element type via assignment.
template<typename Src_T, typename Dst_T>
void memcpy_submatrix(const Src_T* a, Dst_T* b, int m, int n, int lda, int ldb) {
    #pragma omp parallel for collapse(2)
    for(int col = 0; col < n; ++col) {
        for(int row = 0; row < m; ++row) {
            b[col * ldb + row] = a[col * lda + row];
        }
    }
}
// Sum of absolute values over the m x n column-major submatrix of A
// (leading dimension lda) -- a quick scalar fingerprint for debugging.
template<typename T>
inline double checksum(const T* A, int m, int n, int lda) {
    double total = 0.0;
    for(int col = 0; col < n; ++col) {
        const T* column = A + col * lda;
        for(int row = 0; row < m; ++row) {
            total += fabs(column[row]);
        }
    }
    return total;
}

// Relative Frobenius-norm error of A against reference B over the m x n
// column-major submatrices: sqrt(||A - B||_F^2 / (||B||_F^2 + 1e-12)).
// The 1e-12 keeps the ratio finite when B is all zeros.
template<typename T>
inline double checkresult(const T * A, const T * B, int m, int n, int lda, int ldb) {
    double err2 = 0.0;
    double ref2 = 0.0;
    for(int col = 0; col < n; ++col) {
        for(int row = 0; row < m; ++row) {
            // Difference and squares are computed in T (as in the reference
            // implementation) before accumulating in double.
            const T d = A[row + col * lda] - B[row + col * ldb];
            const T r = B[row + col * ldb];
            err2 += d * d;
            ref2 += r * r;
        }
    }
    // printf("norm err = %e, norm_ref = %e\n", err2, ref2);
    return sqrt(err2 / (ref2 + 1e-12));
}

};

namespace DEVICE{

// One thread per entry: write the 1-based identity permutation into ipiv.
// Expects a 1D launch covering at least n threads; extra threads exit.
template<typename IPIV_T>
__global__ void initalize_ipiv_kernel(IPIV_T* ipiv, int n) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= n) return;
    ipiv[tid] = static_cast<IPIV_T>(tid + 1);
}

// Host wrapper: fill device array ipiv (length n) with the 1-based identity
// permutation. Launches on the default stream; no error check (matches the
// file's convention for kernel launches).
template<typename IPIV_T>
inline void initalize_ipiv(IPIV_T* ipiv, int n) {
    // Guard n <= 0: a grid dimension of 0 is an invalid launch configuration.
    if (n <= 0) return;
    initalize_ipiv_kernel<<<(n + 255) / 256, 256>>>(ipiv, n);
}


// Element-wise converting copy of an m x n column-major submatrix.
// Expects a 2D launch: x covers rows (0..m), y covers columns (0..n);
// out-of-range threads exit.
template<typename Src_T, typename Dst_T>
__global__ void kernel_memcpy_submatrix(const Src_T* a, Dst_T* b, int m, int n, int lda, int ldb) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if(row >= m || col >= n) return;
    b[row + col * ldb] = static_cast<Dst_T>(a[row + col * lda]);
}

// Device-to-device copy of an m x n column-major submatrix, converting the
// element type when Src_T != Dst_T. Same-type copies use cudaMemcpy2DAsync
// (pitch = leading dimension in bytes) on the default stream; mixed-type
// copies launch a conversion kernel.
template<typename Src_T, typename Dst_T>
inline void memcpy_submatrix(const Src_T* a, Dst_T* b, int m, int n, int lda, int ldb) {
    // Guard empty extents: a 0-sized grid dimension on the kernel path is an
    // invalid launch configuration.
    if (m <= 0 || n <= 0) return;
    if constexpr(std::is_same_v<Src_T, Dst_T>) {
        cudaMemcpy2DAsync ( b, sizeof(Dst_T)*ldb, a, sizeof(Src_T)*lda, 
                                sizeof(Src_T)*m, n, cudaMemcpyDeviceToDevice);
    } else {
        dim3 block(32, 32, 1);
        dim3 grid((m + 31) / 32, (n + 31) / 32, 1);
        kernel_memcpy_submatrix<<<grid, block>>>(a, b, m, n, lda, ldb);
    }
}

// Stream-ordered variant of the converting submatrix copy: identical to the
// default-stream overload but enqueues all work on `stream`.
template<typename Src_T, typename Dst_T>
inline void memcpy_submatrix(const Src_T* a, Dst_T* b, int m, int n, int lda, int ldb, cudaStream_t stream) {
    // Guard empty extents: a 0-sized grid dimension on the kernel path is an
    // invalid launch configuration.
    if (m <= 0 || n <= 0) return;
    if constexpr(std::is_same_v<Src_T, Dst_T>) {
        cudaMemcpy2DAsync ( b, sizeof(Dst_T)*ldb, a, sizeof(Src_T)*lda, 
                                sizeof(Src_T)*m, n, cudaMemcpyDeviceToDevice, stream);
    } else {
        dim3 block(32, 32, 1);
        dim3 grid((m + 31) / 32, (n + 31) / 32, 1);
        kernel_memcpy_submatrix<<<grid, block, 0, stream>>>(a, b, m, n, lda, ldb);
    }
}

// Synchronous host-to-device upload of an m x n column-major matrix of T
// (src leading dim lda on the host, dst leading dim ldb on the device).
// NOTE(review): cublasSetMatrix returns cublasStatus_t, not cudaError_t;
// CUDA_RT_CALL (defined elsewhere) is presumably a CUDA-runtime checker --
// confirm it handles the cuBLAS status type correctly.
template<typename T>
inline void setMatrix(const T* src, T* dst, int m, int n, int lda, int ldb) {
    CUDA_RT_CALL(cublasSetMatrix(m, n, sizeof(T), src, lda, dst, ldb));
}

// Synchronous device-to-host download of an m x n column-major matrix of T
// (src leading dim lda on the device, dst leading dim ldb on the host).
// NOTE(review): cublasGetMatrix returns cublasStatus_t, not cudaError_t --
// confirm CUDA_RT_CALL handles that status type correctly.
template<typename T>
inline void getMatrix(const T* src, T* dst, int m, int n, int lda, int ldb) {
    CUDA_RT_CALL(cublasGetMatrix(m, n, sizeof(T), src, lda, dst, ldb));
}

// Asynchronous host-to-device upload on `stream`; the host buffer must stay
// valid (and should be pinned) until the stream completes the copy.
// NOTE(review): the returned cublasStatus_t is ignored here, unlike the
// synchronous overload above -- consider checking it.
template<typename T>
inline void setMatrix(cudaStream_t stream, const T* src, T* dst, int m, int n, int lda, int ldb) {
    cublasSetMatrixAsync(m, n, sizeof(T), src, lda, dst, ldb, stream);
}

// Asynchronous device-to-host download on `stream`; do not read dst on the
// host until the stream is synchronized.
// NOTE(review): the returned cublasStatus_t is ignored here, unlike the
// synchronous overload above -- consider checking it.
template<typename T>
inline void getMatrix(cudaStream_t stream, const T* src, T* dst, int m, int n, int lda, int ldb) {
    cublasGetMatrixAsync(m, n, sizeof(T), src, lda, dst, ldb, stream);
}
// De-interleave x (length 2m): even entries -> x1, odd entries -> x2.
// One thread per output pair; expects a 1D launch covering m threads.
template<typename T>
__global__ void scatter_x_to_xi_kernel(T* x1, T* x2, const T* x, int m) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= m) return;
    const T* pair = x + 2 * tid;
    x1[tid] = pair[0];
    x2[tid] = pair[1];
}

// Host wrapper: split device vector x (length 2m) into x1 (even entries)
// and x2 (odd entries). Launches on the default stream; no error check
// (matches the file's convention for kernel launches).
template<typename T>
inline void scatter_x_to_xi(T* x1, T* x2, const T* x, int m) {
    // Guard m <= 0: a grid dimension of 0 is an invalid launch configuration.
    if (m <= 0) return;
    scatter_x_to_xi_kernel<<<(m + 255) / 256, 256>>>(x1, x2, x, m);
}

};

