#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>

#include <mkl.h>
typedef double data_t;
#define DLL_IMPLEMENT

extern "C"
{
    void solver_gemv(data_t *matrix, data_t *x, data_t *y, int n, int x_offset, int lda);
    void solver_getrf(data_t *matrix, int n);
    void solver_getrs(data_t *rhs, int n);
}

#ifndef MAX_SIZE
#define MAX_SIZE 5000
#endif

#ifndef MAX_DELTA
#define MAX_DELTA 300
#endif

int INIT_SIZE = 0;
int last_n = 0;
int inv_finished = false;
int *Ipiv = nullptr;

cublasHandle_t cuBlasH = nullptr;
bool solverInited = false;

cudaStream_t copy_thin_stream;
cudaStream_t copy_flat_stream;
cudaStream_t extend_stream;
cudaStream_t reorder_stream;

int niters = 0;
int* d_Ipiv = nullptr;
int* d_info = nullptr;
size_t lwork = 0;

data_t* d_b = nullptr;
data_t* d_x = nullptr;
data_t* d_work = nullptr;

data_t* d_A_inv_reorder = nullptr; // PAP^T (重排) 的逆矩阵  注意 (PAP^T)^{-1}=PA^{-1}P^T
data_t* d_A_inv = nullptr; // 严格意义上的逆矩阵 A^{-1}
data_t* d_v = nullptr;

data_t* flat_buf = nullptr; // (n - last_n) * n 
data_t* d_flat_buf = nullptr; // (n - last_n) * n 
data_t* thin_buf = nullptr; // (n - last_n) * n
data_t* d_thin_buf = nullptr; // (n - last_n) * n

data_t* d_y = nullptr; // y = - A_inv * v
data_t* d_z = nullptr; // z = - u * A_inv
data_t* d_F = nullptr; // F = B'^{-1}

double ck_sum = 0.0;

// 异步拷贝A_inv -> A_inv_reorder
// A_inv: last_n * last_n, lda = last_n
// A_inv_reorder: n * n, lda = n
__global__ void extend_matrix(double* dst, double* const src, int last_n, int n) {
    const int stride = last_n / gridDim.x;
    const int start = blockIdx.x * stride;
    const int end = (blockIdx.x + 1 == gridDim.x) ? last_n : (start + stride);
    for (int i = start; i < end; i++) {
        int j = 0;
        double *dst_ = dst + i * n;
        double *src_ = src + i * last_n;
        for (j = threadIdx.x; j < last_n; j += blockDim.x)
            dst_[j] = src_[j];
    }
}

// 将A_inv_reorder重排回A_inv
__global__ void reorder_matrix(double *dst, const double* src, const int n_2, const int last_n_2) {
    const int n = n_2 << 1;
    const int last_n = last_n_2 << 1;
    const int delta = n_2 - last_n_2;
    
    int stride = last_n_2 / gridDim.x;
    int start = blockIdx.x * stride;
    int end = (blockIdx.x + 1 == gridDim.x) ? last_n_2 : (start + stride);
    for (int i = start; i < end; i++) {
        int j;
        for (j = threadIdx.x; j < last_n_2; j += blockDim.x)
            dst[j + n * i] = src[j + n * i];
        if (j < n_2) {
            dst[j + n * i] = src[last_n_2 + j + n * i];
            j += blockDim.x;
        }
        for (; j < last_n + delta; j += blockDim.x)
            dst[j + n * i] = src[-delta + j + n * i];
        if (j < n) {
            dst[j + n * i] = src[j + n * i];
        }
    }

    dst += n * last_n_2;
    src += n * last_n;

    for (int i = blockIdx.x; i < delta; i += gridDim.x) {
        int j;
        for (j = threadIdx.x; j < last_n_2; j += blockDim.x)
            dst[j + n * i] = src[j + n * i];
        if (j < n_2) {
            dst[j + n * i] = src[last_n_2 + j + n * i];
            j += blockDim.x;
        }
        for (; j < last_n + delta; j += blockDim.x)
            dst[j + n * i] = src[-delta + j + n * i];
        if (j < n) {
            dst[j + n * i] = src[j + n * i];
        }
    }

    dst += n * delta;
    src -= n * last_n_2;

    for (int i = start; i < end; i++) {
        int j;
        for (j = threadIdx.x; j < last_n_2; j += blockDim.x)
            dst[j + n * i] = src[j + n * i];
        if (j < n_2) {
            dst[j + n * i] = src[last_n_2 + j + n * i];
            j += blockDim.x;
        }
        for (; j < last_n + delta; j += blockDim.x)
            dst[j + n * i] = src[-delta + j + n * i];
        if (j < n) {
            dst[j + n * i] = src[j + n * i];
        }
    }
    
    dst += n * last_n_2;
    src += n * n_2;

    for (int i = blockIdx.x; i < delta; i += gridDim.x) {
        int j;
        for (j = threadIdx.x; j < last_n_2; j += blockDim.x)
            dst[j + n * i] = src[j + n * i];
        if (j < n_2) {
            dst[j + n * i] = src[last_n_2 + j + n * i];
            j += blockDim.x;
        }
        for (; j < last_n + delta; j += blockDim.x)
            dst[j + n * i] = src[-delta + j + n * i];
        if (j < n) {
            dst[j + n * i] = src[j + n * i];
        }
    }
}

__global__ void copy_thin(double *dst, double* const src, const int n, const int last_n_2) {
    int DELTA = n - (last_n_2 << 1);
    int delta = DELTA >> 1;
    int last_n = last_n_2 << 1;
    for (int i = blockIdx.x; i < DELTA; i += gridDim.x) {
        data_t *dst_ = dst + i * last_n;
        data_t *src_ = src + i * n;
        int j;
        for (j = threadIdx.x; j < last_n_2; j += blockDim.x) {
            dst_[j] = src_[j];
        }
        for (; j < last_n; j += blockDim.x) {
            dst_[j] = src_[delta + j];
        }
    }
}

void cuda_iter_init(data_t *matrix, int nn) {
    if (last_n != 0) {
        solverInited = true;
        return;
    }
    const int n = MAX_SIZE;
    const int n_2 = n / 2;
    cublasCreate(&cuBlasH);
   
    // double matrix
    cudaMalloc ((void**)&d_A_inv_reorder, sizeof(data_t) * n * n);
    cudaMalloc ((void**)&d_A_inv, sizeof(data_t) * n * n); 

    // double vector
    cudaMalloc ((void**)&d_b, sizeof(data_t) * n);
    cudaMalloc ((void**)&d_x, sizeof(data_t) * n);
    
    // int vector
    cudaMalloc ((void**)&d_Ipiv, sizeof(int) * n);
    cudaMalloc ((void**)&d_info, sizeof(int) * 100);

    // buffer
    flat_buf = new data_t[n * MAX_DELTA];
    cudaMalloc ((void**)&d_flat_buf, sizeof(data_t) * n * MAX_DELTA);
    thin_buf = new data_t[n * MAX_DELTA];
    cudaMalloc ((void**)&d_thin_buf, sizeof(data_t) * n * MAX_DELTA);

    // inv
    cudaMalloc ((void**)&d_v, sizeof(data_t) * n * MAX_DELTA);
    cudaMalloc ((void**)&d_y, sizeof(data_t) * n * MAX_DELTA);
    cudaMalloc ((void**)&d_z, sizeof(data_t) * n * MAX_DELTA);
    cudaMalloc ((void**)&d_F, sizeof(data_t) * MAX_DELTA * MAX_DELTA);

    // stream
    cudaStreamCreate(&copy_thin_stream);
    cudaStreamCreate(&copy_flat_stream);
    cudaStreamCreate(&extend_stream);

    // CPU solve: A^{-1}
    // 调用 MKL： getrf + getri
    Ipiv = (int *)malloc(sizeof(int) * n);
    LAPACKE_dgetrf(LAPACK_COL_MAJOR, nn, nn, matrix, nn, Ipiv);
    LAPACKE_dgetri(LAPACK_COL_MAJOR, nn, matrix, nn, Ipiv);

    cudaMemcpy (d_A_inv, matrix, sizeof(data_t) * nn * nn, cudaMemcpyHostToDevice);
}

void solver_getrf(data_t *matrix, int n) {
    assert(last_n < n);
    // std::chrono::high_resolution_clock::time_point t1, t2, t3;
    // t1 = std::chrono::high_resolution_clock::now();

    if (!solverInited) cuda_iter_init(matrix, n);
    if (solverInited) {
        const int n_2 = (n >> 1);
        const int last_n_2 = (last_n >> 1);
        const int delta = n_2 - last_n_2;
        const int DELTA = n - last_n;
        assert(DELTA <= MAX_DELTA);

        // 异步传输两条竖向 
        cudaMemcpyAsync(d_thin_buf, matrix + n * last_n_2, sizeof(data_t) * n * delta, cudaMemcpyHostToDevice, copy_thin_stream);
        cudaMemcpyAsync(d_thin_buf + n * delta, matrix + n * (n - delta), sizeof(data_t) * n * delta, cudaMemcpyHostToDevice, copy_thin_stream); 
        copy_thin<<<4, 512, 0, copy_thin_stream>>>(d_v, d_thin_buf, n, last_n_2);

        // 异步拷贝A_inv -> A_inv_reorder
        extend_matrix<<<10, 512, 0, extend_stream>>>(d_A_inv_reorder, d_A_inv, last_n, n);
        
        // pack两条横向，顺便重排
        #pragma omp parallel for schedule(static)
        for (int i = 0; i < last_n_2; i++) {
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[j + DELTA * i] = matrix[last_n_2 + j + n * i];
            
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[delta + j + DELTA * i] = matrix[(last_n_2 + n_2) + j + n * i];
        }
        #pragma omp parallel for schedule(static)
        for (int i = 0; i < last_n_2; i++) {
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[(DELTA * last_n_2) + j + DELTA * i] = matrix[(last_n_2 + n * n_2) + j + n * i];
            
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[(DELTA * last_n_2) + delta + j + DELTA * i] = matrix[(last_n_2 + n_2 + n * n_2) + j + n * i];
        }
        for (int i = 0; i < delta; i++) {
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[(DELTA * last_n) + j + DELTA * i] = matrix[(last_n_2 * n + last_n_2) + j + n * i];
            
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[(DELTA * last_n) + delta + j + DELTA * i] = matrix[(last_n_2 * n + last_n_2 + n_2)  + j + n * i];
        }
        for (int i = 0; i < delta; i++) {
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[(DELTA * last_n + DELTA * delta) + j + DELTA * i] = matrix[((n - delta) * n + last_n_2) + j + n * i];
            
            #pragma unroll(4)
            for (int j = 0; j < delta; j++) 
                flat_buf[(DELTA * last_n + DELTA * delta) + delta + j + DELTA * i] = matrix[((n - delta) * n + n - delta) + j + n * i];
        }

        cudaMemcpyAsync(d_flat_buf, flat_buf, sizeof(data_t) * n * DELTA, cudaMemcpyHostToDevice, copy_flat_stream);

        // A = [A_old, v; u, B]
        data_t * d_u = d_flat_buf; 
        data_t * d_B = d_flat_buf + last_n * DELTA; 
        assert(cudaDeviceSynchronize() == cudaSuccess);

        // 2x2 分块矩阵求逆
        // 1. y = - A_inv(last_n*last_n) * v(last_n*DELTA)
        double alpha = -1, beta = 0;
        cublasDgemm(cuBlasH, CUBLAS_OP_N, CUBLAS_OP_N, last_n, DELTA, last_n, &alpha, d_A_inv, last_n, d_v, last_n, &beta, d_y, last_n);

        // 2. z = - u(DELTA*last_n) * A_inv(last_n*last_n)
        cublasDgemm(cuBlasH, CUBLAS_OP_N, CUBLAS_OP_N, DELTA, last_n, last_n, &alpha, d_u, DELTA, d_A_inv, last_n, &beta, d_z, DELTA);
        
        // 3. B' = B(DELTA*DELTA) + u(DELTA*last_n) * y(last_n*DELTA)
        alpha = 1, beta = 1;
        cublasDgemm(cuBlasH, CUBLAS_OP_N, CUBLAS_OP_N, DELTA, DELTA, last_n, &alpha, d_u, DELTA, d_y, last_n, &beta, d_B, DELTA);

        // 4. F = B'^{-1}
        cudaMemcpy(matrix, d_B, sizeof(data_t) * DELTA * DELTA, cudaMemcpyDeviceToHost);
        LAPACKE_dgetrf(LAPACK_COL_MAJOR, DELTA, DELTA, matrix, DELTA, Ipiv);
        LAPACKE_dgetri(LAPACK_COL_MAJOR, DELTA, matrix, DELTA, Ipiv);
        cudaMemcpy(d_F, matrix, sizeof(data_t) * DELTA * DELTA, cudaMemcpyHostToDevice);
        
        // 5. A^{-1}_{21} = F(DELTA*DELTA) * z(DELTA*last_n)
        alpha = 1, beta = 0;
        cublasDgemm(cuBlasH, CUBLAS_OP_N, CUBLAS_OP_N, DELTA, last_n, DELTA, &alpha, d_F, DELTA, d_z, DELTA, &beta, d_A_inv_reorder + last_n, n);
        
        // 6. A^{-1}_{12} = y(last_n*DELTA) * F(DELTA*DELTA)
        cublasDgemm(cuBlasH, CUBLAS_OP_N, CUBLAS_OP_N, last_n, DELTA, DELTA, &alpha, d_y, last_n, d_F, DELTA, &beta, d_A_inv_reorder + last_n * n, n);

        // 7. A^{-1}_{22} = F(DELTA*DELTA) 
        extend_matrix<<<1,32>>>(d_A_inv_reorder + last_n * n + last_n, d_F, DELTA, n);
        
        // 8. A^{-1}_{11} = A_inv(last_n*last_n) + y(last_n*DELTA) * A^{-1}_{21}(DELTA*last_n)
        alpha = 1, beta = 1;
        cublasDgemm(cuBlasH, CUBLAS_OP_N, CUBLAS_OP_N, last_n, last_n, DELTA, &alpha, d_y, last_n, d_A_inv_reorder + last_n, n, &beta, d_A_inv_reorder, n);

        // 重排 A_inv_reorder -> A_inv
        reorder_matrix<<<10, 512>>>(d_A_inv, d_A_inv_reorder, n_2, last_n_2);
    }
    inv_finished = false;
    last_n = n;

    // t2 = std::chrono::high_resolution_clock::now();
    // printf("solver_getrf: %d -> %d, time: %lf\n", last_n, n, std::chrono::duration_cast<std::chrono::duration<double>>(t2 - t1).count() * 1000);
}

void solver_getrs(data_t *rhs, int n) {
    if (!inv_finished) {
        cudaDeviceSynchronize();
        inv_finished = true;
    }
    cudaMemcpy(d_b, rhs, sizeof(data_t) * n, cudaMemcpyHostToDevice);
    double alpha = 1, beta = 0;
    cublasDgemv(cuBlasH, CUBLAS_OP_N, n, n, &alpha, d_A_inv, n, d_b, 1, &beta, d_x, 1);
    cudaMemcpy(rhs, d_x, sizeof(data_t) * n, cudaMemcpyDeviceToHost); // 此处可直接在gemv里面用上，省的来回拷贝
}

int GEMV_MAX_SIZE = 2500;
int gemv_last_n = 0;
int GEMV_MAX_DELTA = 100;
int gemv_turn = 0;
bool gemvInited = false;
bool gemvCalled = false;
data_t *d_gemv_A[5] = {nullptr, nullptr, nullptr, nullptr, nullptr};
data_t *d_gemv_x[5] = {nullptr, nullptr, nullptr, nullptr, nullptr};
data_t *d_gemv_y[5] = {nullptr, nullptr, nullptr, nullptr, nullptr};

cudaStream_t gemv_stream;
cudaStream_t gemv_copy_thin_stream;
cudaStream_t gemv_copy_flat_stream;
cublasHandle_t gemv_cublasH = nullptr;

data_t *gemv_flat_buf = nullptr;
data_t *d_gemv_flat_buf = nullptr;
data_t *gemv_thin_buf = nullptr;
data_t *d_gemv_thin_buf = nullptr;

void gemvInit(data_t *matrix, int nn, int lda) {
    if (!gemvCalled) {
        GEMV_MAX_SIZE = MAX_SIZE / 2;
        int n = GEMV_MAX_SIZE;
        GEMV_MAX_DELTA = MAX_DELTA / 2;
        gemv_turn = 0;
        cublasCreate(&gemv_cublasH);
        cudaStreamCreate(&gemv_stream);
        cudaMalloc ((void**)&d_gemv_A[0], sizeof(data_t) * n * n);
        cudaMalloc ((void**)&d_gemv_A[1], sizeof(data_t) * n * n);
        cudaMalloc ((void**)&d_gemv_A[2], sizeof(data_t) * n * n);
        cudaMalloc ((void**)&d_gemv_A[3], sizeof(data_t) * n * n);
        cudaMalloc ((void**)&d_gemv_A[4], sizeof(data_t) * n * n);
        cudaMalloc ((void**)&d_gemv_x[0], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_x[1], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_x[2], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_x[3], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_x[4], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_y[0], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_y[1], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_y[2], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_y[3], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_y[4], sizeof(data_t) * n);
        cudaMalloc ((void**)&d_gemv_flat_buf, sizeof(data_t) * n * GEMV_MAX_DELTA);
        cudaMalloc ((void**)&d_gemv_thin_buf, sizeof(data_t) * n * GEMV_MAX_DELTA);
        gemv_flat_buf = new data_t[n * GEMV_MAX_DELTA];
        gemv_thin_buf = new data_t[n * GEMV_MAX_DELTA];
    }
    gemvCalled = true;
    cublasSetMatrixAsync(nn, nn, sizeof(data_t), matrix, lda, d_gemv_A[gemv_turn], GEMV_MAX_SIZE, gemv_stream);
#ifdef CHECK
    if (gemv_turn == 3) {
        gemvInited = true;
    }
#else
    if (gemv_turn == 4) {
        gemvInited = true;
    }
#endif
}

// __global__ void gemv_copy_thin(double *dst, double *const src, const int lda, const int last_n, const int n) {
//     int DELTA = n - last_n;
//     for (int i = blockIdx.x; i < DELTA; i += gridDim.x) {
//         data_t *dst_ = dst + i * lda;
//         data_t *src_ = src + i * n;
//         for (int j = threadIdx.x; j < n; j += blockDim.x) {
//             dst_[j] = src_[j];
//         }
//     }
// }

__global__ void gemv_copy_flat(double *dst, double *const src, const int lda, const int last_n, const int delta) {
    for (int i = blockIdx.x; i < last_n; i += gridDim.x) {
        data_t *dst_ = dst + i * lda;
        data_t *src_ = src + i * delta;
        for (int j = threadIdx.x; j < delta; j += blockDim.x) {
            dst_[j] = src_[j];
        }
    }
}

void solver_gemv(data_t *matrix, data_t *x, data_t *y, int n, int x_offset, int lda) {
    // std::chrono::high_resolution_clock::time_point t1, t2, t3;
    // cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
    if (!gemvInited) {
        gemvInit(matrix, n, lda);
    } else {
        if (gemv_last_n < n) {
            data_t *d_cur_A = d_gemv_A[gemv_turn];
            const int delta = n - gemv_last_n;
            // 传输纵向
            cublasSetMatrixAsync(n, delta, sizeof(data_t), matrix + gemv_last_n * lda, lda, d_cur_A + gemv_last_n * GEMV_MAX_SIZE, GEMV_MAX_SIZE, gemv_stream);

            // 打包横向
            #pragma omp parallel for schedule(static)
            for (int i = 0; i < gemv_last_n; i++) {
                #pragma unroll(4)
                for (int j = 0; j < delta; j++) 
                    gemv_flat_buf[j + delta * i] = matrix[gemv_last_n + j + lda * i];
            }
            cudaMemcpyAsync(d_gemv_flat_buf, gemv_flat_buf, sizeof(data_t) * gemv_last_n * delta, cudaMemcpyHostToDevice, gemv_stream);
            gemv_copy_flat<<<4,128,0,gemv_stream>>>(d_cur_A + gemv_last_n, d_gemv_flat_buf, GEMV_MAX_SIZE, gemv_last_n, delta);
            cudaStreamSynchronize(gemv_stream);
        }
    }
    
    // cudaMemcpyAsync(d_gemv_x[gemv_turn], x + x_offset - 1, sizeof(data_t) * n, cudaMemcpyHostToDevice, gemv_stream);
    // double alpha = 1, beta = 0;
    // cublasDgemv(gemv_cublasH, CUBLAS_OP_N, n, n, &alpha, d_gemv_A[gemv_turn], GEMV_MAX_SIZE, d_gemv_x[gemv_turn], 1, &beta, d_gemv_y[gemv_turn], 1);
    // cudaMemcpyAsync(y, d_gemv_y[gemv_turn], sizeof(data_t) * n, cudaMemcpyDeviceToHost, gemv_stream);

    cudaMemcpy(d_gemv_x[gemv_turn], x + x_offset - 1, sizeof(data_t) * n, cudaMemcpyHostToDevice);
    double alpha = 1, beta = 0;
    cublasDgemv(gemv_cublasH, CUBLAS_OP_N, n, n, &alpha, d_gemv_A[gemv_turn], GEMV_MAX_SIZE, d_gemv_x[gemv_turn], 1, &beta, d_gemv_y[gemv_turn], 1);
    cudaMemcpy(y, d_gemv_y[gemv_turn], sizeof(data_t) * n, cudaMemcpyDeviceToHost);

    // 处理递推关系和同步
#ifdef CHECK
    if (gemv_turn == 3) { // 第一组已经执行完
        gemv_last_n = n;
    }
    gemv_turn = (gemv_turn + 1) % 4;
#else
    if (gemv_turn == 4) { // 第一组已经执行完
        gemv_last_n = n;
    }
    gemv_turn = (gemv_turn + 1) % 5;
#endif
}