#include "mt3d_cuda.h"

#include <algorithm>
#include <cstdlib>
#include <iostream>

// Solve the forward problem: factor the system matrix A with an incomplete
// Cholesky decomposition, set up cuSPARSE triangular-solve (SpSV) descriptors
// for preconditioning, then run the preconditioned conjugate gradient solver
// once for the TE mode and once for the TM mode. Solutions are written to the
// member vectors h_X_TE_ and h_X_TM_.
//
// All CUDA runtime and cuSPARSE calls are checked: a failed allocation, copy
// or descriptor creation would otherwise silently propagate garbage into the
// solver, so we abort immediately with a diagnostic instead.
void mt3d::solve()
{
	// Fail-fast helpers for the two status types used below.
	auto cuda_check = [](cudaError_t err, const char *what) {
		if (err != cudaSuccess)
		{
			std::cerr << "CUDA error in mt3d::solve (" << what << "): "
				<< cudaGetErrorString(err) << "\n";
			std::abort();
		}
	};
	auto cusparse_check = [](cusparseStatus_t stat, const char *what) {
		if (stat != CUSPARSE_STATUS_SUCCESS)
		{
			std::cerr << "cuSPARSE error in mt3d::solve (" << what << "): "
				<< cusparseGetErrorString(stat) << "\n";
			std::abort();
		}
	};

	// Query the non-zero count (lnz_num_) of the incomplete Cholesky factor.
	clcg_incomplete_Cholesky_cuda_half_buffsize(h_rowIdxA_.get(), h_colIdxA_.get(), nz_num_, &lnz_num_);

	// Perform the incomplete Cholesky factorization.
	h_rowIdxIC_.resize(lnz_num_);
	h_colIdxIC_.resize(lnz_num_);
	h_IC_.resize(lnz_num_);

	clcg_incomplete_Cholesky_cuda_half(h_rowIdxA_.get(), h_colIdxA_.get(), h_A_.get(), edge_num_, nz_num_, lnz_num_, h_rowIdxIC_.get(), h_colIdxIC_.get(), h_IC_.get());

	// Create library handles.
	cublasHandle_t cubHandle;
	cusparseHandle_t cusHandle;

	if (cublasCreate(&cubHandle) != CUBLAS_STATUS_SUCCESS)
	{
		std::cerr << "cuBLAS error in mt3d::solve: cublasCreate failed\n";
		std::abort();
	}
	cusparse_check(cusparseCreate(&cusHandle), "cusparseCreate");

	// Allocate GPU memory & copy matrix A (COO layout) to the device.
	cuda_check(cudaMalloc(&d_A_, nz_num_ * sizeof(cuDoubleComplex)), "cudaMalloc d_A_");
	cuda_check(cudaMalloc(&d_rowIdxA_, nz_num_ * sizeof(int)), "cudaMalloc d_rowIdxA_");
	cuda_check(cudaMalloc(&d_rowPtrA_, (edge_num_ + 1) * sizeof(int)), "cudaMalloc d_rowPtrA_");
	cuda_check(cudaMalloc(&d_colIdxA_, nz_num_ * sizeof(int)), "cudaMalloc d_colIdxA_");

	// Dense vector P used by the solver for matrix-vector products.
	cuda_check(cudaMalloc(&d_P_, edge_num_ * sizeof(cuDoubleComplex)), "cudaMalloc d_P_");
	cusparse_check(cusparseCreateDnVec(&dvec_P_, edge_num_, d_P_, CUDA_C_64F), "cusparseCreateDnVec dvec_P_");

	cuda_check(cudaMemcpy(d_A_, h_A_.get(), nz_num_ * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice), "cudaMemcpy d_A_");
	cuda_check(cudaMemcpy(d_rowIdxA_, h_rowIdxA_.get(), nz_num_ * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy d_rowIdxA_");
	cuda_check(cudaMemcpy(d_colIdxA_, h_colIdxA_.get(), nz_num_ * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy d_colIdxA_");

	// Allocate GPU memory & copy the incomplete Cholesky factor (COO layout).
	cuda_check(cudaMalloc(&d_IC_, lnz_num_ * sizeof(cuDoubleComplex)), "cudaMalloc d_IC_");
	cuda_check(cudaMalloc(&d_rowIdxIC_, lnz_num_ * sizeof(int)), "cudaMalloc d_rowIdxIC_");
	cuda_check(cudaMalloc(&d_rowPtrIC_, (edge_num_ + 1) * sizeof(int)), "cudaMalloc d_rowPtrIC_");
	cuda_check(cudaMalloc(&d_colIdxIC_, lnz_num_ * sizeof(int)), "cudaMalloc d_colIdxIC_");

	cuda_check(cudaMemcpy(d_IC_, h_IC_.get(), lnz_num_ * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice), "cudaMemcpy d_IC_");
	cuda_check(cudaMemcpy(d_rowIdxIC_, h_rowIdxIC_.get(), lnz_num_ * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy d_rowIdxIC_");
	cuda_check(cudaMemcpy(d_colIdxIC_, h_colIdxIC_.get(), lnz_num_ * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy d_colIdxIC_");

	// Convert matrix A from COO format to CSR format, then wrap it in a
	// cuSPARSE generic sparse-matrix descriptor.
	cusparse_check(cusparseXcoo2csr(cusHandle, d_rowIdxA_, nz_num_, edge_num_, d_rowPtrA_, CUSPARSE_INDEX_BASE_ZERO), "cusparseXcoo2csr A");

	cusparse_check(cusparseCreateCsr(&smat_A_, edge_num_, edge_num_, nz_num_, d_rowPtrA_, d_colIdxA_, d_A_, CUSPARSE_INDEX_32I,
		CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_C_64F), "cusparseCreateCsr A");

	// Convert matrix IC from COO format to CSR format, same as above.
	cusparse_check(cusparseXcoo2csr(cusHandle, d_rowIdxIC_, lnz_num_, edge_num_, d_rowPtrIC_, CUSPARSE_INDEX_BASE_ZERO), "cusparseXcoo2csr IC");

	cusparse_check(cusparseCreateCsr(&smat_IC_, edge_num_, edge_num_, lnz_num_, d_rowPtrIC_, d_colIdxIC_, d_IC_, CUSPARSE_INDEX_32I,
		CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_C_64F), "cusparseCreateCsr IC");

	// The factor is lower triangular with a non-unit diagonal; both
	// attributes must be set on the descriptor before the SpSV calls below.
	cusparseFillMode_t fillmode = CUSPARSE_FILL_MODE_LOWER;
	cusparse_check(cusparseSpMatSetAttribute(smat_IC_, CUSPARSE_SPMAT_FILL_MODE, &fillmode, sizeof(fillmode)), "set CUSPARSE_SPMAT_FILL_MODE");

	cusparseDiagType_t diagtype = CUSPARSE_DIAG_TYPE_NON_UNIT;
	cusparse_check(cusparseSpMatSetAttribute(smat_IC_, CUSPARSE_SPMAT_DIAG_TYPE, &diagtype, sizeof(diagtype)), "set CUSPARSE_SPMAT_DIAG_TYPE");

	// Temporary dense vector, only needed to size the work buffers.
	cuda_cd *d_tmp;
	cuda_check(cudaMalloc(&d_tmp, edge_num_ * sizeof(cuDoubleComplex)), "cudaMalloc d_tmp");

	cusparseDnVecDescr_t dvec_tmp;
	cusparse_check(cusparseCreateDnVec(&dvec_tmp, edge_num_, d_tmp, CUDA_C_64F), "cusparseCreateDnVec dvec_tmp");

	cusparse_check(cusparseSpMV_bufferSize(cusHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &_one, smat_A_,
		dvec_P_, &_zero, dvec_tmp, CUDA_C_64F, CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize_), "cusparseSpMV_bufferSize");

	// --- begin the preconditioning part ---
	// The preconditioner applies two triangular solves with the factor:
	// forward (L) and transposed (L^T).
	cusparse_check(cusparseSpSV_createDescr(&descr_L_), "cusparseSpSV_createDescr L");
	cusparse_check(cusparseSpSV_createDescr(&descr_LT_), "cusparseSpSV_createDescr LT");

	cusparse_check(cusparseSpSV_bufferSize(cusHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &_one, smat_IC_, dvec_tmp, dvec_P_,
		CUDA_C_64F, CUSPARSE_SPSV_ALG_DEFAULT, descr_L_, &bufferSize_L_), "cusparseSpSV_bufferSize L");

	cusparse_check(cusparseSpSV_bufferSize(cusHandle, CUSPARSE_OPERATION_TRANSPOSE, &_one, smat_IC_, dvec_P_, dvec_tmp,
		CUDA_C_64F, CUSPARSE_SPSV_ALG_DEFAULT, descr_LT_, &bufferSize_LT_), "cusparseSpSV_bufferSize LT");

	// Size each work buffer for the largest of the three consumers.
	bufferSize_ = std::max(bufferSize_, std::max(bufferSize_L_, bufferSize_LT_));

	// Remember to use separate buffer spaces for the lower and transposed
	// triangular solves; sharing one buffer yields incorrect results.
	cuda_check(cudaMalloc(&d_buf_, bufferSize_), "cudaMalloc d_buf_");
	cuda_check(cudaMalloc(&d_buf2_, bufferSize_), "cudaMalloc d_buf2_");

	cusparse_check(cusparseSpSV_analysis(cusHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &_one, smat_IC_, dvec_tmp, dvec_P_,
		CUDA_C_64F, CUSPARSE_SPSV_ALG_DEFAULT, descr_L_, d_buf_), "cusparseSpSV_analysis L");

	cusparse_check(cusparseSpSV_analysis(cusHandle, CUSPARSE_OPERATION_TRANSPOSE, &_one, smat_IC_, dvec_P_, dvec_tmp,
		CUDA_C_64F, CUSPARSE_SPSV_ALG_DEFAULT, descr_LT_, d_buf2_), "cusparseSpSV_analysis LT");
	// --- end the preconditioning part ---

	// Zero-initialized starting guesses for both polarization modes.
	h_X_TE_.resize(edge_num_, _zero);
	h_X_TM_.resize(edge_num_, _zero);

	// Configure the solver: convergence tolerance and progress reporting.
	clcg_para my_para = clcg_default_parameters();
	my_para.epsilon = 1e-7;
	set_clcg_parameter(my_para);
	set_report_interval(1000);

	std::clog << "Solving the PDE in TE mode ... \n";
	//Minimize(cubHandle, cusHandle, h_X_TE_.get(), h_B_TE_.get(), edge_num_, nz_num_, CLCG_BICG_SYM);
	MinimizePreconditioned(cubHandle, cusHandle, h_X_TE_.get(), h_B_TE_.get(), edge_num_, nz_num_, CLCG_PCG);

	std::clog << "Solving the PDE in TM mode ... \n";
	//Minimize(cubHandle, cusHandle, h_X_TM_.get(), h_B_TM_.get(), edge_num_, nz_num_, CLCG_BICG_SYM);
	MinimizePreconditioned(cubHandle, cusHandle, h_X_TM_.get(), h_B_TM_.get(), edge_num_, nz_num_, CLCG_PCG);

	// Free device memory. Teardown failures are not fatal at this point, so
	// these are intentionally left unchecked.
	cudaFree(d_buf_);
	cudaFree(d_buf2_);
	cudaFree(d_A_);
	cudaFree(d_rowIdxA_);
	cudaFree(d_rowPtrA_);
	cudaFree(d_colIdxA_);
	cudaFree(d_rowIdxIC_);
	cudaFree(d_rowPtrIC_);
	cudaFree(d_colIdxIC_);
	cudaFree(d_IC_);
	cudaFree(d_P_);
	cudaFree(d_tmp);

	// Destroy cuSPARSE descriptors and library handles.
	cusparseDestroyDnVec(dvec_P_);
	cusparseDestroyDnVec(dvec_tmp);
	cusparseDestroySpMat(smat_A_);
	cusparseDestroySpMat(smat_IC_);

	cusparseSpSV_destroyDescr(descr_L_);
	cusparseSpSV_destroyDescr(descr_LT_);

	cublasDestroy(cubHandle);
	cusparseDestroy(cusHandle);
	return;
}
