
#define MIN(A,B) ( (A) < (B)? (A) : (B)  )
#define MAX(A,B) ( (A) > (B)? (A) : (B)  )

/* Map a (row, col) index of a column-major matrix with leading
   dimension LDA to the linear offset inside its storage array. */
#define M2V(i, j, LDA) ((j) * (LDA) + (i))

#define EPSILON 1.0e-16

/* Maximum number of rows or cols allowed in print_matrix */
#define PRINT_LIMIT 0

/* Copy the top-left nrows x ncols corner of the column-major matrix A
   (leading dimension lda) into B (leading dimension ldb); with
   ldb == nrows this packs a strided tile into contiguous storage.
   Fixes vs. the previous version:
     - do { } while (0) so the macro is a single statement and is safe
       inside un-braced if/else bodies (the old "{ ... } ;" form broke
       an if/else around the call);
     - every parameter is parenthesized against precedence surprises;
     - no double-underscore parameter names (reserved identifiers,
       C11 7.1.3). */
#define CONVERT_TO_LINEAR(A_, lda_, B_, ldb_, nrows_, ncols_) \
    do { \
        int ii_, jj_; \
        for (ii_ = 0; ii_ < (nrows_); ii_++) \
            for (jj_ = 0; jj_ < (ncols_); jj_++) \
                (*((B_) + M2V(ii_, jj_, (ldb_)))) = (*((A_) + M2V(ii_, jj_, (lda_)))); \
    } while (0)



/**** remove me: minimal stand-ins for the cuBLAS v2 API types so this
      file can be parsed without the CUDA toolkit headers; drop this
      section and include cublas_v2.h when building against real
      cuBLAS. Enumerator values mirror the real header. ****/
/* Status code returned by every cuBLAS call. */
typedef enum{
    CUBLAS_STATUS_SUCCESS         =0,
    CUBLAS_STATUS_NOT_INITIALIZED =1,
    CUBLAS_STATUS_ALLOC_FAILED    =3,
    CUBLAS_STATUS_INVALID_VALUE   =7,
    CUBLAS_STATUS_ARCH_MISMATCH   =8,
    CUBLAS_STATUS_MAPPING_ERROR   =11,
    CUBLAS_STATUS_EXECUTION_FAILED=13,
    CUBLAS_STATUS_INTERNAL_ERROR  =14
} cublasStatus_t;

/* Opaque handle to a cuBLAS library context (cublasCreate/Destroy). */
struct cublasContext;
typedef struct cublasContext *cublasHandle_t;

#define CUBLASAPI 

/* Which triangle of a symmetric/triangular matrix is referenced. */
typedef enum {
    CUBLAS_FILL_MODE_LOWER=0, 
    CUBLAS_FILL_MODE_UPPER=1
} cublasFillMode_t;

/* Whether a triangular matrix has an implicit unit diagonal. */
typedef enum {
    CUBLAS_DIAG_NON_UNIT=0, 
    CUBLAS_DIAG_UNIT=1
} cublasDiagType_t; 

/* Side on which the triangular operand appears in trsm/trmm. */
typedef enum {
    CUBLAS_SIDE_LEFT =0, 
    CUBLAS_SIDE_RIGHT=1
} cublasSideMode_t; 

/* Operation applied to an operand: none, transpose, conjugate transpose. */
typedef enum {
    CUBLAS_OP_N=0,  
    CUBLAS_OP_T=1,  
    CUBLAS_OP_C=2  
} cublasOperation_t;

/* Whether scalar parameters (alpha/beta) are read from host or device. */
typedef enum { 
    CUBLAS_POINTER_MODE_HOST   = 0,  
    CUBLAS_POINTER_MODE_DEVICE = 1        
} cublasPointerMode_t;



   /* acc_init( devicetype )
   Initializes the runtime system and sets the accelerator device
   type to use for this host thread.
   Usage example:
   acc_init(acc_device_type);
   */
 /*  typedef  enum  {
      acc_device_none,
      acc_device_default,
      acc_device_host,
      acc_device_not_host,
      acc_device_nvidia,
      acc_device_generic_opencl
   } acc_device_t;*/



 
/* Cholesky factorization (outer product version)
   ===============================================

     Input: R  , matrix to be factorized
            N  , order of the matrix
            LDA

     Output: R ,  Upper matrix of the factorization
*/
int acc_cholesky_outer(double * R, int n, int lda) {
	int i;
	int correct = 1;
    // This is not the optimal OpenACC implementation. Probably it would be better to
    //   split this into smaller kernels and avoid using for inside kernel. 
   // **** Discuss with ivan #pragma acc kernels loop pcopyout(R[n*n]) private(i) firstprivate(lda)
   // #pragma acc kernels loop private(i) firstprivate(lda)
	for (i = 0; i < n; i++) {
		{
            int k;
			R[M2V(i, i, lda)] = sqrt(R[M2V(i, i, lda)]);
            
			for (k = (i + 1); k < n; k++) {
				R[M2V(i, k, lda)] = R[M2V(i, k, lda)] / R[M2V(i, i, lda)];
			}
			if (R[M2V(i, i, lda)] <= 0.0f) {
				correct = 0;
			}
			if (correct) {
				for (k = i + 1; k < n ; k++) {
					for (int j = i + 1; j < n; j++) {
						R[M2V(k, j, lda)] = R[M2V(k, j, lda)] - R[M2V(i, j, lda)] * R[M2V(i, k, lda)];
					}
				}
			}
		}
	}

	return correct;
}

/* Cholesky factorization by blocks


     Input: R  , matrix to be factorized
            N  , order of the matrix
            block_size 

     Output: R ,  Upper matrix of the factorization

*/
int acc_cholesky_blocks(double * R, int n, int block_size) {
	int i, j, k;
	int lda = n;   // Just for now
	int n_block = (n / block_size) + (n % block_size?1:0);	
//    acc_init(acc_device_nvidia);
    /* Initialize runtime */
    // This should create an internal scope
    // Cublas initialization
    cublasStatus_t status;
    cublasHandle_t handle;
    status = cublasCreate(&handle);
    // Frangollo gives access to its internal cuda stream
    //  This functionality is available also for OpenCL, 
    //   but there is not an standard call for this
    // This is not supported by any other compiler, again AFAIK
//    cudaStream_t = FRG__getCmdQueue(); 
    // AFAIK, If using an stream, cublas calls are asyncronous 
//    status = cublasSetStream(handle, stream);
     
	{
	for (k = 0; k < n_block; k++) {
		int bs_k = MIN(block_size, n - k * block_size);
        double * cholBlockDiscont = R + M2V(k * block_size,k * block_size,lda);
        double * cholBlock = (double *) malloc(block_size * block_size * sizeof(double));
                   /* esta rutina tiene un kernels, miralo */
                if (!acc_cholesky_outer(cholBlockDiscont, bs_k, lda)) {
                     printf("*** Cholesky for submatrix [%d][%d] failed \n", k, k);
                }
        CONVERT_TO_LINEAR(cholBlockDiscont, lda, cholBlock, block_size, bs_k, bs_k);
        int blocks_in_row = (n - ((k+1)*block_size))/block_size;
                if (blocks_in_row <= 0) 
                    break;
     #pragma acc data copyin(cholBlock[block_size][block_size])
        {
                double * Panel = (double *) malloc(bs_k * block_size * blocks_in_row * sizeof(double));
                double * dtrsm_init = R + M2V(k * block_size,(k+1) * block_size,lda);   
                    CONVERT_TO_LINEAR(dtrsm_init, lda, Panel, block_size, bs_k, blocks_in_row * block_size);

#define DTRSM_ON_GPU
#ifdef DTRSM_ON_GPU
            #pragma acc data copy(Panel[bs_k][block_size*blocks_in_row]) 
#endif
                   {
                /** Copy the entire n_block*bs **/
                for (j = k + 1; j < n_block; j++) {
                    double ALPHA = 1.0;
                    int bs_j = MIN(block_size, n - j * block_size);
#ifdef DTRSM_ON_GPU
                  #pragma acc host_data use_device(cholBlock, Panel)  
                        status = cublasDtrsm(handle, CUBLAS_FILL_MODE_LOWER, 
                                    CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_T, 
                                    CUBLAS_OP_N, bs_k, bs_j, &ALPHA, 
                                    (double *) cholBlock, block_size, 
                                    (double *) (Panel) + (M2V(0, (j - (k+1)) * block_size,block_size)), block_size ) ;
#else
               //     printf("** B %p \n",(B + M2V( 0, (j - (k+1)) * block_size,block_size)));
                   dtrsm_("L", "U", "T", "N",
                                    &bs_k, &bs_j, &ALPHA, 
                                    (double *) cholBlock, &block_size, 
                                    (double *) (Panel + M2V( 0, (j - (k+1)) * block_size,block_size)), &block_size ) ;
#endif
                        }
                
 #define DGEMM_ON_GPU

                /**** Prepare dgemm **/
                for (i = k + 1; i < n_block ; i++) {
                        double ALPHA = -1.0;
                        double BETA = 1.0;

                        int bs_i = MIN(block_size, n - i * block_size);
                    for (j = i + 1; j < n_block; j++) {
                        // ----- Scalar R[M2V(i, j, n)] = R[M2V(i, j, n)] - R[M2V(k, j, n)] * R[M2V(k, i, n)];
                        // ----- Block Rij = Rij - Rki^T Rkj
                        // Blas:
                        //  1. Rij = Rij - Rki^T * Rkj
                        int bs_j = MIN(block_size, n - j * block_size);
//                        double * fB = R + M2V(k * block_size, j * block_size, lda);
                        double * fC = R + M2V(i * block_size, j * block_size, lda);
//                        double * B = (double * ) malloc(block_size * block_size * sizeof(double));
                        double * C = (double * ) malloc(block_size * block_size * sizeof(double));

//                        CONVERT_TO_LINEAR(fB, lda, B, block_size, block_size, block_size);
                        CONVERT_TO_LINEAR(fC, lda, C, block_size, block_size, block_size);
#ifdef DGEMM_ON_GPU
               #pragma acc data copy(C[block_size][block_size])
                   {
                       #pragma acc host_data use_device(Panel, C)  
                        status = cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, 
                                            bs_i, bs_j, bs_k, &ALPHA, 
                                            (double *) (Panel) + (M2V(0, (i - (k+1)) * block_size, block_size)),
                                            block_size, (double *) (Panel) + (M2V(0,((j - (k + 1)) * block_size),block_size)),
                                            block_size, &BETA, (double *)C ,  block_size);
                    }
#else
                    dgemm_("T", "N", 
                                            &bs_i, &bs_j, &bs_k, &ALPHA, 
                                            // (double *)A, 
                                            (double *) (Panel) + (M2V(0, (i - (k+1)) * block_size, block_size)),
                                            &block_size, (double *) (Panel) + (M2V(0, ((j - (k + 1))*block_size), block_size)), 
                                            &block_size, &BETA, (double *)C ,  &block_size);

#endif
                        CONVERT_TO_LINEAR(C, block_size, fC, lda, block_size, block_size);
                    /*free(B);*/free(C);
			        } /* block mxm */ 
#ifdef DGEMM_ON_GPU
                {
                double * fC = R + M2V(i * block_size, i * block_size, lda);
                double * C = (double * ) malloc(block_size * block_size * sizeof(double));
                CONVERT_TO_LINEAR(fC, lda, C, block_size, block_size, block_size);

               #pragma acc data copy(C[block_size][block_size]) 
                        {
                        #pragma acc host_data use_device(Panel, C)
                        cublasDsyrk(handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_T, bs_i, bs_k, 
                            &ALPHA, 
                            (double *) (Panel) + (M2V(0, (i - (k+1)) * block_size, block_size)), block_size, 
                            &BETA, 
                            (double *) (C) , block_size); 
                        }

                CONVERT_TO_LINEAR(C, block_size, fC, lda, block_size, block_size);
                free(C);
                }

#else
                dsyrk_("U", "T", &bs_i, &bs_k, 
                    &ALPHA, 
                    /* R + M2V(k * block_size, i * block_size, lda)*/
                    (double *) (Panel) + (M2V(0, (i - (k+1)) * block_size, block_size)), &block_size, 
                    &BETA, 
                    R + M2V(i * block_size, i * block_size, lda) , &lda); 
#endif

		    } /* tailing submatrix loop end */

                    } /* copy Panel */
                   CONVERT_TO_LINEAR(Panel, block_size, dtrsm_init, lda, bs_k, blocks_in_row * block_size);
                    free(Panel);

            } /* data cholBlock */
        	}
    }
    FRG__printStats();
    // Destroy here all cublas stuff
    // This should destroy runtime context and clear scope
    //    cublasShutdown();
    cublasDestroy(handle);
	return 1;
}

