#include <CL/sycl.hpp>
#include <dpct/dpct.hpp>
#include <iostream>
// #include <hip/hip_runtime_api.h> // hipMalloc, hipMemcpy, etc.
#include <stdio.h>            // printf
#include <stdlib.h>           // EXIT_FAILURE
// #include <hip/hip_runtime.h>

// Selects how the sparse matrix operand is applied in the SpMV routines.
enum sparse_operation {
        operation_none      = 0,  // multiply by the matrix as stored
        operation_transpose = 1   // multiply by the transpose of the matrix
};


/*
 * Device kernel body: computes y = alpha * A * x + beta * y, where A is an
 * m-by-n sparse matrix in CSR form (rowptr / colindex / value).
 *
 * Rows are partitioned contiguously over all launched work-items: every
 * work-item owns m / total rows, and the first m % total work-items each
 * take one additional row, so the full matrix is covered for any nd-range.
 *
 * Notes:
 *  - `trans` is accepted but ignored: only the non-transposed product is
 *    computed by the active code path.
 *  - `stream_ct1` and `yCSRv` are unused here; they remain in the signature
 *    for the host-side launcher, which still allocates them (leftovers of a
 *    CSR-vector variant that was experimented with and then disabled).
 *  - alpha and beta are integers by interface; they are promoted to double
 *    in the axpby update below.
 */
void device_sparse_spmv(int        trans,
               const int               alpha,
	       const int               beta,
                     int               m,
                     int               n,
               const int*              rowptr,
	       const int*              colindex,
	       const double*           value,
               const double*           x,
                     double*           y
			,
                     sycl::nd_item<3> item_ct1,
                     const sycl::stream &stream_ct1,
                     dpct::accessor<double, dpct::local, 2> yCSRv)
{
        // Flat global work-item index and total number of work-items launched.
        const int tid = item_ct1.get_local_id(2) +
                        item_ct1.get_local_range().get(2) *
                            item_ct1.get_group(2);
        const int total_threads = item_ct1.get_local_range().get(2) *
                                  item_ct1.get_group_range(2);

        // Base rows per work-item, plus how many work-items get one extra row.
        const int rows_base  = m / total_threads;
        const int rows_extra = m % total_threads;

        // Contiguous [row_begin, row_end) range owned by this work-item.
        int row_begin;
        int row_end;
        if (tid < rows_extra) {
                row_begin = tid * (rows_base + 1);
                row_end   = row_begin + (rows_base + 1);
        } else {
                row_begin = tid * rows_base + rows_extra;
                row_end   = row_begin + rows_base;
        }

        for (int i = row_begin; i < row_end; ++i) {
                // Dot product of CSR row i with x, accumulated in double.
                double row_dot = 0;
                for (int j = rowptr[i]; j < rowptr[i + 1]; ++j)
                        row_dot += value[j] * x[colindex[j]];
                y[i] = alpha * row_dot + beta * y[i];
        }

        // Tuning constants for the (currently disabled) CSR-vector variant in
        // which `worksOnOneRow` lanes cooperate on one row and each work-group
        // processes `rowPerBlock` rows via the `yCSRv` local-memory scratch.
        // The macros are kept because they document the 8x32 local-accessor
        // shape the host launcher allocates, and because #defines made inside
        // a function body remain visible to the rest of the translation unit.
        #define worksOnOneRow 32
        #define rowPerBlock 8
        // Disabled CSR-vector sketch (for reference):
        //   threadIdInRow = local_id % worksOnOneRow;
        //   rowIdInBlock  = local_id / worksOnOneRow;
        //   i             = group_id * rowPerBlock + rowIdInBlock;
        //   each lane partial-sums value[j] * x[colindex[j]] with stride
        //   worksOnOneRow into yCSRv[rowIdInBlock][threadIdInRow], then lane 0
        //   reduces the row and writes y[i] = alpha * sum + beta * y[i].
}


// void CPU_helped_spmv(int alpha,int beta,dtype *value,int *rowptr,int *colindex,int m,int n,int a,dtype *x,dtype *y){
//     //calculate the matrix-vector multiply where matrix is stored in the form of CSR
//     for(int i=0;i<m;i++){
//         dtype y0=0;
//         for(int j=rowptr[i];j<rowptr[i+1];j++)
//             y0+=value[j]*x[colindex[j]];
//         //printf("%d,%d,%f,%f\n",alpha,beta,y0,y[i]);
//         y[i]=alpha*y0+beta*y[i];
//     }
//     return;
// }


void  sparse_spmv(int                  htrans,
               const int               halpha,
	       const int               hbeta,
                     int               hm,
                     int               hn,
               const int*              hrowptr,
	       const int*              hcolindex,
	       const double*           hvalue,
               const double*           hx,
                     double*           hy
			)
{
        // printf("value: %lx\n",hvalue); //csr存储格式稀疏值
        // printf("colindex: %lx\n",hcolindex);
        // printf("rowptr: %lx\n",hrowptr);
        dpct::get_default_queue().submit([&](sycl::handler &cgh) {
                sycl::stream stream_ct1(64 * 1024, 80, cgh);

                sycl::range<2> yCSRv_range_ct1(8 /*rowPerBlock*/,
                                               32 /*worksOnOneRow*/);

                sycl::accessor<double, 2, sycl::access::mode::read_write,
                               sycl::access::target::local>
                    yCSRv_acc_ct1(yCSRv_range_ct1, cgh);

                cgh.parallel_for(
                    sycl::nd_range<3>(sycl::range<3>(1, 1, 375) *
                                          sycl::range<3>(1, 1, 256),
                                      sycl::range<3>(1, 1, 256)),
                    [=](sycl::nd_item<3> item_ct1) {
                            device_sparse_spmv(
                                htrans, halpha, hbeta, hm, hn, hrowptr,
                                hcolindex, hvalue, hx, hy, item_ct1, stream_ct1,
                                dpct::accessor<double, dpct::local, 2>(
                                    yCSRv_acc_ct1, yCSRv_range_ct1));
                    });
        });
}