#include "ccl.h"
using namespace SCL;
using std::ostream;
using std::cout;
using std::endl;

CCL::CCL()
{
    cl_int status;

    // Count the available OpenCL platforms.
    status = clGetPlatformIDs(0, NULL, &NbPlatforms);
    assert (status == CL_SUCCESS);
    assert(NbPlatforms > 0);
    std::cerr<<"\nNbPlatforms="<<NbPlatforms<<endl;

    // allocate the list of platforms
    cl_platform_id* Platforms = new cl_platform_id[NbPlatforms];

    // fill it
    status = clGetPlatformIDs(NbPlatforms, Platforms, NULL);
    assert (status == CL_SUCCESS);

    // Print the vendor of each platform.
    // BUG FIX: the original loop always queried Platforms[0].
    char pbuf[100];
    std::cerr <<"for NbPlatforms\n";
    for (int i = 0; i < (int)NbPlatforms; ++i) {
        status = clGetPlatformInfo(Platforms[i],
                                   CL_PLATFORM_VENDOR,
                                   sizeof(pbuf),
                                   pbuf,
                                   NULL);
        assert (status == CL_SUCCESS);

        std::cerr << pbuf <<std::endl;
    }

    // Count the devices of the first platform (generally 1!).
    // BUG FIX: the original counted CL_DEVICE_TYPE_GPU devices but then
    // filled the list with CL_DEVICE_TYPE_ALL; the count must use the
    // same device type as the fill below, or the two calls disagree.
    status = clGetDeviceIDs(Platforms[0],
                            CL_DEVICE_TYPE_ALL,
                            0,
                            NULL,
                            &NbDevices);
    assert (status == CL_SUCCESS);
    assert(NbDevices > 0);
    std::cerr<<"NbDevices="<<NbDevices<<endl;
    Devices = new cl_device_id[NbDevices];

    // fill the devices list
    status = clGetDeviceIDs(Platforms[0],
                            CL_DEVICE_TYPE_ALL,
                            NbDevices,
                            Devices,
                            NULL);
    assert (status == CL_SUCCESS);

    // the platform list is no longer needed (fixes a memory leak)
    delete[] Platforms;

    // Report the type of device 0.
    numdev = 0;
    status = clGetDeviceInfo(
                Devices[numdev],
                CL_DEVICE_TYPE,
                sizeof(cl_device_type),
                (void*)&DeviceType,
                NULL);
    assert (status == CL_SUCCESS);

    std::cerr  << "\nThe device is ";
    if (DeviceType == CL_DEVICE_TYPE_CPU) {  std::cerr  << "a CPU\n";};
    if (DeviceType == CL_DEVICE_TYPE_GPU) {  std::cerr  << "a GPU\n";};

    // Select the device used for the rest of the run: the second one when
    // it exists, as in the original code.
    // BUG FIX: the original unconditionally set numdev=1, which reads past
    // the end of Devices when only one device is present.
    numdev = (NbDevices > 1) ? 1 : 0;
    status = clGetDeviceInfo(
                Devices[numdev],
                CL_DEVICE_TYPE,
                sizeof(cl_device_type),
                (void*)&DeviceType,
                NULL);
    assert (status == CL_SUCCESS);

    std::cerr  << "\nThe device is ";
    if (DeviceType == CL_DEVICE_TYPE_CPU) {  std::cerr  << "a CPU\n";};
    if (DeviceType == CL_DEVICE_TYPE_GPU) {  std::cerr  << "a GPU\n";};

    // size of the device global memory
    status = clGetDeviceInfo(
                Devices[numdev],
                CL_DEVICE_GLOBAL_MEM_SIZE,
                sizeof(cl_ulong),
                (void*)&MemGpu,
                NULL);
    assert (status == CL_SUCCESS);

    // local memory size
    status = clGetDeviceInfo(
                Devices[numdev],
                CL_DEVICE_LOCAL_MEM_SIZE,
                sizeof(cl_ulong),
                (void*)&MemLoc,
                NULL);
    assert (status == CL_SUCCESS);

    // number of compute units
    status = clGetDeviceInfo(
                Devices[numdev],
                CL_DEVICE_MAX_COMPUTE_UNITS,
                sizeof(cl_uint),
                (void*)&NbProcs,
                NULL);
    assert (status == CL_SUCCESS);

    // maximum work-group size
    status = clGetDeviceInfo(
                Devices[numdev],
                CL_DEVICE_MAX_WORK_GROUP_SIZE,
                sizeof(size_t),
                (void*)&NbWorksMax,
                NULL);
    assert (status == CL_SUCCESS);

    // create a context on the selected device
    Context = clCreateContext(0,
                              1,
                              &Devices[numdev],
                              NULL,
                              NULL,
                              &status);
    assert (status == CL_SUCCESS);

    // create the command queue used for all transfers and kernel launches
    CommandQueue = clCreateCommandQueue(Context,
                                        Devices[numdev],
                                        0,
                                        &status);
    assert (status == CL_SUCCESS);
}

// Release the OpenCL objects created by the constructor.
// NOTE(review): Program/Kernel releases are left disabled, as in the
// original; they are only safe once InitKernel() is guaranteed to have
// run — TODO confirm and enable, or track initialization state.
CCL::~CCL() {
    // Shutdown and cleanup
  // clReleaseProgram(Program);
  //  clReleaseKernel(Kernel);
   clReleaseCommandQueue(CommandQueue);
    clReleaseContext(Context);
}


// Print a summary of the selected device and its capabilities on stderr.
void CCL::display() {
     std::cerr  << "The device is ";
    if (DeviceType == CL_DEVICE_TYPE_CPU) {  std::cerr  << "a CPU";};
    if (DeviceType == CL_DEVICE_TYPE_GPU) {  std::cerr  << "a GPU";};
    std::cerr <<endl;
     std::cerr  << "Computation data:"<<endl;
     // BUG FIX: MemGpu/1024/1024 yields megabytes, but the original
     // labeled the value "Gb".
     std::cerr  << "Mem GPU=" << MemGpu/1024/1024<< "Mb"<<endl;
     std::cerr  << "Nb Compute Units=" << NbProcs <<endl;
     std::cerr  << "Max number of work-items=" << NbWorksMax <<endl;
     std::cerr  << "local memory=" << MemLoc/1024<< "kb"<<endl;
     std::cerr  << "Plateforms:"<<NbPlatforms << endl;
     std::cerr  << "Devices:"<<NbDevices << endl;  // terminate the last line
}

void CCL::InitKernel(){

  string prog;   // programme
  string ligne;   // pour la lecture des lignes du fichier

  // lecture des paramètres
  std::ifstream fichierprog("param.h",std::ios::in);
  assert(fichierprog);  // s'assure que le fichier existe
  while(!fichierprog.eof()){
    getline(fichierprog,ligne);
    prog=prog+ligne+"\n";
  }

  fichierprog.close();
  // lecture du code source
  //fichierprog.open("transport.cl",ios::in);
  fichierprog.open("transport-kernel.cpp",std::ios::in);
  assert(fichierprog);  // s'assure que le fichier existe


  while(!fichierprog.eof()){
    getline(fichierprog,ligne);
    prog=prog+ligne+"\n";
  }


  cl_int err;

  // Create the compute program from the source buffer
  Program = clCreateProgramWithSource(Context, 1, (const char **) & prog, NULL, &err);
  assert(Program);




  // Build the program executable
  err = clBuildProgram(Program, 0, NULL, NULL, NULL, NULL);
  if (err != CL_SUCCESS)
    {
      size_t len;
      char buffer[2048];
      printf("Error: Failed to build program executable!\n");
      clGetProgramBuildInfo(Program, Devices[numdev], CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, &len);
      printf("%s\n", buffer);
      assert( err == CL_SUCCESS);
    }

  // Create the compute kernel in the program we wish to run
  Kernel = clCreateKernel(Program, "transport", &err);
  if (!Kernel || err != CL_SUCCESS)
    {
      printf("Error: Failed to create compute kernel!\n");
      assert(err == CL_SUCCESS);
    }

  // Create the compute kernel in the program we wish to run
  Update = clCreateKernel(Program, "update", &err);

  // Create the compute kernel in the program we wish to run
  Iter = clCreateKernel(Program, "nextiter", &err);

  // fill the initial data
  for(int i=0;i<_N; i++){
    float pi=4.*atan(1.);
    wa[i]=sin(2*pi*i/_N);
    wb[i]=wa[i];
  }

  for(int i=0;i<_N; i++){
    waverif[i]=wa[i];
    wbverif[i]=wa[i];
  }

  iter=0;



 // copy the data into the gpu
  cout <<"copie dans le gpu"<<endl;
  // memory allocation
  wa_gpu  = clCreateBuffer(Context,
                           CL_MEM_READ_WRITE,
                           sizeof(cl_float) * _N,
                           NULL,
                           NULL);
  assert(wa_gpu);
  wb_gpu  = clCreateBuffer(Context,
                           CL_MEM_READ_WRITE,
                           sizeof(cl_float) * _N,
                           NULL,
                           NULL);
  assert(wb_gpu);

  iter_gpu  = clCreateBuffer(Context,
                           CL_MEM_READ_WRITE,
                           sizeof(cl_uint),
                           NULL,
                           NULL);
  assert(iter_gpu);

  //copy
  err = clEnqueueWriteBuffer(CommandQueue, wa_gpu, CL_TRUE, 0, sizeof(float) * _N, wa, 0, NULL, NULL);
  assert(err == CL_SUCCESS);
  err = clEnqueueWriteBuffer(CommandQueue, wb_gpu, CL_TRUE, 0, sizeof(float) * _N, wb, 0, NULL, NULL);
  assert(err == CL_SUCCESS);
  err = clEnqueueWriteBuffer(CommandQueue, iter_gpu, CL_TRUE, 0, sizeof(cl_uint), &iter, 0, NULL, NULL);
  assert(err == CL_SUCCESS);


  // some verifications
  // memory size or one finite volume
  size_t memvf=sizeof(cl_float);


  // approximate number of finite volumes
  // than can be stored into the local memory
  int nvfcache=MemLoc/memvf;  // nb approximatif de vf stockables dans un cache

  cout << "nb of possible FV stored in the cache  "<<nvfcache<<endl;

  cout << "memory for one FV=" <<memvf<<endl;

  NbWorks=_NBWORKS;
  NbWorks= std::min(nvfcache/2,(int)NbWorks);
  NbGlobal = _NBLOCKS*_NBWORKS;
  NbWorks= std::min((int)NbGlobal,(int)NbWorks);

  // if computation on cpu we can use only one work-item/work-group
  if (DeviceType == CL_DEVICE_TYPE_CPU) NbWorks=1;


  cout <<"NbWorks="<<NbWorks <<" NbGlobal="<<NbGlobal<<" _N="<<_N<<endl;

  assert((int)NbGlobal % (int)NbWorks == 0);
  assert(NbWorks <= NbWorksMax);

  // Set the arguments to our compute kernel
  err  = clSetKernelArg(Kernel, 0, sizeof(cl_mem), &wa_gpu);
  assert(err == CL_SUCCESS);
  err  = clSetKernelArg(Kernel, 1, sizeof(cl_mem), &wb_gpu);
  assert(err == CL_SUCCESS);
  err  = clSetKernelArg(Kernel, 2, sizeof(cl_mem), &iter_gpu);
  assert(err == CL_SUCCESS);
  // Set the arguments to our compute kernel
  err  = clSetKernelArg(Update, 0, sizeof(cl_mem), &wa_gpu);
  assert(err == CL_SUCCESS);
  err  = clSetKernelArg(Update, 1, sizeof(cl_mem), &wb_gpu);
  assert(err == CL_SUCCESS);

  err  = clSetKernelArg(Iter, 0, sizeof(cl_mem), &iter_gpu);
  assert(err == CL_SUCCESS);


}

// Run the transport computation on the GPU, time it, redo the same scheme
// sequentially on the CPU for reference, compare the two results, dump
// both solutions to files "cpu" and "gpu", and print the speedup.
void CCL::Compute(){


      // Execute the kernel over the entire range of our 1d input data set
      // using the maximum number of work group items for this device
      //
      //NbGlobal = _N;
      cl_int err;

      //NbWorks=512;

      t=0.;
      int correct = 0;    // number of GPU values matching the CPU reference
      float dx=1.f/_N;   // space step
      float dt=dx*0.8f;   // time step

      // GPU timing
      clock_t t1,t2;
      t1=_CHRONO;
      cout <<"début du calcul..."<<endl;
      // NOTE(review): this local counter shadows the member `iter` set in
      // InitKernel(); it only counts the time-loop iterations printed below.
      int iter=0;
      while(t<0.25) {
        t=t+dt;
        iter++;
        // transport step on the GPU
        err = clEnqueueNDRangeKernel(CommandQueue, Kernel, 1, NULL, &NbGlobal, &NbWorks, 0, NULL, NULL);
        assert(err == CL_SUCCESS);
        clFinish(CommandQueue);  // wait for the end of the iteration

        // increment the iteration counter stored on the GPU side
        err = clEnqueueTask (CommandQueue,Iter,0,NULL,NULL);
        assert(err == CL_SUCCESS);
        clFinish(CommandQueue);

        // update step — disabled; presumably handled inside the transport
        // kernel — TODO confirm against transport-kernel.cpp
        //err = clEnqueueNDRangeKernel(CommandQueue, Update, 1, NULL, &NbGlobal, &NbWorks, 0, NULL, NULL);
        //assert(err == CL_SUCCESS);
        //clFinish(CommandQueue);  // wait for the end of the iteration

      }
      cout<<"iter="<<iter<<endl;
      // stop the GPU timer
      t2=_CHRONO;
      float timeunit=_TIMEUNIT;
      float tgpu=(t2-t1)*timeunit;
      cout <<"temps gpu="<<tgpu<<" s"<<endl;
      cout <<"tfin gpu=" <<t<<endl;

      // Read back the results from the device to verify the output
      err = clEnqueueReadBuffer( CommandQueue, wa_gpu, CL_TRUE, 0, sizeof(float) * _N, wa, 0, NULL, NULL );
      assert (err == CL_SUCCESS);

      // end of gpu work

      // CPU timer: recompute the same scheme sequentially for reference
      t=0;
      t1=_CHRONO;
      while(t<0.25) {
        // first-order update of each cell from its left neighbour
        for(int i = 0; i < _N; i++) {
          if(i>0)
            wbverif[i]=waverif[i]-dt/dx*(waverif[i]-waverif[i-1]);
        }
        for(int i = 0; i < _N; i++) {
          if(i>0)
            waverif[i]=wbverif[i];
        }
        t=t+dt;
      }

      // stop the CPU timer
      t2=_CHRONO;
      float tcpu=(t2-t1)*timeunit;
      cout <<"temps cpu="<<tcpu<<" s"<<endl;

      // check that the GPU and CPU results are close
      for(int i = 0; i < _N; i++) {
        if (fabs( wa[i]-wbverif[i]) < 1e-4f) correct++;
      }
      cout <<"tfin cpu=" <<t<<endl;
      cout << "Computed "<<correct<<"/"<<_N<<"correct values"<<endl;

      // dump both solutions as (x, value) pairs for plotting
      std::ofstream cpu("cpu");
      std::ofstream gpu("gpu");

      for(int i = 0; i < _N; i++) {
        cpu << i*dx<<" "<<wbverif[i]<<endl;
        gpu << i*dx<<" "<<wa[i]<<endl;
      }


      cpu.close();
      gpu.close();

      cout<<"speedup="<<tcpu/tgpu<<endl;

    }




