/*
  Copyright (C) 2011, Kyungjoo Kim
  
  This file is part of LINAL (LINear ALgebra)
  
  All rights reserved.

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.
  
  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.
  
  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
  Also add information on how to contact you by electronic and paper mail.
  
  
  Kyungjoo Kim
  iamkyungjoo@gmail.com
*/
#ifndef LINAL_FLAT_GPU_HXX
#define LINAL_FLAT_GPU_HXX

// The GPU utilizes a software cache. The number of data transfers limits the
// performance. Reuse data already transferred to the GPU as much as possible.
//#define LINAL_GPU_CACHE_ENABLE 

// Safest option to prevent race condition between CPU and GPU.
//#define LINAL_GPU_CACHE_READ_ONLY 

// 1 GPU vs Multi CPUs
// Case 1 : GPU >>>>>>>> Multi CPUs
//   set_n_thread(0). CPUs only drag down the performance. 
//   For example, Fermi has peak performance of 500 Gflops. Even though 12 CPU
//   can perform total 100 Gflops, Fermi is way faster. Hence All BLAS operation
//   should be done only in GPUs.
// Case 2 : GPU == Multi CPUs
//   set_n_thread(n_thread) n_thread can be varied based on the user problem.
//   Single matrix n_thread = 1 : this will collect all available threads as one, 
//   then put the resources into super block factorization.
//   Multiple matrix n_thread = actual threads number : it needs to catch multiple 
//   matrices together at the beginning in order to feed more tasks which can be
//   processed.
// Case 3 : GPU < Multi CPUs
//   Why ? Want to use GPU ????

//#define LINAL_GPU_OVERLAPPING_ENABLE

namespace linal {

  // Opaque handle aliases: each project class Foo_ is paired with a
  // pointer alias Foo used as a handle type in interfaces.
  typedef class  Key_GPU_*    Key_GPU;
  typedef class  Matrix_*     Matrix;
  typedef class  Flat_GPU_*   Flat_GPU;
  typedef class  Cache_GPU_*  Cache_GPU;

  // FLAME object view descriptor (defined by libflame).
  typedef struct FLA_Obj_view FLA_Obj;

  // -------------------------------------------------------------- 
  // ** Global variable handling
  // Device selection and synchronization for the calling thread / all threads.
  extern int  get_device_gpu               ();
  extern void sync_threads_gpu             ();
  extern void sync_threads_gpu_all         ();
    
  extern int  get_device_gpu               (int thread);
  extern void set_device_gpu               (int device);

  // Task-coarsening factor (getter/setter pair).
  extern int  get_n_coarseness             ();
  extern void set_n_coarseness             (int coarsening );

  // Matrix split count (getter/setter pair).
  extern int  get_n_split                  ();
  extern void set_n_split                  (int split );

  // CPU-side block size used for blocked algorithms.
  extern int  get_cpu_blocksize            ();
  extern void set_cpu_blocksize            (int b_mn);

  // CPU/GPU work-balance ratio.
  extern int  get_work_balance             ();
  extern void set_work_balance             (int balance);

  // Number of CPU worker threads (0 disables the CPU path; see notes above).
  extern int  get_n_thread                 ();
  extern void set_n_thread                 (int n_thread);

  extern int  get_n_gpu                    ();

  // Runtime setup/teardown of the per-device caches.
  extern void init_gpu                     (int n_thread);
  extern void push_back_gpu                (int device, 
                                            int n_item_bin, int n_item_work);
  extern void init_gpu                     (int n_thread, int n_gpu, 
                                            int n_item_bin, int n_item_work);
  extern void finalize_gpu                 ();
  
  // Mark every cached matrix dirty (forces re-upload on next use).
  extern void flush_gpu                    ();

  // Cache hit/miss accounting and reporting.
  extern void reset_gpu_data_access_counter();
  extern void count_gpu_data_access        (bool is_cache);
  extern void get_gpu_data_access_counter  (int &cache_hit, int &gpu_hit);
  extern void disp_gpu_data_access         (FILE *stream);

  // Invalidate the cached GPU copy of a matrix (overloads per matrix kind).
  extern void set_flat_gpu_dirty           (FLA_Obj A);
  extern void set_flat_gpu_dirty           (Hier_ A);
  extern void set_flat_gpu_dirty           (Flat_ A);

  // CPU-side and per-device GPU-side locking primitives.
  extern bool is_locked_cpu                ();
  extern void lock_cpu                     ();
  extern void unlock_cpu                   ();

  extern bool is_locked_gpu                (int device);
  extern void lock_gpu                     (int device);
  extern void unlock_gpu                   (int device);

  // CPU<->GPU coherence hooks around operations on cached (bin) matrices.
  extern void pre_sync_flat_gpu_in_bin     (int device, FLA_Obj A);
  extern void post_sync_flat_gpu_in_bin    (int device, FLA_Obj A);
  
  // Obtain a device-resident matrix from the read-only bin / read-write work pool.
  extern Flat_GPU_& pull_flat_gpu_from_bin (int device, FLA_Obj A);
  extern Flat_GPU_& pull_flat_gpu_from_work(int device, int index);

  // Allocate / release the device buffers backing every cache slot.
  extern void create_flat_gpu              (int type, int m, int n);
  extern void free_flat_gpu                ();

  // -------------------------------------------------------------- 
  // ** Key to Cache
  /*!
    Key_GPU_ identifies a matrix view inside the GPU software cache.
    A key is the triple (row offset, column offset, CPU base-buffer
    address): two FLA_Obj views refer to the same data iff all three
    agree.  Keys provide a strict weak ordering so they can be used as
    std::map keys (see Cache_GPU_::table).
  */
  class Key_GPU_ {
  private:
  protected:
    int offm, offn;   // row / column offset of the view
    long buffer;      // CPU base-buffer address stored as an integer
  public:
    /*!
      Default constructor.
    */
    Key_GPU_() : offm(0), 
                 offn(0), 
                 buffer(0) { }
    /*!
      Constructor with variables.
      \param offm   [in] row offset of the view
      \param offn   [in] column offset of the view
      \param buffer [in] CPU base-buffer address
    */
    // BUG FIX: the original initializer list read offm(0), offn(0),
    // silently discarding both offset arguments; e.g. the dummy key
    // Key_GPU_(-1,-1,0) used in Cache_GPU_::init collapsed to (0,0,0).
    Key_GPU_(int offm, int offn, void *buffer) : offm(offm), 
                                                 offn(offn),
                                                 buffer((long)buffer) { }
    /*!
      Construct the cache key of an FLA_Obj view.
      \param A [in] FLA_Obj whose (offm, offn, buffer) identify it
    */
    Key_GPU_(FLA_Obj A) : offm(A.offm),
                          offn(A.offn),
                          buffer((long)FLA_Obj_buffer_at_view(A)) { }

    virtual ~Key_GPU_() { }

    /*! Lexicographic ordering on (offm, offn, buffer); required by std::map.
        Uses direct comparisons rather than the original subtraction idiom,
        which could overflow (undefined behavior) on the long buffer field. */
    bool operator<(const Key_GPU_ &b) const {
      if (this->offm   != b.offm  ) return (this->offm   < b.offm  );
      if (this->offn   != b.offn  ) return (this->offn   < b.offn  );
      if (this->buffer != b.buffer) return (this->buffer < b.buffer);
      return false;
    }
    /*! Equality: all three key components match. */
    bool operator==(const Key_GPU_ &b) const {
      return ( (this->offm == b.offm) && 
               (this->offn == b.offn) && 
               (this->buffer == b.buffer) );
    }
    /*! Inequality, defined in terms of operator==. */
    bool operator!=(const Key_GPU_ &b) const {
      return !((*this) == b);
    }
  };

  // -------------------------------------------------------------- 
  // ** Flat GPU Matrix
  /*!
    Flat_GPU_ is a flat (scalar-element) matrix whose data buffer is
    stored only in GPU memory.  The CPU-side FLA_Obj descriptor is kept
    for dimension/offset bookkeeping while the raw buffer lives on the
    device and is moved with cublasSetMatrix / cublasGetMatrix.

    State flags:
    - dirt : the GPU buffer is stale w.r.t. the CPU copy and must be
             re-uploaded before use.
    - use  : the buffer is currently claimed by a thread, so the cache
             must not evict it.
  */
  class Flat_GPU_ : public Matrix_ {
  private:
  protected:
    // ownership of the device allocation / staleness / in-use flags
    bool buffer_gpu_created, dirt, use;
    // device memory pointer obtained from cublasAlloc
    // NOTE(review): __device__ on a non-static data member is unusual;
    // kept as-is — confirm against the project's nvcc configuration.
    __device__ void *buffer_gpu;

  public:

    /*!
      Default constructor.
    */
    Flat_GPU_() : Matrix_(),
                  buffer_gpu_created(false), 
                  dirt(true),
                  use(false),
                  buffer_gpu(NULL) {  }

    /*!
      Wrapping constructor - wraps an existing FLA_Obj without taking
      ownership of it (this->created stays 0).
      \param obj [in] Input FLA_Obj; must have scalar element type
    */
    Flat_GPU_(FLA_Obj obj) : Matrix_(),
                             buffer_gpu_created(false), 
                             dirt(true),
                             use(false),
                             buffer_gpu(NULL) { 
      // do i need this asserting ?
      LINAL_ERROR(obj.base != NULL &&
                  obj.base->elemtype == FLA_SCALAR, 
                  LINAL_ERROR_CHECK_SCALAR_ELEMTYPE);

      this->fla = obj; 
      this->created = 0;
    }

    /*! 
      Copy constructor.  Shares the source's GPU buffer pointer but
      never owns it (buffer_gpu_created stays false), so destroying the
      copy does not free the device memory.
      \param obj [in] Input Flat_GPU_
    */
    Flat_GPU_(const Flat_GPU_& obj) : Matrix_(obj),
                                      buffer_gpu_created(false), 
                                      dirt(true),
                                      use(false),
                                      buffer_gpu(NULL)  { 
      this->buffer_gpu = obj.buffer_gpu;
      this->created =0;
    }

    /*! Destructor.  Releases the FLA descriptor and, if owned, the
        device buffer.
    */
    virtual ~Flat_GPU_() { this->free(); }

    /*! Set the flag dirt
     */
    inline void set_dirt(bool dirt) { this->dirt = dirt; }
    
    /*! See the buffer is dirty
     */
    inline bool is_dirty()          { return this->dirt; }
    
    /*! Set the flag use.
     */
    inline void set_use(bool use)   { this->use = use; }


    /*! See the buffer is currently used by any encountered thread.
     */
    inline bool is_in_use()         { return this->use; }

    /*! Set / get the raw device buffer pointer. */
    inline void  set_buffer(void *buf_ptr) { this->buffer_gpu = buf_ptr; }
    inline void* get_buffer()              { return (double*)this->buffer_gpu; }


    /*! Create m by n Flat_ matrix.
      \param type [in] LINAL_INT, LINAL_REAL, or LINAL_COMPLEX
      \param m    [in] number of rows
      \param n    [in] number of columns
    */
    inline void create(int type, int m, int n) {
      this->create_without_buffer( type, m, n );
      this->create_buffer();
    }

    /*! Create m by n Flat_ matrix without buffer.
      \param type [in] LINAL_INT, LINAL_REAL, or LINAL_COMPLEX
      \param m    [in] number of rows
      \param n    [in] number of columns
    */
    inline void create_without_buffer(int type, int m, int n) {
      // either m or n is 0, do not create
      if (!m || !n) return;

      LINAL_ERROR(!this->is_created(),          LINAL_ERROR_OBJ_CREATED );
      LINAL_ERROR( check_all_scalar_type(type), LINAL_ERROR_CHECK_SCALAR );

      FLA_Obj_create_without_buffer(type, m, n, &this->fla);
      this->created = 1;
    }
    
    /*! Free the matrix. If buffer was allocated, free it.
    */
    inline void free() {
      if (this->is_created()) {
        FLA_Obj_free_without_buffer(&this->fla);
        this->created = 0;
      }
      if (this->buffer_gpu_created)
        this->free_buffer();
    }

    /*! Create buffer on the gpu (cublasAlloc); marks this object as
        the owner of the allocation.
    */
    inline void create_buffer() {
      LINAL_ERROR( !is_base_null(), 
                   LINAL_ERROR_BASE_OBJ_IS_NULL );

      cublasStatus status = CUBLAS_STATUS_SUCCESS;

      // create m by n matrix
      status = cublasAlloc( this->get_m()*this->get_n(),
                            this->get_data_size(),
                            (void**)&this->buffer_gpu );

      LINAL_ERROR( status == CUBLAS_STATUS_SUCCESS,
                   LINAL_ERROR_CUBLAS_FAIL_TO_ALLOC );
      this->buffer_gpu_created = true;
    }

    /*! Free buffer on the gpu
    */
    inline void free_buffer() {

      cublasFree( this->buffer_gpu );

      this->buffer_gpu         = NULL;
      this->buffer_gpu_created = false;
    }

    /*!
      Execute cublasGetMatrix ( memory moves from GPU to CPU )
      \param obj [in] FLA_Obj will read GPU buffer
    */
    inline void get_matrix_to(FLA_Obj obj) {


      cublasStatus status = CUBLAS_STATUS_SUCCESS;

      // Read buffer on GPU to CPU; the GPU buffer is dense with leading
      // dimension get_m(), the CPU side uses obj's column stride.
      status = cublasGetMatrix( this->get_m(),
                                this->get_n(),
                                this->get_data_size(),
                                this->buffer_gpu,
                                this->get_m(),
                                FLA_Obj_buffer_at_view( obj ),
                                FLA_Obj_col_stride( obj ) );

      LINAL_ERROR( status == CUBLAS_STATUS_SUCCESS,
                   LINAL_ERROR_CUBLAS_FAIL_TO_GET_MATRIX );
    }

    /*!
      Execute cublasSetMatrix ( memory moves from CPU to GPU ).
      The upload is skipped when the buffer is clean (cache hit); both
      outcomes are recorded via count_gpu_data_access.
      \param obj [in] FLA_Obj whose contents will be written in GPU
    */
    inline void set_matrix_from(FLA_Obj obj) {

      cublasStatus status = CUBLAS_STATUS_SUCCESS;

      // check cache first
      // buffer pointer, offset m and n, m and n
      if (this->is_dirty()) {
        count_gpu_data_access(false);

        // Write the CPU memory into GPU
        status = cublasSetMatrix( FLA_Obj_length( obj ),
                                  FLA_Obj_width( obj ),
                                  FLA_Obj_datatype_size( FLA_Obj_datatype( obj ) ),
                                  FLA_Obj_buffer_at_view( obj ), 
                                  FLA_Obj_col_stride( obj ),
                                  this->buffer_gpu,
                                  this->get_m() );
	this->set_dirt(false);
      } else {
	count_gpu_data_access(true);
      }
      LINAL_ERROR( status == CUBLAS_STATUS_SUCCESS,
                   LINAL_ERROR_CUBLAS_FAIL_TO_SET_MATRIX );
    }

    /*!
      Download the GPU buffer and compare it against a CPU-side matrix.
      \param obj_cpu   [in] reference CPU matrix
      \param threshold [in] tolerance on the 1-norm of the difference
      \return true when || gpu - cpu ||_1 < threshold
    */
    inline bool compare_buffer(FLA_Obj obj_cpu, double threshold) {
      FLA_Obj obj_gpu, norm;

      int datatype = FLA_Obj_datatype( obj_cpu );
      int length   = FLA_Obj_length  ( obj_cpu );
      int width    = FLA_Obj_width   ( obj_cpu );

      FLA_Obj_create( datatype, length, width, 0, 0, &obj_gpu );
      FLA_Obj_create( datatype,      1,     1, 0, 0, &norm );
      
      this->get_matrix_to( obj_gpu );

      FLA_Axpy( FLA_MINUS_ONE, obj_cpu, obj_gpu );
      FLA_Norm1( obj_gpu, norm );
	
      // BUG FIX: val was left uninitialized when datatype matched no
      // case below, making the final comparison undefined behavior;
      // default it to 0.0.
      double val = 0.0;
      switch ( datatype ) {
      case FLA_FLOAT:
      case FLA_COMPLEX:
	val = (double)(*((float *)FLA_Obj_buffer_at_view(norm)));
	break;
      case FLA_DOUBLE:
      case FLA_DOUBLE_COMPLEX:
	val = *((double*)FLA_Obj_buffer_at_view(norm));
	break;
      }

      FLA_Obj_free( &norm );
      FLA_Obj_free( &obj_gpu );
      
      return ( val < threshold );
    }

    /*!
      Partition the matrix on the GPU: extract the leading m by n
      sub-matrix (offset (0,0)).
    */
    inline void extract(Flat_GPU_ &A, int m, int n) {
      this->extract(A, m, n, 0, 0);
    }

    /*!
      Extract an m by n sub-matrix view at offset (offm, offn).  The
      view shares this object's GPU buffer and dirt flag; it never owns
      the buffer.
    */
    inline void extract(Flat_GPU_ &A, int m, int n, int offm, int offn) {
      LINAL_ERROR_DETAIL( ( offm+m <= this->get_m() &&
                            offn+n <= this->get_n() ),
                          LINAL_ERROR_OUT_OF_RANGE_FORMAT,
                          LINAL_ERROR_OUT_OF_RANGE,
                          "range (", this->get_m(), ",", this->get_n(), ")",
                          "offset(", offm,          ",", offn,          ")",
                          "dim   (", m,             ",", n,             ")" );

      A.fla.m       = m;
      A.fla.n       = n;
      A.fla.offm    = this->get_offm() + offm;
      A.fla.offn    = this->get_offn() + offn;
      A.fla.base    = this->get_fla().base;

      // share the gpu buffer
      A.buffer_gpu         = this->get_buffer();
      A.buffer_gpu_created = false;

      // share the validity information
      A.dirt   = this->dirt;
    }
    
    /*! Equality compares shape and datatype only (not contents). */
    inline bool operator==(Flat_GPU_ &b) {
      return (this->get_m()         == b.get_m() &&
              this->get_n()         == b.get_n() &&
              this->get_datatype() == b.get_datatype());
    }
    inline bool operator!=(Flat_GPU_ &b) { return !(*this == b); }

    /*! Display helpers: print shape, offsets, datatype, and dirt flag. */
    inline void disp() { 
      this->disp(stdout, (char*)"- Flat GPU -"); 
    }
    inline void disp(char *title) { 
      this->disp(stdout, title); 
    }
    inline void disp(FILE* stream) { 
      this->disp(stream, (char*)"- Flat GPU -"); 
    }
    inline void disp(FILE* stream, char *title){ 
      if (this->get_m() && this->get_n()) {
        fprintf(stream, "%s, is_dirty %d\n", 
                title, this->is_dirty());
        switch (this->get_datatype()) {
        case LINAL_INT:            fprintf(stream, "INT     TYPE\n");  break;
        case LINAL_SINGLE_REAL:   
        case LINAL_DOUBLE_REAL:    fprintf(stream, "REAL    TYPE\n");  break;
        case LINAL_SINGLE_COMPLEX: 
        case LINAL_DOUBLE_COMPLEX: fprintf(stream, "COMPLEX TYPE\n");  break;
        }
        fprintf(stream, "(%4d, %4d )::( %4d, %4d )\n",
                this->get_offm(), this->get_offn(),
                this->get_m(), this->get_n());
      }
    }
  };
  
  // -------------------------------------------------------------- 
  // ** Cache Object for GPU matrices
  // - Only cache the read only matrices
  // - The matrices are only corrupted when the bin is full since it is read only
  // - Matrices in workspace always move back and forth from CPU to GPU
  /*!
    Cache_GPU_ manages the device-resident matrix slots of one GPU:
    - bin  : read-only slots, indexed by Key_GPU_ via a lookup table,
             recycled round-robin through the dirt list when full;
    - work : read-write scratch slots, always treated as dirty.
    An OpenMP lock serializes access from concurrent CPU threads.
  */
  class Cache_GPU_ {
  private:
  protected:
    bool locker;   // mirror of the omp lock state (for is_locked queries)
    int  device;   // CUDA device id this cache belongs to
    
    // ***********
    omp_lock_t omp;
    
    // containers for the matrices
    // bin  : read only matrices
    // work : read write matrices 
    std::vector< Flat_GPU_ > bin, work;
    
    // dirt  : a deque of dirty matrices in bin
    // table : a map to clean matrices in bin

    std::vector< std::pair<int,std::map<Key_GPU_,int>::iterator> > dirt;
    int dirt_front;  // round-robin cursor into dirt

    std::map< Key_GPU_, int > table;
    std::map< Key_GPU_, int >::iterator dummy_in_table;

    inline int  _get_dirt_front()  { 
      return this->dirt_front; 
    }
    inline int  _move_dirt_front() { 
      // BUG FIX: the original wrote
      //   dirt_front = ((++dirt_front) % dirt.size());
      // which modifies dirt_front twice in one expression — undefined
      // behavior before C++17.  Compute the successor explicitly.
      this->dirt_front = (this->dirt_front + 1) % (int)this->dirt.size();
      return this->dirt_front;
    }
    inline int  _get_n_item_bin () { return this->bin.size(); }
    inline int  _get_n_item_work() { return this->work.size(); }
    
    inline Flat_GPU_& _get_flat_gpu_from_bin(int index) {
      return this->bin.at(index);
    }
    inline Flat_GPU_& _get_flat_gpu_from_work(int index) {
      return this->work.at(index);
    }
    inline bool _is_in_use(int index) {
      return this->_get_flat_gpu_from_bin(index).is_in_use();
    }
    
  public:
    // NOTE: initializer lists follow declaration order (locker, device)
    // to avoid -Wreorder; the original listed device first.
    Cache_GPU_() : locker(false),
                   device(0),
                   dirt_front(0) { }

    Cache_GPU_(int device) : locker(false),
                             device(device),
                             dirt_front(0) { }

    virtual ~Cache_GPU_() { }

    inline int  get_device() {
      return this->device;
    }
    
    /*! Initialization
      \param n_item_bin  [in] workspace for read only items
      \param n_item_work [in] workspace for read/write items
    */
    inline void init(int n_item_bin, int n_item_work) {

      // ** check input; keep at least 3 slots in each pool
      n_item_bin  = ( n_item_bin  > 3 ? n_item_bin  : 3 );
      n_item_work = ( n_item_work > 3 ? n_item_work : 3 ); 

      // ** clear the bin and workspace
      this->bin.clear();
      this->work.clear();

      // ** initialize the work and bin 
      // TODO :: I need copy constructor for Flat_GPU_
      Flat_GPU_ in;

      set_device_gpu( this->get_device() );
      omp_init_lock( &this->omp );

      for (int i=0;i<n_item_bin;++i) 
        this->bin.push_back( in );
      
      for (int i=0;i<n_item_work;++i) 
        this->work.push_back( in );
      
      // ** clear the search table 
      this->table.clear();

      // sentinel entry: dirt slots not yet bound to a real key point here
      std::pair< std::map< Key_GPU_, int >::iterator, bool > 
	ret = this->table.insert( std::make_pair( Key_GPU_(-1, -1, 0), -1) );

      LINAL_ERROR(ret.second == true,
		  ">> Dummy object exists in the table");
      
      this->dummy_in_table = ret.first;

      // ** clear dirt
      this->dirt.clear();
      
      // ** matrices in bin are dirty with dummy iterator
      for (int i=0;i<n_item_bin;++i) 
        this->dirt.push_back( std::make_pair(i, this->dummy_in_table) );

      dirt_front = 0;
    }
    
    /*! Finalization
     */
    inline void finalize() {
      
      set_device_gpu( this->get_device() );
      omp_destroy_lock( &this->omp );

      this->bin.clear();
      this->work.clear();
      
      this->dirt.clear();
      this->table.clear();
    }


    /*! Create matrices in GPU
      \param type [in] datatype of matrix
      \param m [in] length of matrix
      \param n [in] width of matrix
    */
    inline void create_flat_gpu( int type, int m, int n ) {

      set_device_gpu( this->get_device() );

      // (int) casts avoid signed/unsigned comparison warnings
      for (int i=0;i<(int)this->bin.size();++i) 
        this->bin.at(i).create( type, m, n );

      for (int i=0;i<(int)this->work.size();++i)
        this->work.at(i).create( type, m, n );
    }
    
    /*! Free the matrices in the bin and work
     */
    inline void free_flat_gpu() {

      set_device_gpu( this->get_device() );

      for (int i=0;i<(int)this->bin.size();++i)
        this->bin.at(i).free();
      
      for (int i=0;i<(int)this->work.size();++i)
        this->work.at(i).free();
    }

    /*! Flush the matrices - Get all matrices dirty
     */
    inline void flush() {
      for (int i=0;i<(int)this->bin.size();++i)
        this->bin.at(i).set_dirt(true);
    }


    inline void lock()      { 
      omp_set_lock( &this->omp );
      this->locker = true; 
    }
    inline void unlock()    { 
      omp_unset_lock( &this->omp );
      this->locker = false; 
    }
    inline bool is_locked() { 
      return this->locker; 
    }

    /*! Mark the cached copy of A (if any) stale. */
    inline void set_flat_gpu_dirty(FLA_Obj A) {
      std::map<Key_GPU_,int>::iterator it = this->table.find( Key_GPU_(A) );
      if (it != this->table.end()) 
        this->_get_flat_gpu_from_bin(it->second).set_dirt(true);
    }

    /*! Before a CPU-side write: invalidate A's cached copy. */
    inline void pre_sync_flat_gpu_in_bin(FLA_Obj A) {
      std::map<Key_GPU_,int>::iterator it = this->table.find( Key_GPU_(A) );
      if (it != this->table.end()) 
	this->_get_flat_gpu_from_bin(it->second).set_dirt(true);
    }
    /*! After a CPU-side write: re-upload A into its (clean) cached slot. */
    inline void post_sync_flat_gpu_in_bin(FLA_Obj A) {
      std::map<Key_GPU_,int>::iterator it = this->table.find( Key_GPU_(A) );
      if (it != this->table.end()) {
        Flat_GPU_ &AA = this->_get_flat_gpu_from_bin(it->second);
	
        if (!AA.is_dirty()) {
          Flat_GPU_ A_gpu;
          AA.extract( A_gpu, A.m, A.n );
          A_gpu.set_matrix_from( A );
        }
      }
    }
    inline void post_sync_flat_gpu_in_bin(int device_from, FLA_Obj A) {
      // peer to peer communication (not implemented)
      
    }

    /*! Look up (or allocate) a read-only bin slot for A.
        On a miss, a not-in-use slot is reclaimed round-robin from dirt
        and rebound in the table; on a hit the existing slot is reused.
        The returned slot is marked in-use. */
    inline Flat_GPU_& pull_flat_gpu_from_bin(FLA_Obj A) {

      // ** Search the table first
      std::pair< Key_GPU_, int > in = std::make_pair( Key_GPU_(A), -1 );
      std::pair< std::map<Key_GPU_,int>::iterator , bool > ret;

      int index, front;
      bool is_dirt;

      {
	ret = this->table.insert( in );
	if (ret.second) { 

	  // There is NOT existing item on the table
          int start = this->_get_dirt_front();
          do {
            front   = this->_get_dirt_front();
            index   = this->dirt.at(front).first;
            LINAL_ERROR(start != this->_move_dirt_front(),
                        ">> Cache is too small. Increase the cache size");
          } while ( this->_is_in_use(index) );

	  // If dirt is connected to map, update the map      
	  if (this->dirt.at(front).second != this->dummy_in_table)
	    this->table.erase( this->dirt.at(front).second );

	  this->dirt.at(front).second = ret.first;
	  ret.first->second = index;

	  is_dirt = true;
	  
	} else {

	  // There is an existing item on the table
	  index   = ret.first->second;
	  is_dirt = false;

	}
      }
      
      Flat_GPU_ &r_val = this->_get_flat_gpu_from_bin(index);

      if (is_dirt) r_val.set_dirt(is_dirt);
      r_val.set_use (true);

      return r_val;
    }
    /*! Claim a read-write work slot; work slots are always dirty. */
    inline Flat_GPU_& pull_flat_gpu_from_work(int index) {
      // ** Matrices from work bin are always dirty
      Flat_GPU_ &r_val = this->_get_flat_gpu_from_work(index);
      r_val.set_dirt(true);
      r_val.set_use (true);

      return r_val;
    }
  };

  //std::deque< std::pair<int,std::map<Key_GPU_,int>::iterator> > dirt;
  // Deque may need later but not now
  /*
  // ** Using Deque or List structure
  // Insertion succeed.
  index   = this->dirt.front().first;
  is_dirt = true;
  
  // If dirt is connected to map, update the map      
  if (this->dirt.front().second != this->table.end()) 
  this->table.erase( this->dirt.front().second );
  
  this->dirt.front().second = ret.first;
  ret.first->second = index;
  
  this->dirt.push_back( this->dirt.front() );
  this->dirt.pop_front();
  */
  
}
#endif
