/*
  Copyright (C) 2011, Kyungjoo Kim
  
  This file is part of LINAL (LINear ALgebra)
  
  All rights reserved.

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.
  
  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.
  
  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
  Also add information on how to contact you by electronic and paper mail.
  
  
  Kyungjoo Kim
  iamkyungjoo@gmail.com
*/
#include "linal/common.hxx"
#include "linal/const.hxx"
#include "linal/util.hxx"
#include "linal/matrix.hxx"
#include "linal/flat.hxx"
#include "linal/hier.hxx"

#ifdef LINAL_GPU_ENABLE

#include "cublas.h"
#include "linal/gpu.hxx"

namespace linal {

  // File-local module state, manipulated through the accessors below.
  // Tuning parameters: coarsening factor, split count, CPU block size.
  static int s_n_coarseness=1, s_n_split=0, s_cpu_blocksize=0;
  // NOTE(review): s_n_gpu appears unused in this file -- get_n_gpu()
  // derives the device count from s_cache.size(); confirm before removing.
  static int s_n_thread=0, s_n_gpu=0, s_n_work_balance=1;
  // Access statistics: s_gpu_hit counts every recorded access, s_cache_hit
  // only those flagged as cache hits (see count_gpu_data_access).
  static int s_cache_hit=0, s_gpu_hit=0;
  // One Cache_GPU_ entry per registered GPU device.
  static std::vector< Cache_GPU_ > s_cache;
  // Countdown of CPU "entry points" still available (see lock_cpu()).
  static int s_locker_cpu = 0;

  // Serializes cross-device cache synchronization (pre_sync_flat_gpu_in_bin).
  static omp_lock_t gpu_sync_lock;

  // --------------------------------------------------------------
  // ** Global information
  // Accessor pair for the split count.
  int get_n_split() {
    return s_n_split;
  }
  void set_n_split(int n_split) {
    s_n_split = n_split;
  }

  // Accessor pair for the coarsening factor.
  int get_n_coarseness() {
    return s_n_coarseness;
  }
  void set_n_coarseness(int coarsening) {
    s_n_coarseness = coarsening;
  }

  // Accessor pair for the CPU block size.
  int get_cpu_blocksize() {
    return s_cpu_blocksize;
  }
  void set_cpu_blocksize(int blocksize) {
    s_cpu_blocksize = blocksize;
  }

  // Accessor pair for the work-balance factor; non-positive
  // requests are clamped to 1.
  int get_work_balance() {
    return s_n_work_balance;
  }
  void set_work_balance(int balance) {
    if (balance > 0)
      s_n_work_balance = balance;
    else
      s_n_work_balance = 1;
  }
  
  
  int  get_device_gpu   ()                 {
    int device=0;
    cudaError_t error = cudaGetDevice( &device );
    LINAL_ERROR(error == cudaSuccess, ">> cudaGetDevice Error");
    return device;
  }
  void sync_threads_gpu ()                 {
    cudaError_t error = cudaThreadSynchronize();
    LINAL_ERROR(error == cudaSuccess, ">> cudaThreadSynchronize Error");
  }
  // Synchronize every registered device in turn.  Leaves the current
  // device set to the last one visited, as before.
  void sync_threads_gpu_all() {
    const int n_gpu = get_n_gpu();
    for (int dev = 0; dev < n_gpu; ++dev) {
      set_device_gpu(dev);
      sync_threads_gpu();
    }
  }
    
  // Map a CPU thread id onto a device via round-robin over the cache
  // entries; returns -1 when no GPU is registered.
  // (need to change and should be called once in one function -- or this
  // is good...)
  int get_device_gpu(int thread) {
    const int n_gpu = get_n_gpu();
    if (n_gpu == 0)
      return -1;
    return s_cache.at(thread % n_gpu).get_device();
  }
  void set_device_gpu   ( int device )     { 
    cudaError_t error = cudaSetDevice( device );
    LINAL_ERROR(error == cudaSuccess, ">> cudaSetDevice Error");
  }

  // Accessor pair for the participating CPU thread count.
  int get_n_thread() {
    return s_n_thread;
  }
  void set_n_thread(int n_thread) {
    s_n_thread = n_thread;
  }

  // Number of registered GPUs: one cache entry exists per device.
  int get_n_gpu() {
    return s_cache.size();
  }

  // --------------------------------------------------------------
  // ** GPU init
  // two step initialization
  // Two-step initialization, step 1: set up the cuBLAS library, record the
  // thread count, and prepare the CPU entry counter and the GPU sync lock.
  // Devices are registered afterwards via push_back_gpu().
  // NOTE(review): cublasInit() belongs to the legacy cuBLAS API; the
  // handle-based cublasCreate() is its modern replacement.
  void init_gpu(int n_thread) {
    cublasInit();
    set_n_thread(n_thread);

    // number of entry points that threads can involve
    s_locker_cpu = n_thread;
    omp_init_lock( &gpu_sync_lock );
  }
  void push_back_gpu(int device, int n_item_bin, int n_item_work) {
    s_cache.push_back( Cache_GPU_(device) );
    s_cache.back().init( n_item_bin, n_item_work );
  }
  // one step initialization 
  void init_gpu(int n_thread, int n_gpu, 
                int n_item_bin, int n_item_work) {
    cublasInit();
    set_n_thread(n_thread);

    // number of entry points that threads can involve
    s_locker_cpu = n_thread;
    omp_init_lock( &gpu_sync_lock );

    for (int i=0;i<n_gpu;++i) {
      s_cache.push_back( Cache_GPU_(i) );
      s_cache.back().init( n_item_bin, n_item_work );
    }
  }
  // Tear down in reverse order of init_gpu(): destroy the sync lock,
  // finalize and drop every per-device cache (newest first), reset the
  // thread count, and shut cuBLAS down last.
  void finalize_gpu() {
    omp_destroy_lock( &gpu_sync_lock );

    while (!s_cache.empty()) {
      s_cache.back().finalize();
      s_cache.pop_back();
    }
    s_cache.clear();  // redundant after the loop; kept to be explicit

    set_n_thread(0);
    cublasShutdown();
  }

  void flush_gpu() {
    for (std::vector< Cache_GPU_ >::iterator
           it=s_cache.begin();it!=s_cache.end();++it)
      it->flush();
  }

  // Zero both access counters (see count_gpu_data_access).
  void reset_gpu_data_access_counter() {
    s_cache_hit = 0;
    s_gpu_hit = 0;
  }
  // Record one GPU data access; when is_cache is true the cache-hit
  // counter is also advanced (bool promotes to 0/1 in the addition).
  // NOTE(review): these updates are not atomic -- concurrent callers may
  // lose counts; confirm that approximate statistics are acceptable.
  void count_gpu_data_access(bool is_cache) {
    s_cache_hit += is_cache;
    ++s_gpu_hit;
  }
  // Print the cache-hit statistics to `stream`.
  // Fix: guard the ratio against division by zero -- with no recorded
  // accesses (s_gpu_hit == 0) the original printed nan/inf; report 0.0
  // instead.  The output format string is unchanged.
  void disp_gpu_data_access(FILE *stream) {
    const double ratio =
      (s_gpu_hit > 0) ? ((double)(s_cache_hit))/((double)(s_gpu_hit)) : 0.0;
    fprintf(stream, "linal::gpu::cache hit %d, total gpu hit %d, ratio %lf\n",
            s_cache_hit, s_gpu_hit, ratio);
  }
  // Copy the current access counters into the caller's out-parameters.
  void get_gpu_data_access_counter(int &cache_hit, int &gpu_hit) {
    cache_hit = s_cache_hit;
    gpu_hit = s_gpu_hit;
  }
  // Mark the flat object A dirty in every per-device cache.
  void set_flat_gpu_dirty(FLA_Obj A) {
    for (std::vector< Cache_GPU_ >::size_type k = 0; k < s_cache.size(); ++k)
      s_cache[k].set_flat_gpu_dirty(A);
  }
  // Mark every block of the hierarchical matrix A dirty in every
  // per-device cache.  Traversal order (column-major over blocks, then
  // caches) matches the original.
  void set_flat_gpu_dirty(Hier_ A) {
    const int n = A.get_n();
    const int m = A.get_m();
    for (int j = 0; j < n; ++j)
      for (int i = 0; i < m; ++i)
        for (std::vector< Cache_GPU_ >::size_type k = 0; k < s_cache.size(); ++k)
          s_cache[k].set_flat_gpu_dirty(A(i,j));
  }
  // Mark the flat matrix A dirty in every per-device cache, going through
  // its underlying FLA object.
  void set_flat_gpu_dirty(Flat_ A) {
    for (std::vector< Cache_GPU_ >::size_type k = 0; k < s_cache.size(); ++k)
      s_cache[k].set_flat_gpu_dirty(A.get_fla());
  }

  

  // True when no CPU entry slots remain (counter exhausted or oversubscribed).
  // NOTE(review): this read is not synchronized with the atomic updates in
  // lock_cpu()/unlock_cpu(), so a caller may observe a stale value.
  bool is_locked_cpu() {
    return (s_locker_cpu <= 0);
  }

  // so far so good without exclusive pragma
  // Take/release one CPU entry slot.  `omp atomic` makes each
  // increment/decrement race-free, but acquire+check is not a critical
  // section as a whole.
  void lock_cpu() {
#pragma omp atomic
    --s_locker_cpu;
  }
  void unlock_cpu() {
#pragma omp atomic
    ++s_locker_cpu;
  }

  // Per-device lock state, delegated to the device's Cache_GPU_ entry.
  bool is_locked_gpu(int device) {
    return s_cache.at(device).is_locked();
  }
  // Acquire the device lock first, then bind `device` to this host thread.
  void lock_gpu(int device) {
    s_cache.at(device).lock();
    set_device_gpu( device );
  }
  // Drain outstanding work on the current device before releasing the lock,
  // so the next lock holder observes completed results.
  void unlock_gpu(int device) {
    sync_threads_gpu();
    s_cache.at(device).unlock();
  }

  // better solution if sync is really necessary
  // Under the global sync lock, tell every *other* device's cache that A
  // is about to be updated on `device` (delegates to Cache_GPU_).
  void pre_sync_flat_gpu_in_bin(int device, FLA_Obj A) {
    omp_set_lock( &gpu_sync_lock );
    for (int i=0;i<get_n_gpu();++i) 
      if (i!=device) 
        s_cache.at(i).pre_sync_flat_gpu_in_bin(A);
    omp_unset_lock( &gpu_sync_lock );
  }

  // assume that A in the given device is updated, so 
  // the other devices need to be informed. this involves copy
  // not a good solution
  // NOTE(review): side effect -- the loop leaves the current CUDA device
  // set to the last peer visited, not to `device`; callers must not rely
  // on the current device afterwards.  The global sync lock is commented
  // out here (unlike pre_sync_flat_gpu_in_bin); confirm this is intended.
  void post_sync_flat_gpu_in_bin(int device, FLA_Obj A) {
    //omp_set_lock( &gpu_sync_lock );
    for (int i=0;i<get_n_gpu();++i) 
      if (i!=device) {
        set_device_gpu(i);
        s_cache.at(i).post_sync_flat_gpu_in_bin(A);
      }
    //omp_unset_lock( &gpu_sync_lock );
  }

  // Fetch the device-side flat object for A from `device`'s bin cache.
  // Throws (via vector::at) when `device` is out of range.
  Flat_GPU_& pull_flat_gpu_from_bin(int device, FLA_Obj A) {
    Cache_GPU_ &cache = s_cache.at(device);
    return cache.pull_flat_gpu_from_bin(A);
  }
  // Fetch the `index`-th device-side work buffer on `device`.
  Flat_GPU_& pull_flat_gpu_from_work(int device, int index) {
    Cache_GPU_ &cache = s_cache.at(device);
    return cache.pull_flat_gpu_from_work(index);
  }

  void create_flat_gpu(int type, int m, int n) {
    for (std::vector< Cache_GPU_ >::iterator
           it=s_cache.begin();it!=s_cache.end();++it)
      it->create_flat_gpu(type, m, n);
  }
  void free_flat_gpu() {
    for(std::vector< Cache_GPU_ >::iterator
          it=s_cache.begin();it!=s_cache.end();++it)
      it->free_flat_gpu();
  }
}
#endif
