#ifndef CPU_ONLY
#ifdef USE_PROF

#include <algorithm>
#include <cmath>
#include <limits>
#include <sstream>
#include <vector>

#include <glpk.h>

#include <boost/thread.hpp>

#include "caffe/util/kernel_analyzer.hpp"
#include "caffe/util/benchmark.hpp"
#include "caffe/util/info_log.hpp"
#include "caffe/common.hpp"

// Command-line switch selecting the parallel-degree solver implementation
// used by KernelAnalyzer::Stop(): true -> ParallelDegree (new),
// false (default) -> ParallelDegreeLB (old).
DEFINE_bool(pdsolver, false,
            "Optional. Option for selecting the parallel degree solver (true->new, false->old)");

// Number of elements in a C array (0 when the argument compares equal to nullptr).
// NOTE(review): sizeof(array)/sizeof(array[0]) is only meaningful when the
// argument is a true array expression, not a pointer that an array decayed
// to — verify call sites before relying on this.
#define ARRAY_LEN(array) (array == nullptr ? 0 : sizeof(array) / sizeof(array[0]))

// Aborts via LOG(FATAL) with a descriptive message when `val` is a GLPK error
// code returned by `func` (e.g. glp_intopt).  A zero/unknown code passes
// through silently.  Fixes over the previous version: "incorrent bouds" ->
// "incorrect bounds", the GLP_ENOPFS branch printed "GLP_ENPFS", and the
// follow-up LOG(FATAL) lines after a LOG(FATAL) were unreachable (LOG(FATAL)
// aborts), so their text is folded into the primary message.
#define CHECK_GLP_ERROR(val, func) {                                                                      \
  if (val == GLP_EBOUND) {                                                                                \
    LOG(FATAL) << "GLP_EBOUND! Unable to start the search, because some double-bounded variables have " <<\
                  "incorrect bounds or some integer variables have non-integer bounds! @" << func;        \
  } else if (val == GLP_EROOT) {                                                                          \
    LOG(FATAL) << "GLP_EROOT! Unable to start the search, because optimal basis for initial LP " <<       \
                  "relaxation is not provided (only possible if the presolver is disabled). @" << func;   \
  } else if (val == GLP_ENOPFS) {                                                                         \
    LOG(FATAL) << "GLP_ENOPFS! Unable to start the search, because LP relaxation of the MIP problem " <<  \
                  "instance has no primal feasible solution (presolver enabled). @" << func;              \
  } else if (val == GLP_ENODFS) {                                                                         \
    LOG(FATAL) << "GLP_ENODFS! Unable to start the search, because LP relaxation of the MIP problem " <<  \
                  "instance has no dual feasible solution (presolver enabled). @" << func;                \
  } else if (val == GLP_EFAIL) {                                                                          \
    LOG(FATAL) << "GLP_EFAIL! The search was prematurely terminated due to the solver failure. @" << func;\
  } else if (val == GLP_EMIPGAP) {                                                                        \
    LOG(FATAL) << "GLP_EMIPGAP! The search was prematurely terminated, because the relative mip gap " <<  \
                  "tolerance has been reached. @" << func;                                                \
  } else if (val == GLP_ETMLIM) {                                                                         \
    LOG(FATAL) << "GLP_ETMLIM! The search was prematurely terminated, because the time limit has been " <<\
                  "exceeded. @" << func;                                                                  \
  } else if (val == GLP_ESTOP) {                                                                          \
    LOG(FATAL) << "GLP_ESTOP! The search was prematurely terminated by application " <<                   \
                  "(only possible with the advanced solver interface). @" << func;                        \
  }                                                                                                       \
}

// Rounds `num` up to the nearest multiple of `base` — NOT a plain ceil of
// `num`; the name is misleading, so read call sites carefully.
#define ceiling(num, base) static_cast<int>(ceil(static_cast<double>(num) / base) * base)

namespace caffe {
  using std::numeric_limits;
  using sstream = std::stringstream;
  using uint = unsigned int;

  // One KernelAnalyzer per thread (thread-local storage), created lazily by
  // KernelAnalyzer::Get().
  static boost::thread_specific_ptr<KernelAnalyzer> tls_thread_kernel_analyzer_;

  // Returns the current local date formatted as "YYYY-M-D" (used to tag
  // analyzer log files).
  //
  // Fix: struct tm stores tm_year as years since 1900 and tm_mon zero-based,
  // so both must be adjusted; the old code emitted e.g. "123-4-15" for
  // 2023-05-15.  The trailing reset of the local stream was dead code.
  std::string GetCurrentTime() {
    time_t curr_time = time(NULL);
    tm *local_time = localtime(&curr_time);
    std::stringstream temp_ss;
    temp_ss << (local_time->tm_year + 1900) << "-" << (local_time->tm_mon + 1)
            << "-" << local_time->tm_mday;
    return temp_ss.str();
  }

  // Returns this thread's KernelAnalyzer, constructing it lazily on the
  // first call (one instance per thread via thread-specific storage).
  KernelAnalyzer& KernelAnalyzer::Get() {
    KernelAnalyzer *analyzer = tls_thread_kernel_analyzer_.get();
    if (analyzer == nullptr) {
      analyzer = new KernelAnalyzer();
      tls_thread_kernel_analyzer_.reset(analyzer);
    }
    return *analyzer;
  }

  // Constructs an analyzer not yet bound to a device; device_id_ is resolved
  // lazily in Start().
  KernelAnalyzer::KernelAnalyzer() {
    this->device_id_ = -1;
    this->current_key_str_ = "";
    // pdegree_map_ and k_num_bnd_ are default-constructed empty, so the
    // defensive clear() calls the constructor used to make were dead code.
  }

  // All members (pdegree_map_, k_num_bnd_ and the vectors it holds) release
  // their storage automatically; the manual clear() loop the destructor used
  // to run was dead code.
  KernelAnalyzer::~KernelAnalyzer() {
  }

  // Opens the kernel region identified by layer_name + loop_label on the
  // current device and starts the activity tracker for it.
  //
  // First encounter of a region key: the tracker is initialized for a
  // profiling run and parallel_degree is set to 0 ("unknown, measure now").
  // Later encounters: the cached degree from pdegree_map_ is returned and
  // the tracker is restarted only to collect timestamps/occupancy.
  //
  // @param layer_name       Name of the layer owning the kernel loop.
  // @param loop_label       Label distinguishing loops inside one layer.
  // @param parallel_degree  [out] 0 on the first (profiling) pass, otherwise
  //                         the cached parallel degree for this region.
  void KernelAnalyzer::Start(const string layer_name, const string loop_label, int &parallel_degree) {
    // Resolve the device id lazily on first use.
    if (this->device_id_ < 0) {
      this->device_id_ = Caffe::Get().current_device();
      if (this->device_id_ < 0) {
        // Fall back to whatever device CUDA considers current.
        CUDA_CHECK(cudaGetDevice(&this->device_id_));
      }
    }
    // LOG(INFO) << "Current DEVICE " << this->device_id_ << ".";

    // Region key combines layer, loop and device so each device is analyzed
    // independently.
    this->current_key_str_ = layer_name + "_" + loop_label + "_" + std::to_string(this->device_id_);

    if (this->pdegree_map_.find(this->current_key_str_) == this->pdegree_map_.end()) {
      // If there is only one resource tracker among all threads, mutex can be added in this place.
      ActivityTracker::Get().Lock();
      // Initialize the resource tracker with SERIAL profiling mode.
      ActivityTracker::Get().Init();
      // Start the ActivityTracker.
      ActivityTracker::Get().Start(this->device_id_);
      parallel_degree = 0;
    } else {
      // Replay pass: reuse the degree computed by a previous Stop().
      parallel_degree = pdegree_map_[this->current_key_str_];

      CHECK_GT(parallel_degree, 0) << "ERROR! The parallel degree is not valid.";

      ActivityTracker::Get().Lock();
      ActivityTracker::Get().Init();
      // Buffer-allocation overhead is only recorded during the first pass;
      // SetBuffOverheadRecord(false) is expected to echo the new setting.
      CHECK_EQ(ActivityTracker::Get().SetBuffOverheadRecord(false), false)
        << "Failed to disable recording the overead of buffer allocation.";
      ActivityTracker::Get().Start(this->device_id_);
    }

    return ;
  }

  // Closes the region opened by Start() and stops the activity tracker.
  //
  // First (profiling) pass for a region key: reads the recorded kernels,
  // solves for the parallel degree (solver chosen by --pdsolver), caches it
  // in pdegree_map_, and logs the analysis overhead plus kernel details.
  // Later passes only dump timestamps/occupancy for the known degree.
  void KernelAnalyzer::Stop() {
    if (this->pdegree_map_.find(this->current_key_str_) == this->pdegree_map_.end()) {
      // Stop the ActivityTracker
      ActivityTracker::Get().Stop();

      Timer analyzer_timer;
      // Count the time overhead of the GLP4NN model analysis.
      analyzer_timer.Start();

      const uint64_t kernel_launch_overhead = ActivityTracker::Get().GetKernelLaunchOverhead();
      const boost::shared_ptr<std::vector<Kernel_t>> kernels = ActivityTracker::Get().GetKernelsRecorded();

      // pdegree_map_[this->current_key_str_].min_val = ParallelDegreeUB(kernel_launch_overhead, kernels, this->device_id_);
      // Solver selection: --pdsolver=true -> new solver, false -> old solver.
      if (FLAGS_pdsolver) {
        this->pdegree_map_[this->current_key_str_] = ParallelDegree(kernel_launch_overhead, kernels, this->device_id_);
      } else {
        this->pdegree_map_[this->current_key_str_] = ParallelDegreeLB(kernel_launch_overhead, kernels, this->device_id_);
      }
      LOG(WARNING) << this->current_key_str_ << "'s analysis result: " << this->pdegree_map_[this->current_key_str_]
                   << " (with " << (FLAGS_pdsolver ? "ParallelDegree" : "ParallelDegreeLB") << " solver).";

      // Update the size of GpuStreamPool
      // GpuStreamPool::Get().SetPoolSize(this->pdegree_map_[this->current_key_str_]);

      double analyzer_overhead = analyzer_timer.MicroSeconds();
      // Record kernels that needed to be analyzed.
      RecordKernelsAnalyzed(kernels);
      // Record kernel timestamps.
      ActivityTracker::Get().TimestampLog(this->current_key_str_);
      // Compute the Occupancy Ratio.
      ActivityTracker::Get().ComputeOccupancyRatio(this->current_key_str_,
                                                   this->pdegree_map_[this->current_key_str_]);
      // Release temporary buffer.
      ActivityTracker::Get().TempBufRelease();

      // Unlock the asynchronous resource tracker.
      ActivityTracker::Get().Unlock();

      // Persist the measured analysis overhead for this region.
      sstream temp_ss;
      temp_ss << this->current_key_str_ << "," << analyzer_overhead << " us" << std::endl;
      InfoLog::Get().RecordInfoLog("AnalyzerOverhead", "analyzer_overhead",
                                   GetCurrentTime() + "-ANALYZER", temp_ss.str());
      temp_ss.str("");
      temp_ss.clear();

      LOG(INFO) << "Asynchronous resource tracker stop!";
    } else {
      // Replay pass: degree already known; log timestamps/occupancy only.
      ActivityTracker::Get().Stop();
      ActivityTracker::Get().TimestampLog(current_key_str_ + "_" +
                                          std::to_string(pdegree_map_[current_key_str_]));
      ActivityTracker::Get().ComputeOccupancyRatio(current_key_str_,
                                                   pdegree_map_[current_key_str_]);
      ActivityTracker::Get().TempBufRelease();
      ActivityTracker::Get().Unlock();
    }

    return ;
  }

  // Dumps every (region key, parallel degree) pair to the InfoLog as CSV
  // lines under the "ParallelDegree" category.
  //
  // Fixes: iterate by const reference, and drop the dead str("")/clear()
  // reset of a local stream that was about to be destroyed anyway.
  void KernelAnalyzer::RecordParallelDegree() {
    sstream temp_ss;
    for (const auto& pd_record : this->pdegree_map_) {
      temp_ss << pd_record.first << "," << pd_record.second << std::endl;
    }

    InfoLog::Get().RecordInfoLog("ParallelDegree", "", "ParallelDegree_record",
                                 temp_ss.str());
  }

  // Dumps the (region key, parallel degree) pairs belonging to one pass
  // direction to the InfoLog.
  //
  // @param direction "Forward" selects keys tagged "FW"; anything else
  //                  selects keys tagged "BP" (backward pass).
  //
  // Fix: the two branches duplicated the whole loop and differed only in the
  // substring searched for, so the tag is now selected once up front.
  void KernelAnalyzer::RecordParallelDegree(const string direction) {
    const std::string tag = (direction == "Forward") ? "FW" : "BP";

    sstream temp_ss;
    for (const auto& pd_record : this->pdegree_map_) {
      if (pd_record.first.find(tag) != std::string::npos) {
        temp_ss << pd_record.first << "," << pd_record.second << std::endl;
      }
    }

    InfoLog::Get().RecordInfoLog("ParallelDegree", direction, direction + "PDeg_rec",
                                 temp_ss.str());
  }

  // Writes the launch configuration and timing of every analyzed kernel to
  // the InfoLog, batched record_step kernels per log entry.
  //
  // @param kernels Kernels recorded during the profiling pass.
  //
  // Fixes: use size_t indices (the old `int i` vs `kernels->size()` compare
  // mixed signedness) and hoist the repeated bounds-checked at(i + j) calls
  // into one const reference per kernel.
  void KernelAnalyzer::RecordKernelsAnalyzed(boost::shared_ptr<vector<Kernel_t>> kernels) const {
    LOG(INFO) << "Recording kernels that have been analyzed ..." << kernels->size();
    sstream temp_ss;

    constexpr size_t record_step = 10;  // Kernels per log entry.
    for (size_t i = 0; i < kernels->size(); i += record_step) {
      temp_ss.str("");
      temp_ss.clear();

      for (size_t j = 0; (j < record_step) and ((j + i) < kernels->size()); ++ j) {
        const Kernel_t &k = kernels->at(i + j);
        temp_ss << k.name_
                << ",grid=[" << k.grid_x_ << ":" << k.grid_y_ << ":" << k.grid_z_
                << "],block=[" << k.block_x_ << ":" << k.block_y_ << ":" << k.block_z_
                << "],sm=" << k.sm_total_ << ",regs=" << k.reg_per_thread_ << ",invoc="
                << k.invocations_ << ",avg_exec_time=" << k.average_exec_time_
                << ",local_mem_per_thread=" << k.local_mem_per_thread_ << "\n";
      }

      InfoLog::Get().RecordInfoLog("KernelInfo", this->current_key_str_,
                                   "Kernel_Info", temp_ss.str());
    }
  }

  int KernelAnalyzer::ParallelDegreeUB(uint64_t t_launch, boost::shared_ptr<vector<Kernel_t>> kernels, int device_id) {
    cudaDeviceProp gpu_prop;
    CUDA_CHECK(cudaGetDeviceProperties(&gpu_prop, device_id));

    glp_prob *dop_mip = glp_create_prob();
    glp_set_prob_name(dop_mip, "DegreeOfParallelismSovler");
    glp_set_obj_dir(dop_mip, GLP_MAX);
    glp_term_out(GLP_OFF);

    int total_kernel_kinds = kernels->size();
    if (!kernels.get() or total_kernel_kinds == 0) {
      LOG(FATAL) << "There is no kernels recorded! Please CHECK CUPTI settings.";
    }

    glp_add_cols(dop_mip, total_kernel_kinds);
    if (glp_get_num_cols(dop_mip) == 0) {
      LOG(INFO) << "ERROR! There is no kernel recorded!";
    }

    // Bounds settings.
    // launch_bnd: The maximum number of kernels that can be launched concurrently subject to the execution time.
    // sm_bnd: The maximum number of kernels that can be launched concurrently subject to the shared memory.
    // threads_bnd: The maximum number of kernels that can be launched concurrently subject to the maxThreadsPerMultiProcessor.
    // k_num_bnd: The final upper bounds of the number of concurrent kernels.
    // Allocate k_num_bnd_ storage.
    auto &temp_k_num_bnd = this->k_num_bnd_[this->current_key_str_];
    if (temp_k_num_bnd.size() < total_kernel_kinds or temp_k_num_bnd.empty()) {
      if (!temp_k_num_bnd.empty()) {
        temp_k_num_bnd.clear();
      }
      temp_k_num_bnd = vector<int>(total_kernel_kinds, 0);
    }
    unsigned int launch_bnd = 0, sm_bnd = 0, threads_bnd = 0;
    //double k_num_bnd = 0.0;
    unsigned int coef_k = 0, constant_term = 0;
    unsigned int blocks_k = 0, threads_k = 0;
    //unsigned int total_sm = gpu_prop.sharedMemPerMultiprocessor * gpu_prop.multiProcessorCount;
    //unsigned int total_threads = gpu_prop.maxThreadsPerMultiProcessor * gpu_prop.multiProcessorCount;
    unsigned int kernel_exec_time = 0, max_blocks_k = 0, up_blocks_k = 0, tail_blocks_t = 0, tail_blocks_sm = 0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      launch_bnd = sm_bnd = threads_bnd = 0;
      blocks_k = kernels->at(i).grid_x_ * kernels->at(i).grid_y_ * kernels->at(i).grid_z_;
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;
      kernel_exec_time = kernels->at(i).average_exec_time_;
      LOG(INFO) << kernels->at(i).name_ << ", blocks_k=" << blocks_k << ", threads_k=" << threads_k << ", exec_time=" << kernel_exec_time;

      // max_blocks_k is used as the upper bound of resident blocks on a GPU constrained by the device's shared memory.
      if (kernels->at(i).sm_total_ != 0) {
        max_blocks_k = (gpu_prop.sharedMemPerMultiprocessor / kernels->at(i).sm_total_) * gpu_prop.multiProcessorCount;
        if (blocks_k > max_blocks_k) {
          tail_blocks_sm = blocks_k % max_blocks_k;
        } else {
          tail_blocks_sm = blocks_k;
        }
        sm_bnd = ceil(static_cast<double>(max_blocks_k) / tail_blocks_sm);
        LOG(INFO) << "max_blocks bounded by shared memory: " << max_blocks_k << " ( sm_max=" << gpu_prop.sharedMemPerMultiprocessor \
          << ", sm_k=" << kernels->at(i).sm_total_ << ", #SM=" << gpu_prop.multiProcessorCount << " ), sm_bnd: " << sm_bnd;
        //this->k_num_bnd_[i] = sm_bnd;
        temp_k_num_bnd[i] = sm_bnd;
      } else {
        //this->k_num_bnd_[i] = numeric_limits<int>::max();
        temp_k_num_bnd[i] = numeric_limits<int>::max();
      }

      // up_blocks_k is used to store the upper bound of blocks_k constrained by thread configuration.
      up_blocks_k = (gpu_prop.maxThreadsPerMultiProcessor / threads_k) * gpu_prop.multiProcessorCount;
      // if blocks_k_i > up_blocks_k; then blocks_k_i = blocks_k_i % up_blocks_k;
      if (blocks_k > up_blocks_k) {
        tail_blocks_t = blocks_k % up_blocks_k;
      } else {
        tail_blocks_t = blocks_k;
      }
      threads_bnd = ceil(static_cast<double>(up_blocks_k) / tail_blocks_t);
      //this->k_num_bnd_[i] = std::min(this->k_num_bnd_[i], threads_bnd);
      temp_k_num_bnd[i] = std::min(static_cast<unsigned int>(temp_k_num_bnd[i]), threads_bnd);
      LOG(INFO) << "max_blocks bounded by threads: " << up_blocks_k << " ( threads_max=" << gpu_prop.maxThreadsPerMultiProcessor \
        << ", threads_k=" << threads_k << ", #SM=" << gpu_prop.multiProcessorCount << " )" << ", threads_bnd: " << threads_bnd;

      if (max_blocks_k != 0) {
        // Now max_blocks_k is used as the maximum number of blocks per SM for kernel k_i.
        max_blocks_k = (max_blocks_k > up_blocks_k) ? up_blocks_k : max_blocks_k;
        max_blocks_k = ceil(static_cast<double>(max_blocks_k) / gpu_prop.multiProcessorCount);
        tail_blocks_t = blocks_k % max_blocks_k;    // Total blocks left for kernel k_i.
        // Now up_blocks_k is used as the maximum number of blocks per SM for kernel k_i.
        up_blocks_k = ceil(static_cast<double>(blocks_k) / gpu_prop.multiProcessorCount);
        if (up_blocks_k > max_blocks_k) {
          kernel_exec_time = (kernel_exec_time * (up_blocks_k % max_blocks_k)) / up_blocks_k;
          //kernel_exec_time = (kernel_exec_time * (up_blocks_k - tail_blocks_t)) / up_blocks_k;
          //this->k_num_bnd_[i] = 1;
        }
      }
      launch_bnd = ceil(static_cast<double>(kernel_exec_time) / t_launch);
      //this->k_num_bnd_[i] = std::min(this->k_num_bnd_[i], launch_bnd);
      temp_k_num_bnd[i] = std::min(static_cast<unsigned int>(temp_k_num_bnd[i]), launch_bnd);
      LOG(INFO) << "average_exec_time=" << kernels->at(i).average_exec_time_ << ", kernel_exec_time=" << kernel_exec_time \
        << ", t_launch=" << t_launch << ", launch_bnd= " << launch_bnd;

      coef_k = static_cast<double>(tail_blocks_t * threads_k) / gpu_prop.multiProcessorCount;
      constant_term += static_cast<double>(threads_k * (gpu_prop.multiProcessorCount - 1)) / gpu_prop.multiProcessorCount;
      //LOG(INFO) << kernels->at(i).name_ << " ---> " << "theads_k: " << threads_k << ", blcoks_k: " << blocks_k << ", k_num_bnd: " << this->k_num_bnd_[i];
      LOG(INFO) << kernels->at(i).name_ << " ---> " << "theads_k: " << threads_k << ", blcoks_k: " << blocks_k << ", k_num_bnd: " << temp_k_num_bnd[i];

      //if (ceil(blocks_k / gpu_prop.multiProcessorCount) * threads_k > gpu_prop.maxThreadsPerMultiProcessor) {
      //  this->k_num_bnd_[i] = 1;
      //}

      glp_set_col_name(dop_mip, i + 1, kernels->at(i).name_.c_str());
      //glp_set_col_bnds(dop_mip, i + 1, GLP_DB, 0.0, this->k_num_bnd_[i]);
      glp_set_col_bnds(dop_mip, i + 1, GLP_DB, 0.0, temp_k_num_bnd[i]);
      glp_set_col_kind(dop_mip, i + 1, GLP_IV);
      glp_set_obj_coef(dop_mip, i + 1, coef_k);
    }
    // Set the constant part of the objective function to constant_term.
    glp_set_obj_coef(dop_mip, 0, constant_term);
    // End of bounds settings.

    // Constraints to the goal.
    const int total_constraints = 3;
    glp_add_rows(dop_mip, total_constraints);
    if (glp_get_num_rows(dop_mip) == 0) {
      LOG(INFO) << "ERROR! There is no kernel recorded.";
    }

    // regs_bias: The bias term of the register constraint formula. DEPRECATED!
    // sm_bias: The bias term of the shared memory constraint formula.
    // thread_bias: The bias term of the thread constraint formula.
    // coef_bias: The bias term used to compute the above three bias terms.
    double sm_bias = 0.0, threads_bias = 0.0;
    double coef_bias = static_cast<double>(gpu_prop.multiProcessorCount - 1) / static_cast<double>(gpu_prop.multiProcessorCount);
    double coef_sm = 0.0, coef_threads = 0.0;
    int *row_idx = new int[1 + total_constraints * total_kernel_kinds],
        *col_idx = new int[1 + total_constraints * total_kernel_kinds];
    double *coef_k_arr = new double[1 + total_constraints * total_kernel_kinds];
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      blocks_k = kernels->at(i).grid_x_ * kernels->at(i).grid_y_ * kernels->at(i).grid_z_;
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;

      sm_bias += kernels->at(i).sm_total_ * coef_bias;
      threads_bias += threads_k * coef_bias;

      up_blocks_k = (gpu_prop.maxThreadsPerMultiProcessor / threads_k) * gpu_prop.multiProcessorCount;
      if (kernels->at(i).sm_total_ != 0) {
        max_blocks_k = (gpu_prop.sharedMemPerMultiprocessor / kernels->at(i).sm_total_) * gpu_prop.multiProcessorCount;
        up_blocks_k = (up_blocks_k > max_blocks_k) ? max_blocks_k : up_blocks_k;
      }
      if (blocks_k > up_blocks_k) {
        blocks_k %= up_blocks_k;
      }

      coef_sm = static_cast<double>(kernels->at(i).sm_total_ * blocks_k) / gpu_prop.multiProcessorCount;
      coef_threads = static_cast<double>(threads_k * blocks_k) / gpu_prop.multiProcessorCount;

      row_idx[i + 1] = static_cast<int>(ceil((i + 1) / static_cast<double>(total_kernel_kinds)));
      col_idx[i + 1] = i + 1;
      coef_k_arr[i + 1] = coef_sm;
      row_idx[i + 1 + total_kernel_kinds * 1] = 1 + static_cast<int>(ceil((i + 1) / static_cast<double>(total_kernel_kinds)));
      col_idx[i + 1 + total_kernel_kinds * 1] = i + 1;
      coef_k_arr[i + 1 + total_kernel_kinds * 1] = coef_threads;
      row_idx[i + 1 + total_kernel_kinds * 2] = 2 + static_cast<int>(ceil((i + 1) / static_cast<double>(total_kernel_kinds)));
      col_idx[i + 1 + total_kernel_kinds * 2] = i + 1;
      coef_k_arr[i + 1 + total_kernel_kinds * 2] = 1;
    }
    glp_set_row_name(dop_mip, 1, "SMs");
    glp_set_row_bnds(dop_mip, 1, GLP_DB, 0.0, static_cast<double>(gpu_prop.sharedMemPerMultiprocessor - sm_bias));
    glp_set_row_name(dop_mip, 2, "Threads");
    glp_set_row_bnds(dop_mip, 2, GLP_DB, 0.0, static_cast<double>(gpu_prop.maxThreadsPerMultiProcessor - threads_bias));
    glp_set_row_name(dop_mip, 3, "Concurrency");
    glp_set_row_bnds(dop_mip, 3, GLP_DB, 1.0, static_cast<double>(MAX_GRIDS_PER_DEVICE)); // MAX_GRIDS_PER_DEVICE is defined in gpu_manager.hpp

    glp_load_matrix(dop_mip, total_kernel_kinds * total_constraints, row_idx, col_idx, coef_k_arr);
    // End of constraints settings and the initialization of MIP parameter matrix.

    glp_iocp dop_param;
    glp_init_iocp(&dop_param);
    dop_param.presolve = GLP_ON;
    CHECK_GLP_ERROR(glp_intopt(dop_mip, &dop_param), "glp_intopt");

    stringstream temp_ss;
    int max_degree_of_parallelism = 0;
    double obj_val = glp_mip_obj_val(dop_mip);
    LOG(INFO) << "OBJECTIVE VALUE: " << obj_val;
    int *obj_k_val = new int[total_kernel_kinds];
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      obj_k_val[i] = glp_mip_col_val(dop_mip, i + 1);
      max_degree_of_parallelism += obj_k_val[i];
      temp_ss << "[" << kernels->at(i).name_ << " = " << obj_k_val[i];
      if (i != (total_kernel_kinds - 1)) {
        temp_ss << ", ";
      } else {
        temp_ss << "]; ";
      }
    }
    LOG(INFO) << "Kernel concurrency settings: " << temp_ss.str();
    temp_ss.str("");
    temp_ss.clear();

    delete[] obj_k_val;
    delete[] row_idx;
    delete[] col_idx;
    delete[] coef_k_arr;
    glp_delete_prob(dop_mip);

    int max_dop = 0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      //max_dop = std::max(max_dop, this->k_num_bnd_[i]);
      max_dop = std::max(max_dop, temp_k_num_bnd[i]);
    }
    LOG(INFO) << "max_degree_of_parallelism: " << max_degree_of_parallelism << ", max_dop: " << max_dop;
    if (max_degree_of_parallelism == 0) {
      LOG(INFO) << "CANNOT LAUNCH KERNELS CONCURRENTLY!";
      max_degree_of_parallelism = max_dop;
    }

    return max_degree_of_parallelism;
  }

  // Lower-bound parallel-degree solver (the default when --pdsolver=false).
  //
  // Same GLPK MIP structure as ParallelDegreeUB but without the bias terms
  // in the row bounds and with blocks_k (not the tail wave) in the objective
  // coefficients.
  //
  // @param t_launch   Average kernel-launch overhead in microseconds.
  // @param kernels    Recorded kernel kinds (must be non-null, non-empty).
  // @param device_id  CUDA device supplying the hardware limits.
  // @return Sum of the per-kernel concurrency values from the MIP, or the
  //         largest single-kernel bound when the MIP result is <= 1.
  //
  // Fixes over the previous version: the shared_ptr was dereferenced before
  // the null check; `blocks_k % max_blocks_k` / `blocks_k % up_blocks_k`
  // could be zero and were used as divisors (division by zero); unsigned
  // counters were initialized from the double literal 0.0; raw
  // new[]/delete[] replaced with std::vector.
  int KernelAnalyzer::ParallelDegreeLB(uint64_t t_launch, boost::shared_ptr<vector<Kernel_t>> kernels, int device_id) {
    cudaDeviceProp gpu_prop;
    CUDA_CHECK(cudaGetDeviceProperties(&gpu_prop, device_id));

    glp_prob *dop_mip = glp_create_prob();
    glp_set_prob_name(dop_mip, "DegreeOfParallelismSolver");
    glp_set_obj_dir(dop_mip, GLP_MAX);
    glp_term_out(GLP_OFF);

    // Validate the input BEFORE dereferencing it.
    if (!kernels.get() || kernels->empty()) {
      LOG(FATAL) << "There is no kernels recorded! Please CHECK CUPTI settings.";
    }
    int total_kernel_kinds = kernels->size();

    glp_add_cols(dop_mip, total_kernel_kinds);
    if (glp_get_num_cols(dop_mip) == 0) {
      LOG(INFO) << "ERROR! There is no kernel recorded!";
    }

    auto &temp_k_num_bnd = this->k_num_bnd_[this->current_key_str_];
    // Allocate k_num_bnd_ storage (every entry is overwritten below).
    if (temp_k_num_bnd.size() < static_cast<size_t>(total_kernel_kinds)) {
      temp_k_num_bnd.resize(total_kernel_kinds, 0);
    }

    unsigned int launch_bnd = 0, sm_bnd = 0, threads_bnd = 0;
    unsigned int coef_k = 0;
    unsigned int blocks_k = 0, threads_k = 0;
    unsigned int kernel_exec_time = 0, max_blocks_k = 0, up_blocks_k = 0, tail_blocks_t = 0, tail_blocks_sm = 0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      launch_bnd = sm_bnd = threads_bnd = 0;
      blocks_k = kernels->at(i).grid_x_ * kernels->at(i).grid_y_ * kernels->at(i).grid_z_;
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;
      kernel_exec_time = kernels->at(i).average_exec_time_;
      max_blocks_k = up_blocks_k = tail_blocks_t = tail_blocks_sm = 0;

      // Bound imposed by the device's shared-memory capacity.
      if (kernels->at(i).sm_total_ != 0) {
        max_blocks_k = (gpu_prop.sharedMemPerMultiprocessor / kernels->at(i).sm_total_) * gpu_prop.multiProcessorCount;
        if (blocks_k > max_blocks_k) {
          tail_blocks_sm = blocks_k % max_blocks_k;
          // A zero remainder means the last wave is completely full; use a
          // full wave instead of dividing by zero below.
          if (tail_blocks_sm == 0) {
            tail_blocks_sm = max_blocks_k;
          }
        } else {
          tail_blocks_sm = blocks_k;
        }
        sm_bnd = ceil(static_cast<double>(max_blocks_k) / tail_blocks_sm);
        LOG(INFO) << "max_blocks bounded by shared memory: " << max_blocks_k << " ( sm_max=" << gpu_prop.sharedMemPerMultiprocessor
          << ", sm_k=" << kernels->at(i).sm_total_ << ", #SM=" << gpu_prop.multiProcessorCount << " )" << ", sm_bnd: " << sm_bnd;
        temp_k_num_bnd[i] = sm_bnd;
      } else {
        // No shared memory usage -> shared memory imposes no bound.
        temp_k_num_bnd[i] = numeric_limits<int>::max();
      }

      // Bound imposed by the device's thread capacity.
      up_blocks_k = (gpu_prop.maxThreadsPerMultiProcessor / threads_k) * gpu_prop.multiProcessorCount;
      if (blocks_k > up_blocks_k) {
        tail_blocks_t = blocks_k % up_blocks_k;
        // Same full-wave correction as above to avoid a zero divisor.
        if (tail_blocks_t == 0) {
          tail_blocks_t = up_blocks_k;
        }
      } else {
        tail_blocks_t = blocks_k;
      }
      threads_bnd = ceil(static_cast<double>(up_blocks_k) / tail_blocks_t);
      temp_k_num_bnd[i] = std::min(static_cast<unsigned int>(temp_k_num_bnd[i]), threads_bnd);
      LOG(INFO) << "max_blocks bounded by threads: " << up_blocks_k << " ( threads_max=" << gpu_prop.maxThreadsPerMultiProcessor
        << ", threads_k=" << threads_k << ", #SM=" << gpu_prop.multiProcessorCount << " )" << ", threads_bnd: " << threads_bnd;

      if (max_blocks_k != 0) {
        max_blocks_k = (max_blocks_k > up_blocks_k) ? up_blocks_k : max_blocks_k;
        max_blocks_k = ceil(static_cast<double>(max_blocks_k) / gpu_prop.multiProcessorCount);
        // TODO: Is this right to use the maximum number of blocks on a SM for kernel K_i?
        up_blocks_k = ceil(static_cast<double>(blocks_k) / gpu_prop.multiProcessorCount);
        LOG(INFO) << "max_blocks_k: " << max_blocks_k << ", kernel_exec_time: " << kernel_exec_time << ", up_blocks_k: " << up_blocks_k;
        if (up_blocks_k > max_blocks_k) {
          // Scale the execution time down to the tail wave; an exact multiple
          // counts as one full wave.
          unsigned int residual_blocks = (up_blocks_k % max_blocks_k == 0) ? max_blocks_k : (up_blocks_k % max_blocks_k);
          kernel_exec_time = (kernel_exec_time * residual_blocks) / static_cast<double>(up_blocks_k);
          LOG(INFO) << "Execution time adjustment: " << kernel_exec_time << " us (new)";
        }
      }
      // Bound imposed by the kernel-launch overhead.
      launch_bnd = ceil(static_cast<double>(kernel_exec_time) / t_launch);
      temp_k_num_bnd[i] = std::min(static_cast<unsigned int>(temp_k_num_bnd[i]), launch_bnd);
      LOG(INFO) << "average_exec_time=" << kernels->at(i).average_exec_time_ << ", kernel_exec_time=" << kernel_exec_time
        << ", t_launch=" << t_launch << ", launch_bnd= " << launch_bnd;

      coef_k = (blocks_k * threads_k) / gpu_prop.multiProcessorCount;
      LOG(INFO) << kernels->at(i).name_ << " ----> threads_k: " << threads_k << ", blocks_k: " << blocks_k << ", k_num_bnd: " << temp_k_num_bnd[i];

      glp_set_col_name(dop_mip, i + 1, kernels->at(i).name_.c_str());
      glp_set_col_bnds(dop_mip, i + 1, GLP_DB, 0.0, temp_k_num_bnd[i]);
      glp_set_col_kind(dop_mip, i + 1, GLP_IV); // Used to check low-bound SM.
      glp_set_obj_coef(dop_mip, i + 1, coef_k);
    }

    const int total_constraints = 3;
    glp_add_rows(dop_mip, total_constraints);
    if (glp_get_num_rows(dop_mip) == 0) {
      LOG(INFO) << "ERROR! Cannot construct constraints!";
    }

    double coef_sm = 0.0, coef_threads = 0.0;
    // GLPK's sparse-matrix arrays are 1-based, hence the extra leading slot.
    std::vector<int> row_idx(1 + total_constraints * total_kernel_kinds);
    std::vector<int> col_idx(1 + total_constraints * total_kernel_kinds);
    std::vector<double> coef_k_arr(1 + total_constraints * total_kernel_kinds);
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      blocks_k = kernels->at(i).grid_x_ * kernels->at(i).grid_y_ * kernels->at(i).grid_z_;
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;

      up_blocks_k = (gpu_prop.maxThreadsPerMultiProcessor / threads_k) * gpu_prop.multiProcessorCount;
      if (kernels->at(i).sm_total_ != 0) {
        max_blocks_k = (gpu_prop.sharedMemPerMultiprocessor / kernels->at(i).sm_total_) * gpu_prop.multiProcessorCount;
        up_blocks_k = (up_blocks_k > max_blocks_k) ? max_blocks_k : up_blocks_k;
      }
      if (blocks_k > up_blocks_k) {
        blocks_k %= up_blocks_k;
      }

      coef_sm = static_cast<double>(kernels->at(i).sm_total_ * blocks_k) / gpu_prop.multiProcessorCount;
      coef_threads = static_cast<double>(blocks_k * threads_k) / gpu_prop.multiProcessorCount;

      // Row 1: shared memory, row 2: threads, row 3: concurrency.
      row_idx[i + 1] = static_cast<int>(ceil((i + 1) / static_cast<double>(total_kernel_kinds)));
      col_idx[i + 1] = i + 1;
      coef_k_arr[i + 1] = coef_sm;
      row_idx[i + 1 + total_kernel_kinds * 1] = 1 + static_cast<int>(ceil((i + 1) / static_cast<double>(total_kernel_kinds)));
      col_idx[i + 1 + total_kernel_kinds * 1] = i + 1;
      coef_k_arr[i + 1 + total_kernel_kinds * 1] = coef_threads;
      row_idx[i + 1 + total_kernel_kinds * 2] = 2 + static_cast<int>(ceil((i + 1) / static_cast<double>(total_kernel_kinds)));
      col_idx[i + 1 + total_kernel_kinds * 2] = i + 1;
      coef_k_arr[i + 1 + total_kernel_kinds * 2] = 1;
    }
    glp_set_row_name(dop_mip, 1, "SMs");
    glp_set_row_bnds(dop_mip, 1, GLP_DB, 0.0, static_cast<double>(gpu_prop.sharedMemPerMultiprocessor));
    glp_set_row_name(dop_mip, 2, "Threads");
    glp_set_row_bnds(dop_mip, 2, GLP_DB, 0.0, static_cast<double>(gpu_prop.maxThreadsPerMultiProcessor));
    glp_set_row_name(dop_mip, 3, "Concurrency");
    glp_set_row_bnds(dop_mip, 3, GLP_DB, 1.0, static_cast<double>(MAX_GRIDS_PER_DEVICE)); // MAX_GRIDS_PER_DEVICE is defined in gpu_manager.hpp

    glp_load_matrix(dop_mip, total_kernel_kinds * total_constraints, row_idx.data(), col_idx.data(), coef_k_arr.data());

    glp_iocp dop_param;
    glp_init_iocp(&dop_param);
    dop_param.presolve = GLP_ON;
    CHECK_GLP_ERROR(glp_intopt(dop_mip, &dop_param), "glp_intopt");

    stringstream temp_ss;
    int max_degree_of_parallelism = 0;
    double obj_val = glp_mip_obj_val(dop_mip);
    LOG(INFO) << "OBJECTIVE value: " << obj_val;
    std::vector<int> obj_k_val(total_kernel_kinds);
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      obj_k_val[i] = glp_mip_col_val(dop_mip, i + 1);
      max_degree_of_parallelism += obj_k_val[i];
      temp_ss << "[ " << kernels->at(i).name_ << " = " << obj_k_val[i];
      if (i != (total_kernel_kinds - 1)) {
        temp_ss << ", ";
      } else {
        temp_ss << " ];";
      }
    }

    LOG(INFO) << "Kernel concurrency settings: " << temp_ss.str();

    glp_delete_prob(dop_mip);

    int max_dop = 0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      max_dop = std::max(max_dop, temp_k_num_bnd[i]);
    }
    LOG(INFO) << "max_degree_of_parallelism: " << max_degree_of_parallelism << ", max_dop: " << max_dop;
    if (max_degree_of_parallelism <= 1) {
      // Fall back to the largest per-kernel bound when the MIP finds no
      // usable concurrency.
      LOG(INFO) << "CANNOT LAUNCH KERNELS CONCURRENTLY!";
      max_degree_of_parallelism = max_dop;
    }

    return max_degree_of_parallelism;
  }

  // Computes the maximum degree of parallelism for launching the recorded
  // kernels concurrently on device `device_id`.
  //
  // A mixed-integer program (solved with GLPK) maximizes the total number of
  // resident threads, subject to three device constraints: overall grid
  // concurrency (row "Concurrency"), shared memory per SM (row
  // "SharedMemory"), and threads per SM (row "Threads").  Per-kernel upper
  // bounds derived from shared memory, thread occupancy, and launch latency
  // are cached in k_num_bnd_[current_key_str_].
  //
  // t_launch:  kernel launch overhead; bounds how many launches can overlap a
  //            single kernel's execution time.
  // kernels:   kernels recorded (via CUPTI) for the current region; must be
  //            non-null and non-empty, otherwise LOG(FATAL) aborts.
  // device_id: CUDA device whose hardware limits constrain the solution.
  //
  // Returns the solved degree of parallelism, or the largest per-kernel bound
  // when the solver finds no useful concurrency (result <= 1).
  int KernelAnalyzer::ParallelDegree(uint64_t t_launch, boost::shared_ptr<vector<Kernel_t>> kernels, int device_id) {
    // Validate the kernel list BEFORE dereferencing it; the previous version
    // called kernels->size() on a possibly-null shared_ptr first.
    if (!kernels.get() || kernels->empty()) {
      LOG(FATAL) << "There is no kernels recorded! Please CHECK CUPTI settings.";
    }
    int total_kernel_kinds = kernels->size();

    cudaDeviceProp gpu_prop;
    CUDA_CHECK(cudaGetDeviceProperties(&gpu_prop, device_id));

    glp_prob *dop_mip = glp_create_prob();
    glp_set_prob_name(dop_mip, "HGP4NN_ParallelDegreeSolver");
    glp_set_obj_dir(dop_mip, GLP_MAX);
    glp_term_out(GLP_OFF);

    glp_add_cols(dop_mip, total_kernel_kinds);
    if (glp_get_num_cols(dop_mip) == 0) {
      LOG(FATAL) << "ERROR! There is no kernel recorded!";
    }
    // Allocate k_num_bnd_ storage for this region's per-kernel bounds.
    // clear() on an already-empty vector is a no-op, so no emptiness check.
    auto &temp_k_num_bnd = this->k_num_bnd_[this->current_key_str_];
    temp_k_num_bnd.clear();
    temp_k_num_bnd.resize(total_kernel_kinds, 0);

    // Per-kernel bounds computed below:
    //   sm_bnd      -- copies that fit w.r.t. shared memory per SM,
    //   threads_bnd -- copies that fit w.r.t. threads per SM,
    //   launch_bnd  -- launches that overlap one kernel's execution time.
    uint launch_bnd = 0, sm_bnd = 0, threads_bnd = 0;
    uint blocks_k = 0, threads_k = 0;
    uint kernel_exec_time = 0, max_blocks_k = 0, up_blocks_k = 0, tail_blocks_t = 0, tail_blocks_sm = 0;
    // Blocks of the last (partial) wave of each kernel; used to scale the
    // LP coefficients and the launch-latency bound below.
    std::vector<uint> residual_kernel_blocks(total_kernel_kinds, 0);
    for (auto i = 0; i < total_kernel_kinds; ++ i) {
      blocks_k = kernels->at(i).grid_x_ * kernels->at(i).grid_y_ * kernels->at(i).grid_z_;
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;
      kernel_exec_time = kernels->at(i).average_exec_time_;

      launch_bnd = sm_bnd = threads_bnd = 0;
      max_blocks_k = up_blocks_k = tail_blocks_t = tail_blocks_sm = 0;

      // Calculate the (sm_max * #SM) / (sm_ki * #blocks_ki).
      if (kernels->at(i).sm_total_ != 0) {
        max_blocks_k = (gpu_prop.sharedMemPerMultiprocessor / ceiling(kernels->at(i).sm_total_,
                           SHARED_MEMORY_ALLOCATION_UNIT_SIZE)) * gpu_prop.multiProcessorCount;
        if (blocks_k > max_blocks_k) {
          tail_blocks_sm = (blocks_k % max_blocks_k == 0) ? max_blocks_k : (blocks_k % max_blocks_k);
        } else {
          tail_blocks_sm = blocks_k;
        }
        if (tail_blocks_sm == max_blocks_k) {
          sm_bnd = 1;
        } else {
          sm_bnd = ceil(static_cast<double>(max_blocks_k) / tail_blocks_sm);
        }
        temp_k_num_bnd[i] = sm_bnd;
        residual_kernel_blocks[i] = tail_blocks_sm;
        LOG(INFO) << "max_blocks bounded by shared memory: " << max_blocks_k << " (sm_max="
                  << gpu_prop.sharedMemPerMultiprocessor << ", sm_k="
                  << ceiling(kernels->at(i).sm_total_, SHARED_MEMORY_ALLOCATION_UNIT_SIZE)
                  << ", #SM=" << gpu_prop.multiProcessorCount << ")" << ", sm_bnd: " << sm_bnd;
      } else {
        // Kernel uses no shared memory: unconstrained on this resource.
        temp_k_num_bnd[i] = numeric_limits<int>::max();
      }

      // Calculate the (threads_max * #SM) / (threads_ki * #blocks_ki).
      up_blocks_k = (gpu_prop.maxThreadsPerMultiProcessor / threads_k) *
        gpu_prop.multiProcessorCount;
      if (blocks_k > up_blocks_k) {
        tail_blocks_t = (blocks_k % up_blocks_k == 0) ? up_blocks_k : (blocks_k % up_blocks_k);
      } else {
        tail_blocks_t = blocks_k;
      }
      if (tail_blocks_t == up_blocks_k) {
        threads_bnd = 1;
      } else {
        threads_bnd = ceil(static_cast<double>(up_blocks_k) / tail_blocks_t);
      }
      temp_k_num_bnd[i] = std::min(static_cast<uint>(temp_k_num_bnd[i]), threads_bnd);
      residual_kernel_blocks[i] = (residual_kernel_blocks[i] == 0) ? tail_blocks_t :
        std::min(residual_kernel_blocks[i], tail_blocks_t);
      LOG(INFO) << "max_blocks bounded by threads: " << up_blocks_k << "(threads_max="
                << gpu_prop.maxThreadsPerMultiProcessor << ", threads_k=" << threads_k
                << ", #SM=" << gpu_prop.multiProcessorCount <<"), threads_bnd: "
                << threads_bnd;

      // Calculate ceil(T_ki / T_launch), where T_ki is the execution time of
      // the residual (last-wave) blocks only.
      if (max_blocks_k != 0) {
        max_blocks_k = (max_blocks_k > up_blocks_k) ? up_blocks_k : max_blocks_k;
        max_blocks_k = max_blocks_k / gpu_prop.multiProcessorCount;
        up_blocks_k = ceil(static_cast<double>(blocks_k) / gpu_prop.multiProcessorCount);
        if (up_blocks_k > max_blocks_k) {
          uint residual_blocks = (up_blocks_k % max_blocks_k == 0) ? max_blocks_k :
            (up_blocks_k % max_blocks_k);
          CHECK_EQ(residual_blocks, ceil(residual_kernel_blocks[i] /
                                         static_cast<double>(gpu_prop.multiProcessorCount)))
            << "residual_blocks(" << residual_blocks << ") != " << residual_kernel_blocks[i]
            << " / " << static_cast<double>(gpu_prop.multiProcessorCount);
          kernel_exec_time = ceil((kernel_exec_time * residual_blocks) /
                                  static_cast<double>(up_blocks_k));
        }
      } else {
        // No shared-memory bound was computed; derive from the thread bound.
        max_blocks_k = up_blocks_k / gpu_prop.multiProcessorCount;
        up_blocks_k = ceil(static_cast<double>(blocks_k) / gpu_prop.multiProcessorCount);

        if (up_blocks_k > max_blocks_k) {
          uint residual_blocks = (up_blocks_k % max_blocks_k == 0) ? max_blocks_k :
            (up_blocks_k % max_blocks_k);
          CHECK_EQ(residual_blocks, ceil(residual_kernel_blocks[i] /
                                         static_cast<double>(gpu_prop.multiProcessorCount)))
            << "residual_blocks(" << residual_blocks << ") != " << ceil(residual_kernel_blocks[i] /
                                               static_cast<double>(gpu_prop.multiProcessorCount));
          kernel_exec_time = ceil((kernel_exec_time * residual_blocks) /
                                  static_cast<double>(up_blocks_k));
        }
      }
      launch_bnd = ceil(static_cast<double>(kernel_exec_time) / t_launch);

      // Final per-kernel bound: minimum of the shared-memory, thread,
      // launch-latency, and device-wide grid-count limits.
      temp_k_num_bnd[i] = std::min(static_cast<uint>(temp_k_num_bnd[i]), launch_bnd);
      LOG(INFO) << kernels->at(i).name_ << " --> AverageExecTime=" << kernels->at(i).average_exec_time_
                << ", KernelExecTime=" << kernel_exec_time << ", t_launch=" << t_launch
                << ", launch_bnd=" << launch_bnd;
      temp_k_num_bnd[i] = std::min(temp_k_num_bnd[i], MAX_GRIDS_PER_DEVICE);
    }

    // Setup the objective function: threads_total = \sum_{i=1}^N (threads_ki * blocks_ki)
    int col_upper_bound = 0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;

      col_upper_bound = ceil(static_cast<double>(residual_kernel_blocks[i]) * temp_k_num_bnd[i] /
                             gpu_prop.multiProcessorCount);
      CHECK_GT(col_upper_bound, 0) << kernels->at(i).name_ <<  ": col_upper_bound should be larger than 0. "
                                   << residual_kernel_blocks[i] << "*" << temp_k_num_bnd[i]
                                   << "/" << gpu_prop.multiProcessorCount;

      glp_set_col_name(dop_mip, i + 1,
                       (kernels->at(i).name_ + "'s Blocks Per Stream Multiprocessor").c_str());
      glp_set_col_bnds(dop_mip, i + 1, GLP_DB, 0.0, col_upper_bound);
      glp_set_col_kind(dop_mip, i + 1, GLP_IV);
      glp_set_obj_coef(dop_mip, i + 1, threads_k);
    }

    constexpr int total_constraint_rows = 3;
    glp_add_rows(dop_mip, total_constraint_rows);
    if (glp_get_num_rows(dop_mip) == 0) {
      LOG(FATAL) << "ERROR! Cannot construct constraints!";
    }
    constexpr double bound_coef = 0.999;
    // GLPK sparse-matrix arrays are 1-based, hence the extra leading slot.
    // std::vector replaces the previous raw new[]/delete[] (RAII cleanup).
    const int coef_count = total_constraint_rows * total_kernel_kinds;
    std::vector<int> row_idx(1 + coef_count, 0);
    std::vector<int> col_idx(1 + coef_count, 0);
    std::vector<double> coef_arr(1 + coef_count, 0.0);
    double low_bound = 1.0, block_smem = 0.0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      threads_k = kernels->at(i).block_x_ * kernels->at(i).block_y_ * kernels->at(i).block_z_;
      block_smem = kernels->at(i).sm_total_;

      low_bound -= static_cast<double>(gpu_prop.multiProcessorCount) /
        residual_kernel_blocks[i] * bound_coef;

      // Row 1 = Concurrency, 2 = SharedMemory, 3 = Threads.  (The previous
      // ceil((i + 1) / N) expression always evaluates to 1 for 0 <= i < N.)
      row_idx[i + 1] = 1;
      row_idx[i + 1 + total_kernel_kinds * 1] = 2;
      row_idx[i + 1 + total_kernel_kinds * 2] = 3;
      col_idx[i + 1] = i + 1;
      col_idx[i + 1 + total_kernel_kinds * 1] = i + 1;
      col_idx[i + 1 + total_kernel_kinds * 2] = i + 1;
      coef_arr[i + 1] = static_cast<double>(gpu_prop.multiProcessorCount) / residual_kernel_blocks[i];
      coef_arr[i + 1 + total_kernel_kinds * 1] = static_cast<double>(block_smem);
      coef_arr[i + 1 + total_kernel_kinds * 2] = static_cast<double>(threads_k);
    }
    glp_set_row_name(dop_mip, 1, "Concurrency");
    glp_set_row_bnds(dop_mip, 1, GLP_DB, low_bound, MAX_GRIDS_PER_DEVICE);
    glp_set_row_name(dop_mip, 2, "SharedMemory");
    glp_set_row_bnds(dop_mip, 2, GLP_DB, 0.0, gpu_prop.sharedMemPerMultiprocessor);
    glp_set_row_name(dop_mip, 3, "Threads");
    glp_set_row_bnds(dop_mip, 3, GLP_DB, 0.0, gpu_prop.maxThreadsPerMultiProcessor);

    // Load the GLPK coefficient matrix arrays.
    glp_load_matrix(dop_mip, coef_count, row_idx.data(), col_idx.data(), coef_arr.data());

    glp_iocp dop_param;
    glp_init_iocp(&dop_param);
    dop_param.msg_lev = GLP_MSG_ERR;
    dop_param.presolve = GLP_ON;
    CHECK_GLP_ERROR(glp_intopt(dop_mip, &dop_param), "glp_intopt");

    stringstream temp_ss;
    int max_degree_of_parallelism = 0;
    double obj_val = glp_mip_obj_val(dop_mip);
    LOG(INFO) << "OBJECTIVE value: " << obj_val;
    std::vector<int> obj_k_val(total_kernel_kinds, 0);
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      obj_k_val[i] = glp_mip_col_val(dop_mip, i + 1);
      // Convert blocks-per-SM back into a whole-device concurrency count.
      max_degree_of_parallelism += static_cast<int>(ceil(static_cast<double>(obj_k_val[i]) *
                                                         gpu_prop.multiProcessorCount /
                                                         residual_kernel_blocks[i]));

      temp_ss << "[ " << kernels->at(i).name_ << " = " << obj_k_val[i];
      temp_ss << " (" << (static_cast<double>(obj_k_val[i]) * gpu_prop.multiProcessorCount /
                          residual_kernel_blocks[i])
              << "," << (static_cast<double>(obj_k_val[i] + bound_coef) * gpu_prop.multiProcessorCount /
                         residual_kernel_blocks[i]) << ")";

      if (i != (total_kernel_kinds - 1)) {
        temp_ss << ", ";
      } else {
        temp_ss << " ].";
      }
    }

    LOG(INFO) << "Kernel concurrency settings: " << temp_ss.str();
    temp_ss.str("");
    temp_ss.clear();

    glp_delete_prob(dop_mip);

    // Fall back to the largest standalone per-kernel bound when the solver
    // produced no exploitable concurrency.
    int max_dop = 0;
    for (int i = 0; i < total_kernel_kinds; ++ i) {
      max_dop = std::max(max_dop, temp_k_num_bnd[i]);
    }
    LOG(INFO) << "max_degree_of_parallelism: " << max_degree_of_parallelism << ", max_dop: "
              << max_dop;
    if (max_degree_of_parallelism <= 1) {  // int comparison; was `<= 1.0`
      LOG(INFO) << "CANNOT LAUNCH KERNELS CONCURRENTLY!";
      max_degree_of_parallelism = max_dop;
    }

    return max_degree_of_parallelism;
  }

  // Switches the analyzer to a new CUDA device.  Cached parallel-degree
  // decisions were computed against the previous device's hardware limits,
  // so they are invalidated when the device actually changes.
  void KernelAnalyzer::SetDevice(int device_id) {
    if (this->device_id_ == device_id) {
      return;  // Same device: cached results remain valid.
    }
    this->device_id_ = device_id;

    // Drop results derived from the old device's properties.  clear() is a
    // no-op on empty containers, so no emptiness checks are needed.  Note
    // the k_num_bnd_ map keeps its keys; only the bound vectors are emptied.
    this->pdegree_map_.clear();
    for (auto &bnd_entry : this->k_num_bnd_) {
      bnd_entry.second.clear();
    }
  }

  // Discards all cached analysis state: the parallel-degree map and every
  // per-region kernel-bound vector (the k_num_bnd_ map itself included).
  void KernelAnalyzer::Reset() {
    // clear() is a no-op on empty containers, so no emptiness checks.
    this->pdegree_map_.clear();
    for (auto &bnd_entry : this->k_num_bnd_) {
      bnd_entry.second.clear();
    }
    this->k_num_bnd_.clear();
    // TODO: cudaEvent reset.
  }

} // namespace caffe

#endif  // USE_PROF
#endif  // CPU_ONLY
