/**
 * Copyright (C) [2019] [NAOC-TJU Lab & NUS Xtra Computing Group]
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or (at
 * your option) any later version. You may obtain a copy of the License at
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Hao FU (TJU-NUS Joint Ph.D) <haofu@tju.edu.cn>
 */

#ifndef CPU_ONLY
#ifdef USE_PROF

#ifndef __KERNEL_ANALYZER_H__
#define __KERNEL_ANALYZER_H__

#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <map>

#include <cmath>

#include <cuda.h>
#include <cuda_runtime.h>

#include "caffe/util/async_tracker.hpp"
#include "caffe/util/gpu_manager.hpp"

#define SHARED_MEMORY_ALLOCATION_UNIT_SIZE 256

// Names imported from the std namespace for brevity below.
using std::string;
using std::vector;
using std::map;
using std::ceil;
using std::floor;
using std::sqrt;

namespace caffe {
  // Markers used to tag the boundaries (start/end) of a profiled kernel block.
  enum class LABEL {START = 0, END = 1};

  /**
   * @class KernelAnalyzer
   *
   * @brief Class used to analyze kernel runtime configurations and figure out
   *        the maximum number of kernels that can be launched in parallel.
   *        Each thread (each thread is assumed to manage a different GPU
   *        device) should have its own instance of KernelAnalyzer.
   *
   * NOTE(review): `boost::shared_ptr` and `Kernel_t` are presumably brought
   * into scope by the project headers included above (async_tracker.hpp /
   * gpu_manager.hpp) — confirm, since no boost header is included directly.
   */
  class KernelAnalyzer {
  public:
    // Default destructor.
    ~KernelAnalyzer();

    /**
     * @brief    Get a KernelAnalyzer object.
     *
     * Thread-local context for KernelAnalyzer: each calling thread obtains
     * its own instance (one per managed GPU device).
     */
    static KernelAnalyzer& Get();

    /**
     * @brief Kernel profiler starter.
     *
     * Function used to start the parallel analyzer. If an analysis result
     * already exists for this (layer, loop) pair, the cached value is
     * returned through parallel_degree; otherwise the resource tracker is
     * started to profile the subsequent kernels and parallel_degree is set
     * to 1.
     *
     * @param[in] layer_name    Name of the current network layer analyzed.
     * @param[in] loop_label    Unique label for the current kernel block.
     * @param[out] parallel_degree    Resulting concurrency supported by the
     *                                labeled block in the current layer.
     */
    void Start(const string layer_name,
               const string loop_label,
               int& parallel_degree);

    /**
     * @brief Kernel profiler stopper.
     *
     * Function used to stop the parallel analyzer and start the analysis of
     * the kernels recorded for the current loop.
     */
    void Stop();

    /**
     * @brief    ParallelDegree analyzer AND getter.
     *
     * Methods used to analyze the recorded kernels and return the degree of
     * parallelism of the current kernel block, based on the bound of thread
     * blocks resident on a single SM. NOTE(review): UB/LB presumably stand
     * for upper/lower bound variants of the estimate — confirm against the
     * implementation file.
     *
     * @param[in] t_launch     Time needed to launch a single CUDA kernel.
     * @param[in] kernels      Vector storing the execution configuration of
     *                         the recorded kernels.
     * @param[in] device_id    ID of the GPU device the kernels run on.
     */
    int ParallelDegreeUB(uint64_t t_launch, boost::shared_ptr<vector<Kernel_t>> kernels, int device_id);
    int ParallelDegreeLB(uint64_t t_launch, boost::shared_ptr<vector<Kernel_t>> kernels, int device_id);
    int ParallelDegree(uint64_t t_launch, boost::shared_ptr<vector<Kernel_t>> kernels, int device_id);

    /**
     * @brief Degree-of-parallelism recorder.
     *
     * Methods used to record the degree of parallelism of each execution
     * unit. NOTE(review): the semantics of the `direction` tag are not
     * visible in this header — confirm in the implementation.
     */
    void RecordParallelDegree();
    void RecordParallelDegree(const string direction);

    /**
     * @brief Analyzed-kernel recorder.
     *
     * Method used to record the kernels that were used to do the analysis.
     */
    void RecordKernelsAnalyzed(boost::shared_ptr<vector<Kernel_t>> kernels) const;

    /**
     * @brief Device setting function.
     *
     * Function used to set the current device that kernels are running on.
     *
     * @param[in] device_id    The device ID that kernels are deployed on
     *                         right now.
     */
    void SetDevice(int device_id);

    /**
     * @brief KernelAnalyzer reset function.
     *
     * Reset the class members to their initialization values.
     */
    void Reset();

  protected:
    string current_key_str_; /**< Key value of the loop currently profiled */
    /**
     * Used to manage the degree of parallelism of kernel blocks.
     * Maps a specific loop (key string) to its parallel degree.
     */
    // map<string, DopVal_t> pdegree_map_;
    map<string, int> pdegree_map_;

    int device_id_; /**< ID of the GPU device that kernels run on. */
    // Per-loop record of the bounds of each GPU kernel considered.
    map<string, vector<int>> k_num_bnd_;
    // CUDA event pair — presumably brackets the profiled region for timing;
    // confirm usage in the implementation.
    cudaEvent_t start_event_, end_event_;

  private:
    // The private constructor to avoid duplicate instantiation; instances
    // are obtained only through the thread-local Get().
    KernelAnalyzer();

    DISABLE_COPY_MOVE_AND_ASSIGN(KernelAnalyzer);
  };
} /* namespace caffe */

#endif /* __KERNEL_ANALYZER_H__ */

#endif /* USE_PROF */
#endif /* CPU_ONLY */
