/*
 * This file is part of AdaptiveCpp, an implementation of SYCL and C++ standard
 * parallelism for CPUs and GPUs.
 *
 * Copyright The AdaptiveCpp Contributors
 *
 * AdaptiveCpp is released under the BSD 2-Clause "Simplified" License.
 * See file LICENSE in the project root for full license details.
 */
// SPDX-License-Identifier: BSD-2-Clause
#include <memory>
#include <mutex>

#include "hipSYCL/common/debug.hpp"
#include "hipSYCL/runtime/application.hpp"
#include "hipSYCL/runtime/dag_direct_scheduler.hpp"
#include "hipSYCL/runtime/dag_manager.hpp"
#include "hipSYCL/runtime/dag_node.hpp"
#include "hipSYCL/runtime/dag_unbound_scheduler.hpp"
#include "hipSYCL/runtime/operations.hpp"
#include "hipSYCL/runtime/settings.hpp"
#include "hipSYCL/runtime/util.hpp"
#include "hipSYCL/runtime/runtime.hpp"

namespace hipsycl {
namespace rt {


// RAII guard: when the build scope ends, flush all operations that were
// recorded into the DAG manager while the guard was alive.
dag_build_guard::~dag_build_guard() { _mgr->flush(); }

// Constructs the DAG manager for the given runtime: creates the DAG builder
// that collects submitted operations, and initializes both scheduler
// backends (direct and unbound) against the same runtime instance.
dag_manager::dag_manager(runtime *rt)
    : _builder{std::make_unique<dag_builder>(rt)},
      _direct_scheduler{rt}, _unbound_scheduler{rt}, _rt{rt} {
  HIPSYCL_DEBUG_INFO << "dag_manager: DAG manager is alive!" << std::endl;
}

// Tears down the DAG manager: submits any still-pending operations and
// blocks until all submitted operations have completed, so no work is
// dropped or left running when the manager is destroyed.
dag_manager::~dag_manager()
{
  HIPSYCL_DEBUG_INFO << "dag_manager: Waiting for async worker..." << std::endl;
  
  // Flush remaining nodes and trigger garbage collection, then wait for
  // everything that was ever submitted to finish before returning.
  flush_and_gc();
  wait();

  HIPSYCL_DEBUG_INFO << "dag_manager: Shutdown." << std::endl;
}

// Returns a non-owning pointer to the DAG builder that accumulates newly
// submitted operations; ownership remains with the dag_manager.
dag_builder *dag_manager::builder() const { return _builder.get(); }

void dag_manager::flush()
{
  HIPSYCL_DEBUG_INFO << "dag_manager: Submitting asynchronous flush..."
                     << std::endl;
  // This lock ensures that the submission process has atomic semantics.
  // In particular, it is important that once we have popped the latest
  // nodes from the DAG builder using finish_and_reset(), we directly submit them
  // to the worker thread.
  // Otherwise, the order in which submissions are processed in the worker thread
  // can be incorrect. This can cause queue::submit();flush_and_gc() to fail in
  // actually ensuring submission, or introduce dependencies in nodes during submission
  //  to other nodes that have not yet been submitted.
  std::lock_guard<std::mutex> lock{_flush_mutex};

  if(_builder->get_current_dag_size() > 0){
    dag new_dag = _builder->finish_and_reset();

    if(new_dag.num_nodes() > 0) {
    
      HIPSYCL_DEBUG_INFO << "dag_manager: Flushing!" << std::endl;
      
      for(dag_node_ptr req : new_dag.get_memory_requirements()){
        assert_is<memory_requirement>(req->get_operation());

        memory_requirement *mreq =
            cast<memory_requirement>(req->get_operation());

        if(mreq->is_buffer_requirement()) {
          
          HIPSYCL_DEBUG_INFO
              << "dag_manager: Releasing dead users of data region "
              << cast<buffer_memory_requirement>(mreq)->get_data_region().get()
              << std::endl;

          cast<buffer_memory_requirement>(mreq)
              ->get_data_region()
              ->get_users()
              .release_dead_users();
        }
        else
          assert(false && "Non-buffer requirements are unsupported");
      }

      // Go!!!
      scheduler_type stype =
          application::get_settings().get<setting::scheduler_type>();
      
      // This is okay because get_command_groups() returns
      // the nodes in the order they were submitted. This
      // makes it safe to submit them in this order to the direct scheduler.
      for(auto node : new_dag.get_command_groups()){
        HIPSYCL_DEBUG_INFO
              << "dag_manager [async]: Submitting node to scheduler!"
              << std::endl;
        if(stype == scheduler_type::direct) {
          _direct_scheduler.submit(node);
        } else if(stype == scheduler_type::unbound) {
          _unbound_scheduler.submit(node);
        }
      }
      HIPSYCL_DEBUG_INFO << "dag_manager [async]: DAG flush complete."
                        << std::endl;

      // Register nodes as submitted with the runtime
      for(auto node : new_dag.get_command_groups())
        this->register_submitted_ops(node);
      for(auto node : new_dag.get_memory_requirements())
        this->register_submitted_ops(node);

      if (this->_submitted_ops.get_num_nodes() >
          application::get_settings().get<setting::gc_trigger_batch_size>())
        this->_submitted_ops.async_wait_and_unregister();
    
    }
  } else {
    HIPSYCL_DEBUG_INFO << "dag_manager: Nothing to do" << std::endl;
  }
}

void dag_manager::flush_and_gc()
{
  if(_builder->get_current_dag_size() > 0){
    this->flush();
    // In a flush_and_gc, we can assume that we have finished a submission burst.
    // So this may be a good time to clean up and perform garbage collection!
    this->_submitted_ops.async_wait_and_unregister();

    HIPSYCL_DEBUG_INFO << "dag_manager: waiting for async worker..."
                        << std::endl;
  }
}

void dag_manager::wait()
{
  this->_submitted_ops.wait_for_all();
}

void dag_manager::wait(std::size_t node_group_id) {
  this->_submitted_ops.wait_for_group(node_group_id);
}

// Records a node as submitted so that wait()/GC can later track it.
void dag_manager::register_submitted_ops(dag_node_ptr node) {
  _submitted_ops.update_with_submission(node);
}

// Returns the list of submitted nodes belonging to the given node group.
node_list_t dag_manager::get_group(std::size_t node_group_id) {
  return _submitted_ops.get_group(node_group_id);
}
}
}
