#pragma once

#include "TxnnCpp/TxnnCppVersion.h"
#include "TxnnCpp/Common.h"
#include "TxnnCpp/RuntimeBase.h"

//! Build a single integer encoding a Txnn.Cpp version triple as
//! major*10000 + minor*100 + patch. Usable in preprocessor expressions.
#define TXNNCPP_VERSION_INT(major, minor, patch) (((major) * 100L + (minor)) * 100L + (patch))

//! Txnn.Cpp version as a single integer.
//! Usable in preprocessor expressions.
#define TXNNCPP_VERSION TXNNCPP_VERSION_INT(TXNNCPP_MAJOR, TXNNCPP_MINOR, TXNNCPP_PATCH)


//! Opaque stream handle (wrapper of txdaStream); passed to
//! IExecutionContext::enqueue().
struct txdaStream {};
using txdaStream_t = txdaStream*;


namespace txnncpp {

class IExecutionContext; //!< Forward declaration of IExecutionContext for use
                         //!< by other interfaces.
class ITxdaEngine; //!< Forward declaration of ITxdaEngine for use by other
                   //!< interfaces.

//!
//! \brief Runtime entry object: deserializes engine blobs and exposes
//! runtime-level queries.
//!
class IRuntime {
public:
  //!
  //! \brief Deserialize an engine from a serialized blob in host memory.
  //!
  //! \param blob Pointer to the serialized engine data.
  //! \param size Size of the serialized data in bytes.
  //!
  //! \return The deserialized engine. NOTE(review): failure behavior
  //!         (nullptr vs. other reporting) is not visible from this header —
  //!         confirm against the implementation.
  //!
  ITxdaEngine* deserializeTxdaEngine(void const* blob,
                                     std::size_t size) _TX_NOEXCEPT;

  //!
  //! \brief Return number of IO tensors.
  //!
  //! It is the number of input and output tensors for the network from which
  //! the engine was built. The names of the IO tensors can be discovered by
  //! calling getIOTensorName(i) for i in 0 to getNbIOTensors()-1.
  //!
  //! NOTE(review): getIOTensorName() is declared on ITxdaEngine, not on
  //! IRuntime — confirm this accessor is intentionally placed here rather
  //! than on the engine.
  //!
  //! \see ITxdaEngine::getIOTensorName()
  //!
  int32_t getNbIOTensors() const _TX_NOEXCEPT;

}; // class IRuntime

//!
//! \brief A deserialized engine; factory for execution contexts and source of
//! IO tensor metadata.
//!
class ITxdaEngine {
public:
  //!
  //! \brief Create an execution context for running inference with this
  //! engine.
  //!
  //! \return The new execution context. NOTE(review): the previous doc block
  //!         here was copied from TensorRT's ICudaEngine and referenced APIs
  //!         (setOptimizationProfileAsync, ExecutionContextAllocationStrategy,
  //!         error recorder) that do not exist in this header; confirm the
  //!         intended allocation/profile semantics against the implementation
  //!         before re-documenting them as contract.
  //!
  //! \see IExecutionContext
  //!
  // NOTE(review): added _TX_NOEXCEPT — this was the only method in the header
  // without it; the out-of-line definition must be updated to match.
  IExecutionContext* createExecutionContext() _TX_NOEXCEPT;

  //!
  //! \brief Return the name of an IO tensor.
  //!
  //! \param index Tensor index, expected in [0, number of IO tensors).
  //!        \see IRuntime::getNbIOTensors()
  //!
  //! \return The tensor name. NOTE(review): behavior for an out-of-range
  //!         index (nullptr vs. undefined) is not visible from this header —
  //!         confirm.
  //!
  const char* getIOTensorName(int32_t index) const _TX_NOEXCEPT;

}; // class ITxdaEngine

//!
//! \brief Per-inference state: tensor shapes, tensor addresses, and enqueue.
//!
class IExecutionContext {
public:
  //!
  //! \brief Set the dimensions of a named input tensor.
  //!
  //! \param tensorName Name of the input tensor.
  //! \param dims Dimensions to apply to the tensor.
  //!
  //! \return True on success, false otherwise. NOTE(review): exact failure
  //!         conditions (unknown name, incompatible shape) are not visible
  //!         from this header — confirm against the implementation.
  //!
  // NOTE(review): added _TX_NOEXCEPT to setInputShape/setTensorAddress for
  // consistency with enqueue() and the rest of this header; the out-of-line
  // definitions must be updated to match.
  bool setInputShape(char const* tensorName, Dims const& dims) _TX_NOEXCEPT;

  //!
  //! \brief Set the memory address for a named input or output tensor.
  //!
  //! \param tensorName Name of the tensor.
  //! \param data Pointer to the tensor data buffer. Presumably the caller
  //!        retains ownership and must keep it valid until inference
  //!        completes — confirm (see the note on enqueue()).
  //!
  //! \return True on success, false otherwise.
  //!
  bool setTensorAddress(char const* tensorName, void* data) _TX_NOEXCEPT;

  //!
  //! \brief Enqueue inference on a stream.
  //!
  //! \param stream A txda stream on which the inference kernels will be
  //!        enqueued.
  //!
  //! \return True if the kernels were enqueued successfully, false otherwise.
  //!
  //! Modifying or releasing memory that has been registered for the tensors
  //! before the stream has been synchronized results in undefined behavior.
  //!
  //! NOTE(review): the previous comment referenced TensorRT's enqueueV3,
  //! setInputConsumedEvent, weight streaming, and CUDA default-stream
  //! caveats, none of which appear in this header; confirm which of those
  //! caveats apply to this runtime before documenting them as contract.
  //!
  bool enqueue(txdaStream_t stream) _TX_NOEXCEPT;
}; // class IExecutionContext

//!
//! \brief Internal C entry point for creating IRuntime.
//!
//! \param logger The logging class for the runtime, passed as an opaque
//!        pointer (a common::ILogger — see the createInferRuntime() wrapper).
//!
//! \param version The version for txnn.cpp, support variants because of internal change.
//!
//! \return An opaque pointer to the new runtime; use the type-safe
//!         createInferRuntime() wrapper instead of calling this directly.
//!         NOTE(review): the ownership/teardown contract of the returned
//!         object is not visible from this header — confirm how it is
//!         destroyed.
//!
extern "C" TXAPI void* createInferRuntime_INTERNAL(void* logger, int32_t version) _TX_NOEXCEPT;

//!
//! \brief Create an instance of an IRuntime class.
//!
//! Type-safe wrapper over createInferRuntime_INTERNAL(): forwards the logger
//! together with the compiled-in TXNNCPP_VERSION, then casts the opaque
//! result back to IRuntime.
//!
//! \param logger The logging class for the runtime.
//!
inline IRuntime* createInferRuntime(common::ILogger& logger) _TX_NOEXCEPT {
  void* const raw = createInferRuntime_INTERNAL(&logger, TXNNCPP_VERSION);
  return static_cast<IRuntime*>(raw);
}

} // namespace txnncpp