// Copyright 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "igie_model_instance.h"

#include <algorithm>
#include <cstdlib>
#include <cstring>

#include "triton/backend/backend_input_collector.h"
#include "triton/backend/backend_output_responder.h"
#include "triton/common/logging.h"

namespace triton { namespace backend { namespace igie {

// Data type names follow https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/user_guide/model_configuration.html
// Maps Triton config data_type names (with the "TYPE_" prefix stripped) to the corresponding DLPack DLDataType {code, bits, lanes}.
const std::unordered_map <std::string, DLDataType> g_type_map {
  // Each DLDataType is {type code, bits, lanes}; lanes is 1 for scalar elements.
  {"UINT8",  {kDLUInt, 8, 1}},
  {"UINT16", {kDLUInt, 16, 1}},
  {"UINT32", {kDLUInt, 32, 1}},
  {"UINT64", {kDLUInt, 64, 1}},
  {"INT8",   {kDLInt, 8, 1}},
  {"INT16",  {kDLInt, 16, 1}},
  {"INT32",  {kDLInt, 32, 1}},
  {"INT64",  {kDLInt, 64, 1}},
  {"FP16",   {kDLFloat, 16, 1}},
  {"FP32",   {kDLFloat, 32, 1}},
  {"FP64",   {kDLFloat, 64, 1}},
  // NOTE(review): "BOOL", "BYTES" and "BF16" config types have no entry here,
  // so g_type_map.at(...) in InitIOTensor would throw std::out_of_range for
  // such configs — confirm whether those types are meant to be rejected earlier.
};

bool ModelInstanceState::SupportsDynamicBatching() {
  // A model whose first dimension is the batch dimension supports batching;
  // delegate the decision to the model-level state.
  bool first_dim_is_batch = false;
  auto* model_state = reinterpret_cast<ModelState*>(StateForModel());
  model_state->SupportsFirstDimBatching(&first_dim_is_batch);
  return first_dim_is_batch;
}

int ModelInstanceState::GetConfigMaxBatchSize() {
  // -1 marks a non-batching model; otherwise report the configured maximum.
  if (!SupportsDynamicBatching()) {
    return -1;
  }
  auto* model_state = reinterpret_cast<ModelState*>(StateForModel());
  return model_state->MaxBatchSize();
}

bool ModelInstanceState::AllowRaggedBatch(const char* name) {
  // Ask the model-level config whether this input is declared ragged
  // (i.e. requests may carry variable-length sequences for it).
  auto* model_state = reinterpret_cast<ModelState*>(StateForModel());
  const std::string input_name(name);
  return model_state->IsInputRagged(input_name);
}

triton::common::TritonJson::Value& ModelInstanceState::GetModelConfig() {
  // Hand back a reference to the parsed config.pbtxt owned by the ModelState;
  // no copy is made, callers share the underlying JSON document.
  auto* model_state = reinterpret_cast<ModelState*>(StateForModel());
  return model_state->ModelConfig();
}

TRITONSERVER_Error*
ModelInstanceState::Create(
    ModelState* model_state, TRITONBACKEND_ModelInstance* triton_model_instance,
    ModelInstanceState** state)
{
  // Factory: construct a ModelInstanceState and run its one-time
  // initialization (device/stream setup, engine load, IO tensor validation
  // and placeholder allocation). Returns nullptr on success, or the
  // TRITONSERVER_Error carried by a construction-time exception.
  try {
    *state = new ModelInstanceState(model_state, triton_model_instance);
  }
  catch (const BackendModelInstanceException& ex) {
    // The exception wraps the TRITONSERVER_Error to propagate; a null err_
    // inside it is itself an internal error.
    RETURN_ERROR_IF_TRUE(
        ex.err_ == nullptr, TRITONSERVER_ERROR_INTERNAL,
        std::string("unexpected nullptr in BackendModelInstanceException"));
    RETURN_IF_ERROR(ex.err_);
  }

  // is this model support dynamic batching?
  if ((*state)->SupportsDynamicBatching()) { 
    LOG_VERBOSE(2) << model_state->Name() << " support dynamic-batching, max batch size is " << model_state->MaxBatchSize() << std::endl;
  } else {
    LOG_VERBOSE(2) << model_state->Name() << " is non-batching model, all input shape are frozen." << std::endl;
  }

  // NOTE(review): the init steps below log failures but cannot report them to
  // the caller — Create() always returns success past this point. Confirm
  // whether init errors should instead abort instance creation.
  (*state)->InitDevice();
  (*state)->LoadModule();
  (*state)->InitIOTensor();
  // (*state)->WarmUp();

  return nullptr;  // success
}

void ModelInstanceState::InitDevice() {
  // Bind this instance to its assigned device and create a dedicated stream,
  // so that multiple instances do not serialize on the default stream.
  // which gpu is used by this instance?
  device_.device_id = DeviceId();

  // Explicitly specify which stream to use.
  auto deviceAPI = tvm::runtime::DeviceAPI::Get(device_);
  stream_ = reinterpret_cast<cudaStream_t>(deviceAPI->CreateStream(device_));
  
  if (stream_ == nullptr) {
    // Failure is only logged; subsequent work would run on a null stream.
    LOG_MESSAGE(
        TRITONSERVER_LOG_ERROR,
        "Unexpected Error while try to create CUDA Stream.");
  }

  LOG_VERBOSE(1) << "On Device " << std::to_string(DeviceId()) << ", Create CUDA Stream at " << stream_ << "\n";
  // Route all subsequent TVM device operations of this instance through the
  // newly created stream.
  deviceAPI->SetStream(device_, reinterpret_cast<TVMStreamHandle>(stream_));

}

void ModelInstanceState::LoadModule() {
  // Load the compiled engine for this model version and cache the packed
  // functions used during inference so per-request dispatch is cheap.
  ModelState* model_state = reinterpret_cast<ModelState*>(StateForModel());
  triton::common::TritonJson::Value &model_config_ = model_state->ModelConfig();
  std::string model_filename;
  model_config_.MemberAsString("default_model_filename", &model_filename);
  // Engine file lives at <repository>/<version>/<default_model_filename>.
  std::string model_path = JoinPath({model_state->RepositoryPath(), std::to_string(model_state->Version()), model_filename});

  // "default" is the module factory entry point; invoking it with the device
  // instantiates the executor module on that device.
  tvm::runtime::Module mod_factory = tvm::runtime::Module::LoadFromFile(model_path);
  _gmod = mod_factory.GetFunction("default")(device_);
  _get_input_names = _gmod.GetFunction("get_input_names");
  _get_num_inputs = _gmod.GetFunction("get_num_inputs");
  _get_input = _gmod.GetFunction("get_input");
  _set_input = _gmod.GetFunction("set_input");
  _run = _gmod.GetFunction("run");
  _get_output_names = _gmod.GetFunction("get_output_names"); 
  _get_num_outputs = _gmod.GetFunction("get_num_outputs"); 
  _get_output_index = _gmod.GetFunction("get_output_index");
  _get_output = _gmod.GetFunction("get_output");
}

// Compare a config.pbtxt data_type (e.g. "TYPE_FP32") against an engine dtype
// string as produced by tvm::runtime::DLDataType2String (e.g. "float32").
// Returns 0 when they denote the same type, -1 otherwise.
int _InnerCheckIOType(std::string config_type, std::string type) {
  // config_type may looks like TYPE_INT32
  const std::string prefix = "TYPE_";
  // The prefix must appear at position 0, not merely anywhere in the string.
  if (config_type.compare(0, prefix.length(), prefix) != 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Error occur in config.pbtxt, io's data_type should with 'TYPE_' prefix:" + config_type).c_str());
    // A malformed config type cannot match anything.
    return -1;
  }
  std::string trimmed_config_type = config_type.substr(prefix.length());
  // to lower.
  std::transform(trimmed_config_type.begin(), trimmed_config_type.end(), trimmed_config_type.begin(), ::tolower);
  // Triton spells floating point types "fp16/fp32/fp64" while TVM's
  // DLDataType2String yields "float16/float32/float64"; normalize before
  // comparing so valid float configs do not trigger spurious mismatches.
  if (trimmed_config_type.rfind("fp", 0) == 0) {
    trimmed_config_type = "float" + trimmed_config_type.substr(2);
  }
  return (trimmed_config_type == type) ? 0 : -1;
}

void ModelInstanceState::InitIOTensor() {
  triton::common::TritonJson::Value &model_config_ = GetModelConfig();

  std::vector<std::string>* input_names = static_cast<std::vector<std::string>*>((void *)_get_input_names());

  triton::common::TritonJson::Value config_inputs; 
  model_config_.MemberAsArray("input", &config_inputs);

  if(input_names->size() != config_inputs.ArraySize()) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: input_names->size() == config_inputs.ArraySize() (" + \
                                          std::to_string(input_names->size()) + " vs. " + std::to_string(config_inputs.ArraySize())).c_str());
  }

  // keep input names in engine and configurations consistent
  for (uint32_t i = 0; i < config_inputs.ArraySize(); i++) {
    std::string name = input_names->at(i);  // input name from loaded engine

    // input name from config.pbtxt
    triton::common::TritonJson::Value io;
    config_inputs.IndexAsObject(i, &io);
    std::string io_name;
    io.MemberAsString("name", &io_name);
    if (name != io_name) {
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: engine input_name == config input name (" + \
                                            name + " vs. " + io_name).c_str());
    }

    // check the dimension
    triton::common::TritonJson::Value dims;
    io.MemberAsArray("dims", &dims);
    int64_t dims_len = dims.ArraySize();
    if (SupportsDynamicBatching()) {
      dims_len += 1;
    }

    if (AllowRaggedBatch(io_name.c_str()))
    // we will support more than two dimension ragged-bathcing later.
    if (dims_len != 2) {
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Only Support two dimension Ragged-Batching now!"));
    }

    // check the dtype
    std::string io_dtype;
    io.MemberAsString("data_type", &io_dtype);
    tvm::runtime::NDArray input_ = _get_input(i);
    std::string dtype =  tvm::runtime::DLDataType2String(input_.DataType());
    int ret = _InnerCheckIOType(io_dtype, dtype);
    if (ret != 0) {
      LOG_MESSAGE(TRITONSERVER_LOG_WARN, ("[WARN]: " + name + ".type == " + io_name + ".type (" + \
                                            dtype + " vs. " + io_dtype + ")").c_str());
    }

    // io shape
    std::vector<int64_t> dims_shape {};
    tvm::runtime::ShapeTuple input_shape = input_.Shape();
    for (uint32_t j = 0; j < input_shape.size(); j++){
      std::cout << input_shape.data()[j] << " ";
      dims_shape.push_back(input_shape.data()[j]);
    }
    
    // io_dtype.substr(5): skip prefix "TYPE_"
    inputs[name] = std::make_tuple(dims_shape, TRITONSERVER_StringToDataType(io_dtype.substr(5).c_str()), 
                                  tvm::runtime::NDArray::Empty(
                                    tvm::runtime::ShapeTuple(dims_shape), 
                                    g_type_map.at(io_dtype.substr(5)), device_));
  }

  std::vector<std::string>* output_names = static_cast<std::vector<std::string>*>((void *)_get_output_names());

  triton::common::TritonJson::Value config_outputs; 
  model_config_.MemberAsArray("output", &config_outputs);
  if(output_names->size() != config_outputs.ArraySize()) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: output_names->size() == config_outputs.ArraySize() (" + \
                                          std::to_string(output_names->size()) + " vs. " + std::to_string(config_outputs.ArraySize())).c_str());
  }

  /*
    keep or update input names in engine and configurations,
    as the optimized igie engine's output name may changed a lot compared to original ones, 
    so there may be low-level error, should be double check by the user.
  */
  for (uint32_t i = 0; i < config_outputs.ArraySize(); i++) {
    std::string name = output_names->at(i);  // output name from loaded engine

    // input name from config.pbtxt
    triton::common::TritonJson::Value io;
    config_outputs.IndexAsObject(i, &io);
    std::string io_name;
    io.MemberAsString("name", &io_name);
    if (name != io_name) {
      LOG_MESSAGE(TRITONSERVER_LOG_WARN, ("[WARN]: engine output_name == config output name (" + \
                                            name + " vs. " + io_name + "), and use config output name instead").c_str());
    }


    // check the dtype
    std::string io_dtype;
    io.MemberAsString("data_type", &io_dtype);
    tvm::runtime::NDArray output_ = _get_output(i);
    std::string dtype =  tvm::runtime::DLDataType2String(output_.DataType());
    int ret = _InnerCheckIOType(io_dtype, dtype);
    if (ret != 0) {
      LOG_MESSAGE(TRITONSERVER_LOG_WARN, ("[WARN]: " + name + ".type == " + io_name + ".type (" + \
                                            dtype + " vs. " + io_dtype + ")").c_str());
    }

    // io shape
    std::vector<int64_t> dims_shape {};
    tvm::runtime::ShapeTuple output_shape = output_.Shape();
    for (uint32_t j = 0; j < output_shape.size(); j++){
      dims_shape.push_back(output_shape.data()[j]);
    }


    /*
      we could only get output placeholder by index with _get_output(),
      so we create the output_map_ to bind output name with output idx specially.
    */  
    outputs[io_name] = std::make_tuple(dims_shape, TRITONSERVER_StringToDataType(io_dtype.substr(5).c_str()), i,
                                  tvm::runtime::NDArray::Empty(
                                    tvm::runtime::ShapeTuple(dims_shape), 
                                    g_type_map.at(io_dtype.substr(5)), device_));                           
  }
}

void ModelInstanceState::WarmUp() {
  // prepare input 

  // warm up model
  _run();

  // syn with GPU by blocking cpu.
  tvm::runtime::DeviceAPI::Get(device_)->StreamSync(device_, nullptr);
}


// Build the full shape of input `name` as declared in config.pbtxt.
// With dynamic batching the config omits the batch dimension, so
// max_batch_size is prepended. Returns {-1} when the name is not found.
std::vector<int64_t>
ModelInstanceState::GetConfigInputShape(const char* name) {
  std::vector<int64_t> dims_shape {};

  if (SupportsDynamicBatching()) {
    ModelState* model_state = reinterpret_cast<ModelState*>(StateForModel());
    dims_shape.push_back(model_state->MaxBatchSize());
  }

  triton::common::TritonJson::Value &model_config_ = GetModelConfig();
  triton::common::TritonJson::Value config_inputs; 
  model_config_.MemberAsArray("input", &config_inputs);
  // size_t loop indices: ArraySize() is unsigned, so this avoids
  // signed/unsigned comparison warnings.
  for (size_t i = 0; i < config_inputs.ArraySize(); i++) {
    triton::common::TritonJson::Value input_;  
    config_inputs.IndexAsObject(i, &input_);
    std::string name_;
    input_.MemberAsString("name", &name_);

    // matched: append every config dim after the (optional) batch dim.
    if(name_.compare(name) == 0) {
      triton::common::TritonJson::Value dims;
      input_.MemberAsArray("dims", &dims);
      for (size_t j = 0; j < dims.ArraySize(); j++) {
        triton::common::TritonJson::Value dim_channel;
        dims.At(j, &dim_channel);
        int64_t dim_channel_int64;
        dim_channel.AsInt(&dim_channel_int64);
        dims_shape.push_back(dim_channel_int64);
      }

      return dims_shape;
    }
  }

  // if failed to match, return -1
  return std::vector<int64_t>{-1};
}

// Build the full shape of output `name` as declared in config.pbtxt.
// With dynamic batching the config omits the batch dimension, so
// max_batch_size is prepended. Returns {-1} when the name is not found.
std::vector<int64_t>
ModelInstanceState::GetConfigOutputShape(const char* name) {
  std::vector<int64_t> dims_shape {};

  if (SupportsDynamicBatching()) {
    ModelState* model_state = reinterpret_cast<ModelState*>(StateForModel());
    dims_shape.push_back(model_state->MaxBatchSize());
  }

  triton::common::TritonJson::Value &model_config_ = GetModelConfig();
  triton::common::TritonJson::Value config_outputs; 
  model_config_.MemberAsArray("output", &config_outputs);
  // size_t loop indices: ArraySize() is unsigned, so this avoids
  // signed/unsigned comparison warnings.
  for (size_t i = 0; i < config_outputs.ArraySize(); i++) {
    triton::common::TritonJson::Value output_;  
    config_outputs.IndexAsObject(i, &output_);
    std::string name_;
    output_.MemberAsString("name", &name_);

    // matched: append every config dim after the (optional) batch dim.
    if(name_.compare(name) == 0) {
      triton::common::TritonJson::Value dims;
      output_.MemberAsArray("dims", &dims);
      for (size_t j = 0; j < dims.ArraySize(); j++) {
        triton::common::TritonJson::Value dim_channel;
        dims.At(j, &dim_channel);
        int64_t dim_channel_int64;
        dim_channel.AsInt(&dim_channel_int64);
        dims_shape.push_back(dim_channel_int64);
      }

      return dims_shape;
    }
  }

  // if failed to match, return -1
  return std::vector<int64_t>{-1};
}

// Return the engine-side shape cached for input `name` in InitIOTensor(),
// or {-1} when the name is unknown.
std::vector<int64_t>
ModelInstanceState::GetModuleInputShape(const char* name) {
  // get input item according to input name.
  if (inputs.count(name) == 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in inputs.keys():" + \
                                          std::string(name)).c_str());
    return std::vector<int64_t>{-1};
  }

  // at() + const ref avoids copying the whole cached tuple (shape vector,
  // dtype, NDArray placeholder) just to read the shape.
  const auto& item = inputs.at(name);
  return std::get<0>(item);
}

// Return the engine-side shape cached for output `name` in InitIOTensor(),
// or {-1} when the name is unknown.
std::vector<int64_t>
ModelInstanceState::GetModuleOutputShape(const char* name) {
  if (outputs.count(name) == 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in outputs.keys():" + \
                                          std::string(name)).c_str());
    return std::vector<int64_t>{-1};
  }

  // at() + const ref avoids copying the whole cached tuple (shape vector,
  // dtype, output index, NDArray placeholder) just to read the shape.
  const auto& item = outputs.at(name);
  return std::get<0>(item);
}

// Return the Triton dtype cached for input `name` in InitIOTensor(), or
// TRITONSERVER_TYPE_INVALID when the name is unknown.
TRITONSERVER_DataType
ModelInstanceState::GetModuleInputDType(const char* name) {
  // get input item according to input name.
  if (inputs.count(name) == 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in inputs.keys():" + \
                                          std::string(name)).c_str());
    return TRITONSERVER_TYPE_INVALID;
  }

  // at() + const ref avoids copying the cached tuple per lookup.
  const auto& item = inputs.at(name);
  return std::get<1>(item);
}

// Return the Triton dtype cached for output `name` in InitIOTensor(), or
// TRITONSERVER_TYPE_INVALID when the name is unknown.
TRITONSERVER_DataType
ModelInstanceState::GetModuleOutputDType(const char* name) {
  if (outputs.count(name) == 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in outputs.keys():" + \
                                          std::string(name)).c_str());
    return TRITONSERVER_TYPE_INVALID;
  }

  // at() + const ref avoids copying the cached tuple per lookup.
  const auto& item = outputs.at(name);
  return std::get<1>(item);
}

uint64_t ModelInstanceState::GetModuleInputNBytes(const char* name) {
  if (inputs.count(name) == 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in inputs.keys():" + \
                                          std::string(name)).c_str());
    return TRITONSERVER_TYPE_INVALID;
  }

  std::tuple<std::vector<int64_t>, TRITONSERVER_DataType, tvm::runtime::NDArray> item = inputs[name];
  std::vector<int64_t> shape = std::get<0>(item);
  TRITONSERVER_DataType dtype = std::get<1>(item);

  uint64_t byte_size = TRITONSERVER_DataTypeByteSize(std::get<1>(item));
  for (auto v = shape.begin(); v < shape.end(); v++) {
  byte_size *= *v;
  }

  return byte_size;
}

uint64_t ModelInstanceState::GetModuleOutputNBytes(const char* name) {
  if (outputs.count(name) == 0) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in outputs.keys():" + \
                                          std::string(name)).c_str());
    return TRITONSERVER_TYPE_INVALID;
  }

  std::tuple<std::vector<int64_t>, TRITONSERVER_DataType, uint32_t, tvm::runtime::NDArray> item = outputs[name];
  std::vector<int64_t> shape = std::get<0>(item);
  TRITONSERVER_DataType dtype = std::get<1>(item);

  uint64_t byte_size = TRITONSERVER_DataTypeByteSize(std::get<1>(item));
  for (auto v = shape.begin(); v < shape.end(); v++) {
  byte_size *= *v;
  }

  return byte_size;
}

// Return the pre-allocated device-side NDArray for input `name`, or a
// default-constructed (null) NDArray when the name is unknown.
tvm::runtime::NDArray ModelInstanceState::GetInputPlaceHolder(const char* name) {
    // get input item according to input name.
    if (inputs.count(name) == 0) {
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in inputs.keys():" + \
                                            std::string(name)).c_str());
      return tvm::runtime::NDArray();
    }

    // at() + const ref avoids copying the cached tuple (the returned NDArray
    // handle itself is still copied, which is what callers expect).
    const auto& item = inputs.at(name);
    return std::get<2>(item);
}

// Return the pre-allocated device-side NDArray for output `name`, or a
// default-constructed (null) NDArray when the name is unknown.
tvm::runtime::NDArray ModelInstanceState::GetOutputPlaceHolder(const char* name) {
    if (outputs.count(name) == 0) {
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Check failed: req name in outputs.keys():" + \
                                            std::string(name)).c_str());
      return tvm::runtime::NDArray();
    }
    
    // at() + const ref avoids copying the cached tuple (the returned NDArray
    // handle itself is still copied, which is what callers expect).
    const auto& item = outputs.at(name);
    return std::get<3>(item);
}


/*
  Scatter a dense (b x s) source into a zero-padded (pb x ps) destination:
  each of the b source rows (length s) lands at the start of the matching
  destination row (stride ps); the row tails are left untouched.
  - b  batch            - s  sequence
  - pb padded batch     - ps padded sequence (pb is unused; kept for symmetry)
*/
template <typename T>
void _InnerInflateArray(T* padding, T* buffer, uint32_t b, uint32_t s, uint32_t pb, uint32_t ps) {
    for (uint32_t row = 0; row < b; ++row) {
      const T* src_row = buffer + static_cast<size_t>(row) * s;
      T* dst_row = padding + static_cast<size_t>(row) * ps;
      std::copy(src_row, src_row + s, dst_row);
    }
}

// Copy a ragged/short request tensor of `shape` into a freshly allocated,
// zero-filled buffer of the engine's full `padding_shape` (two dims only).
// The caller owns the returned buffer and releases it with free().
// Returns nullptr when allocation fails.
char* ModelInstanceState::GetPaddedInput(
  char* buffer, std::vector<int64_t> shape, 
  std::vector<int64_t> padding_shape, TRITONSERVER_DataType dtype) {

    // Total size of the padded buffer in bytes.
    uint32_t padding_byte_size = TRITONSERVER_DataTypeByteSize(dtype);
    for (auto v = padding_shape.begin(); v < padding_shape.end(); v++) {
      padding_byte_size *= *v;
    }
    // malloc (not new[]) because the caller releases this buffer with free();
    // mixing new[] with free() is undefined behavior. Note new[] never
    // returns nullptr (it throws), so the old null check was dead code.
    char* padding_buffer = static_cast<char*>(malloc(padding_byte_size));
    if (padding_buffer == nullptr) {
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Failed to Malloc padding_buffer with size" + std::to_string(padding_byte_size)).c_str());
      return nullptr;
    }
    // Zero-fill so the padded tail elements are deterministic.
    memset((void *)padding_buffer, 0, padding_byte_size);

    // again: we only support two dims ragged-batching case now.
    uint32_t b = shape.at(0); // batch dim
    uint32_t s = shape.at(1); // sequence dim
    uint32_t pb = padding_shape.at(0); // padding batch dim
    uint32_t ps = padding_shape.at(1); // padding sequence dim

    // Every supported branch performs the identical row-wise copy, merely
    // reinterpreted at the element width implied by dtype.
    switch (dtype) {
      case TRITONSERVER_TYPE_BOOL:
        _InnerInflateArray(reinterpret_cast<bool*>(padding_buffer), reinterpret_cast<bool*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_UINT8:
      case TRITONSERVER_TYPE_BYTES:  // raw bytes inflate as uint8
        _InnerInflateArray(reinterpret_cast<uint8_t*>(padding_buffer), reinterpret_cast<uint8_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_UINT16:
        _InnerInflateArray(reinterpret_cast<uint16_t*>(padding_buffer), reinterpret_cast<uint16_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_UINT32:
        _InnerInflateArray(reinterpret_cast<uint32_t*>(padding_buffer), reinterpret_cast<uint32_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_UINT64:
        _InnerInflateArray(reinterpret_cast<uint64_t*>(padding_buffer), reinterpret_cast<uint64_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_INT8:
        _InnerInflateArray(reinterpret_cast<int8_t*>(padding_buffer), reinterpret_cast<int8_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_INT16:
        _InnerInflateArray(reinterpret_cast<int16_t*>(padding_buffer), reinterpret_cast<int16_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_INT32:
        _InnerInflateArray(reinterpret_cast<int32_t*>(padding_buffer), reinterpret_cast<int32_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_INT64:
        _InnerInflateArray(reinterpret_cast<int64_t*>(padding_buffer), reinterpret_cast<int64_t*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_FP32:
        _InnerInflateArray(reinterpret_cast<float*>(padding_buffer), reinterpret_cast<float*>(buffer), b, s, pb, ps);
        break;
      case TRITONSERVER_TYPE_INVALID:
        LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Got TRITONSERVER_TYPE_INVALID while try to padding input."));
        break;
      case TRITONSERVER_TYPE_FP16:
        LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Not Supported TRITONSERVER_TYPE_FP16 while try to padding input."));
        break;
      case TRITONSERVER_TYPE_FP64:
        LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Not Supported TRITONSERVER_TYPE_FP64 while try to padding input."));
        break;
      case TRITONSERVER_TYPE_BF16:
        LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Not Supported TRITONSERVER_TYPE_BF16 while try to padding input."));
        break;
      default:
        break;
    }

    return padding_buffer;
}

/*
  Gather the leading (tb x ts) window out of a dense (b x s) source into a
  tightly packed destination of exactly tb*ts elements.
  - b  batch             - s  sequence
  - tb trimmed batch     - ts trimmed sequence
  Bug fix: the loops previously ran over the FULL source extents (i < b,
  j < s) while writing at stride ts. With s > ts, each row overflowed into
  the next one and the final row wrote past the tb*ts-element destination —
  a heap buffer overflow. The loops now run over the trimmed extents.
*/
template <typename T>
void _InnerFlattedArray(T* flatten, T* buffer, uint32_t b, uint32_t s, uint32_t tb, uint32_t ts) {
    for (uint32_t i = 0; i < tb; i++) {
      for (uint32_t j = 0; j < ts; j++) {
        flatten[i * ts + j] = buffer[i * s + j];
      }
    }
}
// Copy the leading `trimmed_shape` window of a full `shape` output tensor
// into a freshly allocated, tightly packed buffer (two dims only).
// Returns nullptr when allocation fails; the caller owns the buffer.
char* ModelInstanceState::GetNonPaddedOutput(
  char* buffer, std::vector<int64_t> shape, 
  std::vector<int64_t> trimmed_shape, TRITONSERVER_DataType dtype) {

  // Total size of the trimmed buffer in bytes.
  uint32_t trimmed_byte_size = TRITONSERVER_DataTypeByteSize(dtype);
  for (auto v = trimmed_shape.begin(); v < trimmed_shape.end(); v++) {
    trimmed_byte_size *= *v;
  }
  // malloc (not new[]): the sibling GetPaddedInput's buffer is released with
  // free(), and new[] never returns nullptr (it throws), so the old null
  // check was dead. NOTE(review): confirm this buffer's release site also
  // uses free() rather than delete[].
  char* trimmed_buffer = static_cast<char*>(malloc(trimmed_byte_size));
  if (trimmed_buffer == nullptr) {
    LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Failed to Malloc trimmed_byte_size with size" + std::to_string(trimmed_byte_size)).c_str());
    return nullptr;
  }
  memset((void *)trimmed_buffer, 0, trimmed_byte_size);

  // again: we only support two dims ragged-batching case now.
  uint32_t b = shape.at(0); // batch dim
  uint32_t s = shape.at(1); // sequence dim
  uint32_t tb = trimmed_shape.at(0); // trimmed batch dim
  uint32_t ts = trimmed_shape.at(1); // trimmed sequence dim

  // Every supported branch performs the identical row-wise gather, merely
  // reinterpreted at the element width implied by dtype.
  switch (dtype) {
    case TRITONSERVER_TYPE_BOOL:
      _InnerFlattedArray(reinterpret_cast<bool*>(trimmed_buffer), reinterpret_cast<bool*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_UINT8:
    case TRITONSERVER_TYPE_BYTES:  // raw bytes trim as uint8
      _InnerFlattedArray(reinterpret_cast<uint8_t*>(trimmed_buffer), reinterpret_cast<uint8_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_UINT16:
      _InnerFlattedArray(reinterpret_cast<uint16_t*>(trimmed_buffer), reinterpret_cast<uint16_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_UINT32:
      _InnerFlattedArray(reinterpret_cast<uint32_t*>(trimmed_buffer), reinterpret_cast<uint32_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_UINT64:
      _InnerFlattedArray(reinterpret_cast<uint64_t*>(trimmed_buffer), reinterpret_cast<uint64_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_INT8:
      _InnerFlattedArray(reinterpret_cast<int8_t*>(trimmed_buffer), reinterpret_cast<int8_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_INT16:
      _InnerFlattedArray(reinterpret_cast<int16_t*>(trimmed_buffer), reinterpret_cast<int16_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_INT32:
      _InnerFlattedArray(reinterpret_cast<int32_t*>(trimmed_buffer), reinterpret_cast<int32_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_INT64:
      _InnerFlattedArray(reinterpret_cast<int64_t*>(trimmed_buffer), reinterpret_cast<int64_t*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_FP32:
      _InnerFlattedArray(reinterpret_cast<float*>(trimmed_buffer), reinterpret_cast<float*>(buffer), b, s, tb, ts);
      break;
    case TRITONSERVER_TYPE_INVALID:
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Got TRITONSERVER_TYPE_INVALID while try to trim output."));
      break;
    // Messages below previously said "padding input" — a copy-paste slip;
    // this function trims output.
    case TRITONSERVER_TYPE_FP16:
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Not Supported TRITONSERVER_TYPE_FP16 while try to trim output."));
      break;
    case TRITONSERVER_TYPE_FP64:
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Not Supported TRITONSERVER_TYPE_FP64 while try to trim output."));
      break;
    case TRITONSERVER_TYPE_BF16:
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Not Supported TRITONSERVER_TYPE_BF16 while try to trim output."));
      break;
    default:
      break;
  }

  return trimmed_buffer;
}

void
ModelInstanceState::ProcessRequests(
    TRITONBACKEND_Request** requests, const uint32_t request_count)
{
  // Collect various timestamps during the execution of this batch or
  // requests. These values are reported below before returning from
  // the function.
  uint64_t exec_start_ns = 0;
  SET_TIMESTAMP(exec_start_ns);

  for (size_t i = 0; i < request_count; i++) {
    // If we get a nullptr request then something is badly wrong. Fail
    // and release all requests.
    if (requests[i] == nullptr) {
      RequestsRespondWithError(
          requests, request_count,
          TRITONSERVER_ErrorNew(
              TRITONSERVER_ERROR_INTERNAL,
              std::string(
                  "null request given to IGIE backend for '" + Name() + "'")
                  .c_str()));
    }
  }

  uint64_t receiver_start_ns = 0;
  SET_TIMESTAMP(receiver_start_ns);

  std::vector<TRITONBACKEND_Response*> responses;
  responses.reserve(request_count);

  for (size_t i = 0; i < request_count; i++) {
    TRITONBACKEND_Response* response;
    auto err = TRITONBACKEND_ResponseNew(&response, requests[i]);
    if (err == nullptr) {
      responses.emplace_back(response);
    } else {
      responses.emplace_back(nullptr);
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, "Fail to create response");
      TRITONSERVER_ErrorDelete(err);
    }
  }

  ModelState* model_state = reinterpret_cast<ModelState*>(StateForModel());

  uint32_t req_input_count;
  TRITONBACKEND_RequestInputCount(*requests, &req_input_count);

  BackendInputCollector collector(
      requests, request_count, &responses, model_state->TritonMemoryManager(),
      false /* pinned_enabled */, nullptr /* stream*/);

  // The collector may deliver input data in either pinned or plain host
  // memory; both are accepted here, on memory-type id 0.
  std::vector<std::pair<TRITONSERVER_MemoryType, int64_t>> allowed_input_types =
      {{TRITONSERVER_MEMORY_CPU_PINNED, 0}, {TRITONSERVER_MEMORY_CPU, 0}};

  // Reset per-execution padding bookkeeping. These members are written on the
  // input path below and read again on the output path to strip the padding
  // off before responding.
  _is_batch_padded = false;
  _is_seq_padded = false;
  _req_batch_size = 0;
  _req_seq_size = 0;

  // Input path: gather every input tensor, pad it up to the model's static
  // shape when needed, and stage it into the matching TVM input placeholder.
  // NOTE(review): input names/properties are queried from requests[0] only
  // (`*requests`); this assumes every request in the batch carries identical
  // input names and shapes — confirm against the scheduler configuration.
  for ( size_t idx = 0; idx < req_input_count; idx++) {
    const char* req_input_name;
    TRITONBACKEND_RequestInputName(*requests, idx, &req_input_name);
    TRITONBACKEND_Input *req_input;
    TRITONBACKEND_RequestInput(*requests, req_input_name, &req_input);

    // Request-side dtype and shape, exactly as sent by the client.
    TRITONSERVER_DataType req_dtype;
    const int64_t* req_shape;
    uint32_t req_dims;
    TRITONBACKEND_InputProperties(req_input, nullptr, &req_dtype, &req_shape, &req_dims, nullptr, nullptr);
    std::vector<int64_t> req_shape_vec {};
    for (uint32_t iter = 0; iter < req_dims; iter++) { req_shape_vec.push_back(req_shape[iter]); }

    const char* input_buffer = nullptr;
    size_t input_buffer_byte_size;
    TRITONSERVER_MemoryType input_buffer_memory_type;
    int64_t input_buffer_memory_type_id;

    // Ask the collector to assemble one contiguous host buffer for this input
    // across all requests in the batch.
    RESPOND_ALL_AND_SET_NULL_IF_ERROR(
        responses, request_count,
        collector.ProcessTensor(
            req_input_name, nullptr /* existing_buffer */,
            0 /* existing_buffer_byte_size */, allowed_input_types, &input_buffer,
            &input_buffer_byte_size, &input_buffer_memory_type,
            &input_buffer_memory_type_id/*0-CPU, 1-CPU_PINNED, 2-GPU*/));

    // NOTE(review): Finalize() is called once per input, inside this loop.
    // BackendInputCollector is normally finalized exactly once, after all
    // ProcessTensor() calls — confirm the per-iteration call is intentional.
    const bool need_cuda_input_sync = collector.Finalize();
    if (need_cuda_input_sync) {
      LOG_MESSAGE(
          TRITONSERVER_LOG_ERROR,
          "'igie' backend: unexpected CUDA sync required by collector");
    }

    // NOTE(review): a null buffer is only logged — execution falls through,
    // and the copy further below would then read from a null pointer.
    if (input_buffer == nullptr) {
      LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Got nullptr from collector!"));
    }

    LOG_MESSAGE(TRITONSERVER_LOG_INFO, (" process with input " + std::string(req_input_name) + ", total " + std::to_string(input_buffer_byte_size) +" bytes\n").c_str());

    // Sanity checks against the compiled module's expectations. assert()
    // compiles out under NDEBUG, so release builds skip these validations.
    assert(req_dtype == GetModuleInputDType(req_input_name));
    // the dims should equal while dims value may vary dynamically.
    assert(req_shape_vec.size() == GetModuleInputShape(req_input_name).size());

    tvm::runtime::NDArray input_placeholder = GetInputPlaceHolder(req_input_name);
    uint32_t expected_byte_size = GetModuleInputNBytes(req_input_name);

    std::vector<int64_t> input_shape_vec = GetModuleInputShape(req_input_name);   

    // check if need padding seq
    // NOTE(review): indexing dim 1 assumes every input has rank >= 2 —
    // confirm rank-1 inputs cannot reach this backend.
    if ((req_shape_vec[1] < input_shape_vec[1])) {
      if (AllowRaggedBatch(req_input_name)) {
        _is_seq_padded = true;
        _req_seq_size = req_shape_vec[1];
      } else {
        // NOTE(review): mismatch is only logged ("batchng"/"miscatch" typos in
        // the message are preserved); execution continues with the short seq.
        LOG_MESSAGE(
          TRITONSERVER_LOG_ERROR,
          "Non-ragged-batchng case, and seq dim miscatch!");
      }
    }
    // check if need padding batch
    if ((req_shape_vec[0] < input_shape_vec[0])) { 
      if (SupportsDynamicBatching()) {
        _is_batch_padded = true;
        _req_batch_size = req_shape_vec[0];
      } else {
        LOG_MESSAGE(
          TRITONSERVER_LOG_ERROR,
          "Non-dynamic-batchng case, and batch dim miscatch!");
      }
    }

    if (_is_seq_padded || _is_batch_padded) {
        // Pad the request data up to the module's static input shape before
        // handing it to TVM.
        // NOTE(review): padding_buffer is released with free(); this is only
        // correct if GetPaddedInput() allocates with malloc() — confirm
        // against its implementation.
        char* padding_buffer = GetPaddedInput((char *)input_buffer, req_shape_vec, input_shape_vec, req_dtype);
        input_placeholder.CopyFromBytes(padding_buffer, expected_byte_size);
        _set_input(req_input_name, input_placeholder);
        free(padding_buffer);
    } else {
        // non-batching non-ragged vanilla case.
        assert(req_shape_vec == input_shape_vec);
        assert(input_buffer_byte_size == expected_byte_size);
        // if there is a non-batching model, client request data size in bytes should exactly equal to model' inputs.
        input_placeholder.CopyFromBytes(input_buffer, input_buffer_byte_size);
        _set_input(req_input_name, input_placeholder);
    }
  }

  uint64_t receiver_end_ns = 0;
  SET_TIMESTAMP(receiver_end_ns);

  uint64_t compute_start_ns = 0;
  SET_TIMESTAMP(compute_start_ns);
  // after all input are ready, we do the model inference.
  _run();

  // syn with GPU by blocking cpu.
  tvm::runtime::DeviceAPI::Get(device_)->StreamSync(device_, nullptr);

  uint64_t compute_end_ns = 0;
  SET_TIMESTAMP(compute_end_ns);

  uint64_t responder_start_ns = 0;
  SET_TIMESTAMP(responder_start_ns);

  bool supports_first_dim_batching;
  RESPOND_ALL_AND_SET_NULL_IF_ERROR(
      responses, request_count,
      model_state->SupportsFirstDimBatching(&supports_first_dim_batching));

  BackendOutputResponder responder(
      requests, request_count, &responses, model_state->TritonMemoryManager(),
      supports_first_dim_batching, model_state_->EnablePinnedOutput() /* pinned_enabled */,
      nullptr /* stream*/);

  // Output path: fetch each module output into host memory, undo any padding
  // applied on the input path, and hand the result to the responder.
  for (auto iter = outputs.begin(); iter != outputs.end(); ++iter) {

    std::tuple<std::vector<int64_t>, TRITONSERVER_DataType, uint32_t, tvm::runtime::NDArray> output_item = outputs.at(iter->first);
    std::vector<int64_t> shape = std::get<0>(output_item);
    TRITONSERVER_DataType dtype = std::get<1>(output_item);
    uint32_t idx = std::get<2>(output_item);
    tvm::runtime::NDArray output_placeholder = std::get<3>(output_item);

    LOG_MESSAGE(TRITONSERVER_LOG_INFO, 
                  (" process with output " + iter->first + ", idx=" \
                  + std::to_string(idx)).c_str());

    _get_output(idx, output_placeholder);

    // std::cout << iter->first << " Shape: [ ";
    // for (int i=0; i< shape.size(); i++){
    //   std::cout << shape.at(i)  << " ";
    // }
    // std::cout << "] DType: " << TRITONSERVER_DataTypeString(dtype) << "\n";

    // as igie does not support dynamic-inference now, we should split output based on context above.
    if (_is_batch_padded) {
      std::vector<int64_t> split_shape = shape;
      split_shape[0] = _req_batch_size;

      // we use TVM CreateView() to fetch real-part of output which will greatly reduce D2H time, especially for large output.
      tvm::runtime::NDArray split_output_placeholder = output_placeholder.CreateView(split_shape, g_type_map.at(TRITONSERVER_DataTypeString(dtype)));
      int64_t output_byte_size = TRITONSERVER_DataTypeByteSize(dtype);
      for (int i=0; i < split_shape.size(); i++){
        output_byte_size *= split_shape[i];
      }

      // FIXME: output_buffer is allocated with new[] but released with free()
      // below — undefined behavior; should be delete[] (or std::vector<char>).
      // Also, a throwing new[] never returns nullptr, so the check below is
      // dead code (std::nothrow would be needed for it to fire).
      char* output_buffer = new char[output_byte_size];
      if (output_buffer == nullptr) {
        LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Failed to Malloc output_buffer with size" + std::to_string(output_byte_size)).c_str());
      }
      memset((void *)output_buffer, 0, output_byte_size);
      split_output_placeholder.CopyToBytes((void*)output_buffer, output_byte_size);

      if (_is_seq_padded) {
        // Both batch and seq were padded: strip the seq padding too before
        // responding.
        split_shape[1] = _req_seq_size;
        char* non_pad_buffer = GetNonPaddedOutput(output_buffer, shape, split_shape, dtype);
        responder.ProcessTensor(iter->first, dtype, split_shape, non_pad_buffer, TRITONSERVER_MEMORY_CPU, 0);
        free(non_pad_buffer);
      } else {
        responder.ProcessTensor(iter->first, dtype, split_shape, output_buffer, TRITONSERVER_MEMORY_CPU, 0);
      }
      // don't forget to free buffer after used.
      free(output_buffer);
    } else {
      // non dyn-batching case, copy output directly.
      int64_t output_byte_size = GetModuleOutputNBytes(iter->first.c_str());
      // FIXME: same new[]/free() mismatch and dead nullptr check as the
      // batch-padded branch above.
      char* output_buffer = new char[output_byte_size];
      if (output_buffer == nullptr) {
        LOG_MESSAGE(TRITONSERVER_LOG_ERROR, ("Failed to Malloc output_buffer with size" + std::to_string(output_byte_size)).c_str());
      }
      memset((void *)output_buffer, 0, output_byte_size);
      output_placeholder.CopyToBytes((void*)output_buffer, output_byte_size);

      if (_is_seq_padded) {
        std::vector<int64_t> split_shape = shape;
        split_shape[1] = _req_seq_size;
        char* non_pad_buffer = GetNonPaddedOutput(output_buffer, shape, split_shape, dtype);
        responder.ProcessTensor(iter->first, dtype, split_shape, non_pad_buffer, TRITONSERVER_MEMORY_CPU, 0);
        free(non_pad_buffer);
      } else {
        responder.ProcessTensor(iter->first, dtype, shape, output_buffer, TRITONSERVER_MEMORY_CPU, 0);
      }
      // don't forget to free buffer after used.
      free(output_buffer);
    }
  }

  const bool need_cuda_output_sync = responder.Finalize();
  if (need_cuda_output_sync) {
    LOG_MESSAGE(
        TRITONSERVER_LOG_ERROR,
        "'igie' backend: unexpected CUDA sync required by responder");
  }

  // Send all the responses that haven't already been sent because of
  // an earlier error.
  for (auto& response : responses) {
    if (response != nullptr) {
      LOG_IF_ERROR(
          TRITONBACKEND_ResponseSend(
              response, TRITONSERVER_RESPONSE_COMPLETE_FINAL, nullptr),
          "failed to send response");
    }
  }

  uint64_t responder_end_ns = 0;
  SET_TIMESTAMP(responder_end_ns);

  uint64_t exec_end_ns = 0;
  SET_TIMESTAMP(exec_end_ns);

#ifdef TRITON_ENABLE_STATS
  // For batch statistics need to know the total batch size of the
  // requests. This is not necessarily just the number of requests,
  // because if the model supports batching then any request can be a
  // batched request itself.
  size_t total_batch_size = 0;
  if (!supports_first_dim_batching) {
    total_batch_size = request_count;
  } else {
    for (uint32_t r = 0; r < request_count; ++r) {
      auto& request = requests[r];
      TRITONBACKEND_Input* input = nullptr;
      LOG_IF_ERROR(
          TRITONBACKEND_RequestInputByIndex(request, 0 /* index */, &input),
          "failed getting request input");
      if (input != nullptr) {
        const int64_t* shape = nullptr;
        LOG_IF_ERROR(
            TRITONBACKEND_InputProperties(
                input, nullptr, nullptr, &shape, nullptr, nullptr, nullptr),
            "failed getting input properties");
        if (shape != nullptr) {
          total_batch_size += shape[0];
        }
      }
    }
  }
#else
  // Silence unused-variable warnings when statistics are compiled out.
  (void)exec_start_ns;
  (void)exec_end_ns;
  (void)receiver_start_ns;
  (void)receiver_end_ns;
  (void)responder_start_ns;
  (void)responder_end_ns;
  (void)compute_start_ns;
  (void)compute_end_ns;
#endif  // TRITON_ENABLE_STATS

  // Report statistics for each request, and then release the request.
  for (uint32_t r = 0; r < request_count; ++r) {
    auto& request = requests[r];

#ifdef TRITON_ENABLE_STATS
    LOG_IF_ERROR(
        TRITONBACKEND_ModelInstanceReportStatistics(
            this->TritonModelInstance(), request,
            (responses[r] != nullptr) /* success */, exec_start_ns,
            compute_start_ns, compute_end_ns, exec_end_ns),
        "failed reporting request statistics");
#endif  // TRITON_ENABLE_STATS

    LOG_IF_ERROR(
        TRITONBACKEND_RequestRelease(request, TRITONSERVER_REQUEST_RELEASE_ALL),
        "failed releasing request");
  }

#ifdef TRITON_ENABLE_STATS
  // Per-phase timing breakdown (microseconds), logged for profiling.
  LOG_MESSAGE(TRITONSERVER_LOG_INFO, "[fetch data from client => model inference => response to client]");
  uint64_t receiver_duration = (receiver_end_ns - receiver_start_ns)/1000;
  LOG_MESSAGE(TRITONSERVER_LOG_INFO,
        ("igie_inference_receiver_duration_us " + std::to_string(receiver_duration)).c_str());

  uint64_t compute_infer_duration = (compute_end_ns - compute_start_ns)/1000;
  LOG_MESSAGE(TRITONSERVER_LOG_INFO,
        ("igie_inference_compute_infer_duration_us " + std::to_string(compute_infer_duration)).c_str());

  uint64_t responder_duration = (responder_end_ns - responder_start_ns)/1000;
  LOG_MESSAGE(TRITONSERVER_LOG_INFO,
        ("igie_inference_responder_duration_us " + std::to_string(responder_duration)).c_str());

  uint64_t request_duration = (exec_end_ns - exec_start_ns)/1000;
  LOG_MESSAGE(TRITONSERVER_LOG_INFO,
        ("igie_inference_total_request_duration_us " + std::to_string(request_duration)).c_str());

  // Report batch statistics.
  LOG_IF_ERROR(
      TRITONBACKEND_ModelInstanceReportBatchStatistics(
          this->TritonModelInstance(), total_batch_size,
          exec_start_ns, compute_start_ns, compute_end_ns, exec_end_ns),
      "failed reporting batch request statistics");
#endif  // TRITON_ENABLE_STATS

}

// Release the stream owned by this instance, if one was ever created.
// NOTE(review): the stream is released with free(); that is only correct if
// it was obtained via malloc() — if it is a driver-created CUDA stream it
// should be destroyed with the matching stream-destroy API. Confirm against
// the allocation site.
ModelInstanceState::~ModelInstanceState() {
  if (stream_ == nullptr) {
    return;
  }
  LOG_VERBOSE(1) << "On Device " << std::to_string(DeviceId())
                 << ", free CUDA Stream at " << stream_ << "\n";
  free(stream_);
}

}}}  // namespace triton::backend::igie