// Copyright 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#pragma once

// TVM-related headers
#include <dlpack/dlpack.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/registry.h>
#include <cstdio>
#include <stdlib.h>

#include "igie_model.h"
#include "triton/backend/backend_model_instance.h"


namespace triton { namespace backend { namespace igie {
//
// ModelInstanceState
//
// State associated with a model instance. An object of this class is
// created and associated with each
// TRITONBACKEND_ModelInstance. ModelInstanceState is derived from
// BackendModelInstance class provided in the backend utilities that
// provides many common functions.
//
class ModelInstanceState : public BackendModelInstance {
  public:
    // Factory: creates and initializes the state object for the given
    // TRITONBACKEND_ModelInstance. Returns a TRITONSERVER_Error* on
    // failure (Triton backend convention); on success '*state' holds the
    // new instance.
    static TRITONSERVER_Error* Create(
        ModelState* model_state,
        TRITONBACKEND_ModelInstance* triton_model_instance,
        ModelInstanceState** state);
    ~ModelInstanceState();

    // Get the state of the model that corresponds to this instance.
    ModelState* StateForModel() const { return model_state_; }

    // Execute 'request_count' inference requests on this instance.
    // Definition lives in the .cpp; not visible here.
    void ProcessRequests(
        TRITONBACKEND_Request** requests, const uint32_t request_count);

  private:
    //void ParseConfig();
    // One-time setup helpers (definitions in the .cpp). Presumably invoked
    // from Create() in this order: load module, pick device, set up I/O
    // tensors, then warm up -- TODO confirm against the .cpp.
    void LoadModule();
    void InitDevice();
    void InitIOTensor();
    void WarmUp();

    // Accessors over the parsed Triton model config ("Config" variants)
    // and over the loaded module's metadata ("Module" variants): shapes,
    // datatypes, byte sizes, and NDArray placeholders, keyed by tensor name.
    bool SupportsDynamicBatching();
    int GetConfigMaxBatchSize();
    bool AllowRaggedBatch(const char* name);
    triton::common::TritonJson::Value& GetModelConfig();
    std::vector<int64_t> GetConfigInputShape(const char* name);
    std::vector<int64_t> GetConfigOutputShape(const char* name);
    std::vector<int64_t> GetModuleInputShape(const char* name);
    std::vector<int64_t> GetModuleOutputShape(const char* name);
    TRITONSERVER_DataType GetModuleInputDType(const char* name);
    TRITONSERVER_DataType GetModuleOutputDType(const char* name);
    uint64_t GetModuleInputNBytes(const char* name);
    uint64_t GetModuleOutputNBytes(const char* name);
    tvm::runtime::NDArray GetInputPlaceHolder(const char* name);
    tvm::runtime::NDArray GetOutputPlaceHolder(const char* name);
    // Reshape a raw request buffer between the caller's shape and the
    // engine's expected shape (pad input up / trim output down).
    // NOTE(review): ownership of the returned char* (caller- vs
    // callee-freed, or a view into 'buffer') is not visible in this
    // header -- confirm in the .cpp to avoid leaks or double frees.
    char* GetPaddedInput(char* buffer, std::vector<int64_t> shape, std::vector<int64_t> padding_shape, TRITONSERVER_DataType dtype);
    char* GetNonPaddedOutput(char* buffer, std::vector<int64_t> shape, std::vector<int64_t> trimmed_shape, TRITONSERVER_DataType dtype);

    // Private: construct only through Create(). Forwards to the
    // BackendModelInstance base and keeps a back-pointer to the ModelState.
    ModelInstanceState(
        ModelState* model_state,
        TRITONBACKEND_ModelInstance* triton_model_instance)
        : BackendModelInstance(model_state, triton_model_instance),
        model_state_(model_state)
    {
    }

    // Model-level state shared by all instances of this model.
    ModelState* model_state_;

    // Padding bookkeeping for the request currently being processed
    // (presumably reset/updated per batch in ProcessRequests -- confirm).
    bool _is_batch_padded = false;
    bool _is_seq_padded = false;
    int _req_batch_size = 0;
    int _req_seq_size = 0;
    // IGIE inference device.
    // NOTE(review): an earlier comment said "default to gpu0", but the
    // device_id initialized here is -1 -- presumably InitDevice() assigns
    // the real device id; confirm in the .cpp.
    DLDevice device_{kDLILUVATAR, -1};
    // model APIs: the loaded runtime module and its packed functions.
    // The names mirror the TVM graph-executor interface (get_input,
    // set_input, run, get_output, ...) -- bound in LoadModule(), presumably.
    tvm::runtime::Module _gmod;
    tvm::runtime::PackedFunc _get_input_names;
    tvm::runtime::PackedFunc _get_num_inputs;
    tvm::runtime::PackedFunc _get_input;
    tvm::runtime::PackedFunc _set_input;
    tvm::runtime::PackedFunc _run;
    tvm::runtime::PackedFunc _get_output_names;
    tvm::runtime::PackedFunc _get_num_outputs;
    tvm::runtime::PackedFunc _get_output_index;
    tvm::runtime::PackedFunc _get_output;

    // inputs and outputs parsed from loaded engine
    // input_name -> (shape, datatype, placeholder)
    std::unordered_map<std::string, std::tuple<std::vector<int64_t>, TRITONSERVER_DataType, tvm::runtime::NDArray>> inputs;
    /*
      Output placeholders can only be fetched by index via _get_output(),
      so 'outputs' additionally carries the output index to bind each
      output name to its position:
      output_name -> (shape, datatype, output_index, placeholder)
    */
    std::unordered_map<std::string, std::tuple<std::vector<int64_t>, TRITONSERVER_DataType, uint32_t, tvm::runtime::NDArray>> outputs;
};

}}}  // namespace triton::backend::igie
