/*
 *         (C) COPYRIGHT NBI Limited
 *              ALL RIGHT RESERVED
 *
 * File        : sample/main.cc
 * Authors     : dzhang
 * Create Time : 2021/09/02 19:10:11
 * Description :
 *
 */

#include <iostream>
#include <vector>

#include "allocator.h"
#include "fastflow/core/logging.h"
#include "fastflow/core/macros.h"
#include "fastflow/framework/execution_frame.h"
#include "fastflow/framework/kernel_registry.h"
#include "fastflow/framework/ml_value.h"
#include "fastflow/framework/op_kernel_context_internal.h"
#include "fastflow/framework/sequential_executor.h"
#include "fastflow/framework/session_state.h"
#include "fastflow/framework/session_state_initializer.h"
#include "fastflow/graph/model.h"
#include "fastflow/graph/onnx_proto_helper.h"
#include "fastflow/providers/cpu/cpu_execution_provider.h"
#include "fastflow/session/allocator_impl.h"
#include "onnx/shape_inference/implementation.h"

using namespace fastflow;

/**
 * Configuration information for a session.
 *
 * All fields carry working defaults, so a default-constructed instance is a
 * valid configuration; callers override individual fields as needed.
 */
struct SessionOptions {
    // int num_threads; // not used now until we re-introduce threadpools for async execution

    // Run the graph nodes one at a time in plan order (see SequentialExecutor below).
    bool enable_sequential_execution = true;  // TODO: should we default to sequential execution?

    // enable profiling for this session.
    bool enable_profiling = false;

    // enable the memory pattern optimization.
    // The idea is if the input shapes are the same, we could trace the internal memory allocation
    // and generate a memory pattern for future request. So next time we could just do one allocation
    // with a big chunk for all the internal memory allocation.
    bool enable_mem_pattern = true;

    // enable the memory arena on CPU
    // Arena may pre-allocate memory for future usage.
    // set this option to false if you don't want it.
    bool enable_cpu_mem_arena = true;

    // the prefix of the profile file. The current time will be appended to the file name.
    std::string profile_file_prefix = "FASTFLOW_profile_";

    std::string session_logid;                 ///< logger id to use for session output
    unsigned session_log_verbosity_level = 0;  ///< applies to session load, initialization, etc

    // Upper bound on graph-transformation passes (consumed by GraphTransformerManager in main()).
    unsigned max_num_graph_transformation_steps = 5;  // TODO choose a good default here?

    // How many threads in the session thread pool.
    int session_thread_pool_size = 0;
};

/**
 * Per-run configuration flags.
 *
 * NOTE(review): the three include_* flags are not consumed anywhere in this
 * file; presumably they control how much shape/type information is propagated
 * into (sub)graphs during a run — confirm against the framework code.
 */
struct RunOptions {
    bool include_dim_values_in_main_graph = true;
    bool include_dim_values_in_subgraph = false;
    bool include_types_in_subgraph = false;
    // Cooperative cancellation flag; read by SequentialExecutor (passed in main()).
    bool terminate = false;
};

// Mirror of TensorProto::DataType — numeric values must stay in sync with the ONNX proto.
// complex64, complex128 and bfloat16 are declared for completeness but FASTFLOW does not
// support them yet (see the default branch in FASTFLOWCreateTensorAsONNXValue).
typedef enum FASTFLOWTensorElementDataType {
    ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED = 0,
    ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT = 1,        // C type: float
    ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8 = 2,        // C type: uint8_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8 = 3,         // C type: int8_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16 = 4,       // C type: uint16_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16 = 5,        // C type: int16_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32 = 6,        // C type: int32_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64 = 7,        // C type: int64_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING = 8,       // C++ type: std::string
    ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL = 9,         // C++ type: bool
    ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 = 10,     // framework type: MLFloat16
    ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE = 11,      // C type: double
    ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32 = 12,      // C type: uint32_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64 = 13,      // C type: uint64_t
    ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64 = 14,   // unsupported: complex, float32 real/imag
    ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128 = 15,  // unsupported: complex, float64 real/imag
    ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16 = 16,    // unsupported: non-IEEE bfloat16
} FASTFLOWTensorElementDataType;

// Declares the opaque ONNXValue handle type used by the C-style API below.
// NOTE(review): DEFINE_RUNTIME_CLASS presumably comes from fastflow/core/macros.h — confirm.
DEFINE_RUNTIME_CLASS(ONNXValue);

// Evaluates `expr` once; if it yields a non-null status pointer (i.e. an error),
// returns that status from the enclosing function. A null status means success.
#define FASTFLOW_API_RETURN_IF_ERROR(expr)                                                                             \
    do {                                                                                                               \
        auto _status = (expr);                                                                                         \
        if (_status)                                                                                                   \
            return _status;                                                                                            \
    } while (0)

/**
 * Allocates storage for a tensor of `shape_len` dimensions through the given
 * allocator vtable and wraps it in a Tensor of element type T.
 *
 * @param shape      dimension sizes, `shape_len` entries
 * @param shape_len  number of dimensions
 * @param allocator  allocator vtable used for Alloc/Info; the resulting tensor
 *                   owns the buffer via an AllocatorWrapper
 * @param out        receives the created tensor on success
 * @return nullptr on success. On failure the error is logged, *out is left
 *         untouched and nullptr is still returned.
 *         TODO(review): return a real error status once suitable codes exist —
 *         callers currently cannot distinguish failure from success.
 */
template<typename T>
ONNXStatusPtr CreateTensorImpl(const size_t *shape, size_t shape_len, FASTFLOWAllocatorInteface **allocator,
        std::unique_ptr<Tensor> *out) {
    // Total element count; dims are copied to int64_t because TensorShape takes signed dims.
    size_t elem_count = 1;
    std::vector<int64_t> shapes(shape_len);
    for (size_t i = 0; i != shape_len; ++i) {
        elem_count *= shape[i];
        shapes[i] = shape[i];
    }

    size_t size_to_allocate;
    if (!IAllocator::CalcMemSizeForArray(sizeof(T), elem_count, &size_to_allocate)) {
        // BUG FIX: previously logged "not enough memory" (message swapped with the
        // alloc-failure branch) and fell through, using size_to_allocate uninitialized.
        VLOGE << "size overflow";
        return nullptr;
    }
    void *p_data = (*allocator)->Alloc(allocator, size_to_allocate);
    if (p_data == nullptr) {
        // BUG FIX: previously logged "size overflow" here and fell through,
        // constructing a Tensor over a null buffer.
        VLOGE << "not enough memory";
        return nullptr;
    }
    *out = std::make_unique<Tensor>(DataTypeImpl::GetType<T>(), fastflow::TensorShape(shapes),
            static_cast<void*>(p_data), *(*allocator)->Info(allocator),
            std::make_shared<fastflow::AllocatorWrapper>(allocator));
    return nullptr;
}

/**
 * Wraps a caller-provided, pre-allocated buffer in a Tensor of element type T.
 * The tensor does NOT take ownership of `p_data` (its deleter is nullptr);
 * this function will create a copy of the allocator info.
 *
 * @return nullptr always; on validation failure the error is logged and *out
 *         is left untouched.
 */
template<typename T>
ONNXStatusPtr CreateTensorImpl(const size_t *shape, size_t shape_len, const FASTFLOWAllocatorInfo *info, void *p_data,
        size_t p_data_len, std::unique_ptr<Tensor> *out) {
    // Accumulate the element count while converting dims to the signed form TensorShape expects.
    std::vector<int64_t> dims(shape_len);
    size_t num_elems = 1;
    for (size_t idx = 0; idx < shape_len; ++idx) {
        num_elems *= shape[idx];
        dims[idx] = shape[idx];
    }

    size_t required_bytes;
    if (!IAllocator::CalcMemSizeForArray(sizeof(T), num_elems, &required_bytes)) {
        VLOGE << "size overflow";
        return nullptr;
    }
    if (required_bytes > p_data_len) {
        VLOGE << "not enough space: expected " << required_bytes << ", got " << p_data_len;
        return nullptr;
    }
    // Borrow p_data; *info is copied into the tensor, deleter stays null.
    *out = std::make_unique<Tensor>(DataTypeImpl::GetType<T>(), fastflow::TensorShape(dims), p_data, *info, nullptr);
    return nullptr;
}

/**
 * C API: allocates a tensor of the requested element type/shape through
 * `allocator` and returns it as an opaque ONNXValue (an MLValue underneath).
 *
 * @return nullptr on success; a NOT_IMPLEMENTED status for the complex /
 *         bfloat16 types and any unknown enum value.
 */
FASTFLOW_API_STATUS_IMPL(FASTFLOWCreateTensorAsONNXValue, _Inout_ FASTFLOWAllocator* allocator,
        _In_ const size_t* shape, size_t shape_len, FASTFLOWTensorElementDataType type,
        _Out_ ONNXValue** out) {
    std::unique_ptr<Tensor> tensor;
    // One case per supported element type: instantiate CreateTensorImpl for the
    // matching C++ type and bail out early if it reports an error status.
#define FASTFLOW_CREATE_TENSOR_CASE(onnx_type, cpp_type)                                                \
    case onnx_type:                                                                                     \
        FASTFLOW_API_RETURN_IF_ERROR(CreateTensorImpl<cpp_type>(shape, shape_len, allocator, &tensor)); \
        break
    switch (type) {
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, float);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, uint8_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8, int8_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16, uint16_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16, int16_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32, int32_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64, int64_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING, std::string);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL, bool);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16, MLFloat16);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE, double);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32, uint32_t);
        FASTFLOW_CREATE_TENSOR_CASE(ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64, uint64_t);
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
        default: {
            std::ostringstream oss;
            oss << "type " << type << " is not supported in this function";
            std::string errmsg = oss.str();
            return CreateONNXStatus(FASTFLOW_NOT_IMPLEMENTED, errmsg.c_str());
        }
    }
#undef FASTFLOW_CREATE_TENSOR_CASE
    // Hand the tensor off to an MLValue, which manages its lifetime from here on.
    std::unique_ptr<MLValue> value = std::make_unique<MLValue>();
    value->Init(tensor.release(), DataTypeImpl::GetType<Tensor>(), DataTypeImpl::GetType<Tensor>()->GetDeleteFunc());
    *out = reinterpret_cast<ONNXValue*>(value.release());
    return nullptr;
}

/**
 * C++ convenience wrapper over the C-style FASTFLOWCreateTensorAsONNXValue.
 *
 * @return the created value, or nullptr if creation failed.
 *         NOTE(review): the error status from the C API is silently dropped
 *         (the FASTFLOW_THROW_ON_ERROR path is commented out) — callers only
 *         see a null pointer on failure.
 */
inline ONNXValue* FASTFLOWCreateTensorAsONNXValue(_Inout_ FASTFLOWAllocator *env, const std::vector<size_t> &shape,
        FASTFLOWTensorElementDataType type) {
    // BUG FIX: initialize the out-param; if the C call fails without writing it,
    // the original returned an uninitialized (garbage) pointer.
    ONNXValue *ret = nullptr;
    //   FASTFLOW_THROW_ON_ERROR(::FASTFLOWCreateTensorAsONNXValue(env, shape.data(), shape.size(), type, &ret));
    ::FASTFLOWCreateTensorAsONNXValue(env, shape.data(), shape.size(), type, &ret);
    return ret;
}

/**
 * C API: returns a raw, mutable pointer to the tensor data held by `value`.
 *
 * @param value  an ONNXValue that wraps an MLValue containing a Tensor
 * @param output receives the tensor's raw data pointer
 * @return nullptr always (no error paths yet).
 */
FASTFLOW_API_STATUS_IMPL(FASTFLOWGetTensorMutableData, _In_ ONNXValue* value, _Out_ void** output) {
    // TODO: reject string tensors — a raw byte pointer into std::string storage is not usable.
    // NOTE: leftover "##############" debug logging removed.
    auto v = reinterpret_cast<::fastflow::MLValue*>(value);
    auto tensor = v->GetMutable<fastflow::Tensor>();
    *output = tensor->MutableDataRaw();
    return nullptr;
}

// Static vtable for the mocked allocator used by this sample: ref-count hooks
// (AddRef_/Release_), the Alloc_/Free_ pair, and the Info_ accessor, in the
// layout FASTFLOWAllocatorInteface expects.
FASTFLOWAllocatorInteface MockedFASTFLOWAllocator::table_ = { { MockedFASTFLOWAllocator::AddRef_,
        MockedFASTFLOWAllocator::Release_ }, MockedFASTFLOWAllocator::Alloc_, MockedFASTFLOWAllocator::Free_,
        MockedFASTFLOWAllocator::Info_ };

// Sample driver: loads an ONNX model from disk, assembles a FastFlow session by
// hand (providers, kernel registry, execution plan), feeds one zero-filled input
// tensor, executes the plan node-by-node, and prints the first ten output floats.
// The repeated "########..." VLOGD lines are progress tracers left in on purpose
// for this sample.
int main(int, char**) {
    std::cout << "Hello, world!\n";
    // Hard-coded developer path; adjust before running locally.
    // NOTE(review): the path names yolov5s, but the input/output tensor names and
    // the 1x1x28x28 shape used below are MNIST-style — confirm which model this
    // sample actually targets.
    std::string model_path = "/home/dzhang/work/nbi/testdata/model/yolov5s.onnx";
    ONNX_NAMESPACE::ModelProto onnx_model;
    bool success = fastflow::onnx_read_proto_from_binary(model_path.c_str(), &onnx_model);
    DCHECK(success) << "read onnx model failed: " << model_path;
    VLOGI << "ONNX Model ir version: " << onnx_model.ir_version();
    const auto &onnx_graph = onnx_model.graph();
    const int node_size = onnx_graph.node_size();
    VLOGD << "Node size: " << node_size;
    // Run shape inference in place, then dump the annotated model for inspection.
    fastflow::onnx_shape_inference(onnx_model);
    fastflow::onnx_write_proto_from_binary("/tmp/mnist.onnx", &onnx_model);

    // --- Session construction -------------------------------------------------
    ExecutionProviders execution_providers_;
    SessionState session_state_(execution_providers_);
    SessionOptions session_options_;
    // NOTE(review): allocated with new and never deleted — leaked for the lifetime
    // of this sample process.
    logging::Logger *session_logger_ = new logging::Logger();
    fastflow::GraphTransformerManager graph_transformation_mgr_ { session_options_.max_num_graph_transformation_steps };
    fastflow::InsertCastTransformer insert_cast_transformer_ { "" };
    fastflow::Model model(onnx_model);
    fastflow::Graph &graph = model.MainGraph();
    VLOGD << "Node size: " << graph.NumberOfNodes();
    const SessionOptions session_options;
    // Register default CPUExecutionProvider if user didn't provide it through the Register() calls
    if (!execution_providers_.Get(fastflow::kCpuExecutionProvider)) {
        VLOGI << "Adding default CPU execution provider.";
        CPUExecutionProviderInfo epi { session_options.enable_cpu_mem_arena };
        execution_providers_.Add(fastflow::kCpuExecutionProvider, std::make_unique < CPUExecutionProvider > (epi));
    }
    VLOGD << "########################";
    KernelRegistryManager kernel_registry_manager_;
    kernel_registry_manager_.RegisterKernels(execution_providers_);
    VLOGD << "########################";

//    for(auto name : kernel_registry_manager_.GetAllKernelRegistries()[0]->GetAllRegisteredOpNames()){
//        VLOGD << name;
//    }

    // Build the execution plan and initialize/save the model weights.
    SessionStateInitializer session_initializer { graph, session_state_, execution_providers_, kernel_registry_manager_,
            *session_logger_ };
    VLOGD << "########################";

    session_initializer.CreatePlan(graph_transformation_mgr_, insert_cast_transformer_, { },
            session_options_.enable_sequential_execution);
    VLOGD << "########################";
    std::map<FASTFLOWAllocatorInfo, BufferUniquePtr> weights_buffers_;
    session_initializer.InitializeAndSave(session_state_.GetEnableMemoryPattern(), weights_buffers_);
    VLOGD << "########################";
    RunOptions run_options;
    // NOTE(review): p_exec is created but never used below — the execution loop
    // re-implements sequential execution inline; confirm whether this is intended.
    auto p_exec = std::unique_ptr < IExecutor > (new SequentialExecutor(run_options.terminate));
    const SequentialExecutionPlan &seq_exec_plan = *session_state_.GetExecutionPlan();
    const auto &exec_plan_vec = seq_exec_plan.execution_plan;
    VLOGI << "Size of execution plan vector: " << exec_plan_vec.size();

    // --- Input construction ---------------------------------------------------
    std::unique_ptr<FASTFLOWAllocator> default_allocator(MockedFASTFLOWAllocator::Create());

    FASTFLOWAllocator *env = default_allocator.get();
    std::vector<ONNXValuePtr> input(1);
    // MNIST-style NCHW input: batch 1, 1 channel, 28x28 pixels.
    std::vector<size_t> dims_x = { 1, 1, 28, 28 };
    input[0] = FASTFLOWCreateTensorAsONNXValue(env, dims_x, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);

    // Fill the input tensor with zeros.
    void *raw_data;
    FASTFLOWGetTensorMutableData(input[0], &raw_data);
    std::vector<float> values_x(1 * 1 * 28 * 28, 0);
    memcpy(raw_data, values_x.data(), values_x.size() * sizeof(values_x[0]));

    // Graph input/output tensor names (these are the MNIST sample model's names).
    std::vector<const char*> input_names { "Input3" };
    std::vector<const char*> output_names1 { "Plus214_Output_0" };
    // ONNXValuePtr output_tensor = nullptr;
    std::vector<ONNXValuePtr> output(output_names1.size());

    auto input_len = input_names.size();
    auto output_names_len = output_names1.size();

    // --- Build the feeds map (name -> MLValue) --------------------------------
    ::fastflow::NameMLValMap feeds;
    const int queue_id = 0;
    for (size_t i = 0; i != input_len; ++i) {
        auto kvp = feeds.insert(
                std::make_pair(std::string(input_names[i]), *reinterpret_cast<const ::fastflow::MLValue*>(input[i])));
        if (!kvp.second) {
            // NOTE(review): logs the duplicate but continues with the first-inserted value.
            VLOGE << "duplicated input name";
        }
        ::fastflow::MLValue &value = kvp.first->second;
        if (value.Fence())
            value.Fence()->BeforeUsingAsInput(fastflow::kCpuExecutionProvider, queue_id);
    }
    // Create output feed
    std::vector<std::string> output_names(output_names_len);
    for (size_t i = 0; i != output_names_len; ++i) {
        if (output_names1[i] == nullptr || output_names1[i][0] == '\0') {
            // NOTE(review): logs the problem but still copies the (empty/null) name below.
            VLOGE << "output name cannot be empty";
        }
        output_names[i] = output_names1[i];
    }

    // Pre-populate fetches from any caller-provided output values (all null here).
    std::vector<MLValue> fetches(output_names_len);
    for (size_t i = 0; i != output_names_len; ++i) {
        if (output[i] != nullptr) {
            ::fastflow::MLValue &value = *reinterpret_cast<::fastflow::MLValue*>(output[i]);
            if (value.Fence())
                value.Fence()->BeforeUsingAsOutput(fastflow::kCpuExecutionProvider, queue_id);
            fetches[i] = value;
        }
    }

    ExecutionFrame frame { feeds, output_names, fetches, session_state_ };

    // --- Inline sequential execution loop --------------------------------------
    // NOTE(review): a const reference bound to the literal `false` (lifetime is
    // extended, but the flag can never become true) — looks like a placeholder for
    // a real cancellation flag.
    const bool &terminate_flag_ = false;
    for (const auto &node_exec_plan : exec_plan_vec) {
        if (terminate_flag_) {
            // NOTE(review): only logs — does not actually break out of the loop.
            VLOGW << "Exiting due to terminate flag being set to true.";
        }
        auto node_index = node_exec_plan.node_index;
        auto p_op_kernel = session_state_.GetKernel(node_index);

        // if a kernel has been added in the session state, it better be NON-null.
        if (p_op_kernel == nullptr) {
            // NOTE(review): logs and then dereferences p_op_kernel below — would crash;
            // treat as an assertion for this sample.
            VLOGE << "Got nullptr from GetKernel for node: " << session_state_.GetGraphViewer()->GetNode(node_index)->Name();
        }
        const std::string &node_name = p_op_kernel->Node().Name();
        const std::string &op_name = p_op_kernel->KernelDef().OpName();
        VLOGD << "node_name: " << node_name << ", op_name: " << op_name;
        OpKernelContextInternal op_kernel_context(frame, *p_op_kernel, *session_logger_, p_op_kernel->Node().ImplicitInputDefs(),
                terminate_flag_);

        // sync before compute
        int queue_id = p_op_kernel->KernelDef().ExecQueueId();
        for (int input_index = 0; input_index < op_kernel_context.InputCount(); ++input_index) {
            Fence_t fence = op_kernel_context.InputFence(input_index);
            if (fence) {
                fence->BeforeUsingAsInput(p_op_kernel->Node().GetExecutionProviderType(), queue_id);
            }
        }

        for (int input_index = 0; input_index < op_kernel_context.ImplicitInputCount(); ++input_index) {
            Fence_t fence = op_kernel_context.ImplicitInputFence(input_index);
            if (fence) {
                fence->BeforeUsingAsInput(p_op_kernel->Node().GetExecutionProviderType(), queue_id);
            }
        }

        for (int output_index = 0; output_index < op_kernel_context.OutputCount(); ++output_index) {
            Fence_t fence = op_kernel_context.OutputFence(output_index);
            if (fence) {
                fence->BeforeUsingAsOutput(p_op_kernel->Node().GetExecutionProviderType(), queue_id);
            }
        }

        // call compute on the kernel
        VLOGI << "Computing kernel: " << p_op_kernel->Node().Name();

        // NOTE(review): the status returned by Compute is ignored here.
        p_op_kernel->Compute(&op_kernel_context);

        // sync after compute for outputs
        for (int input_index = 0; input_index < op_kernel_context.InputCount(); ++input_index) {
            Fence_t fence = op_kernel_context.InputFence(input_index);
            if (fence) {
                fence->AfterUsedAsInput(queue_id);
            }
        }

        for (int input_index = 0; input_index < op_kernel_context.ImplicitInputCount(); ++input_index) {
            Fence_t fence = op_kernel_context.ImplicitInputFence(input_index);
            if (fence) {
                fence->AfterUsedAsInput(queue_id);
            }
        }

        for (int output_index = 0; output_index < op_kernel_context.OutputCount(); ++output_index) {
            Fence_t fence = op_kernel_context.OutputFence(output_index);
            if (fence) {
                fence->AfterUsedAsOutput(queue_id);
            }
        }

        // free ml-values corresponding to this node
        VLOGI << "Releasing node ML values after computing kernel: " << p_op_kernel->Node().Name();
        // ReleaseNodeMLValues(frame, seq_exec_plan, node_exec_plan, session_logger_);
        for (auto i = node_exec_plan.free_from_index; i <= node_exec_plan.free_to_index; ++i) {
            auto mlvalue_idx = seq_exec_plan.to_be_freed[i];
            VLOGI << "Releasing mlvalue with index: " << mlvalue_idx;
            frame.ReleaseMLValue(mlvalue_idx);
        }
    }

    // --- Collect outputs --------------------------------------------------------
    // NOTE(review): fetches was sized at construction above, so the empty() branch
    // appears unreachable here.
    if (fetches.empty()) {
        fetches.resize(output_names.size());
    } else {
        // this should've been checked before already
        if (output_names.size() != fetches.size()) {
            VLOGE
                    << "output_names vector size: " + std::to_string(output_names.size())
                            + " does not match that of fetches vector: " + std::to_string(fetches.size());
        }

    }
    auto idx = 0;
    for (const auto &oname : output_names) {
        VLOGI << "Attempting to fetch output with name: " << oname;
        int mlvalue_index;
        // NOTE(review): GetIdx's status is ignored; a failed lookup would leave
        // mlvalue_index uninitialized.
        session_state_.GetMLValueNameIdxMap().GetIdx(oname, mlvalue_index);
        const MLValue &output_mlvalue = frame.GetMLValue(mlvalue_index);
        VLOGI << "Copying fetched MLValue to output vector";
        fetches[idx++] = output_mlvalue;
    }

    VLOGI << "Done with execution.";

    // Wrap the fetched MLValues as opaque ONNXValue handles for the C-style API.
    // `queue_id` here is the outer one declared above (0), not the loop-local one.
    for (size_t i = 0; i != output_names_len; ++i) {
        ::fastflow::MLValue &value = fetches[i];
        if (value.Fence()) {
            VLOGD << "######";
            value.Fence()->BeforeUsingAsInput(fastflow::kCpuExecutionProvider, queue_id);
        }
        if (output[i] == nullptr) {
            VLOGD << "######";
            output[i] = reinterpret_cast<ONNXValue*>(new MLValue(value));
        }
    }
    VLOGD << "######";
    // Print the first ten floats of the first output as a smoke check.
    float *output_tensor_ptr;
    FASTFLOWGetTensorMutableData(output[0], (void**) &output_tensor_ptr);
    for (int i = 0; i < 10; i++) {
        VLOGD << output_tensor_ptr[i];
    }

    // std::unique_ptr<FASTFLOWTensorTypeAndShapeInfo> shape_info;
    // {
    //   FASTFLOWTensorTypeAndShapeInfo *shape_info_ptr;
    //   FASTFLOW_THROW_ON_ERROR(FASTFLOWGetTensorShapeAndType(output_tensor, &shape_info_ptr));
    //   shape_info.reset(shape_info_ptr);
    // }
    // size_t rtensor_dims = FASTFLOWGetNumOfDimensions(shape_info.get());
    // std::vector<int64_t> shape_array(rtensor_dims);
    // FASTFLOWGetDimensions(shape_info.get(), shape_array.data(), shape_array.size());
    // size_t total_len = 1;
    // for (size_t i = 0; i != rtensor_dims; ++i)
    // {
    //   total_len *= shape_array[i];
    // }
    // float *output_tensor_ptr;
    // FASTFLOW_THROW_ON_ERROR(FASTFLOWGetTensorMutableData(output_tensor, (void **)&output_tensor_ptr));
    // printf("output tensor length: %d\n", total_len);
    // for(int i = 0; i < 32; i++){
    //   printf("%f, ", output_tensor_ptr[i]);
    // }
    // ReleaseONNXValue(output_tensor);
}
