//
// Created by kunnkali on 2023/5/23.
//

#ifndef TNET_OPTION_H
#define TNET_OPTION_H
#include "Prefix.h"

#include <cstddef>
#include <cstdint>
#include <string>

namespace TNet {
    namespace tflite {
        /** Core (delegate-independent) configuration for the TensorFlow Lite interpreter. */
        struct TFLCoreOption {
            /**
             * Experimental: Enable an optimized set of floating point CPU kernels (provided by XNNPACK).
             *
             * Enabling this flag will enable use of a new, highly optimized set of CPU kernels provided via the
             * XNNPACK delegate. Currently, this is restricted to a subset of floating point operations.
             * Eventually, we plan to enable this by default, as it can provide significant performance benefits
             * for many classes of floating point models. See
             * https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/delegates/xnnpack/README.md
             * for more details.
             *
             * Things to keep in mind when enabling this flag:
             *
             *     * Startup time and resize time may increase.
             *     * Baseline memory consumption may increase.
             *     * Compatibility with other delegates (e.g., GPU) has not been fully validated.
             *     * Quantized models will not see any benefit.
             *
             * WARNING: This is an experimental interface that is subject to change.
             */
            bool useXNNPACK = true;

            /**
            * Maximum number of threads that the interpreter should run on. Defaults to 0 (unspecified, letting
            * TensorFlow Lite to optimize the threading decision).
            */
            uint32_t numberOfThreads = 0;
        };

        /**
         * Options for the TensorFlow Lite Metal (iOS GPU) delegate.
         *
         * NOTE(review): the struct name says "Meta" while the nested types say "Metal" —
         * presumably a typo kept for source compatibility; confirm before renaming.
         */
        struct TFLMetaDelegateOption {

            /** Whether the Metal delegate is used at all. The default is `false`. */
            bool enabled = false;

            /**
            * Indicates whether the GPU delegate allows precision loss, such as allowing `Float16` precision
            * for a `Float32` computation. The default is `false`.
            */
            bool precisionLossAllowed = false;

            /** How the calling thread waits for delegate work to finish. */
            enum TFLMetalDelegateThreadWaitType {

                /**
                * The thread does not wait for the work to complete. Useful when the output of the work is used
                * with the GPU pipeline.
                */
                TFLMetalDelegateThreadWaitTypeDoNotWait,
                /** The thread waits until the work is complete. */
                TFLMetalDelegateThreadWaitTypePassive,
                /**
                 * The thread waits for the work to complete with minimal latency, which may require additional
                 * CPU resources.
                 */
                TFLMetalDelegateThreadWaitTypeActive,
                /** The thread waits for the work while trying to prevent the GPU from going into sleep mode. */
                TFLMetalDelegateThreadWaitTypeAggressive,
            };
            /** Wait strategy applied while delegate work is in flight. Default: passive wait. */
            TFLMetalDelegateThreadWaitType waitType = TFLMetalDelegateThreadWaitTypePassive;

            /**
            * Indicates whether the GPU delegate allows execution of an 8-bit quantized model. The default is
            * `true`.
            */
            bool quantizationEnabled = true;
        };

        /** Options for the TensorFlow Lite Core ML delegate (Apple platforms). */
        struct TFLCoreMLDelegateOption {

            /** Whether the Core ML delegate is used at all. The default is `false`. */
            bool enabled = false;

            // This enum specifies for which devices the Core ML delegate will be enabled.
            enum TFLCoreMLDelegateEnabledDevices {
                /** Enables the delegate for devices with Neural Engine only. */
                TFLCoreMLDelegateEnabledDevicesNeuralEngine,
                /** Enables the delegate for all devices. */
                TFLCoreMLDelegateEnabledDevicesAll,
            };
            /** Device class the delegate is allowed on. Default: Neural Engine devices only. */
            TFLCoreMLDelegateEnabledDevices enabledDevices = TFLCoreMLDelegateEnabledDevicesNeuralEngine;

            /**
            * Target Core ML version for the model conversion. When it's not set, Core ML version will be set
            * to highest available version for the platform.
            */
            size_t coreMLVersion = 0;

            /**
            * The maximum number of Core ML delegate partitions created. Each graph corresponds to one
            * delegated node subset in the TFLite model. The default value is `0` indicating that all possible
            * partitions are delegated.
            */
            size_t maxDelegatedPartitions = 0;

            /**
            * The minimum number of nodes per partition to be delegated by the Core ML delegate. The default
            * value is `2`.
            */
            size_t minNodesPerPartition = 2;
        };

        // Used by the Android GPU delegate (TFLite GpuDelegateV2).
        struct TFLGpuDelegateV2Option {
            /** Whether the Android GPU delegate is used. The default is `false`. */
            bool enabled = false;
        };

    }// namespace tflite
    namespace mnn {
        /** Tensor data layout used when interpreting dimensions for MNN. */
        enum DimensionType {
            /** for tensorflow net type. uses NHWC as data format. */
            TENSORFLOW,
            /** for caffe net type. uses NCHW as data format. */
            CAFFE,
            /** for caffe net type. uses NC4HW4 as data format. */
            CAFFE_C4
        };
        /**
         * Compute backend selector for MNN.
         * NOTE(review): the explicit numeric values presumably must stay in sync with
         * MNN's own MNNForwardType enum — do not renumber; confirm against MNN headers.
         */
        enum MNNForwardType{
            MNN_FORWARD_CPU = 0,
            /* First look for any available backend other than CPU.
             * If no other backend is available, fall back to CPU.
             * */
            MNN_FORWARD_AUTO = 4,

            /*Hand-written Metal backend*/
            MNN_FORWARD_METAL = 1,

            /*NVIDIA GPU API*/
            MNN_FORWARD_CUDA = 2,

            /*Android / Common Device GPU API*/
            MNN_FORWARD_OPENCL = 3,
            MNN_FORWARD_OPENGL = 6,
            MNN_FORWARD_VULKAN = 7,

            /*Android 8.1's NNAPI or CoreML for ios*/
            MNN_FORWARD_NN = 5,

            /*User can use API from Backend.hpp to add or search Backend*/
            MNN_FORWARD_USER_0 = 8,
            MNN_FORWARD_USER_1 = 9,
            MNN_FORWARD_USER_2 = 10,
            MNN_FORWARD_USER_3 = 11,

            MNN_FORWARD_ALL,

            /* Apply arm extension instruction set to accelerate some Ops. This forward
             * type is only used inside MNN internals and becomes active automatically
             * when the user sets the forward type to MNN_FORWARD_CPU and the extension
             * instruction set is available on the hardware.
             */
            MNN_FORWARD_CPU_EXTENSION

        } ;
        /** Tuning preferences forwarded to the selected MNN backend. */
        struct MNNBackendMode {
            /** Memory-usage preference. */
            enum MemoryMode { Memory_Normal = 0, Memory_High, Memory_Low };

            MemoryMode memory = Memory_Normal;

            /** Power-consumption preference. */
            enum PowerMode { Power_Normal = 0, Power_High, Power_Low };

            PowerMode power = Power_Normal;

            /** Numeric-precision preference. */
            enum PrecisionMode { Precision_Normal = 0, Precision_High, Precision_Low, Precision_Low_BF16 };

            PrecisionMode precision = Precision_High;

            /** user defined context */
            // NOTE: the two members below share storage (anonymous union) —
            // writing `flags` overwrites `sharedContext` and vice versa.
            union {
                void* sharedContext = nullptr;
                size_t flags; // Valid for CPU Backend
            };
        };

        /** Top-level configuration for the MNN inference framework. */
        struct MNNOption {
            DimensionType dimensionType = TENSORFLOW;
            MNNForwardType forwardType = MNN_FORWARD_CPU;
            uint32_t numberOfThreads = 1; // default number of threads
            MNNBackendMode backendMode;
        };
    }
    /** Upper bound on inference threads. `constexpr` guarantees a compile-time constant. */
    static constexpr uint32_t MAX_THREAD_NUM = 4;

    namespace torchlite {
        /** Upper bound on inference threads for the TorchLite backend.
         *  `constexpr` (was plain `const`) guarantees a compile-time constant. */
        static constexpr uint32_t MAX_THREAD_NUM = 4;

        /** Core configuration for TorchLite inference. */
        struct TorchLiteCoreOption {
            uint32_t numberOfThreads = 0; // 0 = default value (let the runtime decide)
            uint32_t outputNums = 1;      // number of model outputs expected
        };
    }// namespace torchlite

    namespace mtk {
        /** Description of one MTK Neuron tensor (name, element type, shape). */
        struct MtkTensorParams {
            std::string name;
            /** The data type, e.g NEURON_INT8.
             *  Initialized to 0 so a default-constructed instance holds no
             *  indeterminate value (previously uninitialized — UB to read). */
            int32_t type = 0;
            /** The number of dimensions. It should be 0 for scalars. */
            uint32_t dimCount = 0;
            /**
             * The dimensions of the tensor. It should be nullptr for scalars.
             * NOTE(review): non-owning — presumably points into caller-managed
             * storage; confirm the lifetime against the MTK setup code.
             */
            uint32_t* dimensions = nullptr;
        };

        /** MTK Neuron runtime configuration: input/output tensor descriptions.
         *  All members are now zero-initialized by default; previously a
         *  default-constructed MtkOption carried indeterminate counts and
         *  dangling-looking pointers (UB to read). */
        struct MtkOption {
            /** Number of entries in inputTensorParams. */
            size_t inputTensorCount = 0;
            /** Non-owning array of input descriptions; nullptr when the count is 0. */
            MtkTensorParams *inputTensorParams = nullptr;
            /** Number of entries in outputTensorParams. */
            size_t outputTensorCount = 0;
            /** Non-owning array of output descriptions; nullptr when the count is 0. */
            MtkTensorParams *outputTensorParams = nullptr;
        };
    }


    // Selects which inference framework/runtime executes the model.
    enum FrameworkTypeTag {
        kTflite = 0,
        kOnnxRuntime,
        kMnn,
        kNcnn,
        kCoreML,
        kTorchLite,
        kMtk,
        kQnn,
        kOtherFrame
    };

    /// CoreML inference compute units
    namespace coreML {
        // Presumably mirrors Apple's MLComputeUnits cases — keep values in sync; confirm.
        enum class CMLComputeUnits {
            CPUOnly            = 0,
            CPUAndGPU          = 1,
            All                = 2,
            CPUAndNeuralEngine = 3, ///< available since iOS16
        };
    }

    /// QNN options
    // NOTE(review): "qnnOptins" looks like a typo of "qnnOptions"; kept so existing
    // callers keep compiling.
    namespace qnnOptins {
        /** Configuration for the Qualcomm QNN backend. */
        struct QnnOptions {
            // qnn
            std::string qnnStubPath;   // QNN hardware-acceleration stub library
            std::string qnnSystemPath; // QNN system library
            // Whether to load the QNN HTP context from cache.
            // Defaults to false; previously uninitialized, so reading it from a
            // default-constructed QnnOptions was undefined behavior.
            bool qnnIsLoadFromCache = false;
            std::string qnnOutputPath; // QNN inference output path
        };
    }

    /**
     * Aggregated configuration handed to the TNet inference engine.
     * Members beyond the common ones are compiled in conditionally, depending on
     * the platform macros TNET_ANDROID / TARGET_OS_IPHONE.
     */
    struct Option {

        FrameworkTypeTag frameworkTypeTag = kTflite;// selected inference framework type

        std::string modelPath;  // model file path
        std::string modelName;  // model name

        std::string libraryPath;// path of the underlying inference library

        std::string extraPath;  // other paths, e.g. the GPU library path


        // NOTE(review): ownership/lifetime of modelBuffer is not expressed here —
        // presumably the caller keeps it alive while the model is in use; confirm.
        char* modelBuffer = nullptr; // in-memory model file contents (alternative to modelPath)
        size_t modelBufferSize = 0;  // size of modelBuffer in bytes

        mnn::MNNOption mnnOption;

        tflite::TFLCoreOption tflCoreOption;

#ifdef TNET_ANDROID
        mtk::MtkOption mtkOption;
#endif

// NOTE(review): Apple's TargetConditionals.h defines TARGET_OS_IPHONE as 0 or 1 on
// every Apple platform, so `#ifdef` would also hold on macOS if that header is in
// scope; the conventional check is `#if TARGET_OS_IPHONE`. Presumably Prefix.h
// defines this macro only for iOS builds — confirm.
#ifdef TARGET_OS_IPHONE
        // Metal (iOS GPU)
        tflite::TFLMetaDelegateOption metaDelegateOption;
        // CoreML
        tflite::TFLCoreMLDelegateOption coreMLDelegateOption;
#endif

#ifdef TNET_ANDROID
        // Android Gpu
        tflite::TFLGpuDelegateV2Option gpuDelegateV2Option;
#endif

#ifdef TARGET_OS_IPHONE
        coreML::CMLComputeUnits computeUnits = coreML::CMLComputeUnits::CPUOnly;
#endif

#ifdef TNET_ANDROID
        torchlite::TorchLiteCoreOption torchLiteCoreOption;

        // qnn
        qnnOptins::QnnOptions qnnOptions;
#endif
    };

}// namespace TNet

#endif// TNET_OPTION_H
