/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup MindSpore
 * @{
 *
 * @brief Provides APIs related to MindSpore Lite model inference.
 *
 * @Syscap SystemCapability.Ai.MindSpore
 * @since 9
 */

/**
 * @file types.h
 *
 * @brief Provides the model file types and device types supported by MindSpore Lite.
 *
 * File to include: <mindspore/types.h>
 * @library libmindspore_lite_ndk.so
 * @since 9
 */

#ifndef MINDSPORE_INCLUDE_C_API_TYPES_C_H
#define MINDSPORE_INCLUDE_C_API_TYPES_C_H

#ifdef __cplusplus
extern "C" {
#endif

#ifndef OH_AI_API
#ifdef _WIN32
#define OH_AI_API __declspec(dllexport)
#else
#define OH_AI_API __attribute__((visibility("default")))
#endif
#endif
/**
 * @brief Defines model file types.
 *
 * @since 9
 */
typedef enum OH_AI_ModelType {
    /** MindIR model type. The extension of the model file name is .ms.
     *
     * @since 9
     */
    OH_AI_MODELTYPE_MINDIR = 0,
    /** Invalid model type. Sentinel value; not a loadable model format.
     *
     * @since 9
     */
    OH_AI_MODELTYPE_INVALID = 0xFFFFFFFF
} OH_AI_ModelType;

/**
 * @brief Defines the supported device types.
 *
 * @since 9
 */
typedef enum OH_AI_DeviceType {
    /** Device type: CPU
     *
     * @since 9
     */
    OH_AI_DEVICETYPE_CPU = 0,
    /** Device type: GPU
     *
     * Reserved; not currently usable for inference on OHOS.
     *
     * @since 9
     */
    OH_AI_DEVICETYPE_GPU,
    /** Device type: Kirin NPU
     *
     * Reserved; not currently usable for inference on OHOS.
     *
     * @since 9
     */
    OH_AI_DEVICETYPE_KIRIN_NPU,
    /** Device type: NNRt (Neural Network Runtime)
     *
     * Values in the range [60, 80) are reserved for OHOS device types.
     *
     * @since 9
     */
    OH_AI_DEVICETYPE_NNRT = 60,
    /** Invalid device type. Sentinel value; not a selectable device.
     *
     * @since 9
     */
    OH_AI_DEVICETYPE_INVALID = 100,
} OH_AI_DeviceType;

/**
 * @brief Enumerates NNRt device types.
 *
 * @since 10
 */
typedef enum OH_AI_NNRTDeviceType {
    /** Other device type (any NNRt device that is not a CPU, GPU, or accelerator)
     *
     * @since 10
     */
    OH_AI_NNRTDEVICE_OTHERS = 0,
    /** NNRt device type: CPU
     *
     * @since 10
     */
    OH_AI_NNRTDEVICE_CPU = 1,
    /** NNRt device type: GPU
     *
     * @since 10
     */
    OH_AI_NNRTDEVICE_GPU = 2,
    /** NNRt device type: dedicated hardware accelerator
     *
     * @since 10
     */
    OH_AI_NNRTDEVICE_ACCELERATOR = 3,
} OH_AI_NNRTDeviceType;

/**
 * @brief Enumerates performance modes of the NNRt device.
 *
 * @since 10
 */
typedef enum OH_AI_PerformanceMode {
    /** No performance preference; the device uses its default setting.
     *
     * @since 10
     */
    OH_AI_PERFORMANCE_NONE = 0,
    /** Low power consumption (favors energy efficiency over speed)
     *
     * @since 10
     */
    OH_AI_PERFORMANCE_LOW = 1,
    /** Balanced power consumption and performance
     *
     * @since 10
     */
    OH_AI_PERFORMANCE_MEDIUM = 2,
    /** High performance (favors speed over energy efficiency)
     *
     * @since 10
     */
    OH_AI_PERFORMANCE_HIGH = 3,
    /** Extreme performance (maximum speed, highest power consumption)
     *
     * @since 10
     */
    OH_AI_PERFORMANCE_EXTREME = 4,
} OH_AI_PerformanceMode;

/**
 * @brief Enumerates NNRt inference task priorities.
 *
 * @since 10
 */
typedef enum OH_AI_Priority {
    /** No priority preference; the scheduler uses its default.
     *
     * @since 10
     */
    OH_AI_PRIORITY_NONE = 0,
    /** Low inference task priority
     *
     * @since 10
     */
    OH_AI_PRIORITY_LOW = 1,
    /** Medium inference task priority
     *
     * @since 10
     */
    OH_AI_PRIORITY_MEDIUM = 2,
    /** High inference task priority
     *
     * @since 10
     */
    OH_AI_PRIORITY_HIGH = 3,
} OH_AI_Priority;

/**
 * @brief Enumerates optimization levels.
 *
 * @since 11
 */
typedef enum OH_AI_OptimizationLevel {
    /** No optimization applied.
     *
     * @since 11
     */
    OH_AI_KO0 = 0,
    /** Convert the precision type of the network to float16, while keeping the
     * batch normalization layers and the loss function in float32.
     * Note: value 1 (an O1 level) is intentionally not defined in this enum.
     *
     * @since 11
     */
    OH_AI_KO2 = 2,
    /** Convert the precision type of the whole network, including the batch
     * normalization layers, to float16.
     *
     * @since 11
     */
    OH_AI_KO3 = 3,
    /** Automatically select an optimization level based on the device.
     *
     * @since 11
     */
    OH_AI_KAUTO = 4,
    /** Invalid optimization level. Sentinel value; not a selectable level.
     *
     * @since 11
     */
    OH_AI_KOPTIMIZATIONTYPE = 0xFFFFFFFF
} OH_AI_OptimizationLevel;

/**
 * @brief Enumerates quantization types.
 *
 * @since 11
 */
typedef enum OH_AI_QuantizationType {
    /** No quantization applied.
     *
     * @since 11
     */
    OH_AI_NO_QUANT = 0,
    /** Weight-only quantization (weights are quantized; activations stay in
     * the original precision).
     *
     * @since 11
     */
    OH_AI_WEIGHT_QUANT = 1,
    /** Full quantization (both weights and activations are quantized).
     *
     * @since 11
     */
    OH_AI_FULL_QUANT = 2,
    /** Unknown quantization type. Sentinel value; not a selectable type.
     *
     * @since 11
     */
    OH_AI_UNKNOWN_QUANT_TYPE = 0xFFFFFFFF
} OH_AI_QuantizationType;

/**
 * @brief Defines the NNRt device information, including the device ID and device name.
 *
 * This is an opaque type: its layout is not exposed, and instances are only
 * accessed through pointers returned by the NNRt device-description APIs.
 * NOTE(review): ownership/lifecycle (who allocates and frees the descriptor)
 * is defined by those APIs, not visible here — confirm against their docs.
 *
 * @since 10
 */
typedef struct NNRTDeviceDesc NNRTDeviceDesc;

#ifdef __cplusplus
}
#endif

/** @} */
#endif // MINDSPORE_INCLUDE_C_API_TYPES_C_H
