# ******************************************************************************
#  Copyright (c) 2021-2022. Kneron Inc. All rights reserved.                   *
# ******************************************************************************

# Legacy Module
from .Legacy.V1.KPValue import \
    GenericRawResult, \
    GenericRawBypassPreProcResult

# Core Module
from .KPValue import \
    DeviceGroup, \
    InferenceConfiguration, \
    GenericImageInferenceDescriptor, \
    GenericImageInferenceResult, \
    GenericDataInferenceDescriptor, \
    GenericDataInferenceResult, \
    InferenceFixedNodeOutput, \
    InferenceFloatNodeOutput, \
    ProfileData, \
    PerformanceMonitorData
from .KPWrapper import KPWrapper
from .KPUtils import _get_model_max_raw_out_size
from .KPEnum import *
from .KPException import _check_api_return_code
from typing import Union
import ctypes


class KPInference:
    """
    Python-side facade over the Kneron PLUS native inference API (kp_inference_* /
    kp_generic_* / kp_profile_* C functions), exposed as static methods.
    """

    # Single shared ctypes wrapper around the native Kneron PLUS library.
    __KP_WRAPPER = KPWrapper()

    @staticmethod
    def _raise_on_error(status: int, result=None) -> None:
        """
        Translate a native API return code into a kp.ApiKPException on failure.

        Parameters
        ----------
        status : int
            Raw integer return code coming back from a native kp_* call.
        result : object, optional
            Partially-filled result object to attach to the raised exception,
            when the failing API produced one.
        """
        _check_api_return_code(result=result,
                               api_return_code=ApiReturnCode(status))

    @staticmethod
    def set_inference_configuration(device_group: DeviceGroup,
                                    inference_configuration: InferenceConfiguration) -> None:
        """
        Apply inference settings to all devices in the group.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.
        inference_configuration : kp.InferenceConfiguration
            Inference configurations to apply.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.InferenceConfiguration
        """
        config_buffer = inference_configuration._get_element_buffer()
        ret_code = KPInference.__KP_WRAPPER.LIB.kp_inference_configure(
            device_group.address,
            ctypes.byref(config_buffer)
        )
        KPInference._raise_on_error(ret_code)

    @staticmethod
    def generic_image_inference_send(device_group: DeviceGroup,
                                     generic_inference_input_descriptor: GenericImageInferenceDescriptor) -> None:
        """
        Send a multiple-input image inference request.

        Non-blocking while the device buffer queue has room. Pair each send with a
        kp.inference.generic_image_inference_receive() call; issuing several sends
        before starting to receive improves throughput.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.
        generic_inference_input_descriptor : kp.GenericImageInferenceDescriptor
            Parameters for the image inference: model ID, image width/height, etc.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.generic_image_inference_receive : Generic image inference multiple input receive.
        kp.GenericImageInferenceDescriptor
        """
        descriptor_buffer = generic_inference_input_descriptor._get_element_buffer()
        ret_code = KPInference.__KP_WRAPPER.LIB.kp_generic_image_inference_send(
            device_group.address,
            ctypes.byref(descriptor_buffer)
        )
        KPInference._raise_on_error(ret_code)

    @staticmethod
    def generic_image_inference_receive(device_group: DeviceGroup) -> GenericImageInferenceResult:
        """
        Receive the result of a previously-sent multiple-input image inference.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.

        Returns
        -------
        generic_image_inference_result : kp.GenericImageInferenceResult
            The received results, in Kneron RAW format.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.generic_image_inference_send : Generic image inference multiple input send.
        kp.inference.generic_inference_retrieve_float_node : Retrieve single node output floating-point data buffer
        kp.inference.generic_inference_retrieve_fixed_node : Retrieve single node output fixed-point data buffer
        kp.GenericImageInferenceResult

        Notes
        -----
        The returned data is in Kneron RAW format; use
        kp.inference.generic_inference_retrieve_float_node() or
        kp.inference.generic_inference_retrieve_fixed_node() to convert it to
        floating-point / fixed-point node outputs.
        """
        # Size the receive buffer for the largest raw output any loaded model produces.
        raw_buffer_size = _get_model_max_raw_out_size(device_group=device_group)
        inference_result = GenericImageInferenceResult(buffer_size=raw_buffer_size)

        ret_code = KPInference.__KP_WRAPPER.LIB.kp_generic_image_inference_receive(
            device_group.address,
            ctypes.byref(inference_result.header._get_element_buffer()),
            inference_result.raw_result._get_element_buffer()._LP_raw_out_buffer,
            raw_buffer_size
        )

        # Attach the (possibly partial) result to any raised exception.
        KPInference._raise_on_error(ret_code, result=inference_result)
        return inference_result

    @staticmethod
    def generic_data_inference_send(device_group: DeviceGroup,
                                    generic_inference_input_descriptor: GenericDataInferenceDescriptor) -> None:
        """
        Send a multiple-input raw-data inference request.

        Non-blocking while the device buffer queue has room. Pair each send with a
        kp.inference.generic_data_inference_receive() call; issuing several sends
        before starting to receive improves throughput.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.
        generic_inference_input_descriptor : kp.GenericDataInferenceDescriptor
            Parameters for the data inference: data buffers, buffer sizes, model ID, etc.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.generic_data_inference_receive : Generic raw data inference multiple input receive.
        kp.GenericDataInferenceDescriptor
        """
        descriptor_buffer = generic_inference_input_descriptor._get_element_buffer()
        ret_code = KPInference.__KP_WRAPPER.LIB.kp_generic_data_inference_send(
            device_group.address,
            ctypes.byref(descriptor_buffer)
        )
        KPInference._raise_on_error(ret_code)

    @staticmethod
    def generic_data_inference_receive(device_group: DeviceGroup) -> GenericDataInferenceResult:
        """
        Receive the result of a previously-sent multiple-input raw-data inference.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.

        Returns
        -------
        generic_data_inference_result : kp.GenericDataInferenceResult
            The received results, in Kneron RAW format.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.generic_data_inference_send : Generic raw data inference multiple input send.
        kp.inference.generic_inference_retrieve_float_node : Retrieve single node output floating-point data buffer
        kp.inference.generic_inference_retrieve_fixed_node : Retrieve single node output fixed-point data buffer
        kp.GenericDataInferenceResult

        Notes
        -----
        The returned data is in Kneron RAW format; use
        kp.inference.generic_inference_retrieve_float_node() or
        kp.inference.generic_inference_retrieve_fixed_node() to convert it to
        floating-point / fixed-point node outputs.
        """
        # Size the receive buffer for the largest raw output any loaded model produces.
        raw_buffer_size = _get_model_max_raw_out_size(device_group=device_group)
        inference_result = GenericDataInferenceResult(buffer_size=raw_buffer_size)

        ret_code = KPInference.__KP_WRAPPER.LIB.kp_generic_data_inference_receive(
            device_group.address,
            ctypes.byref(inference_result.header._get_element_buffer()),
            inference_result.raw_result._get_element_buffer()._LP_raw_out_buffer,
            raw_buffer_size
        )

        # Attach the (possibly partial) result to any raised exception.
        KPInference._raise_on_error(ret_code, result=inference_result)
        return inference_result

    @staticmethod
    def generic_inference_retrieve_fixed_node(node_idx: int,
                                              generic_raw_result: Union[
                                                  GenericRawResult,
                                                  GenericRawBypassPreProcResult,
                                                  GenericImageInferenceResult,
                                                  GenericDataInferenceResult
                                              ],
                                              channels_ordering: ChannelOrdering) -> InferenceFixedNodeOutput:
        """
        Extract one output node's fixed-point data from a RAW inference result.

        Parameters
        ----------
        node_idx : int
            Output node index, starting from 0. The total number of output nodes
            is available as 'GenericRawResult.header.num_output_node'.
        generic_raw_result : kp.v1.GenericRawResult, kp.v1.GenericRawBypassPreProcResult, kp.GenericImageInferenceResult, kp.GenericDataInferenceResult
            A RAW-format result object obtained from
            'kp.v1.inference.generic_raw_inference_receive()' /
            'kp.v1.inference.generic_raw_inference_bypass_pre_proc_receive()' /
            'kp.inference.generic_image_inference_receive()' /
            'kp.inference.generic_data_inference_receive()'.
        channels_ordering : kp.ChannelOrdering
            Channel ordering of the raw output feature map.

        Returns
        -------
        inference_fixed_node_output : kp.InferenceFixedNodeOutput
            The node's output in fixed-point format.

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.generic_image_inference_receive : Generic image inference multiple input receive.
        kp.inference.generic_data_inference_receive : Generic raw data inference multiple input receive.
        kp.v1.inference.generic_raw_inference_receive : Generic raw inference receive.
        kp.v1.inference.generic_raw_inference_bypass_pre_proc_receive : Generic raw inference bypass pre-processing receive.
        kp.GenericImageInferenceResult
        kp.GenericDataInferenceResult
        kp.v1.GenericRawResult
        kp.v1.GenericRawBypassPreProcResult
        kp.ChannelOrdering
        kp.InferenceFixedNodeOutput
        """
        raw_out_buffer = generic_raw_result.raw_result._get_element_buffer()._LP_raw_out_buffer

        node_output = InferenceFixedNodeOutput(channels_ordering=channels_ordering)
        node_output._element_buffer = KPInference.__KP_WRAPPER.LIB.kp_generic_inference_retrieve_fixed_node(
            node_idx,
            raw_out_buffer,
            channels_ordering.value
        )
        # The buffer was allocated by the C library, so its lifetime is C-managed.
        node_output._is_allocate_from_c = True

        return node_output

    @staticmethod
    def generic_inference_retrieve_float_node(node_idx: int,
                                              generic_raw_result: Union[
                                                  GenericRawResult,
                                                  GenericRawBypassPreProcResult,
                                                  GenericImageInferenceResult,
                                                  GenericDataInferenceResult
                                              ],
                                              channels_ordering: ChannelOrdering) -> InferenceFloatNodeOutput:
        """
        Extract one output node's floating-point data from a RAW inference result.

        Parameters
        ----------
        node_idx : int
            Output node index, starting from 0. The total number of output nodes
            is available as 'GenericRawResult.header.num_output_node'.
        generic_raw_result : kp.v1.GenericRawResult, kp.v1.GenericRawBypassPreProcResult, kp.GenericImageInferenceResult, kp.GenericDataInferenceResult
            A RAW-format result object obtained from
            'kp.v1.inference.generic_raw_inference_receive()' /
            'kp.v1.inference.generic_raw_inference_bypass_pre_proc_receive()' /
            'kp.inference.generic_image_inference_receive()' /
            'kp.inference.generic_data_inference_receive()'.
        channels_ordering : kp.ChannelOrdering
            Channel ordering of the raw output feature map.

        Returns
        -------
        inference_float_node_output : kp.InferenceFloatNodeOutput
            The node's output in floating-point format.

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.generic_image_inference_receive : Generic image inference multiple input receive.
        kp.inference.generic_data_inference_receive : Generic raw data inference multiple input receive.
        kp.v1.inference.generic_raw_inference_receive : Generic raw inference receive.
        kp.v1.inference.generic_raw_inference_bypass_pre_proc_receive : Generic raw inference bypass pre-processing receive.
        kp.GenericImageInferenceResult
        kp.GenericDataInferenceResult
        kp.v1.GenericRawResult
        kp.v1.GenericRawBypassPreProcResult
        kp.ChannelOrdering
        kp.InferenceFloatNodeOutput
        """
        raw_out_buffer = generic_raw_result.raw_result._get_element_buffer()._LP_raw_out_buffer

        node_output = InferenceFloatNodeOutput(channels_ordering=channels_ordering)
        node_output._element_buffer = KPInference.__KP_WRAPPER.LIB.kp_generic_inference_retrieve_float_node(
            node_idx,
            raw_out_buffer,
            channels_ordering.value
        )
        # The buffer was allocated by the C library, so its lifetime is C-managed.
        node_output._is_allocate_from_c = True

        return node_output

    @staticmethod
    def profile_set_enable(device_group: DeviceGroup,
                           enable: bool) -> None:
        """
        Enable or disable model profiling.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.
        enable : bool
            True to enable profiling, False to disable it.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.

        Notes
        -----
        Profiling workflow:
        1. Enable the feature via kp.inference.profile_set_enable()
        2. Run inference
        3. Collect statistics via kp.inference.profile_get_statistics()
        4. Disable the feature via kp.inference.profile_set_enable()
        """
        ret_code = KPInference.__KP_WRAPPER.LIB.kp_profile_set_enable(
            device_group.address,
            enable
        )
        KPInference._raise_on_error(ret_code)

    @staticmethod
    def profile_get_statistics(device_group: DeviceGroup) -> ProfileData:
        """
        Collect inference profiling results.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.

        Returns
        -------
        profile_data : kp.ProfileData
            The collected profiling statistics.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference.profile_set_enable: To set enable/disable model profile.
        kp.ProfileModelStatistics
        kp.ProfileData

        Notes
        -----
        Profiling workflow:
        1. Enable the feature via kp.inference.profile_set_enable()
        2. Run inference
        3. Collect statistics via kp.inference.profile_get_statistics()
        4. Disable the feature via kp.inference.profile_set_enable()
        """
        statistics = ProfileData()

        ret_code = KPInference.__KP_WRAPPER.LIB.kp_profile_get_statistics(
            device_group.address,
            ctypes.byref(statistics._get_element_buffer())
        )

        KPInference._raise_on_error(ret_code)
        return statistics

    @staticmethod
    def _performance_monitor_set_enable(device_group: DeviceGroup,
                                        enable: bool) -> None:
        """
        Enable or disable the model performance monitor. (Only support KL630)

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.
        enable : bool
            True to enable the monitor, False to disable it.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.

        Notes
        -----
        Performance-monitor workflow:
        1. Enable the feature via kp.inference._performance_monitor_set_enable()
        2. Run inference
        3. Collect statistics via kp.inference._performance_monitor_get_statistics()
        4. Disable the feature via kp.inference._performance_monitor_set_enable()
        """
        ret_code = KPInference.__KP_WRAPPER.LIB.kp_performance_monitor_set_enable(
            device_group.address,
            enable
        )
        KPInference._raise_on_error(ret_code)

    @staticmethod
    def _performance_monitor_get_statistics(device_group: DeviceGroup) -> PerformanceMonitorData:
        """
        Collect performance-monitor profiling results.

        Parameters
        ----------
        device_group : kp.DeviceGroup
            Represents a set of devices handle.

        Returns
        -------
        performance_monitor_data : kp.PerformanceMonitorData
            The collected performance-monitor statistics.

        Raises
        ------
        kp.ApiKPException

        See Also
        --------
        kp.core.connect_devices : To connect multiple (including one) Kneron devices.
        kp.inference._performance_monitor_set_enable: To set enable/disable model profile.
        kp.NpuPerformanceMonitorStatistics
        kp.PerformanceMonitorData

        Notes
        -----
        Performance-monitor workflow:
        1. Enable the feature via kp.inference._performance_monitor_set_enable()
        2. Run inference
        3. Collect statistics via kp.inference._performance_monitor_get_statistics()
        4. Disable the feature via kp.inference._performance_monitor_set_enable()
        """
        statistics = PerformanceMonitorData()

        ret_code = KPInference.__KP_WRAPPER.LIB.kp_performance_monitor_get_statistics(
            device_group.address,
            ctypes.byref(statistics._get_element_buffer())
        )

        KPInference._raise_on_error(ret_code)
        return statistics
