/****************************************************************************
 * Copyright (c) 2020 Qualcomm Technologies International, Ltd
****************************************************************************/
/**
 * \file  @@@cap_name@@@_postprocessing.c
 * \ingroup  capabilities
 *
 *
 *
 */

#include "@@@cap_name@@@_cap.h"
#include "capabilities.h"
#include "post_passthrough.h"
/****************************************************************************
Private Constant Declarations
*/




/**
 * \brief @@@cap_name@@@ postprocessing data process function.
 *
 * Runs the postprocessing algorithm on the latest inference result and
 * pushes one frame of its output to the capability's output terminal.
 *
 * \param @@@cap_name@@@_data Pointer to the @@@cap_name@@@ operator data.
 * \return none
 */
void @@@cap_name@@@_post_process(@@@cap_name^U@@@_OP_DATA *@@@cap_name@@@_data)
{
    t_POSTPROC_CONTAINER *container = @@@cap_name@@@_data->post_processing_container;

    /* After each successful ML inference:
     * Step 1: Run the postprocessing algorithm. It reads from the buffer
     *         associated with algo_input - which is the output tensor buffer -
     *         processes it and writes into the linear output buffer.
     * Step 2: Copy one postprocessing frame worth of data from the linear
     *         output buffer into the capability's output buffer.
     */

    /* Step 1: run the postprocessing algorithm */
    post_passthrough_update(container->passthrough_data,
                            container->linear_output_buffer,
                            container->algo_input);

    /* Step 2: copy from the postprocessing linear buffer to the terminal
     * circular buffer */
    int amount_copied = cbuffer_write(@@@cap_name@@@_data->op_buffer,
                                      container->linear_output_buffer,
                                      container->output_block_size);

    L2_DBG_MSG1("postproc: copied to output: %d",amount_copied);
}

/**
 * \brief @@@cap_name@@@ postprocessing create.
 *
 * Initialises the postprocessing container, allocates the linear output
 * buffer and maps each algo_input entry to the output tensor buffer that
 * the ML engine allocated when the model was activated.
 *
 * \param @@@cap_name@@@_data Pointer to the @@@cap_name@@@ operator data.
 * \return none
 */
void @@@cap_name@@@_post_processing_create(@@@cap_name^U@@@_OP_DATA *@@@cap_name@@@_data )
{
    /* Initialise postprocessing:
     * 1. Create a post_processing container and initialise it.
     * 2. Populate algo_input. This is a static array of type ALGO_INPUT_INFO (see lib/ml_algos/postproc_common.h)
     *    The size of this array depends on the number of output tensors(defined in *_defs.h).
     *    For each output tensor, this stores the tensor_id, tensor_size and has a pointer to the
     *    data buffer of the tensor. This is the buffer from which the postprocessing algorithm reads
     *    output tensors. This buffer pointer needs to be pointed to the output tensor buffer already
     *    allocated by the Kymera Machine learning framework on activating the model. Every time a
     *    model is loaded and activated using specific operator messages, the Kymera Machine learning
     *    framework will allocate buffers for each tensor internally. We iterate over all the tensors
     *    in the model and find the buffer associated with the output tensors and associate it to the
     *    pointer in the corresponding algo_input entry.
     * 3. Create a linear buffer which stores one frame generated by the postprocessing algorithms. This
     *    data is copied into the output linear buffer, which in turn is copied into the output circular
     *    buffer of the capability.
     */

    /* Create and initialise postprocessing container */
    t_POSTPROC_CONTAINER *post_proc_container = @@@cap_name@@@_data->post_processing_container;
    /* Each iteration of the postprocessing algorithms gives us output_block_size worth of data */
    post_proc_container->output_block_size = (int) (OUTPUT_BLOCK_PERIOD * OUTPUT_SAMPLE_RATE);
    post_proc_container->output_sample_rate = OUTPUT_SAMPLE_RATE;
    post_proc_container->num_tensors = NUM_OP_TENSORS;

    /* Populate algo_input: We have one postprocessing algorithm and one output tensor.
     * Hence we have only one ALGO_INPUT_INFO type entry in our postprocessing container
     * and the size of algo_input is 1.
     * Change this depending on the number of postprocessing algorithms and number of output
     * tensors.
     */
    post_proc_container->algo_input[0].tensor_id = OUTPUT_TENSOR_ID;
    post_proc_container->algo_input[0].size = OP_TENSOR_SIZE;

    /* Create the linear output buffer.
     * NOTE(review): allocation result is not checked here - presumably
     * xzpmalloc panics on failure on this platform; confirm, or add an
     * explicit NULL check and failure path. */
    post_proc_container->linear_output_buffer = xzpmalloc(post_proc_container->output_block_size * sizeof(signed));

    post_passthrough_create(&post_proc_container->passthrough_data,post_proc_container->output_block_size);

    /* Map the data buffer pointer for each entry in algo_input to the corresponding
     * output tensor buffer as described above.
     */
    USECASE_INFO *usecase_info = (USECASE_INFO *)ml_engine_list_find(@@@cap_name@@@_data->ml_engine_container->use_cases,(uint16)@@@cap_name@@@_data->ml_engine_container->uc_id);
    for(int i=0; i<usecase_info->output_tensor.num_tensors; i++)
    {
        MODEL_TENSOR_INFO *tensor = &usecase_info->output_tensor.tensors[i];
        unsigned tensor_id = tensor->tensor_id;
        /* Reset per model tensor so an unmapped tensor is always reported,
         * even after a previous tensor was mapped successfully. */
        ALGO_INPUT_INFO *algo_input = NULL;
        /* Search the capability's algo_input entries. Bound the search by the
         * capability's own tensor count (the size of algo_input), not by the
         * model's tensor count - the model may expose more output tensors
         * than this capability consumes, and using the model count would read
         * past the end of algo_input. */
        for(unsigned j=0; j<post_proc_container->num_tensors; j++)
        {
            if(post_proc_container->algo_input[j].tensor_id == tensor_id)
            {
                /* Found the algo_input entry corresponding to this tensor */
                algo_input = &post_proc_container->algo_input[j];
                /* Map engine tensor data location to the algo input */
                algo_input->input_data = (signed *)tensor->data;
                L2_DBG_MSG1("ML_EXAMPLE_SVAD:Postprocessing:mapping tensor id: %d",tensor_id);
                break;
            }
        }
        if(algo_input == NULL)
        {
            L2_DBG_MSG1("ML_EXAMPLE_SVAD:Postprocessing: cannot map tensor id: %d",tensor->tensor_id);
        }
    }
}
/**
 * \brief @@@cap_name@@@ postprocessing destroy.
 *
 * Releases the postprocessing algorithm data and the linear output buffer.
 * The container itself is owned (and freed) by the caller.
 *
 * \param post_proc_container Pointer to the postprocessing container.
 * \return none
 */
void @@@cap_name@@@_post_processing_destroy(t_POSTPROC_CONTAINER *post_proc_container)
{
    post_passthrough_destroy(post_proc_container->passthrough_data);
    pfree(post_proc_container->linear_output_buffer);
    /* The container outlives this call: clear the released pointers so a
     * stray reuse or a second destroy cannot double-free them. */
    post_proc_container->passthrough_data = NULL;
    post_proc_container->linear_output_buffer = NULL;
}
