/* Copyright (c) 2025 Beijing Semidrive Technology Corporation
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file  SpiSlave_Ip
 * @brief Semidrive AUTOSAR 4.3.1 MCAL SPI Slave driver plugin.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include "Std_Types.h"
#include "RegHelper.h"
#include "SpiSlave_Fault.h"
#include "SpiSlave_Cfg.h"
#include "SpiSlave_reg.h"
#include "SpiSlave_Ip.h"
#include "SpiSlave_Types.h"
#if (SPI_SLV_ENABLE_DMA == STD_ON)
#include "Dma.h"
#include "Mcal_Cache.h"
#include "Mcal_Soc.h"
#include "SchM_SpiSlave.h"
#endif
#include "SpiSlave_Fault.h"
#define SPI_SLV_START_SEC_CONST_UNSPECIFIED
#include "SpiSlave_MemMap.h"
/*PRQA S 4399,2895,4499 EOF*/
#if (SPI_SLV_ENABLE_DMA == STD_ON)
/********************************************************************************************************
 *                                  Private Variable Definitions                                        *
 *******************************************************************************************************/
/*PRQA S 3218 4*/
/** \brief  Maps an SPI controller index (bus->idx, 0..13) to its DMA peripheral request ID. */
static const Dma_PeripheralIdType Dma_ResTable[14] =
{
    DMA_CSIB1, DMA_CSIB2, DMA_CSIB3, DMA_CSIB4,
    DMA_CSIB5, DMA_CSIB6, DMA_CSIB7, DMA_CSIB8,
    DMA_CSIB9, DMA_CSIB10, DMA_CSIB11, DMA_CSIB12,
    DMA_CSIB13, DMA_CSIB14
};
/*PRQA S 3218 4*/
/** \brief  DMA beat-size lookup indexed by fifo_width (1-, 2- or 4-byte data width). */
static const Dma_BeatSizeType Dma_WidthTable[3] =
{
    DMA_BEAT_SIZE_1_BYTE, DMA_BEAT_SIZE_2_BYTE, DMA_BEAT_SIZE_4_BYTE
};
#endif
#define SPI_SLV_STOP_SEC_CONST_UNSPECIFIED
#include "SpiSlave_MemMap.h"

#define SPI_SLV_START_SEC_CODE
#include "SpiSlave_MemMap.h"

/**
 * @brief Initialize SPI Slave Module
 *
 * Initializes the specified SPI slave module and sets relevant register values.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @return Returns 0 on success, -2 on failure
 */
static sint32 sspi_slv_init(struct mld_spi_slv_module *bus)
{
    uint32 reg_val,timeout;

    // init reg val
    reg_val = readl(bus->base + SPI_CTRL_OFF);
    // request a soft reset; hardware clears SW_RST itself once done
    writel(BM_SPI_CTRL_SW_RST | reg_val, bus->base + SPI_CTRL_OFF);

    /* wait  controller clr softrst bit (bounded busy-wait) */
    for (timeout = 0; timeout < SPI_SLAVE_MAX_TIMEOUT_SWRST; timeout++ )
    {
        reg_val = readl(bus->base + SPI_CTRL_OFF);

        if (0U == (reg_val & BM_SPI_CTRL_SW_RST))
        {
            break;
        }/* else is not needed */
    }
    /* Error Inject Point */
    SpiSLvFaultInj_SpiSLv_Ip_Init();

    /* loop exhausted without SW_RST clearing -> reset never completed */
    if (timeout == SPI_SLAVE_MAX_TIMEOUT_SWRST)
    {
       return -2;
    }
    /* rebuild SPI_CTRL from scratch: slave mode on; CS polarity low,
       DMA requests and SSP mode disabled simply by leaving those bits 0 */
    reg_val = 0;
    reg_val |= BM_SPI_CTRL_SLV_MODE;
    /* default the cs polarity is low */
    /* default is disable DMA req */
    /* disable the  ssp mode */
    /* BM_SPI_CTRL_SSP_CLK_MODE Prohibited to set !!!! */
    reg_val &= (uint32)(~BM_SPI_CTRL_SSP_CLK_MODE);
    /*
            idle:the idle between two transfers or between
        data intervals to avoid abnormal stuck transmission.
    */
    reg_val |= FV_SPI_CTRL_IDLE(4);
    /*
            timeout:the timeout between two transfers or between
        data intervals to avoid abnormal stuck transmission.
    */
    writel(reg_val, bus->base + SPI_CTRL_OFF);

    /* default enable timeout hw auto stop and not keep tx dma req when trans done */
    reg_val = readl(bus->base + SPI_CTRL2_OFF);
    reg_val &= (uint32)~BM_SPI_CTRL2_TIME_OUT_CLEAR;
    reg_val &= (uint32)~BM_SPI_CTRL2_TX_DMA_REQ_EN;
    reg_val |= BM_SPI_CTRL2_TIMEOUT_EN;
    writel(reg_val, bus->base + SPI_CTRL2_OFF);
    /* default mask all irq*/
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_MASK_OFF);
    /* default mask the irqs of tim */
    writel(0xF, bus->base + PTY_TIM_ERR_UNC_MASK_OFF);
    /*
        default disable spi avoid slave mode lost bit
    */
    writel(0, bus->base + SPI_EN_OFF);
    /* clear all stale interrupt status -- NOTE(review): write-1-to-clear
       semantics assumed from the usage pattern here; confirm in the TRM */
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_STAT_OFF);
    /*
        wml always set as 8 half of fifo deepth
    */
    writel(FV_SPI_TX_FIFO_CTRL_THRD(SPI_SLV_FIFO_LEN/2),
        bus->base + SPI_TX_FIFO_CTRL_OFF);
    writel(FV_SPI_RX_FIFO_CTRL_THRD((SPI_SLV_FIFO_LEN / 2) - 1),
        bus->base + SPI_RX_FIFO_CTRL_OFF);

    return 0;
}
/**
 * @brief SPI Recovery Function
 *
 * Recovers the SPI module state: masks all interrupts, disables the
 * controller, performs a soft reset, then clears pending interrupt status.
 *
 * @param bus Pointer to the SPI slave module
 */
static void sspi_slv_recover(struct mld_spi_slv_module *bus)
{
    uint32 reg_val,timeout;
    /* Mask ALL interrupt sources while recovering.  Fix: use the full
       32-bit mask (0xFFFFFFFF) as in sspi_slv_init()/sspi_slv_deinit();
       the previous value 0xFFFF left IRQ mask bits 16..31 untouched
       during the reset window. */
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_MASK_OFF);
    /* default disable spi avoid slave mode lost bit */
    writel((uint32)(~BM_SPI_EN_ENABLE), bus->base + SPI_EN_OFF);
    // init reg val
    reg_val = readl(bus->base + SPI_CTRL_OFF);
    reg_val &= (uint32)(~BM_SPI_CTRL_SLV_UNS_SIZE_EN);
    /* only slave mode do this sclk @133M *6cycle = 50ns softrst */
    writel(reg_val | BM_SPI_CTRL_SW_RST, bus->base + SPI_CTRL_OFF);
    /*  wait  controller clr softrst bit */
    for (timeout = 0; timeout < SPI_SLAVE_MAX_TIMEOUT_SWRST; timeout++ )
    {
        reg_val = readl(bus->base + SPI_CTRL_OFF);

        if (0U == (reg_val & BM_SPI_CTRL_SW_RST))
        {
            break;
        }/* else is not needed */
    }

    /* restore the last observed CTRL value (UNS_SIZE_EN stays cleared) */
    writel(reg_val, bus->base + SPI_CTRL_OFF);
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_MASK_OFF);
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_STAT_OFF);
}

/**
 * @brief De-initialize SPI Slave Module
 *
 *   De-initializes the SPI slave module, including disabling interrupts,
 *   clearing interrupt states,disabling the SPI controller, and performing
 *   a software reset.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @return Returns the de-initialization result, 0 on success, non-zero on failure
 */
static sint32 sspi_slv_deinit(struct mld_spi_slv_module *bus)
{
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_MASK_OFF);
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_STAT_OFF);
    writel((uint32)(~BM_SPI_EN_ENABLE), bus->base + SPI_EN_OFF);
    // softrst
    writel(BM_SPI_CTRL_SW_RST, bus->base + SPI_CTRL_OFF);

    /*  wait  controller clr softrst bit */
    for (uint32 timeout = 0; timeout < SPI_SLAVE_MAX_TIMEOUT_SWRST; timeout++ )
    {
        uint32 reg_val = readl(bus->base + SPI_CTRL_OFF);

        if (0U == (reg_val & BM_SPI_CTRL_SW_RST))
        {
            break;
        }/* else is not needed */
    }

    return 0;
}
/**
 * @brief Set SPI Slave Device Parameters
 *
 * Sets the specified SPI slave device parameters onto the SPI bus.
 *
 * @param bus Pointer to the SPI bus module
 *
 * @param dev Pointer to the SPI slave device
 *
 * @return Returns the operation result, 0 on success, non-zero on failure
 */
static  sint32 sspi_slv_set_predev_parameters(struct mld_spi_slv_module *bus, \
        struct mld_spi_slv_device *dev)
{

    struct spi_slv_dev_priv  *priv = (struct spi_slv_dev_priv *)dev->priv;

    uint32 ctrl = readl(bus->base + SPI_CTRL_OFF);

    if (priv->flags & F_SLV_DEV_NSS_HIGH){
        ctrl |= ((uint32)0x1u) << (priv->nss_idx + 8u);
    }else{
        /*PRQA S 4391 2*/
        ctrl &= (uint32)(~(0x1u << (priv->nss_idx + 8u)));
    }

    writel(ctrl, bus->base + SPI_CTRL_OFF);

    return 0;
}
#define SPI_SLV_STOP_SEC_CODE
#include "SpiSlave_MemMap.h"

#define SPI_SLV_START_SEC_CODE_FAST
#include "SpiSlave_MemMap.h"
/**
 * @brief Write Data to SPI Bus
 *
 * Writes the given data into the transmit FIFO data register of the SPI bus.
 *
 * @param bus Pointer to the SPI bus
 *
 * @param data Data to be written
 */
static void sspi_slv_write(struct mld_spi_slv_module  *bus, uint32 data)
{
    /* Push one word into the TX FIFO data register. */
    writel(data, bus->base + SPI_TX_FIFO_DATA_OFF);
}
/**
 * @brief Read SPI Slave Module Receive FIFO Data
 *
 * Reads the receive FIFO data from the specified SPI slave module.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @return The read receive FIFO data
 */
static uint32 sspi_slv_read(struct mld_spi_slv_module  *bus)
{
    /* Pop one word from the RX FIFO data register. */
    return readl(bus->base + SPI_RX_FIFO_DATA_OFF);
}
/**
 * @brief Check if SSPI Can Write Data
 *
 * Checks if the given SPI slave module can write data.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @return Returns TRUE if data can be written, FALSE otherwise
 *
 * @note
 * - Master mode stops sending when the receive FIFO is full, but the slave does not have this mechanism.
 * - The slave should stop writing immediately and read the receive FIFO when the receive FIFO is full during the writing process to avoid overflow.
 * - Ensure two free locations for writing in transmission hold mode.
 */
static boolean sspi_slv_can_write(struct mld_spi_slv_module  *bus)
{
    /*
           Master mode rx fifo full stop to send,but the Slave does not have this mechanism.
        Slave should stop writing immediately when it finds that rxfifo is full in the
        process of writing data, and then read rx fifo to avoid rx fifo overflow.
            In transmission hold mode, you need to ensure that there are two free
        locations for writing.
    */
    boolean ret = FALSE;

    uint32 reg_val = readl(bus->base + SPI_FIFO_STAT_OFF);
    ret = !( reg_val & (BM_SPI_FIFO_STAT_TX_FULL | BM_SPI_FIFO_STAT_RX_FULL));
    return ret;
}
/**
 * @brief Check if SSPI Can Read Data
 *
 * Checks if the given SPI slave module can Read data.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @return Returns TRUE if data can be written, FALSE otherwise
 *
 * @note
 */
static boolean sspi_slv_can_read(struct mld_spi_slv_module  *bus)
{
    return !(readl(bus->base + SPI_FIFO_STAT_OFF) & BM_SPI_FIFO_STAT_RX_EMPTY);
}
#if (SPI_SLV_ENABLE_DMA == STD_ON)

/**
 * @brief Disable DMA Request
 *
 * Disables DMA requests for the SPI slave module based on the given flags.
 *
 * @param bus Pointer to the SPI slave module
 * @param flags Flags specifying the DMA request types to disable
 */
static inline void disable_dma_req(struct mld_spi_slv_module  *bus, uint32 flags)
{
    /* Read-modify-write SPI_CTRL, clearing the DMA request enables
       selected by 'flags'. */
    uint32 ctrl = readl(bus->base + SPI_CTRL_OFF);

    if (0u != (flags & SPI_SLV_DMA_RX_ENABLE)) {
        ctrl &= (uint32)(~BM_SPI_CTRL_RX_DMA_EN);
        SPI_SLV_DBG("spi_slv_dma rx disable\n");
    }

    if (0u != (flags & SPI_SLV_DMA_TX_ENABLE)) {
        ctrl &= (uint32)(~BM_SPI_CTRL_TX_DMA_EN);
        SPI_SLV_DBG("spi_slv_dma tx disable\n");
    }

    writel(ctrl, bus->base + SPI_CTRL_OFF);
}
extern void SpiSlave_HwMainFunctionHandling(uint8 Hwidx);
/**
 * @brief DMA transfer-complete / error callback for the SPI slave.
 *
 * Registered as the Dma channel irqCallback for both the RX and the TX
 * channel (see setup_spi_slv_dma).  @p context is the reporting channel's
 * Dma_ChannelConfigType; its ->context field points back to the owning bus,
 * and comparing @p context against bus_priv->dma_ch_rx / dma_ch_tx tells
 * the RX and TX channels apart.
 *
 * On DMA_COMPLETED in fixed-length mode the function disables the matching
 * DMA request, converts the transferred byte count to words, flags
 * SPI_SLV_RX/TX_DMA_ERR when fewer words than expected were moved, updates
 * rx_cur/tx_cur and cur_remian, and drops the channel reference.  On any
 * other status (DMA_PENDING / DMA_PAUSED / error) both DMA requests are
 * disabled and the corresponding error flag is set.  In unspecified-size
 * mode (SPI_SLV_STATE_IS_UNS_EN) Dma_Stop/Dma_ReleaseChannel are deferred
 * to the CS-inactive interrupt handler.  Finally the HW main function is
 * kicked to advance the transfer state machine.
 *
 * @param dma_stat DMA controller status reported by the Dma driver
 * @param context  Pointer to the reporting channel's Dma_ChannelConfigType
 */
static void dma_handle(Dma_ControllerStatusType dma_stat, void *context)
{
    uint32 xfer;
    Dma_ChannelConfigType *chan = (Dma_ChannelConfigType *)context;
    struct mld_spi_slv_module  *bus = (struct mld_spi_slv_module *)chan->context;
    struct spi_slv_bus_priv *bus_priv = (struct spi_slv_bus_priv *)bus->priv;
    /* width_type is used as a log2 byte-shift (see Dma_WidthTable indexing) */
    uint8 data_width_shift = bus->async.width_type;
    /*
        fault Inj for dma status error.
    */
    SpiSLvFaultInj_SpiSLv_Ip_DmaHandleDmaStatusErr();

    /*
        Dma_Stop and Dma_ReleaseChannel will be called in SPI CS inactive interrupt
    */
    if (!(bus->state & SPI_SLV_STATE_IS_UNS_EN)) {
        Dma_Stop(chan);
        Dma_ReleaseChannel(chan);
    }

    if (dma_stat == DMA_COMPLETED) {
        /*
            fixed-length mode need this part handles
        */
        if (!(bus->state & SPI_SLV_STATE_IS_UNS_EN)) {
            SchM_Enter_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
            /*
                This is the rx dma channel handle.
            */
            if (context == bus_priv->dma_ch_rx) {
                SPI_SLV_DBG("bus%d dma rx.\n", bus->idx);
                disable_dma_req(bus, SPI_SLV_DMA_RX_ENABLE);
#if (SPI_SLV_NO_CACHEABLE_NEEDED == STD_OFF)
                /* drop stale cache lines over the DMA-written tail of the
                   RX buffer before the CPU reads it */
                Mcal_InvalidateCache(bus->async.prxdata.val + (bus->async.rx_cur << data_width_shift),
                                     ROUNDUP(((bus->async.expect_len - bus->async.rx_cur) << data_width_shift), CACHE_LINE));
#endif
                xfer = Dma_GetXferBytes(bus_priv->dma_ch_rx, TRUE);
                xfer = xfer >> data_width_shift;
                SpiSLvFaultInj_SpiSLv_DmaLenth_Err();
                /* short transfer: fewer words arrived than expected */
                if ((bus->async.expect_len - bus->async.rx_cur ) > xfer) {
                    bus_priv->dma_err |= SPI_SLV_RX_DMA_ERR;
                }

                bus->async.rx_cur += xfer;
                bus->async.cur_remian = bus->async.len - bus->async.rx_cur;
                bus_priv->dma_ch_rx = NULL_PTR;
            } else if (context == bus_priv->dma_ch_tx) {
                /*
                    This is the tx dma channel handle.
                */
                SPI_SLV_DBG("bus%d dma tx.\n", bus->idx);
                disable_dma_req(bus, SPI_SLV_DMA_TX_ENABLE);
                xfer = Dma_GetXferBytes(bus_priv->dma_ch_tx, TRUE);
                xfer = xfer >> data_width_shift;
                SpiSLvFaultInj_SpiSLv_DmaLenth_Err();
                if ((bus->async.expect_len - bus->async.tx_cur ) > xfer) {
                    bus_priv->dma_err |= SPI_SLV_TX_DMA_ERR;
                }
                bus->async.tx_cur += xfer;
                bus->async.cur_remian = bus->async.len - bus->async.tx_cur;
                bus_priv->dma_ch_tx = NULL_PTR;
            }
            SchM_Exit_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
        }
    } else {
        /*
            This is the error of  dma channel handle.
        */
        SchM_Enter_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
        /* DMA_PENDING DMA_PAUSED */
        if (context == bus_priv->dma_ch_tx)
            bus_priv->dma_err |= SPI_SLV_TX_DMA_ERR;
        else if (context == bus_priv->dma_ch_rx)
            bus_priv->dma_err |= SPI_SLV_RX_DMA_ERR;

        disable_dma_req(bus, SPI_SLV_DMA_RX_ENABLE);
        disable_dma_req(bus, SPI_SLV_DMA_TX_ENABLE);
        SchM_Exit_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
    }

    SpiSlave_HwMainFunctionHandling(bus->idx);
}
#endif
/**
 * @brief DMA Error Handling Function
 *
 * Handles DMA-related errors when the SPI slave module is using DMA.
 *
 * @param bus Pointer to the SPI slave module
 */
static void sspi_slv_error_handle_for_dma(struct mld_spi_slv_module  *bus)
{
#if (SPI_SLV_ENABLE_DMA == STD_ON)
    uint32 xfer;
    Dma_ChannelConfigType *channel;
    /* width_type is used as a log2 byte-shift to convert bytes -> words */
    uint8 data_width_shift = bus->async.width_type;
    struct spi_slv_bus_priv *bus_priv = (struct spi_slv_bus_priv *)bus->priv;
    /* Snapshot-and-NULL the RX channel pointer inside the exclusive area so
       the DMA completion callback cannot release the same channel twice. */
    SchM_Enter_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
    channel = bus_priv->dma_ch_rx;
    if (NULL_PTR != channel) {
        bus_priv->dma_ch_rx = NULL_PTR;
        /* Exit exclusive area */
        SchM_Exit_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
        disable_dma_req(bus, SPI_SLV_DMA_RX_ENABLE);
        Dma_Stop(channel);
        xfer = Dma_GetXferBytes(channel, TRUE);
#if (SPI_SLV_NO_CACHEABLE_NEEDED == STD_OFF)
        if(0u != xfer){
            /* discard stale cache lines over the DMA-written RX region */
            Mcal_InvalidateCache(bus->async.prxdata.val, ROUNDUP(xfer, CACHE_LINE));
        }
#endif
        xfer >>= data_width_shift;
        bus->async.rx_cur = xfer;
        /* read the data of the remaining insufficient water level */
        spi_slv_read_remain(bus);
        Dma_ReleaseChannel(channel);
    }else{
        /* Exit exclusive area */
        SchM_Exit_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
    }
    /* Same take-ownership pattern for the TX channel. */
    SchM_Enter_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
    channel = bus_priv->dma_ch_tx;
    if (NULL_PTR != channel) {
        bus_priv->dma_ch_tx = NULL_PTR;
        SchM_Exit_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
        disable_dma_req(bus, SPI_SLV_DMA_TX_ENABLE);
        Dma_Stop(channel);
        xfer = Dma_GetXferBytes(channel, TRUE);
        xfer >>= data_width_shift;
        bus->async.tx_cur = xfer;
        Dma_ReleaseChannel(channel);
    }else{
        /* Exit exclusive area */
        SchM_Exit_SpiSlave_SPI_SLV_EXCLUSIVE_AREA_015();
    }
#endif
}
/**
 * @brief SPI Slave Transmission Current Feature Handling
 *
 *  Handles the current transmission characteristics
 *  based on the state of the SPI slave module.
 *
 * @param bus Pointer to the SPI slave module
 */
static void spi_slv_slave_tx_curr_quirks(struct mld_spi_slv_module *bus)
{
    /* Words still sitting in the TX FIFO were counted as sent but never
       left the controller; subtract them from the running TX counter. */
    uint32 unsent = GFV_SPI_FIFO_STAT_TX_FIFO_DPTR(readl(bus->base + SPI_FIFO_STAT_OFF));

    /* Nothing to adjust when no TX buffer was supplied. */
    if (!bus->async.ptxdata.val) {
        return ;
    }

    /* TX-only transfers (no RX buffer) additionally stage data in the
       output pipeline, which holds as much as the FIFO-width variant
       allows. */
    if (!bus->async.prxdata.val) {
        unsent += (bus->async.width_type == SPI_SLV_DATA_WIDTH_BYTE)
                      ? SPI_SLV_PIPE_LINE_SIZE_8B
                      : SPI_SLV_PIPE_LINE_SIZE_OB;
    }

    bus->async.tx_cur -= unsent;
}
#if (SPI_SLV_ENABLE_DMA == STD_ON)
/**
 * @brief Enable DMA Request
 *
 * Enables DMA requests on the given SPI slave module.
 *
 * @param bus Pointer to the SPI slave module
 */
static inline void enable_dma_req(struct mld_spi_slv_module  *bus)
{
    const struct spi_slv_bus_priv *cfg = (struct spi_slv_bus_priv *)bus->priv;
    uint32 ctrl = readl(bus->base + SPI_CTRL_OFF);

    /* Raise the RX DMA request only when a receive buffer exists AND
       RX-DMA is configured for this bus. */
    if ((0u != bus->async.prxdata.val) && (0u != (cfg->flags & SPI_SLV_DMA_RX_ENABLE))) {
        ctrl |= BM_SPI_CTRL_RX_DMA_EN;
    }

    /* Likewise for the TX side. */
    if ((0u != bus->async.ptxdata.val) && (0u != (cfg->flags & SPI_SLV_DMA_TX_ENABLE))) {
        ctrl |= BM_SPI_CTRL_TX_DMA_EN;
    }

    writel(ctrl, bus->base + SPI_CTRL_OFF);
}
#endif
/** *****************************************************************************************************
 * \brief This function attach the new vector to bus.
 *
 * \verbatim
 * Syntax             : void SpiSlv_Ip_HasTimingErrors(const struct mld_spi_slv_module  *bus)
 *
 * Service ID[hex]    : 0x00
 *
 * Sync/Async         : Synchronous
 *
 * Reentrancy         : Reentrant
 *
 * Parameters (in)    : bus - Pointer to mld bus object..
 *
 * Parameters (inout) : None
 *
 * Parameters (out)   : None
 *
 * Return value       : None
 *
 * Description        : .
 *
 *
 * \endverbatim
 * Traceability       : SWSR_SPI_007
 *******************************************************************************************************/
static uint8 SpiSlv_Ip_HasTimingErrors(const struct mld_spi_slv_module  *bus)
{
    uint8 errCode = 0;
    const struct spi_slv_dev_priv * devCfg = bus->dev_mode;
    uint32 irqStat = readl(bus->base + SPI_IRQ_STAT_OFF);

    /* Each check overwrites the previous result, so the effective
       reporting priority is: timeout (3) > baud-rate (2) > parity (1).
       A status bit only counts when the matching check is enabled in
       the device flags. */
    if ((0u != (irqStat & BM_SPI_IRQ_STAT_PARITY_BIT_ERR)) &&
        (0u != (devCfg->flags & F_SLV_DEV_PARITY_CHK)))
    {
        errCode = 1;
    }

    if ((0u != (irqStat & BM_SPI_IRQ_STAT_SCK_BAUD_ERR)) &&
        (0u != (devCfg->flags & F_SLV_DEV_BAUDRATE_CHK)))
    {
        errCode = 2;
    }

    if ((0u != (irqStat & BM_SPI_IRQ_STAT_TIMEOUT)) &&
        (0u != (devCfg->flags & F_SLV_DEV_TIMEOUT_CHK)))
    {
        errCode = 3;
    }

    return errCode;
}
/**
 * @brief Set SPI Interrupt
 *
 * Sets SPI interrupts based on the given mode and SPI slave module information.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @param mode SPI operating mode
 */
static void setup_spi_slv_irq(struct mld_spi_slv_module  *bus, uint32 mode)
{
    uint32 reg_val;
    const struct spi_slv_bus_priv  *bus_priv = bus->priv;

    /* start from the current mask; a cleared bit enables an interrupt,
       a set bit masks it (see the polling-mode handling below) */
    reg_val = readl(bus->base +SPI_IRQ_MASK_OFF);
    reg_val &= (uint32)(~BM_SPI_IRQ_MASK_PARITY_BIT_ERR);
    reg_val &= (uint32)(~BM_SPI_IRQ_MASK_SCK_BAUD_ERR);
    if (bus->async.ptxdata.val ) {
        /*
            if size < spi_slv_FIFO_SIZE only fream done will into irq
        */
        reg_val &= (uint32)(~BM_SPI_IRQ_MASK_TX_FIFO_PRE_EMPTY);
        reg_val &= (uint32)(~BM_SPI_IRQ_MASK_TX_FIFO_UDR);
    }

    if (bus->async.prxdata.val) {
        /*
            fifo remain data this case used fream done to read
        */
        reg_val &= (uint32)(~BM_SPI_IRQ_MASK_RX_FIFO_PRE_FULL);
        reg_val &= (uint32)(~BM_SPI_IRQ_MASK_RX_FIFO_OVR);
        reg_val &= (uint32)(~BM_SPI_IRQ_MASK_FRM_DONE);
    }

    /* in DMA mode the DMA engine services the FIFOs, so re-mask the
       FIFO threshold and frame-done interrupts enabled above */
    if((uint32)MODE_DMA == mode){
        reg_val |= BM_SPI_IRQ_MASK_TX_FIFO_PRE_EMPTY;
        reg_val |= BM_SPI_IRQ_MASK_RX_FIFO_PRE_FULL;
        reg_val |= BM_SPI_IRQ_MASK_FRM_DONE;
    }
    /*
        undef size mode workaround step
        1.enable nss valid irq use to enable cs unvalid irq.
        2.enable nss unvalid irq in cs selected irq handler service.
        3.handling workaround in cs inactiveed irq handler service.

        worksround step1:
        enable cs inactive irq disable cs active irq

        undef size mode enables cs active interrupt to enable nss invalid interrupt
    */
    if (bus->state & SPI_SLV_STATE_IS_UNS_EN) {
        reg_val &= (uint32)(~BM_SPI_IRQ_MASK_SLV_NSS_VLD);
        reg_val |= BM_SPI_IRQ_MASK_SLV_NSS_INVLD;
        bus->state |= SPI_SLV_STATE_EN_VLD_CS;

    }

    /*
        Mask interrupt if it is in polling mode
    */
    if ((bus_priv->flags & SPI_SLAVE_INTERRUPT_MODE) == 0u) {
        reg_val = 0xFFFFFFFF;
    }
    /* clear stale status before unmasking so no spurious irq fires */
    writel(0xFFFFFFFF, bus->base + SPI_IRQ_STAT_OFF);
    /* enable pre irqs*/
    writel(reg_val, bus->base + SPI_IRQ_MASK_OFF);
}
/**
 * @brief Set SPI Slave Interrupt Mask Bits
 *
 * Sets the interrupt mask register for the SPI slave based on the given interrupt mask bits.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @param irq_mask Interrupt mask bits
 */
static void sspi_slv_setup_irq_mask(struct mld_spi_slv_module  *bus, \
                                uint32 irq_mask)
{
    uint32 reg_val = readl(bus->base + SPI_IRQ_MASK_OFF);

    /* mask (disable) the TX-FIFO pre-empty interrupt */
    if (irq_mask &SPI_SLV_TX_WRITE_REQ)
        reg_val |= BM_SPI_IRQ_MASK_TX_FIFO_PRE_EMPTY;

    /* NOTE(review): this ORs the COMPLEMENT of the NSS-invalid mask bit,
       which sets every mask bit EXCEPT SLV_NSS_INVLD (i.e. masks nearly
       all interrupts while leaving the NSS-invalid bit unchanged).  If
       the intent was to mask only the NSS-invalid interrupt -- parallel
       to the branch above -- this should read
       `reg_val |= BM_SPI_IRQ_MASK_SLV_NSS_INVLD;`.  Confirm against the
       transfer-finish handling before changing. */
    if (irq_mask & SPI_SLV_TRASPORT_FINISH)
        reg_val |= (~BM_SPI_IRQ_MASK_SLV_NSS_INVLD);


    SPI_SLV_DBG("%s reg:%08x\n", __FUNCTION__, reg_val);
    /* disable pre irqs*/
    writel(reg_val, bus->base + SPI_IRQ_MASK_OFF);
}
/**
 * @brief Update SPI Interrupt Mask
 *
 * Updates the SPI interrupt mask based on the given set and clear masks.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @param set_mask Mask to set
 *
 * @param clr_mask Mask to clear
 *
 */
static inline void spi_slv_irq_update_mask(struct mld_spi_slv_module  *bus, \
                                       uint32 set_mask, uint32 clr_mask)
{
    const struct spi_slv_bus_priv  *cfg = bus->priv;
    uint32 mask = readl(bus->base + SPI_IRQ_MASK_OFF);

    /* Clear first, then set, so set_mask wins on any overlapping bits. */
    mask = (mask & (uint32)(~clr_mask)) | set_mask;

    /* Keep both chip-select interrupts masked when CS IRQs are not
       enabled for this bus. */
    if (0u == (cfg->flags & SPI_SLV_CS_IRQ_ENABLE)) {
        mask |= BM_SPI_IRQ_MASK_SLV_NSS_VLD;
        mask |= BM_SPI_IRQ_MASK_SLV_NSS_INVLD;
    }

    SPI_SLV_DBG("%s reg:%08x\n", __FUNCTION__, mask);
    /* update pre irqs */
    writel(mask, bus->base + SPI_IRQ_MASK_OFF);
}
#if (SPI_SLV_ENABLE_DMA == STD_ON)
/**
* @brief  Configure SPI DMA Transfer Configures SPI DMA transfer based on the given parameters.
* @param bus Pointer to the SPI slave module
*
* @param ptxdata Pointer to the transmit data buffer
*
* @param prxdata Pointer to the receive data buffer
*
* @param size The size of the data to be transferred
*
* @param width FIFO width
*
* @return Configuration result, returns 0 on success, negative value on failure
*/
static sint32 setup_spi_slv_dma(struct mld_spi_slv_module  *bus, uint32 ptxdata, \
                            uint32 prxdata, uint16 size, enum fifo_width width)
{

    sint32 ret = 0;
    uint32 uds_add_lenth = 0;
    Dma_DeviceConfigType dmaCfgs;
    Std_ReturnType dmaTxRetValue = E_OK;
    Std_ReturnType dmaRxRetValue = E_OK;
    /* 'width' is the log2 of bytes per FIFO word (matches Dma_WidthTable) */
    uint32  dmaXferBytes = size << width;
    Dma_InstanceConfigType dma_static_cfg;
    struct spi_slv_bus_priv *priv = bus->priv;
    Dma_ChannelConfigType *chan_rx = NULL_PTR, *chan_tx = NULL_PTR;
    Dma_BeatSizeType dmaWidth = Dma_WidthTable[width];
    SpiSLvFaultInj_SpiSLv_Dma_Align_Err();
#if (SPI_SLV_NO_CACHEABLE_NEEDED == STD_OFF)
    /* cache maintenance operates on whole lines, so both buffers must be
       cache-line aligned when caching is in play */
    if (!SPISLV_IS_ALIGNED(prxdata, CACHE_LINE) || !SPISLV_IS_ALIGNED(ptxdata, CACHE_LINE)) {
        ret = -2;
    }
    else
#endif
    {
        if (0u != ptxdata) {
            /* Get Dma channel form Resm */
            if (E_OK == Dma_GetConfigParams(Dma_ResTable[bus->idx], DMA_SPI_TX, DMA_MODULE_SPI, &dma_static_cfg)) {
                chan_tx = Dma_RequestChannelWithId(dma_static_cfg.controller, dma_static_cfg.channelId);
            }
            /* If Get Channel Fail ,the transmits need stop */
            SpiSLvFaultInj_SpiSLv_Ip_ConfigDmaChannelErr();
            if(NULL_PTR != chan_tx)
            {
                /* resume from the already-transmitted word offset */
                ptxdata = ptxdata + (bus->async.tx_cur << width);
                SPI_SLV_DBG("bus%p add to tx %p\n", bus, chan_tx);
                priv->dma_ch_tx = (void *)chan_tx;
                chan_tx->context = (void *)bus;
                chan_tx->irqCallback = (Dma_IrqHandle)&dma_handle;

                (void)Dma_InitConfigChannel(&dmaCfgs);

                /* memory -> TX FIFO: incrementing source, fixed FIFO dest */
                dmaCfgs.direction = DMA_MEMORY_TO_DEVICE;
                dmaCfgs.processMode = DMA_INTERRUPT;
                /*
                    It is necessary to support address conversion for memories such as CRAM and TCM.
                */
                dmaCfgs.srcAddress  = Mcal_AddressConvert(ptxdata);
                dmaCfgs.dstAddress = bus->base + SPI_TX_FIFO_DATA_OFF;
                dmaCfgs.srcBusWidth = dmaWidth;
                dmaCfgs.dstBusWidth = dmaWidth;
                dmaCfgs.srcMaxBurst = DMA_BURST_LENGTH_8;
                dmaCfgs.dstMaxBurst = DMA_BURST_LENGTH_8;
                dmaCfgs.srcPortSelect = DMA_PORT_AXI64;
                dmaCfgs.dstPortSelect = DMA_PORT_AHB32;
                dmaCfgs.srcIncDirection = DMA_BURST_INCREMENTAL;
                dmaCfgs.dstIncDirection = DMA_BURST_FIXED;
                dmaCfgs.loopMode = DMA_LOOP_MODE_2;
                dmaCfgs.flowControl = DMA_DIR_MEMORY_TO_DEVICE;
                dmaCfgs.transaction = DMA_DEVICE;

                dmaCfgs.transferMode = DMA_TRANSFER_MODE_SINGLE;
                dmaCfgs.linkListTriggerMode = DMA_TRIGGER_BY_HARDWARE;
                dmaCfgs.switchControl = DMA_SWT_EVT_CTL_STOP_WTH_INT;

                (void)Dma_ConfigChannel(chan_tx, &dmaCfgs);
                /*
                        Undef size mode need to give more data to tx fifo to avoid udr of ip bug.
                    always give tx FIFO more 8bytes data, even in the case of 32bit data bit width,
                    it can ensure that there are two redundant data in txfifo to avoid tx fifo underflow.
                */
                if(0u != (bus->state & SPI_SLV_STATE_IS_UNS_EN)){
                    uds_add_lenth = 8;
                }
                dmaTxRetValue = Dma_PrepareTransmission(chan_tx, dmaXferBytes + uds_add_lenth);

            }else{
                ret = -3;
            }
        }
        if (0u != prxdata) {
            if (E_OK == Dma_GetConfigParams(Dma_ResTable[bus->idx], DMA_SPI_RX, DMA_MODULE_SPI, &dma_static_cfg)) {
                chan_rx = Dma_RequestChannelWithId(dma_static_cfg.controller, dma_static_cfg.channelId);
            }
            SpiSLvFaultInj_SpiSLv_Ip_ConfigDmaChannelErr();
            if (NULL_PTR != chan_rx)
            {
                /* resume from the already-received word offset */
                prxdata = prxdata + (bus->async.rx_cur << width);
                SPI_SLV_DBG("bus%p add to rx %p\n", bus, chan_rx);
                priv->dma_ch_rx = (void *)chan_rx;
                chan_rx->context = (void *)bus;
                chan_rx->irqCallback = (Dma_IrqHandle)&dma_handle;

                (void)Dma_InitConfigChannel(&dmaCfgs);

                /* RX FIFO -> memory: fixed FIFO source, incrementing dest */
                dmaCfgs.direction = DMA_DEVICE_TO_MEMORY;
                dmaCfgs.processMode = DMA_INTERRUPT;
                dmaCfgs.srcAddress  = bus->base + SPI_RX_FIFO_DATA_OFF;
                /*
                    It is necessary to support address conversion for memories such as CRAM and TCM.
                */
                dmaCfgs.dstAddress = Mcal_AddressConvert(prxdata);
                dmaCfgs.srcBusWidth = dmaWidth;
                dmaCfgs.dstBusWidth = dmaWidth;
                dmaCfgs.srcMaxBurst = DMA_BURST_LENGTH_8;
                dmaCfgs.dstMaxBurst = DMA_BURST_LENGTH_8;
                dmaCfgs.srcPortSelect = DMA_PORT_AHB32;
                dmaCfgs.dstPortSelect = DMA_PORT_AXI64;
                dmaCfgs.srcIncDirection = DMA_BURST_FIXED;
                dmaCfgs.dstIncDirection = DMA_BURST_INCREMENTAL;
                dmaCfgs.loopMode = DMA_LOOP_MODE_2;
                dmaCfgs.flowControl = DMA_DIR_DEVICE_TO_MEMORY;
                dmaCfgs.transferMode = DMA_TRANSFER_MODE_SINGLE;
                dmaCfgs.linkListTriggerMode = DMA_TRIGGER_BY_HARDWARE;
                dmaCfgs.transaction = DMA_DEVICE;
                dmaCfgs.switchControl = DMA_SWT_EVT_CTL_STOP_WTH_INT;

                (void)Dma_ConfigChannel(chan_rx, &dmaCfgs);

                dmaRxRetValue = Dma_PrepareTransmission(chan_rx, dmaXferBytes);

            }else{
                ret = -4;
            }
        }
    }
    SpiSLvFaultInj_SpiSLv_Dma_Init_Err();
    /* on any setup failure, roll back whichever channels were acquired */
    if (0 != ret || E_OK != dmaRxRetValue || E_OK != dmaTxRetValue) {

        if (NULL_PTR != chan_rx) {
            Dma_Stop(chan_rx);
            Dma_ReleaseChannel(chan_rx);
            priv->dma_ch_rx = NULL_PTR;
        }

        if (NULL_PTR != chan_tx) {
            Dma_Stop(chan_tx);
            Dma_ReleaseChannel(chan_tx);
            priv->dma_ch_tx = NULL_PTR;
        }

        SPI_SLV_DBG("dma opt error:%d\n", ret);
    }
    else
    {
        if (NULL_PTR != chan_rx) {
            /*
                Read Simultaneous Clear Transfer Count Register
            */
#if (SPI_SLV_NO_CACHEABLE_NEEDED == STD_OFF)
            /* flush the RX region before DMA writes it -- presumably to
               prevent dirty-line writebacks corrupting DMA data; confirm
               Mcal_FlushCache semantics */
            Mcal_FlushCache(prxdata, ROUNDUP(dmaXferBytes, CACHE_LINE));
#endif
            Dma_GetXferBytes(chan_rx, TRUE);
            Dma_Start(chan_rx);
        }
        if (NULL_PTR != chan_tx) {
            /*
                Read Simultaneous Clear Transfer Count Register
            */
#if (SPI_SLV_NO_CACHEABLE_NEEDED == STD_OFF)
            /* clean the TX region so DMA reads up-to-date data from memory */
            Mcal_CleanCache(ptxdata, ROUNDUP(dmaXferBytes, CACHE_LINE));
#endif
            Dma_GetXferBytes(chan_tx, TRUE);
            Dma_Start(chan_tx);
        }
        priv->dma_err = 0;
    }

    return ret;
}
#endif
/**
 *
 * @brief Generate SPI Slave Command
 *
 * Generates an SPI slave command based on the given SPI slave module and configuration information.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @param config Pointer to the private configuration of the SPI slave device
 *
 * @return Returns 0 on success, non-zero value on failure
 *
*/
static sint32 generate_cmd(struct mld_spi_slv_module  *bus, const struct spi_slv_dev_priv *config)
{
    uint32 cmd = 0;
    uint32 reg_val;
    /* Bit order: transmit LSB first when the device configuration requests it. */
    if (config->flags & F_SLV_DEV_LSB)
        cmd |= BM_SPI_CMD_CTRL_LSB;
    /* No RX buffer supplied: mask the receive path for this command. */
    if (!bus->async.prxdata.val)
        cmd |= BM_SPI_CMD_CTRL_RX_MASK;
    /*
        Clock polarity/phase selection; default mode is mode 0 (no CPOL/CPHA bits set).
    */
    switch (config->mode) {
    case MODE0:
        break;

    case MODE1:
        cmd |= BM_SPI_CMD_CTRL_SPI_CPHA;
        break;

    case MODE2:
        cmd |= BM_SPI_CMD_CTRL_SPI_CPOL;
        break;

    case MODE3:
        cmd |= BM_SPI_CMD_CTRL_SPI_CPHA | BM_SPI_CMD_CTRL_SPI_CPOL;
        break;
    }
    /* frame size hardware limit: a single command moves at most SPI_SLV_FREAM_SIZE_MAX units */
    if (bus->async.len > SPI_SLV_FREAM_SIZE_MAX) {
        /* frame size > 1024: clamp this command to the hardware maximum */
        cmd |= FV_SPI_CMD_CTRL_FRAM_SIZE(SPI_SLV_FREAM_SIZE_MAX - 1);
        bus->async.expect_len = SPI_SLV_FREAM_SIZE_MAX;
    } else {
        /* frame size <= 1024: the whole transfer fits in one command */
        cmd |= FV_SPI_CMD_CTRL_FRAM_SIZE(bus->async.len - 1);
        bus->async.expect_len = bus->async.len;

        /* LAST flag: this command completes the transfer (limit is 1024 per transmit) */
        cmd |= BM_SPI_CMD_CTRL_LAST;
    }
    /*PRQA S 1891,1860,2985 2*/
    cmd |= FV_SPI_CMD_CTRL_WORD_SIZE( config->width - 1);
    cmd |= FV_SPI_CMD_CTRL_NSS(config->nss_idx);

    /* NSS indexes with bit 2 set (idx >= 4) additionally need the NSS_BIT2 select bit */
    if(!!(config->nss_idx & 0x4))
    {
        cmd |= BM_SPI_CMD_CTRL_NSS_BIT2;
    }

    /* Program the baud-rate check value used for SCK supervision. */
    writel(config->baudratechk,bus->base + SPI_SCK_CHK_OFF);
    /*
            When the sclk is disturbed, the slave may enter the REAPT state.
        At this time, the newly written transmission CMD cannot take effect, so the
        slave state cannot return to the normal state; therefore, in order to ensure
        the robustness of the slave, a reset is performed before each transmission
        starts. This allows recovery from errors, at the cost of us-level time consumption
        before starting the transmission.
    */
    sspi_slv_recover(bus);
    /*
        The workaround will be enabled when both conditions are met at the same time:
            1. chip version > 1.0.
            2. Transmission length greater than SPI_SLV_FREAM_SIZE_MAX.
    */
    if (bus->async.len > SPI_SLV_FREAM_SIZE_MAX)
    {
#if ENABLE_SLAVE_MCS_TRIG == STD_ON
        /* Multiple CS is not supported for undefined lengths */
        return -1;
#endif /* ENABLE_SLAVE_MCS_TRIG == STD_ON */
    }
#if ENABLE_SLAVE_MCS_TRIG == STD_OFF
    /* Enable the undefined-size (UNS) transfer feature in the control register. */
    reg_val = readl(bus->base + SPI_CTRL_OFF);
    writel(reg_val | BM_SPI_CTRL_SLV_UNS_SIZE_EN,bus->base + SPI_CTRL_OFF);
    bus->async.expect_len = bus->async.len;
    /* slave must have only one vector item, meaning that next == NULL_PTR always */
    SPI_SLV_DBG("enable uns feature \n");
    /* software undefined-size mode */
    bus->state |= SPI_SLV_STATE_IS_UNS_EN;
#endif
    SpiSLvFaultInj_SpiSLv_Uns_Disable();
    /* Byte order: big-endian devices need the hardware byte swap enabled (BE or LE). */
    reg_val = readl(bus->base + SPI_CTRL2_OFF);
    if (config->flags & F_SLV_DEV_IS_BE)
    {
        reg_val |= BM_SPI_CTRL2_SWAP;
    }
    else
    {
        reg_val &= (uint32)~BM_SPI_CTRL2_SWAP;
    }
    /* TX DMA requests are kept off while undefined-size mode is active. */
    if(0u != (bus->state  & SPI_SLV_STATE_IS_UNS_EN))
    {
        reg_val &= (uint32)~BM_SPI_CTRL2_TX_DMA_REQ_EN;
    }
    else
    {
        reg_val |= BM_SPI_CTRL2_TX_DMA_REQ_EN;
    }
    writel(reg_val, bus->base + SPI_CTRL2_OFF);
    /* Timeout: replace the timeout field with the configured value. */
    reg_val = readl(bus->base + SPI_CTRL_OFF);
    reg_val &= (uint32)~FM_SPI_CTRL_TIMEOUT;
    reg_val |= FV_SPI_CTRL_TIMEOUT(config->timeout);
    writel(reg_val, bus->base + SPI_CTRL_OFF);

    reg_val = 0;

    /* Parity: enable RX/TX parity checking when parity is configured at all ... */
    if(config->parity != (uint8)SPI_SLV_P_NONE)
    {
        reg_val |= BM_PTY_BIT_CTL_RX_PARITY_EN|BM_PTY_BIT_CTL_TX_PARITY_EN;
    }
    /* ... and set the TYPE bits only for even parity. */
    if(config->parity == (uint8)SPI_SLV_P_EVEN)
    {
        reg_val |= BM_PTY_BIT_CTL_RX_PARITY_TYPE|BM_PTY_BIT_CTL_TX_PARITY_TYPE;
    }
    /* else branch is not needed: TYPE bits stay clear for the non-even setting */
    writel(reg_val,bus->base + PTY_BIT_CTL_OFF);
    /* enable the SPI controller */
    writel(BM_SPI_EN_ENABLE, bus->base + SPI_EN_OFF);
    /* push the command word into the command FIFO */
    writel(cmd, bus->base + SPI_TX_FIFO_CMD_OFF);
    /*
        Delaying the enabling of SPI is to prevent abnormal SPI Slave transmission
        caused by external signal interference during the initial power-up period
        before any transmission takes place. Therefore, the enabling of SPI must
        be postponed to this point.
    */
    SPI_SLV_DBG("first cmd %08x\n", cmd);

    return 0;
}
/**
 * @description: SPI slave vector transmission operation. Executes a vector transmission for
                the SPI slave, performing the transfer according to the given SPI bus, device,
                and asynchronous transmission structure.
 * @param bus Pointer to the SPI slave bus structure
 *
 * @param dev Pointer to the SPI slave device structure
 *
 * @param transmit Pointer to the SPI slave asynchronous transmission structure
 *
 * @param timeout Timeout duration (not evaluated by the current implementation)
 *
 * @param mode Operation type (MODE_NORMAL, MODE_IRQ or MODE_DMA)
 *
 * @return 0 on success, a negative error code on failure
 */
static sint32 sspi_slv_vector_transmission_ops(struct mld_spi_slv_module  *bus, \
        struct mld_spi_slv_device *dev, struct mld_spi_slv_async *transmit, \
        uint32 timeout, enum spi_slv_ops_type mode)
{
    uint32 irq_s;
    sint32 ret = 0;
#if (SPI_SLV_ENABLE_DMA == STD_ON)
    uint16 cnt = 0;
#endif

    /* dev and timeout are part of the ops interface but are not evaluated here */
    (void)dev;
    (void)timeout;

    if (NULL_PTR == bus->dev_mode) {
        ret =  -2;
    } else {
        /* sync the caller's vector item into the bus-owned async context */
        next_async_item_cb(transmit, &bus->async);
        /*PRQA S 2992,2996 2*/
        if (0 != generate_cmd(bus, (struct spi_slv_dev_priv *)bus->dev_mode)) {
            ret = -3;
        }
    }
    SpiSLvFaultInj_SpiSLv_ops_Err();
    if(0 == ret){
        if (mode == MODE_NORMAL) {
            /*
                device mode always blocks: poll FIFOs until the transfer completes
            */
            SPI_SLV_DBG("Normal mode \n");
            /* bugfix: second argument previously printed ptxdata twice instead of prxdata */
            SPI_SLV_DBG("tx:%p rx:%p\n", bus->async.ptxdata.val, bus->async.prxdata.val);


            while ((bus->async.ptxdata.val && bus->async.tx_cur < bus->async.expect_len)  \
                || (bus->async.prxdata.val && bus->async.rx_cur < bus->async.expect_len)) {
                spi_slv_readwrite_remain(bus);
                SpiSLvFaultInj_SpiSLv_Synctrans_callout();
                irq_s = readl(bus->base + SPI_IRQ_STAT_OFF);
#if ENABLE_SLAVE_MCS_TRIG == STD_OFF
                if(BM_SPI_IRQ_STAT_SLV_NSS_INVLD & irq_s){
                    spi_slv_read_remain(bus);
                    spi_slv_slave_tx_curr_quirks(bus);

                    /*
                        update expect_len and len to make the transfer end
                    */
                    if (bus->state & SPI_SLV_STATE_BUSY_RX)
                    {
                        bus->async.cur_remian = 0;
                        bus->async.expect_len = bus->async.rx_cur;
                    }
                    else if (bus->state & SPI_SLV_STATE_BUSY_TX)
                    {
                        bus->async.cur_remian = 0;
                        bus->async.expect_len = bus->async.tx_cur;
                    }/* else is not needed */

                    bus->async.len = bus->async.expect_len;
                }
#endif
                SpiSLvFaultInj_SpiSLv_Ip_ovrudrErr();

                /* TX underrun is only an error while data is still expected to be written */
                if (irq_s & (BM_SPI_IRQ_STAT_TX_FIFO_UDR)) {
                    if (bus->async.tx_cur < bus->async.expect_len) {
                        ret = -4;
                        break;
                    }
                }
                if (irq_s & BM_SPI_IRQ_STAT_RX_FIFO_OVR ) {
                    ret = -5;
                    break;
                }
                if (0u != SpiSlv_Ip_HasTimingErrors(bus))
                {
                    ret = -8;
                    break;
                }
            }
            /* bugfix: third argument previously printed rx_cur instead of the length */
            SPI_SLV_DBG("w %d r %d l %d\n", bus->async.tx_cur, bus->async.rx_cur, bus->async.len);
        } else {
            SPI_SLV_DBG("Irq mode\n");
            /*
                slave needs the tx fifo to be non-empty to avoid a too-early udr
            */
            if (mode != MODE_DMA) {
                spi_slv_write_remain(bus);
            }
            /*
                    irq mode and dma mode both set up the spi slave irqs.
            */
            setup_spi_slv_irq(bus, (uint32)mode);

            if (mode == MODE_DMA) {
#if (SPI_SLV_ENABLE_DMA == STD_ON)
                SPI_SLV_DBG("DMA mode \n");

                /* remaining transfer units: prefer the rx side when an rx buffer exists */
                if (bus->async.prxdata.val) {
                    cnt = bus->async.expect_len - bus->async.rx_cur;
                } else {
                    cnt = bus->async.expect_len - bus->async.tx_cur;
                }
                /*
                    dma mode: set up the dma channels
                */
                ret = setup_spi_slv_dma(bus, bus->async.ptxdata.val, bus->async.prxdata.val, \
                                    cnt, bus->async.width_type);

                if (ret >= 0) {
                    enable_dma_req(bus);
                } else {
                    /*
                        setup dma failed: mask all irqs to avoid sync mode enabling irqs.
                    */
                    spi_slv_irq_update_mask(bus, 0xFFFFFFFF, 0);
                    ret =  -6;
                }
#else
                /*
                    dma support not compiled in: mask all irqs to avoid sync mode enabling irqs.
                */
                spi_slv_irq_update_mask(bus, 0xFFFFFFFF, 0);
                ret =  -7;
#endif
            }

        }
    }
    return ret;
}

/**
 * @description: Synchronously send and receive data; blocks until the transfer is complete.
 *
 * @param bus Pointer to the SPI slave bus structure
 *
 * @param dev Pointer to the SPI slave device structure
 *
 * @param vector Pointer to the SPI slave asynchronous transmission structure
 *
 * @param timeout Timeout duration
 *
 * @return 0 on success, a negative error code on failure
 */
static sint32 sspi_slv_vector_transmit_receive(struct mld_spi_slv_module  *bus, \
        struct mld_spi_slv_device *dev, struct mld_spi_slv_async *vector, uint32 timeout)
{
    /* Blocking transfer: run the shared transmission engine in normal (polling) mode. */
    sint32 status;

    status = sspi_slv_vector_transmission_ops(bus, dev, vector, timeout, MODE_NORMAL);

    return status;
}
/**
 * @description: Send and receive data in interrupt mode; returns immediately, the
 *               transfer completes from interrupt context.
 *
 * @param bus Pointer to the SPI slave bus structure
 *
 * @param dev Pointer to the SPI slave device structure
 *
 * @param vector Pointer to the SPI slave asynchronous transmission structure
 *
 * @return 0 on success, a negative error code on failure
 */
static sint32 sspi_slv_vector_transmit_receive_irq(struct mld_spi_slv_module  *bus, \
        struct mld_spi_slv_device *dev, struct mld_spi_slv_async *vector)
{
    /* Interrupt-driven transfer: delegate to the shared engine with a zero timeout. */
    sint32 status;

    status = sspi_slv_vector_transmission_ops(bus, dev, vector, 0, MODE_IRQ);

    return status;
}
/**
 * @description: Same as sspi_slv_vector_transmit_receive_irq(); the difference is
 *               that the data is moved by DMA instead of CPU interrupt handling.
 *
 * @param bus Pointer to the SPI slave bus structure
 *
 * @param dev Pointer to the SPI slave device structure
 *
 * @param vector Pointer to the SPI slave asynchronous transmission structure
 *
 * @return 0 on success, a negative error code on failure
 */
static sint32 sspi_slv_vector_transmit_receive_dma(struct mld_spi_slv_module  *bus,
        struct mld_spi_slv_device *dev, struct mld_spi_slv_async *vector)
{
    /* DMA-driven transfer: delegate to the shared engine with a zero timeout. */
    sint32 status;

    status = sspi_slv_vector_transmission_ops(bus, dev, vector, 0, MODE_DMA);

    return status;
}
/**
 * @brief Get SPI Slave Status
 *
 *  Reads the IRQ status register of the SPI slave device, derives the driver
 *  status flags, and stores the raw register value through the output pointer.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @param reg_val Pointer to the variable that receives the raw IRQ status register value
 *
 * @return Returns a 32-bit unsigned integer of SPI_SLV_* flags describing the slave status
 */
static uint32 sspi_slv_get_status(struct mld_spi_slv_module  *bus, uint32 *reg_val)
{
    uint32 ret = 0;
#if (SPI_SLV_ENABLE_DMA == STD_ON)
    uint32 xfer = 0;
    /* shift converting DMA byte counts into data-width units (width_type semantics) */
    uint8 data_width_shift = bus->async.width_type;
#endif
    struct spi_slv_bus_priv *bus_priv = (struct spi_slv_bus_priv *)bus->priv;
    uint32 irq_s = readl(bus->base + SPI_IRQ_STAT_OFF);
    /* hand the raw IRQ status back to the caller (param renamed from typo reg_vel) */
    *reg_val = irq_s;

    SPI_SLV_DBG("%p sta:%08x msk:%08x \n", bus, irq_s,readl(bus->base + SPI_IRQ_MASK_OFF));
    /*
        Default: both fifo directions need servicing.
    */
    ret =SPI_SLV_TX_FIFO_WRITE |SPI_SLV_RX_FIFO_READ;

    SpiSLvFaultInj_SpiSLv_Ip_ovrudrErr();

    if (irq_s & BM_SPI_IRQ_STAT_TX_FIFO_UDR) {
#if (SPI_SLV_ENABLE_DMA == STD_ON)
        /*
            workaround: spi slave mode raises a udr interrupt after the transfer is
            over for E3 1.0 and 1.1, so refresh tx_cur from the DMA engine first
        */
        if (bus->state & SPI_SLV_STATE_DMA_TX && bus_priv->dma_ch_tx) {
            xfer = Dma_GetXferBytes(bus_priv->dma_ch_tx, FALSE);
            bus->async.tx_cur = xfer >> data_width_shift;
        }
#endif

        /* only an underrun before all expected data was written counts as an error */
        if (bus->async.tx_cur < bus->async.expect_len) {
            ret |=SPI_SLV_TX_FIFO_UDR;
            SPI_SLV_DBG("Bus:%d UDF\n", bus->idx);
        }
    }

    if (irq_s & BM_SPI_IRQ_STAT_RX_FIFO_OVR) {
        ret |= SPI_SLV_RX_FIFO_OVR;
        SPI_SLV_DBG("Bus:%d OVR\n", bus->idx);
    }
    /* parity / timing supervision errors */
    if (0u != SpiSlv_Ip_HasTimingErrors(bus))
    {
        ret |= SPI_SLV_PARITY_ERR;
    }
    /*PRQA S 1881 1*/
    if ((bus->state & SPI_SLV_STATE_IS_UNS_EN) == SPI_SLV_STATE_IS_UNS_EN ) {
        if ((irq_s & BM_SPI_IRQ_STAT_SLV_NSS_VLD) && (bus->state & SPI_SLV_STATE_EN_VLD_CS)) {
            /*
                workaround step2:
                enable cs inactive irq, disable cs active irq
            */
            spi_slv_irq_update_mask(bus, BM_SPI_IRQ_MASK_SLV_NSS_VLD, BM_SPI_IRQ_MASK_SLV_NSS_INVLD);
            bus->state &= (uint32)(~SPI_SLV_STATE_EN_VLD_CS);
            bus->state |= SPI_SLV_STATE_CS_INVLD_EN;
            /*
                cs invalid irq is not cleared here: chip select valid and invalid can be
            so close together that clearing now would drop the CS invalid interrupt
            */
            *reg_val &= (uint32)(~BM_SPI_IRQ_STAT_SLV_NSS_INVLD);
            SPI_SLV_DBG("bus:%d cs vld;dis vld and enable invld\n", bus->idx);
        }

        else if (irq_s & BM_SPI_IRQ_STAT_SLV_NSS_INVLD && (bus->state & SPI_SLV_STATE_CS_INVLD_EN)) {

            /*
                workaround step3:
                  disable cs inactive irq.
                  handle spi reset and dma resource release related processing
            */
            spi_slv_irq_update_mask(bus, BM_SPI_IRQ_MASK_SLV_NSS_INVLD, 0);

            bus->state &= (uint32)(~SPI_SLV_STATE_CS_INVLD_EN);
#if (SPI_SLV_ENABLE_DMA == STD_ON)
            if (bus_priv->dma_ch_rx) {
                disable_dma_req(bus,SPI_SLV_DMA_RX_ENABLE);
                Dma_Stop(bus_priv->dma_ch_rx);
                xfer = Dma_GetXferBytes(bus_priv->dma_ch_rx, TRUE);
#if (SPI_SLV_NO_CACHEABLE_NEEDED == STD_OFF)
                Mcal_InvalidateCache(bus->async.prxdata.val, ROUNDUP(xfer,CACHE_LINE));
#endif
                xfer >>= data_width_shift;
                bus->async.rx_cur = xfer;
                Dma_ReleaseChannel(bus_priv->dma_ch_rx);
                bus_priv->dma_ch_rx = NULL_PTR;
            }

            if (bus_priv->dma_ch_tx) {
                disable_dma_req(bus,SPI_SLV_DMA_TX_ENABLE);
                Dma_Stop(bus_priv->dma_ch_tx);
                xfer = Dma_GetXferBytes(bus_priv->dma_ch_tx, TRUE);
                Dma_ReleaseChannel(bus_priv->dma_ch_tx);
                xfer >>= data_width_shift;
                bus->async.tx_cur = xfer;
                bus_priv->dma_ch_tx = NULL_PTR;
            }
#endif
            spi_slv_read_remain(bus);
            spi_slv_slave_tx_curr_quirks(bus);
            /*
                update expect_len and len to make the transfer end.
                if the cs signal is unstable, the transmission will be over
                and the data length is 0.
            */
            if (bus->state & SPI_SLV_STATE_BUSY_RX)
            {
                bus->async.cur_remian = 0;
                bus->async.expect_len = bus->async.rx_cur;
            }
            else if (bus->state & SPI_SLV_STATE_BUSY_TX)
            {
                bus->async.cur_remian = 0;
                bus->async.expect_len = bus->async.tx_cur;
            }/* else is not needed */

            bus->async.len = bus->async.expect_len;

            bus->state &= (uint32)(~SPI_SLV_STATE_IS_UNS_EN);
            SPI_SLV_DBG("bus:%d cs invld disable\n", bus->idx);
            /*
                slave mode undefined size: cs inactive means the transmission is over --- for irq mode.
            */
            ret |= SPI_SLV_RX_FIFO_READ |SPI_SLV_TX_FIFO_WRITE; /*  */
        }
    }
    /*
        or any pending dma error into the returned flags
    */
    ret |= bus_priv->dma_err;

    SPI_SLV_DBG("%s ret:%08x\n", __FUNCTION__, ret);
    return ret;
}
/**
 * @brief Clear SPI Interrupt Status
 *
 *  Clears the interrupt status of the SPI slave module by specifying the clear flag bits.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @param clr Clear flag bits
*/
static void sspi_slv_clr_irq_state(struct mld_spi_slv_module  *bus, uint32 clr)
{
    /* Trace which status bits are being acknowledged, then write them back
       to the IRQ status register to clear them. */
    SPI_SLV_DBG("%s bus%d:%08x\n", __FUNCTION__, bus->idx, clr);
    writel(clr, bus->base + SPI_IRQ_STAT_OFF);
}
/**
 * @brief Get SPI Slave TX Transfer Count
 *
 *  Reads the status register of the SPI slave module and returns the
 *  undefined-size mode TX transfer count field.
 *
 * @param bus Pointer to the SPI slave module
 *
 * @return TX transfer count reported by the hardware status register
*/
static uint16 sspi_slv_get_tx_trans_state(const struct mld_spi_slv_module  *bus)
{
    return GFV_SPI_STAT_SLV_UNS_TX_NUM(readl(bus->base + SPI_STAT_OFF));
}
#define SPI_SLV_STOP_SEC_CODE_FAST
#include "SpiSlave_MemMap.h"

#define SPI_SLV_START_SEC_CONST_UNSPECIFIED
#include "SpiSlave_MemMap.h"
/**
 * @brief Spi Hadrdware Controller ops handle.
 *
 */
const struct mld_spi_slv_ops semidrive_spi_slv_bus_ops = {
    /* lifecycle */
    .spi_slv_init = sspi_slv_init,
    .spi_slv_deinit = sspi_slv_deinit,
    /* transfer entry points: blocking, interrupt-driven and DMA-driven */
    .spi_slv_vector_transmit_receive =      sspi_slv_vector_transmit_receive,
    .spi_slv_vector_transmit_receive_irq =  sspi_slv_vector_transmit_receive_irq,
    .spi_slv_vector_transmit_receive_dma =  sspi_slv_vector_transmit_receive_dma,
    /* fifo access and availability checks */
    .spi_slv_write_data =                   sspi_slv_write,
    .spi_slv_read_data =                    sspi_slv_read,
    .spi_slv_can_write =                    sspi_slv_can_write,
    .spi_slv_can_read =                     sspi_slv_can_read,
    /* interrupt status handling */
    .spi_slv_irq_state =                    sspi_slv_get_status,
    .spi_slv_setup_irq_mask =               sspi_slv_setup_irq_mask,
    .spi_slv_clr_irq_state =                sspi_slv_clr_irq_state,
    /* configuration, error recovery and transfer bookkeeping */
    .spi_slv_set_predev_config =            sspi_slv_set_predev_parameters,
    .spi_slv_dma_stop =                     sspi_slv_error_handle_for_dma,
    .spi_slv_transmit_stop =                sspi_slv_recover,
    .spi_slv_transmit_length =              sspi_slv_get_tx_trans_state,
};

#define SPI_SLV_STOP_SEC_CONST_UNSPECIFIED
#include "SpiSlave_MemMap.h"

#ifdef __cplusplus
}
#endif
