/*
 * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#define ATH_MODULE_NAME hif
#include <linux/kthread.h>
#include "a_debug.h"

#include <adf_os_types.h>
#include <adf_os_dma.h>
#include <adf_os_timer.h>
#include <adf_os_time.h>
#include <adf_os_lock.h>
#include <adf_os_io.h>
#include <adf_os_mem.h>
#include <adf_os_module.h>
#include <adf_os_util.h>
#include <adf_os_stdtypes.h>
#include <adf_os_defer.h>
#include <adf_os_atomic.h>
#include <adf_nbuf.h>
#include <athdefs.h>
#include <adf_net_types.h>
#include <a_types.h>
#include <athdefs.h>
#include <a_osapi.h>
#include <hif.h>
#include <htc_services.h>
#include <htc_internal.h>
#include "hif_sdio_internal.h"
#include "if_ath_sdio.h"
#include "regtable.h"
#include "vos_sched.h"

/* under HL SDIO, with Interface Memory support, we have the following
 * reasons to support 2 mboxs: a) we need place different buffers in different
 * mempool, for example, data using Interface Memory, desc and other using
 * DRAM, they need different SDIO mbox channels; b) currently, tx mempool in
 * LL case is separated from main mempool, the structure (descs at the beginning
 * of every pool buffer) is different, because they only need store tx desc from host.
 * To align with LL case, we also need 2 mbox support just as PCIe LL cases.
 */

/**
 * HIFDevMapPipeToMailBox() - translate a pipe id to its SDIO mailbox index
 * @pDev:   HIF SDIO device handle (unused in this temporary mapping)
 * @pipeid: pipe id to translate
 *
 * Pipes 0/1 share mailbox 0; pipes 2/3 share mailbox 1.
 *
 * Return: mailbox index, or INVALID_MAILBOX_NUMBER for an unknown pipe.
 */
A_UINT8 HIFDevMapPipeToMailBox(HIF_SDIO_DEVICE *pDev, A_UINT8 pipeid)
{
    /* TODO: temporary hard-coded mapping, to be reworked with the HIF design */
    switch (pipeid) {
    case 0:
    case 1:
        return 0;
    case 2:
    case 3:
        return 1;
    default:
        printk("%s:--------------------pipeid=%d,should not happen\n",__func__,pipeid);
        adf_os_assert(0);
        return INVALID_MAILBOX_NUMBER;
    }
}

/**
 * HIFDevMapMailBoxToPipe() - translate an SDIO mailbox index to a pipe id
 * @pDev:      HIF SDIO device handle (unused in this temporary mapping)
 * @mboxIndex: mailbox index (0 or 1)
 * @upload:    TRUE for the upload direction, FALSE for download
 *
 * Return: pipe id, or 0xff for an unknown mailbox index.
 */
A_UINT8 HIFDevMapMailBoxToPipe(HIF_SDIO_DEVICE *pDev, A_UINT8 mboxIndex,
        A_BOOL upload)
{
    /* TODO: temporary hard-coded mapping, to be reworked with the HIF design */
    switch (mboxIndex) {
    case 0:
        return upload ? 1 : 0;
    case 1:
        return upload ? 3 : 2;
    default:
        printk("%s:--------------------mboxIndex=%d,upload=%d,should not happen\n",__func__,mboxIndex,upload);
        adf_os_assert(0);
        return 0xff;
    }
}

/**
 * HIFDevMapServiceToPipe() - map an HTC service id to its UL/DL pipe pair
 * @pDev:        HIF SDIO device handle (unused in this mapping)
 * @ServiceId:   HTC service identifier
 * @ULPipe:      [out] upload pipe id
 * @DLPipe:      [out] download pipe id
 * @SwapMapping: when TRUE, swap the default HTT/WMI pipe assignment
 *
 * Return: EOK when the service is known, non-EOK otherwise.
 */
A_STATUS HIFDevMapServiceToPipe(HIF_SDIO_DEVICE *pDev, A_UINT16 ServiceId,
        A_UINT8 *ULPipe, A_UINT8 *DLPipe, A_BOOL SwapMapping)
{
    switch (ServiceId) {
    case HTT_DATA_MSG_SVC:
        /* HTT data rides the second mailbox unless the mapping is swapped */
        *ULPipe = SwapMapping ? 1 : 3;
        *DLPipe = SwapMapping ? 0 : 2;
        break;

    case HTC_CTRL_RSVD_SVC:
    case HTC_RAW_STREAMS_SVC:
    case WMI_DATA_BE_SVC:
    case WMI_DATA_BK_SVC:
    case WMI_DATA_VI_SVC:
    case WMI_DATA_VO_SVC:
        /* control, raw streams and all WMI data ACs use pipes 1/0 */
        *ULPipe = 1;
        *DLPipe = 0;
        break;

    case WMI_CONTROL_SVC:
        /* WMI control rides the first mailbox unless the mapping is swapped */
        *ULPipe = SwapMapping ? 3 : 1;
        *DLPipe = SwapMapping ? 2 : 0;
        break;

    default:
        return !EOK;
    }
    return EOK;
}

/**
 * HIFDevAllocRxBuffer() - allocate an RX network buffer with HTC_PACKET header
 * @pDev:   HIF SDIO device handle stored in the packet's context
 * @length: requested payload length in bytes
 *
 * A single nbuf allocation holds the HTC_PACKET structure at its head
 * followed by the payload area (length + HIF_SDIO_RX_DATA_OFFSET bytes).
 *
 * Return: initialized HTC_PACKET, or NULL on allocation failure.
 */
HTC_PACKET *HIFDevAllocRxBuffer(HIF_SDIO_DEVICE *pDev, size_t length)
{
    adf_nbuf_t netbuf;
    HTC_PACKET *pPacket;
    A_UINT32 payload = length + HIF_SDIO_RX_DATA_OFFSET;
    A_UINT32 headroom = sizeof(HTC_PACKET);

    netbuf = adf_nbuf_alloc(NULL, payload + headroom, 0, 4, FALSE);
    if (!netbuf) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("(%s)Allocate netbuf failed\n", __FUNCTION__));
        return NULL;
    }

    /* HTC_PACKET sits at the head of the buffer; reserving past it makes
     * adf_nbuf_data() point at the payload area that follows. */
    pPacket = (HTC_PACKET *) adf_nbuf_data(netbuf);
    adf_nbuf_reserve(netbuf, headroom);

    SET_HTC_PACKET_INFO_RX_REFILL(pPacket,
            pDev,
            adf_nbuf_data(netbuf),
            payload,
            ENDPOINT_0);
    SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf);
    return pPacket;
}

/**
 * rx_completion_sem_init() - initialize RX completion locks and semaphore
 * @pDev: HIF SDIO device handle owning the receive task state.
 *
 * Initializes the spinlock(s) protecting the RX completion queues
 * (which locks exist depends on HIF_RX_THREAD_V2) and the semaphore
 * used to wake the RX completion thread. The semaphore starts at 0 so
 * the thread blocks until work is posted.
 *
 * Return: None.
 */
static inline void rx_completion_sem_init(HIF_SDIO_DEVICE *pDev)
{
#if HIF_RX_THREAD_V2
	spin_lock_init(&pDev->pRecvTask->rx_bundle_lock);
	spin_lock_init(&pDev->pRecvTask->rx_sync_completion_lock);
#else
	spin_lock_init(&pDev->pRecvTask->rx_completion_lock);
#endif
	sema_init(&pDev->pRecvTask->sem_rx_completion, 0);
}
extern int rx_completion_task(void *param);

/**
 * hif_start_rx_completion_thread() - Create and start the RX compl thread
 * @pDev:   pDev handle.
 *
 * This function will create the rx completion thread, raise it to
 * SCHED_FIFO priority, and wake it up. If a thread already exists it
 * is left untouched and A_OK is returned.
 *
 * Return: A_OK     thread created.
 *         A_ERROR  thread not created.
 */
static inline int hif_start_rx_completion_thread(HIF_SDIO_DEVICE *pDev)
{
    struct sched_param param = {.sched_priority = 99};
	if (!pDev->pRecvTask->rx_completion_task) {
		pDev->pRecvTask->rx_completion_shutdown = 0;
		pDev->pRecvTask->rx_completion_task = kthread_create(rx_completion_task,
			(void *)pDev,	"AR6K RxCompletion");
		if (IS_ERR(pDev->pRecvTask->rx_completion_task)) {
			pDev->pRecvTask->rx_completion_shutdown = 1;
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
			("AR6000: fail to create rx_comple task\n"));
			pDev->pRecvTask->rx_completion_task = NULL;
			return A_ERROR;
		}
		/* Set the scheduling class only after kthread_create() is known
		 * to have succeeded: calling sched_setscheduler() on an
		 * ERR_PTR() value would dereference an invalid pointer. */
		sched_setscheduler(pDev->pRecvTask->rx_completion_task, SCHED_FIFO, &param);
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("AR6000: start rx_comple task\n"));
		wake_up_process(pDev->pRecvTask->rx_completion_task);
	}
	return A_OK;
}

/*
 * hif_stop_rx_completion_thread() - Destroy the rx compl thread
 * @pDev: pDev handle.
 *
 * This function will destroy the RX completion thread and, when
 * HIF_ASYNC_ALLOC is enabled, free any network buffers still queued
 * on the RX allocation queue.
 *
 * Return: None.
 */
static inline void hif_stop_rx_completion_thread(HIF_SDIO_DEVICE *pDev)
{
#if HIF_ASYNC_ALLOC
    HTC_PACKET *pPacket;
#endif
	if (pDev->pRecvTask->rx_completion_task) {
		init_completion(&pDev->pRecvTask->rx_completion_exit);
		/* Raise the shutdown flag first, then wake the thread so it
		 * observes the flag and exits; wait for its acknowledgment. */
		pDev->pRecvTask->rx_completion_shutdown = 1;
		up(&pDev->pRecvTask->sem_rx_completion);
		wait_for_completion(&pDev->pRecvTask->rx_completion_exit);
		pDev->pRecvTask->rx_completion_task = NULL;
		/* Reset the semaphore count in case the thread is restarted */
		sema_init(&pDev->pRecvTask->sem_rx_completion, 0);
	}
#if HIF_ASYNC_ALLOC
    /* Drain the RX alloc queue and release the buffers it still holds */
    while(!HTC_QUEUE_EMPTY(&pDev->pRecvTask->rxAllocQueue)) {
        pPacket = HTC_PACKET_DEQUEUE(&pDev->pRecvTask->rxAllocQueue);
        if(pPacket == NULL)
            break;
        adf_nbuf_free(pPacket->pNetBufContext);
    }
#endif
}

/* Global receive-task state shared by the (single) SDIO device instance */
struct hif_recv_task gRecvTask;

/**
 * HIFDevCreate() - allocate and initialize an HIF SDIO device context
 * @hif_device: underlying HIF device handle
 * @callbacks:  message-based HIF callbacks, copied into the new context
 * @target:     opaque HTC target pointer stored for later use
 *
 * Allocates and zeroes the context, initializes its locks, sets up the
 * RX completion thread machinery (when HIF_RX_THREAD is enabled), and
 * registers the context with the HIF layer via HIF_DEVICE_SET_HTC_CONTEXT.
 *
 * Return: new HIF_SDIO_DEVICE, or NULL on allocation failure.
 */
HIF_SDIO_DEVICE* HIFDevCreate(HIF_DEVICE *hif_device,
        MSG_BASED_HIF_CALLBACKS *callbacks,
        void *target)
{

    A_STATUS status;
    HIF_SDIO_DEVICE *pDev;

    pDev = A_MALLOC(sizeof(HIF_SDIO_DEVICE));
    if (!pDev) {
        A_ASSERT(FALSE);
        return NULL;
    }

    A_MEMZERO(pDev, sizeof(HIF_SDIO_DEVICE));
    A_MUTEX_INIT(&pDev->Lock);
    A_MUTEX_INIT(&pDev->TxLock);
    A_MUTEX_INIT(&pDev->RxLock);

    pDev->HIFDevice = hif_device;
    pDev->pTarget = target;

    /* Point at the shared global RX task state and start the RX thread */
    pDev->pRecvTask = &gRecvTask;
#if HIF_RX_THREAD
    pDev->pRecvTask->rx_completion_task = NULL;
    rx_completion_sem_init(pDev);
#if HIF_RX_THREAD_V2
    INIT_HTC_PACKET_QUEUE(&pDev->pRecvTask->rxBundleQueue);
    INIT_HTC_PACKET_QUEUE(&pDev->pRecvTask->rxSyncCompletionQueue);
#else
    INIT_HTC_PACKET_QUEUE(&pDev->pRecvTask->rxComQueue);
#endif
    hif_start_rx_completion_thread(pDev);
#endif

#if HIF_ASYNC_ALLOC
	spin_lock_init(&pDev->pRecvTask->rx_alloc_lock);
    INIT_HTC_PACKET_QUEUE(&pDev->pRecvTask->rxAllocQueue);
#endif

    /* Register this context with the HIF layer so it can be recovered
     * later via HIFDevFromHIF(); failure is logged but not fatal here. */
    status = HIFConfigureDevice(hif_device,
            HIF_DEVICE_SET_HTC_CONTEXT,
            (void*) pDev,
            sizeof(pDev));
    if (status != A_OK) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n", __FUNCTION__));
    }

    A_MEMCPY(&pDev->hif_callbacks, callbacks, sizeof(*callbacks));

    return pDev;
}

/**
 * HIFDevDestroy() - tear down an HIF SDIO device context
 * @pDev: context previously returned by HIFDevCreate()
 *
 * Stops the RX completion thread (when enabled), unregisters the
 * context from the HIF layer, and frees the context memory.
 */
void HIFDevDestroy(HIF_SDIO_DEVICE *pDev)
{
    A_STATUS status;

#if HIF_RX_THREAD
    hif_stop_rx_completion_thread(pDev);
#endif

    /* Detach this context from the underlying HIF device */
    status = HIFConfigureDevice(pDev->HIFDevice,
                                HIF_DEVICE_SET_HTC_CONTEXT,
                                (void *) NULL,
                                0);
    if (A_OK != status) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n", __FUNCTION__));
    }

    A_FREE(pDev);
}

/**
 * HIFDevFromHIF() - recover the HTC context stored in a HIF device
 * @hif_device: underlying HIF device handle
 *
 * Return: the HIF_SDIO_DEVICE registered via HIF_DEVICE_SET_HTC_CONTEXT,
 *         or NULL if the query fails.
 */
HIF_SDIO_DEVICE *HIFDevFromHIF(HIF_DEVICE *hif_device)
{
    HIF_SDIO_DEVICE *pDev = NULL;

    if (HIFConfigureDevice(hif_device,
                           HIF_DEVICE_GET_HTC_CONTEXT,
                           (void **) &pDev,
                           sizeof(HIF_SDIO_DEVICE)) != A_OK) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("(%s)HTC_SDIO_CONTEXT is NULL!!!\n", __FUNCTION__));
    }
    return pDev;
}

/**
 * HIFDevDisableInterrupts() - turn off all target-side mailbox interrupts
 * @pDev: HIF SDIO device handle
 *
 * Clears every enable bit in the cached interrupt-register shadow while
 * holding the device lock, writes the shadow out to the target
 * synchronously, then masks the host controller interrupt.
 *
 * Return: A_OK on success, otherwise the HIFReadWrite() failure code.
 */
A_STATUS HIFDevDisableInterrupts(HIF_SDIO_DEVICE *pDev)
{
    MBOX_IRQ_ENABLE_REGISTERS local_regs;
    A_STATUS status;

    ENTER();

    LOCK_HIF_DEV(pDev);

    /* Clear every enable bit in the cached shadow copy */
    pDev->IrqEnableRegisters.int_status_enable = 0;
    pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
    pDev->IrqEnableRegisters.error_status_enable = 0;
    pDev->IrqEnableRegisters.counter_int_status_enable = 0;

    /* Snapshot under the lock; the bus write happens outside it */
    A_MEMCPY(&local_regs,
            &pDev->IrqEnableRegisters,
            sizeof(pDev->IrqEnableRegisters));

    UNLOCK_HIF_DEV(pDev);

    /* Push the disabled state to the target; always synchronous */
    status = HIFReadWrite(pDev->HIFDevice,
            INT_STATUS_ENABLE_ADDRESS,
            (A_UCHAR *)&local_regs,
            sizeof(MBOX_IRQ_ENABLE_REGISTERS),
            HIF_WR_SYNC_BYTE_INC,
            NULL);
    if (status != A_OK) {
        /* Can't write it for some reason */
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("Failed to update interrupt control registers err: %d", status));
    }

    /* Finally mask interrupts at the host controller */
    HIFMaskInterrupt(pDev->HIFDevice);

    EXIT("status :%d", status);
    return status;
}

/**
 * HIFDevEnableInterrupts() - enable target mailbox/CPU/error interrupts
 * @pDev: HIF SDIO device handle
 *
 * Disables everything first, unmasks the host controller, then builds
 * the interrupt-enable shadow registers under the device lock and
 * writes them to the target synchronously.
 *
 * Return: A_OK on success, otherwise the HIFReadWrite() failure code.
 */
A_STATUS HIFDevEnableInterrupts(HIF_SDIO_DEVICE *pDev)
{
    A_STATUS status;
    MBOX_IRQ_ENABLE_REGISTERS regs;
    ENTER();


    /* for good measure, make sure interrupt are disabled before unmasking at the HIF
     * layer.
     * The rationale here is that between device insertion (where we clear the interrupts the first time)
     * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
     * The AR6K interrupt enables reset back to an "enabled" state when this happens.
     *  */
    HIFDevDisableInterrupts(pDev);

    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);

    LOCK_HIF_DEV(pDev);

    /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
    pDev->IrqEnableRegisters.int_status_enable =
            INT_STATUS_ENABLE_ERROR_SET(0x01) | INT_STATUS_ENABLE_CPU_SET(0x01)
                    | INT_STATUS_ENABLE_COUNTER_SET(0x01);

    pDev->IrqEnableRegisters.int_status_enable |=
            INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) | INT_STATUS_ENABLE_MBOX_DATA_SET(0x02); // enable 2 mboxs INT

    /* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #0, #1
     * #0 is used for report assertion from target
     * #1 is used for inform host that credit arrived
     * */
    pDev->IrqEnableRegisters.cpu_int_status_enable = 0x03;

    /* Set up the Error Interrupt Status Register; the >> 16 shifts the
     * 32-bit field macros down into this byte-wide register */
    pDev->IrqEnableRegisters.error_status_enable =
            (ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
                    | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

    /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors);
     * the >> 24 shifts the 32-bit field macro down into this byte-wide register */
    pDev->IrqEnableRegisters.counter_int_status_enable =
            (COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

    /* copy into our temp area; the bus write happens outside the lock */
    A_MEMCPY(&regs,
            &pDev->IrqEnableRegisters,
            sizeof(MBOX_IRQ_ENABLE_REGISTERS));

    UNLOCK_HIF_DEV(pDev);


    /* always synchronous */
    status = HIFReadWrite(pDev->HIFDevice,
            INT_STATUS_ENABLE_ADDRESS,
            (A_UCHAR *)&regs,
            sizeof(MBOX_IRQ_ENABLE_REGISTERS),
            HIF_WR_SYNC_BYTE_INC,
            NULL);

    if (status != A_OK) {
        /* Can't write it for some reason */
        AR_DEBUG_PRINTF( ATH_DEBUG_ERR,
                ("Failed to update interrupt control registers err: %d\n", status));

    }
    EXIT();
    return status;
}

/**
 * HIFDevSetup() - query HIF properties and attach HTC callbacks
 * @pDev: HIF SDIO device handle
 *
 * Queries mailbox addresses, block size, IRQ processing mode (and yield
 * parameters in sync-only mode), and the optional recv-event mask/unmask
 * function from the HIF layer, disables target interrupts, then attaches
 * the device-layer read/write-completion and DSR handlers to HTC.
 *
 * Return: status of the final HIFAttachHTC() call.
 */
A_STATUS HIFDevSetup(HIF_SDIO_DEVICE *pDev)
{
    A_STATUS status;
    A_UINT32 blocksizes[MAILBOX_COUNT];
    HTC_CALLBACKS htcCallbacks;
    HIF_DEVICE *hif_device = pDev->HIFDevice;

    ENTER();

    status = HIFConfigureDevice(hif_device,
            HIF_DEVICE_GET_MBOX_ADDR,
            &pDev->MailBoxInfo,
            sizeof(pDev->MailBoxInfo));

    if (status != A_OK) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                ("(%s)HIF_DEVICE_GET_MBOX_ADDR failed!!!\n", __FUNCTION__));
        A_ASSERT(FALSE);
    }

    status = HIFConfigureDevice(hif_device,
            HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
            blocksizes,
            sizeof(blocksizes));
    if (status != A_OK) {
        AR_DEBUG_PRINTF( ATH_DEBUG_ERR,
                ("(%s)HIF_DEVICE_GET_MBOX_BLOCK_SIZE failed!!!\n", __FUNCTION__));
        A_ASSERT(FALSE);
    }

    /* Cache the block size and its mask; it must be a power of two */
    pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
    pDev->BlockMask = pDev->BlockSize - 1;
    A_ASSERT((pDev->BlockSize & pDev->BlockMask) == 0);

    /* assume we can process HIF interrupt events asynchronously */
    pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

    /* see if the HIF layer overrides this assumption */
    HIFConfigureDevice(hif_device,
            HIF_DEVICE_GET_IRQ_PROC_MODE,
            &pDev->HifIRQProcessingMode,
            sizeof(pDev->HifIRQProcessingMode));

    switch (pDev->HifIRQProcessingMode) {
    case HIF_DEVICE_IRQ_SYNC_ONLY:
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
                ("HIF Interrupt processing is SYNC ONLY\n"));
        /* see if HIF layer wants HTC to yield */
        HIFConfigureDevice(hif_device,
                HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
                &pDev->HifIRQYieldParams,
                sizeof(pDev->HifIRQYieldParams));

        if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
            AR_DEBUG_PRINTF( ATH_DEBUG_WARN,
                    ("HIF requests that DSR yield per %d RECV packets \n", pDev->HifIRQYieldParams.RecvPacketYieldCount));
            pDev->DSRCanYield = TRUE;
        }
        break;
    case HIF_DEVICE_IRQ_ASYNC_SYNC:
        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
                ("HIF Interrupt processing is ASYNC and SYNC\n"));
        break;
    default:
        A_ASSERT(FALSE);
        break;
    }

    pDev->HifMaskUmaskRecvEvent = NULL;

    /* see if the HIF layer implements the mask/unmask recv events function  */
    HIFConfigureDevice(hif_device,
            HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
            &pDev->HifMaskUmaskRecvEvent,
            sizeof(pDev->HifMaskUmaskRecvEvent));

    /* start with target interrupts disabled; HTC enables them later */
    status = HIFDevDisableInterrupts(pDev);

    A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
    /* the device layer handles these */
    htcCallbacks.rwCompletionHandler = HIFDevRWCompletionHandler;
    htcCallbacks.dsrHandler = HIFDevDsrHandler;
    htcCallbacks.context = pDev;
    status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);

    EXIT();
    return status;
}


/**
 * HIFDevSetupMsgBundling() - query scatter-request (bundling) support
 * @pDev: HIF SDIO device handle
 *
 * Return: A_ENOTSUP when the HIF layer forbids bundling, otherwise the
 *         status of the scatter-support query (scatter info is filled
 *         into pDev->HifScatterInfo on success).
 */
A_STATUS HIFDevSetupMsgBundling(HIF_SDIO_DEVICE *pDev)
{
    A_STATUS status;

    AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("+%s - pDev = %p\n", __func__, pDev));

    /* Bail out early if the HIF layer forbids bundling altogether */
    if (pDev->MailBoxInfo.Flags & HIF_MBOX_FLAG_NO_BUNDLING) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("HIF requires bundling disabled\n"));
        return A_ENOTSUP;
    }

    status = HIFConfigureDevice(pDev->HIFDevice,
                                HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
                                &pDev->HifScatterInfo,
                                sizeof(pDev->HifScatterInfo));
    if (A_FAILED(status)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
            ("HIF layer does not support scatter requests (%d) \n",status));
    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
            ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
                    DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
    }

    return status;
}

/**
 * HIFDevSubmitScatterRequest() - submit a bundled scatter read/write
 * @pDev:        HIF SDIO device handle
 * @pScatterReq: scatter request describing the transfer
 * @Read:        TRUE for a read from the mailbox, FALSE for a write
 * @Async:       TRUE to submit asynchronously
 *
 * For reads, fills in the request operation and mailbox address before
 * submission. In async mode, a preparation failure is reported through
 * the request's completion routine and A_OK is returned; in sync mode
 * the completion status is set and the scatter operation is finished
 * inline.
 *
 * Return: A_OK, or the failure code from preparation/submission.
 */
A_STATUS HIFDevSubmitScatterRequest(HIF_SDIO_DEVICE *pDev, HIF_SCATTER_REQ *pScatterReq,
                                    A_BOOL Read, A_BOOL Async)
{
    A_STATUS status;

    HTC_TARGET *target;
    target = (HTC_TARGET *)pDev->pTarget;
     //AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("+%s\n", __func__));

    if (Read)
    {
            /* read operation */
        pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
        pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
        /* a bundled read must fit within the maximum RX bundle size */
        A_ASSERT(pScatterReq->TotalLength < (HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize));
    }

    AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
                ("%s, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
                __func__, pScatterReq->ValidScatterEntries,
                pScatterReq->TotalLength,
                pScatterReq->Address,
                Async ? "ASYNC" : "SYNC",
                (Read) ? "RD" : "WR"));

    status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);

    if (A_FAILED(status)) {
        if (Async) {
            /* async callers learn of the failure via the completion
             * routine, so the submit itself reports success */
            pScatterReq->CompletionStatus = status;
            pScatterReq->CompletionRoutine(pScatterReq);
            return A_OK;
        }
        return status;
    }

    status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->HIFDevice, pScatterReq);

    if (!Async) {
            /* in sync mode, we can touch the scatter request */
        pScatterReq->CompletionStatus = status;
        DEV_FINISH_SCATTER_OPERATION(pScatterReq);
    } else {
        /* A_PENDING means the async submit was accepted; the completion
         * routine will deliver the final status later */
        if (status == A_PENDING) {
            status = A_OK;
        }
    }

    return status;
}

/**
 * HIFDevCopyScatterListToFromDMABuffer() - copy between scatter list and bounce buffer
 * @pReq:    scatter request with a valid pScatterBounceBuffer
 * @FromDMA: TRUE to copy bounce buffer -> scatter entries,
 *           FALSE to copy scatter entries -> bounce buffer
 *
 * Walks the scatter entries sequentially, copying each one to/from the
 * contiguous DMA bounce buffer. The list must be consistent with
 * TotalLength: no entry may extend past the remaining byte count.
 *
 * Return: A_OK on success, A_EINVAL if the bounce buffer is missing or
 *         the scatter list is inconsistent.
 */
A_STATUS HIFDevCopyScatterListToFromDMABuffer(HIF_SCATTER_REQ *pReq, A_BOOL FromDMA)
{
    A_UINT8  *pCursor;
    int       idx;
    int       bytesLeft;
    A_UINT32  copyLen;

    pCursor = pReq->pScatterBounceBuffer;
    if (pCursor == NULL) {
        A_ASSERT(FALSE);
        return A_EINVAL;
    }

    bytesLeft = (int)pReq->TotalLength;

    for (idx = 0; idx < pReq->ValidScatterEntries; idx++) {
        copyLen = min((int)pReq->ScatterList[idx].Length, bytesLeft);

        if (copyLen != (int)pReq->ScatterList[idx].Length) {
            /* entry overruns TotalLength - the scatter list is bad */
            A_ASSERT(FALSE);
            return A_EINVAL;
        }

        if (FromDMA) {
            /* bounce buffer -> scatter entry */
            A_MEMCPY(pReq->ScatterList[idx].pBuffer, pCursor, copyLen);
        } else {
            /* scatter entry -> bounce buffer */
            A_MEMCPY(pCursor, pReq->ScatterList[idx].pBuffer, copyLen);
        }

        pCursor += copyLen;
        bytesLeft -= copyLen;
    }

    return A_OK;
}

