/*****************************************************************************
 *   Copyright(C)2009-2019 by VSF Team                                        *
 *                                                                           *
 *  Licensed under the Apache License, Version 2.0 (the "License");          *
 *  you may not use this file except in compliance with the License.         *
 *  You may obtain a copy of the License at                                   *
 *                                                                           *
 *     http://www.apache.org/licenses/LICENSE-2.0                            *
 *                                                                           *
 *  Unless required by applicable law or agreed to in writing, software      *
 *  distributed under the License is distributed on an "AS IS" BASIS,        *
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
 *  See the License for the specific language governing permissions and      *
 *  limitations under the License.                                           *
 *                                                                           *
 ****************************************************************************/


/*============================ INCLUDES ======================================*/

#include "hal/vsf_hal_cfg.h"

#if VSF_HAL_USE_DMA == ENABLED

#include "hal/vsf_hal.h"

#include "HME_MCU.h"
#include "sys_reg.h"
#include "hme_dma.h"

/*============================ MACROS ========================================*/

// allow the board configuration to opt out of multi-class support for this driver
#ifndef VSF_HW_DMA_CFG_MULTI_CLASS
#   define VSF_HW_DMA_CFG_MULTI_CLASS           VSF_DMA_CFG_MULTI_CLASS
#endif

// prefix used by dma_template.inc to name the instance types and API functions
#define VSF_DMA_CFG_IMP_PREFIX                  vsf_hw
#define VSF_DMA_CFG_IMP_UPCASE_PREFIX           VSF_HW

// protection level for the critical sections guarding channel bookkeeping
// NOTE(review): macro name uses lowercase "dma" unlike the other VSF_HW_*
// macros -- it is a user-overridable hook, so renaming would break existing
// configurations; confirm before normalizing.
#ifndef VSF_HW_CFG_dma_PROTECT_LEVEL
#   define VSF_HW_CFG_dma_PROTECT_LEVEL         interrupt
#endif

#define vsf_hw_dma_protect                      vsf_protect(VSF_HW_CFG_dma_PROTECT_LEVEL)
#define vsf_hw_dma_unprotect                    vsf_unprotect(VSF_HW_CFG_dma_PROTECT_LEVEL)

// one physical channel on this controller, multiplexed into 8 virtual channels
#define VSF_HW_DMA_CHANNEL                      1
#define VSF_HW_DMA_VIRTUAL_CHANNEL              8

/*============================ MACROFIED FUNCTIONS ===========================*/
/*============================ TYPES =========================================*/

/* Per-channel register block of the (DesignWare-style) AHB DMA controller.
 * The layout must match hardware exactly: eleven 64-bit registers at 8-byte
 * stride. A channel's block is addressed as (dma_channel_reg_t *)base + chn.
 * NOTE(review): this assumes channel 0's block starts at the controller
 * base address -- confirm against the HME_MCU register map. */
typedef struct dma_channel_reg_t {
    __IOM uint64_t SAR;            /*!< (@ 0x00000000) SAR0      */
    __IOM uint64_t DAR;            /*!< (@ 0x00000008) DAR0      */
    __IOM uint64_t LLP;            /*!< (@ 0x00000010) LLP0      */
    __IOM uint64_t CTL;            /*!< (@ 0x00000018) CTL0      */
    __IOM uint64_t SSTAT;          /*!< (@ 0x00000020) SSTAT0    */
    __IOM uint64_t DSTAT;          /*!< (@ 0x00000028) DSTAT0    */
    __IOM uint64_t SSTATAR;        /*!< (@ 0x00000030) SSTATAR0  */
    __IOM uint64_t DSTATAR;        /*!< (@ 0x00000038) DSTATAR0  */
    __IOM uint64_t CFG;            /*!< (@ 0x00000040) CFG0      */
    __IOM uint64_t SGR;            /*!< (@ 0x00000048) SGR0      */
    __IOM uint64_t DSR;            /*!< (@ 0x00000050) DSR0      */
} dma_channel_reg_t;

/* Bookkeeping record for one VIRTUAL DMA channel. */
typedef struct dma_chn_cfg_t {
    vsf_slist_node_t        node;   // linkage for vt.start_queue while waiting for a free hw channel

    vsf_dma_channel_cfg_t   cfg;    // user configuration captured by _dma_channel_config()
    struct {
        uint32_t            src_address;    // source address of the pending/active request
        uint32_t            dst_address;    // destination address of the pending/active request
        uint32_t            count;          // transfer count, programmed into CTLx high word
    } req;
    int8_t                  hw_chn; // bound hardware channel; < 0 means not running
} dma_chn_cfg_t;

// one allocation bit per virtual / hardware channel
dcl_vsf_bitmap(dma_vt_chn_bitmap_t, VSF_HW_DMA_VIRTUAL_CHANNEL);
dcl_vsf_bitmap(dma_hw_chn_bitmap_t, VSF_HW_DMA_CHANNEL);

/* Driver instance: one physical DMA controller multiplexing
 * VSF_HW_DMA_VIRTUAL_CHANNEL virtual channels over VSF_HW_DMA_CHANNEL
 * hardware channels. Requests that cannot get a hardware channel
 * immediately wait in vt.start_queue. */
typedef struct VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) {
#if VSF_HW_DMA_CFG_MULTI_CLASS == ENABLED
    vsf_dma_t                               vsf_dma;    // generic vsf_dma base (cast target in the irq handler)
#endif
    DMAC_Type                               *reg;       // controller register base
    IRQn_Type                               irqn;       // controller interrupt number
    
    struct {
        vsf_bitmap(dma_vt_chn_bitmap_t)     req_bitmap;     // which virtual channels are claimed
        vsf_slist_queue_t                   start_queue;    // requests waiting for a hw channel
        dma_chn_cfg_t                       cfgs[VSF_HW_DMA_VIRTUAL_CHANNEL];
    } vt;
    
    struct {
        vsf_bitmap(dma_hw_chn_bitmap_t)     bitmap;     // which hardware channels are busy
        int8_t                              vt_maps[VSF_HW_DMA_CHANNEL];    // hw chn -> virtual chn, -1 when free
    } hw;
} VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t);

/*============================ MACROS ========================================*/
/*============================ TYPES =========================================*/
/*============================ GLOBAL VARIABLES ==============================*/
/*============================ LOCAL VARIABLES ===============================*/
/*============================ PROTOTYPES ====================================*/
/*============================ IMPLEMENTATION ================================*/

/* Stop an active transfer on hardware channel chn: suspend the channel,
 * wait for its FIFO to drain, then disable the channel and mask its
 * transfer-complete interrupt.
 * NOTE(review): the suspend bit in CFG is left set; the full CFG write in
 * __dma_chn_config presumably clears it on the next use -- confirm. */
static void __dma_chn_cancel(VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr, int8_t chn)
{
    VSF_HAL_ASSERT((0 <= chn) && (chn < VSF_HW_DMA_CHANNEL));

    dma_channel_reg_t *ch_regs = (dma_channel_reg_t *)dma_ptr->reg + chn;

    ch_regs->CFG |= (1 << 8);                           // CH_SUSP: suspend the channel
    while (!(ch_regs->CFG & ((uint64_t)1 << 9)));       // spin until FIFO_EMPTY

    // upper byte is the write-enable mask, lower byte the value:
    // writing only 0x100 << chn clears the enable / irq-unmask bit
    dma_ptr->reg->CHENREG = 0x100 << chn;
    dma_ptr->reg->MASKTFR = 0x100 << chn;
}

/* Read the number of items transferred so far on HARDWARE channel
 * 'channel' from the upper half of CTLx (the BLOCK_TS field).
 * NOTE(review): the 0x3FF mask assumes a 10-bit BLOCK_TS; DW_ahb_dmac
 * configurations support up to 12 bits -- confirm against the datasheet. */
static uint32_t __dma_chn_get_count(VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
                                    uint8_t channel)
{
    // fix: 'channel >= 0' was a tautology on uint8_t (always true, and it
    // triggers -Wtype-limits); only the upper bound needs checking
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    dma_channel_reg_t * channel_reg = (dma_channel_reg_t *)dma_ptr->reg + channel;
    return (channel_reg->CTL >> 32) & 0x3FF;
}

/* Bring the controller up: initialize the pending-start queue, enable the
 * DMA core, clear the hardware-to-virtual channel map and enable the
 * controller interrupt in the NVIC. Always returns VSF_ERR_NONE. */
vsf_err_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_init)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr
) {
    vsf_slist_queue_init(&dma_ptr->vt.start_queue);

    // global enable of the DMA controller core
    dma_ptr->reg->DMACFGREG_b.DMA_EN = 1;

    // no hardware channel is bound to a virtual channel yet
    for (int idx = 0; idx < dimof(dma_ptr->hw.vt_maps); idx++) {
        dma_ptr->hw.vt_maps[idx] = -1;
    }

    NVIC_SetPriority(dma_ptr->irqn, vsf_arch_prio_highest);
    NVIC_EnableIRQ(dma_ptr->irqn);

    return VSF_ERR_NONE;
}

/* Shut the controller down: disable its interrupt and the DMA core, then
 * wipe all bookkeeping state (virtual and hardware channel tables). */
void VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_fini)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);

    NVIC_DisableIRQ(dma_ptr->irqn);
    dma_ptr->reg->DMACFGREG_b.DMA_EN = 0;

    // discard all channel state; a subsequent _dma_init starts fresh
    memset(&dma_ptr->hw, 0, sizeof(dma_ptr->hw));
    memset(&dma_ptr->vt, 0, sizeof(dma_ptr->vt));
}

/* Report static capabilities: supported interrupt flags, the number of
 * concurrently requestable (virtual) channels, and hardware channels. */
vsf_dma_capability_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_capability)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);

    vsf_dma_capability_t capability = {
        .irq_mask          = VSF_DMA_IRQ_MASK_CPL | VSF_DMA_IRQ_MASK_ERROR,
        .max_request_count = VSF_HW_DMA_VIRTUAL_CHANNEL,
        .channel_count     = VSF_HW_DMA_CHANNEL,
    };
    return capability;
}

/* Claim a free VIRTUAL channel. Returns its index, or -1 when all
 * VSF_HW_DMA_VIRTUAL_CHANNEL slots are in use. The slot's bookkeeping
 * record is reset before it is handed to the caller. */
int8_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_request)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);
    int8_t chn;

    vsf_protect_t state = vsf_hw_dma_protect();
        chn = vsf_bitmap_ffz(&dma_ptr->vt.req_bitmap, VSF_HW_DMA_VIRTUAL_CHANNEL);
        if ((chn >= 0) && (chn < VSF_HW_DMA_VIRTUAL_CHANNEL)) {
            vsf_bitmap_set(&dma_ptr->vt.req_bitmap, chn);
        } else {
            chn = -1;
        }
    vsf_hw_dma_unprotect(state);

    if (chn >= 0) {
        memset(&dma_ptr->vt.cfgs[chn], 0, sizeof(dma_ptr->vt.cfgs[chn]));
        // fix: memset leaves hw_chn == 0, which is a valid hardware-channel
        // index, so a freshly requested idle channel would report busy in
        // _dma_channel_status; mark it explicitly as "not running"
        dma_ptr->vt.cfgs[chn].hw_chn = -1;
    }

    return chn;
}

/* Return a virtual channel to the free pool. Releasing a channel that is
 * still waiting in the start queue is a caller error (trapped in debug). */
void VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_release)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
    int8_t channel
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);
    VSF_HAL_ASSERT(channel >= 0);
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    // trap misuse: the channel must not be pending a start
    if (vsf_slist_queue_is_in(dma_chn_cfg_t, node, &dma_ptr->vt.start_queue,
                              &dma_ptr->vt.cfgs[channel])) {
        VSF_ASSERT(0);
    }

    vsf_protect_t orig = vsf_hw_dma_protect();
        vsf_bitmap_clear(&dma_ptr->vt.req_bitmap, channel);
    vsf_hw_dma_unprotect(orig);
}

/* Store the user configuration for a virtual channel and reset its pending
 * request. The configuration is only applied to hardware when the channel
 * is started via _dma_channel_start(). Always returns VSF_ERR_NONE. */
vsf_err_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_config)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
    int8_t channel,
    vsf_dma_channel_cfg_t *cfg_ptr
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);
    VSF_HAL_ASSERT(cfg_ptr != NULL);
    VSF_HAL_ASSERT(channel >= 0);
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    dma_ptr->vt.cfgs[channel].cfg = *cfg_ptr;
    // fix: these fields are uint32_t addresses, not pointers -- assigning
    // NULL was an integer/pointer type mismatch; use 0
    dma_ptr->vt.cfgs[channel].req.src_address = 0;
    dma_ptr->vt.cfgs[channel].req.dst_address = 0;
    dma_ptr->vt.cfgs[channel].req.count = 0;

    return VSF_ERR_NONE;
}

/* Program HARDWARE channel 'chn' with the request held in chn_ptr and start
 * the transfer. Assumes DW_ahb_dmac-style registers: CTLx bit 0 is INT_EN,
 * the block size lives in the upper half of CTLx, and CFGx carries the
 * peripheral handshake interface numbers.
 * NOTE(review): the << 39 / << 43 handshake field positions and the layout
 * of VSF_DMA_MODE_ALL_BITS_MASK within CTLx are taken on faith from this
 * code -- confirm against the HME datasheet.
 * Must only be called for a channel already claimed in hw.bitmap. */
static void __dma_chn_config(VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
                             uint8_t chn, dma_chn_cfg_t *chn_ptr)
{   
    // CTLx: caller-selected mode bits + INT_EN (bit 0) + transfer count
    uint64_t ctl_value = (chn_ptr->cfg.mode & VSF_DMA_MODE_ALL_BITS_MASK) |
                         (1ull << 0) | ((uint64_t)chn_ptr->req.count << 32);

    uint64_t cfg_value = 0;
    uint64_t dir       = chn_ptr->cfg.mode & VSF_DMA_DIRECTION_MASK;
    switch (dir) {
    case VSF_DMA_MEMORY_TO_MEMORY:
        // no hardware handshake needed for memory-to-memory
        break;
    case VSF_DMA_PERIPHERA_TO_MEMORY:
        cfg_value |= (uint64_t)chn_ptr->cfg.src_idx << 39;  // source handshake interface
        break;
    case VSF_DMA_MEMORY_TO_PERIPHERAL:
        cfg_value |= (uint64_t)chn_ptr->cfg.dst_idx << 43;  // destination handshake interface
        break;
    case VSF_DMA_PERIPHERA_TO_PERIPHERAL:
        cfg_value |= ((uint64_t)chn_ptr->cfg.src_idx << 39) |
                     ((uint64_t)chn_ptr->cfg.dst_idx << 43);
        break;
    }
        
    dma_channel_reg_t *channel_reg = (dma_channel_reg_t *)dma_ptr->reg + chn;
    
    // wait until any previous transfer on this hw channel has fully stopped
    while (dma_ptr->reg->CHENREG & (0x1 << chn));
    dma_ptr->reg->MASKTFR = 0x100 << chn;   // mask transfer-complete irq while programming

    channel_reg->SAR = chn_ptr->req.src_address;
    channel_reg->DAR = chn_ptr->req.dst_address;
    channel_reg->CTL = ctl_value;
    channel_reg->CFG = cfg_value;

    // upper byte = write-enable mask, lower byte = value:
    // unmask the transfer-complete irq, then enable the channel
    dma_ptr->reg->MASKTFR = (0x100 << chn) | (0x1 << chn);
    dma_ptr->reg->CHENREG = (0x100 << chn) | (0x1 << chn);
}

/* Start (or queue) a transfer on a virtual channel. If a hardware channel
 * is free it is claimed and programmed immediately; otherwise the request
 * is enqueued and will be started by the irq handler when a hardware
 * channel completes. Always returns VSF_ERR_NONE. */
vsf_err_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_start)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
    int8_t channel,
    uint32_t src_address,
    uint32_t dst_address,
    uint32_t count
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);
    VSF_HAL_ASSERT(channel >= 0);
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    dma_chn_cfg_t *chn_cfg = &dma_ptr->vt.cfgs[channel];
    chn_cfg->req.src_address = src_address;
    chn_cfg->req.dst_address = dst_address;
    chn_cfg->req.count       = count;

    int_fast32_t hw_chn;

    vsf_protect_t orig = vsf_hw_dma_protect();
        // try to grab an idle hardware channel
        hw_chn = vsf_bitmap_ffz(&dma_ptr->hw.bitmap, VSF_HW_DMA_CHANNEL);
        if (hw_chn < 0) {
            // none free: park the request until the irq handler picks it up
            vsf_slist_queue_enqueue(dma_chn_cfg_t, node, &dma_ptr->vt.start_queue, chn_cfg);
        } else {
            vsf_bitmap_set(&dma_ptr->hw.bitmap, hw_chn);
            dma_ptr->hw.vt_maps[hw_chn] = channel;
            chn_cfg->hw_chn = hw_chn;
        }
    vsf_hw_dma_unprotect(orig);

    // program hardware outside the critical section
    if (hw_chn >= 0) {
        __dma_chn_config(dma_ptr, hw_chn, chn_cfg);
    }

    return VSF_ERR_NONE;
}

/* Cancel a virtual channel's transfer: stop the hardware channel if one is
 * bound, and drop the request from the start queue if it is still pending.
 * Always returns VSF_ERR_NONE. */
vsf_err_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_cancel)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
    int8_t channel
) {
    VSF_HAL_ASSERT(dma_ptr != NULL);
    VSF_HAL_ASSERT(channel >= 0);
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    vsf_protect_t state = vsf_hw_dma_protect();
        dma_chn_cfg_t *chn_cfg = &dma_ptr->vt.cfgs[channel];
        if (chn_cfg->hw_chn >= 0) {
            dma_ptr->hw.vt_maps[chn_cfg->hw_chn] = -1;
            // fix: the transfer-complete irq is masked by __dma_chn_cancel,
            // so the irq handler will never free this hardware channel --
            // release it here, and mark the virtual channel idle so
            // _dma_channel_status / get_transferred_count see it as stopped
            vsf_bitmap_clear(&dma_ptr->hw.bitmap, chn_cfg->hw_chn);
            __dma_chn_cancel(dma_ptr, chn_cfg->hw_chn);
            chn_cfg->hw_chn = -1;
        }
        if (vsf_slist_queue_is_in(dma_chn_cfg_t, node, &dma_ptr->vt.start_queue, chn_cfg)) {
            // NOTE(review): vsf_slist_queue_dequeue removes the HEAD of the
            // queue, which is this node only when it happens to be first --
            // confirm whether a targeted-removal API should be used here.
            vsf_slist_queue_dequeue(dma_chn_cfg_t, node, &dma_ptr->vt.start_queue, chn_cfg);
        }
    vsf_hw_dma_unprotect(state);

    return VSF_ERR_NONE;
}

/* Report whether a virtual channel currently has a transfer running,
 * i.e. whether it is bound to a hardware channel. */
vsf_dma_channel_status_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_status)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
    int8_t channel)
{
    // fix: the dma_ptr null check was duplicated
    VSF_HAL_ASSERT(dma_ptr != NULL);
    VSF_HAL_ASSERT(channel >= 0);
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    return (vsf_dma_channel_status_t) {
        .is_busy = dma_ptr->vt.cfgs[channel].hw_chn >= 0,
    };
}

/* Return the number of items transferred so far on a virtual channel,
 * or 0 when the channel is not currently running. */
uint32_t VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_channel_get_transferred_count)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr,
    int8_t channel)
{
    VSF_HAL_ASSERT(dma_ptr != NULL);
    VSF_HAL_ASSERT(channel >= 0);
    VSF_HAL_ASSERT(channel < dimof(dma_ptr->vt.cfgs));

    int8_t hw_chn = dma_ptr->vt.cfgs[channel].hw_chn;
    if (hw_chn >= 0) {
        // fix: __dma_chn_get_count indexes HARDWARE channel registers, so
        // it must receive the mapped hardware channel -- the original
        // passed the virtual index, reading the wrong (or non-existent)
        // channel register block whenever the two differ
        return __dma_chn_get_count(dma_ptr, (uint8_t)hw_chn);
    }
    return 0;
}

/* Shared interrupt handler for one controller instance. Clears all pending
 * status groups; for each completed transfer it either hands the hardware
 * channel to the next queued request or frees it, then invokes the user
 * completion callback with VSF_DMA_IRQ_MASK_CPL. */
static void VSF_MCONNECT(__, VSF_DMA_CFG_IMP_PREFIX, _dma_irqhandler)(
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t) *dma_ptr
) {
    VSF_HAL_ASSERT(NULL != dma_ptr);

    DMAC_Type *reg = dma_ptr->reg;
    uint32_t status_int = reg->STATUSINT;

    if (status_int & (1 << 4)) {            // combined error interrupt
        // fix: read THIS instance's error status; the original read the
        // global DMAC instance, which is wrong for any other instance
        uint32_t status_err = reg->STATUSERR;
        reg->CLEARERR = status_err;
        // sanity: the combined flag implies at least one per-channel bit
        VSF_HAL_ASSERT(status_err != 0);
        // TODO(review): callers are never notified with
        // VSF_DMA_IRQ_MASK_ERROR although capability advertises it
    }

    if (status_int & (1 << 3)) {            // destination transaction done: clear only
        reg->CLEARDSTTRAN = reg->STATUSDSTTRAN;
    }

    if (status_int & (1 << 2)) {            // source transaction done: clear only
        reg->CLEARSRCTRAN = reg->STATUSSRCTRAN;
    }

    if (status_int & (1 << 1)) {            // block done: clear only
        reg->CLEARBLOCK = reg->STATUSBLOCK;
    }

    if (status_int & (1 << 0)) {            // transfer complete
        uint32_t status_sfer = reg->STATUSTFR;
        reg->CLEARTFR = status_sfer;
        int hw_chn;

        while (status_sfer != 0) {
            hw_chn = vsf_ffs32(status_sfer);
            status_sfer &= ~(1 << hw_chn);
            int chn = dma_ptr->hw.vt_maps[hw_chn];

            VSF_HAL_ASSERT(chn >= 0);
            // fix: bound by the virtual-channel table size; the original
            // hard-coded 3, spuriously rejecting valid channels 3..7
            VSF_HAL_ASSERT(chn < (int)dimof(dma_ptr->vt.cfgs));

            vsf_protect_t state = vsf_hw_dma_protect();
                // hand the hardware channel to the next queued request,
                // or free it when the queue is empty
                dma_chn_cfg_t *next_chn_ptr = NULL;
                vsf_slist_queue_dequeue(dma_chn_cfg_t, node, &dma_ptr->vt.start_queue, next_chn_ptr);
                if (next_chn_ptr == NULL) {
                    dma_ptr->hw.vt_maps[hw_chn] = -1;
                    vsf_bitmap_clear(&dma_ptr->hw.bitmap, hw_chn);
                } else {
                    next_chn_ptr->hw_chn = hw_chn;
                    dma_ptr->hw.vt_maps[hw_chn] = (next_chn_ptr - dma_ptr->vt.cfgs);
                }
            vsf_hw_dma_unprotect(state);

            // notify the completed virtual channel's owner
            dma_chn_cfg_t *chn_cfg = &dma_ptr->vt.cfgs[chn];
            chn_cfg->hw_chn = -1;
            if (chn_cfg->cfg.isr.handler_fn != NULL) {
                chn_cfg->cfg.isr.handler_fn(chn_cfg->cfg.isr.target_ptr, (vsf_dma_t *)dma_ptr, chn, VSF_DMA_IRQ_MASK_CPL);
            }

            // start the request that inherited this hardware channel
            if (next_chn_ptr != NULL) {
                __dma_chn_config(dma_ptr, hw_chn, next_chn_ptr);
            }
        }
    }
}

/*============================ MACROFIED FUNCTIONS ===========================*/

// tell the template this driver supplies its own capability query above
#define VSF_DMA_CFG_REIMPLEMENT_API_CAPABILITY  ENABLED
// Expanded once per DMA instance by dma_template.inc: defines the instance
// object (register base + irq number from VSF_HW_DMA<n>_REG / _IRQN) and its
// IRQ handler, which wraps the shared __vsf_hw_dma_irqhandler in the
// vsf_hal_irq_enter/leave bracket.
#define VSF_DMA_CFG_IMP_LV0(__IDX, __HAL_OP)                                    \
    VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma_t)                                \
        VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma, __IDX) = {                   \
        .reg  = VSF_MCONNECT(VSF_DMA_CFG_IMP_UPCASE_PREFIX, _DMA, __IDX,_REG),  \
        .irqn = VSF_MCONNECT(VSF_DMA_CFG_IMP_UPCASE_PREFIX, _DMA, __IDX,_IRQN), \
        __HAL_OP                                                                \
    };                                                                          \
    VSF_CAL_ROOT void VSF_MCONNECT(VSF_DMA_CFG_IMP_UPCASE_PREFIX, _DMA, __IDX,  \
                                   _IRQHandler)(void) {                         \
        uintptr_t ctx = vsf_hal_irq_enter();                                    \
        VSF_MCONNECT(__, VSF_DMA_CFG_IMP_PREFIX, _dma_irqhandler)(              \
            &VSF_MCONNECT(VSF_DMA_CFG_IMP_PREFIX, _dma, __IDX)                  \
        );                                                                      \
        vsf_hal_irq_leave(ctx);                                                 \
    }
#include "hal/driver/common/dma/dma_template.inc"

#endif /* VSF_HAL_USE_DMA */