
#include "vsf_hal.h"
#include "dma_task.h"


#if 0

#define TRANSFER_SIZE 32

// Source/destination buffers for the single-channel memory-to-memory test.
static uint32_t src_arry[TRANSFER_SIZE];
static uint32_t dst_arry[TRANSFER_SIZE];
// Set true by __dma_handler on the transfer-complete IRQ;
// busy-polled by vsf_dma_mem_to_mem. volatile so the poll loop re-reads it.
volatile bool __dma_flag = false;

/**
 * DMA channel ISR: signals completion of the memory-to-memory transfer
 * by setting __dma_flag. All IRQ causes other than transfer-complete
 * are ignored.
 */
static void __dma_handler(void *target_ptr, vsf_dma_t *dma_ptr, int8_t channel,
                          vsf_dma_irq_mask_t irq_mask)
{
    if ((irq_mask & VSF_DMA_IRQ_MASK_CPL) == 0) {
        return;
    }
    __dma_flag = true;
}

/**
 * Single-channel DMA memory-to-memory test (disabled variant).
 *
 * Requests one channel, configures a 32-bit/burst-4 memory-to-memory
 * transfer, copies src_arry to dst_arry and verifies the result.
 *
 * @return 1 on pass, 0 on data mismatch, -1 on setup/start failure.
 */
int vsf_dma_mem_to_mem(void)
{
    int i;
    vsf_dma_t *dma = (vsf_dma_t *)&vsf_hw_dma0;
    vsf_err_t  err;

    int chn = vsf_dma_channel_request(dma);
    if (chn < 0) {
        VSF_ASSERT(0);
        return -1;
    }

    vsf_dma_channel_cfg_t cfg = {
        .mode = VSF_DMA_MEMORY_TO_MEMORY | VSF_DMA_SRC_ADDR_INCREMENT |
                VSF_DMA_DST_ADDR_INCREMENT | VSF_DMA_SRC_WIDTH_BYTES_4 |
                VSF_DMA_DST_WIDTH_BYTES_4 | VSF_DMA_SRC_BURST_LENGTH_4 |
                VSF_DMA_DST_BURST_LENGTH_4,
        .src_idx = 0,
        .dst_idx = 0,
        .isr     = {
                .handler_fn = __dma_handler,
                .target_ptr = NULL,
        }};
    err = vsf_dma_channel_config(dma, chn, &cfg);
    if (err != VSF_ERR_NONE) {
        VSF_ASSERT(0);
        vsf_dma_channel_release(dma, chn);
        return -1;
    }

    // Known ascending pattern so a partial/garbled copy is detectable.
    for (i = 0; i < dimof(src_arry); i++) {
        src_arry[i] = i;
    }

    // Clear the completion flag BEFORE starting: a stale 'true' from a
    // previous run would terminate the wait loop early.
    __dma_flag = false;

    err = vsf_dma_channel_start(dma, chn, (uint32_t)src_arry,
                                (uint32_t)dst_arry, dimof(src_arry));
    if (err != VSF_ERR_NONE) {
        VSF_ASSERT(0);
        vsf_dma_channel_release(dma, chn);
        return -1;
    }

    // Busy-wait for the completion ISR.
    while (!__dma_flag);

    if (memcmp(src_arry, dst_arry, sizeof(src_arry)) != 0) {
        VSF_ASSERT(0);
        // Release the channel on the failure path too (was leaked before).
        vsf_dma_channel_release(dma, chn);
        return 0;
    }

    // Release the channel before tearing down the controller.
    vsf_dma_channel_release(dma, chn);
    vsf_dma_fini(dma);

    return 1;
}

#else

#define DMA_TRANSFER_A_SIZE   256
#define DMA_TRANSFER_B_SIZE   256

// Completion flags, set from the DMA ISR context and busy-polled by the
// test loop; volatile so each poll re-reads memory.
volatile bool __dma_request_a = false;
volatile bool __dma_request_b = false;

// Buffers for transfer A. The canary words bracketing dst_arry allow
// detection of DMA writes past the destination bounds.
typedef struct {
    uint32_t src_arry[DMA_TRANSFER_A_SIZE];

    uint32_t canary_0;
    uint32_t dst_arry[DMA_TRANSFER_A_SIZE];
    uint32_t canary_1;
} test_a_t;

// Buffers for transfer B. Fixed: previously sized with DMA_TRANSFER_A_SIZE;
// now uses its own DMA_TRANSFER_B_SIZE (currently the same value, so the
// layout is unchanged).
typedef struct {
    uint32_t src_arry[DMA_TRANSFER_B_SIZE];

    uint32_t canary_0;
    uint32_t dst_arry[DMA_TRANSFER_B_SIZE];
    uint32_t canary_1;
} test_b_t;

test_a_t test_a;
test_b_t test_b;

/**
 * Completion ISR for transfer A: flags the poll loop when the
 * transfer-complete interrupt fires; other IRQ causes are ignored.
 */
static void __dma_request_a_handler(void *target_ptr, vsf_dma_t *dma_ptr, int8_t channel,
                                    vsf_dma_irq_mask_t irq_mask)
{
    if ((irq_mask & VSF_DMA_IRQ_MASK_CPL) == 0) {
        return;
    }
    __dma_request_a = true;
}

/**
 * Completion ISR for transfer B: flags the poll loop when the
 * transfer-complete interrupt fires; other IRQ causes are ignored.
 */
static void __dma_request_b_handler(void *target_ptr, vsf_dma_t *dma_ptr, int8_t channel,
                                    vsf_dma_irq_mask_t irq_mask)
{
    if ((irq_mask & VSF_DMA_IRQ_MASK_CPL) == 0) {
        return;
    }
    __dma_request_b = true;
}


int vsf_dma_mem_to_mem(void)
{    
    int i;
    vsf_dma_t *dma = (vsf_dma_t *)&vsf_hw_dma0;
    vsf_err_t  err;
    
    printf("a: %p/%p, b: %p/%p\n", test_a.src_arry, test_a.dst_arry, test_b.src_arry, test_b.dst_arry);
    
    for (i = 0; i < dimof(test_a.src_arry); i++) {
        test_a.src_arry[i] = i;
    }
    
    for (i = 0; i < dimof(test_b.src_arry); i++) {
        test_b.src_arry[i] = dimof(test_b.src_arry) - i;
    }

    int req_a_chn = vsf_dma_channel_request(dma);
    if (req_a_chn < 0) {
        VSF_ASSERT(0);
        return -1;
    }

    vsf_dma_channel_cfg_t req_a_cfg = {
        .mode = VSF_DMA_MEMORY_TO_MEMORY | VSF_DMA_SRC_ADDR_INCREMENT |
                VSF_DMA_DST_ADDR_INCREMENT | VSF_DMA_SRC_WIDTH_BYTES_4 |
                VSF_DMA_DST_WIDTH_BYTES_4 | VSF_DMA_SRC_BURST_LENGTH_4 |
                VSF_DMA_DST_BURST_LENGTH_4,
        .src_idx = 0,
        .dst_idx = 0,
        .isr     = {
                .handler_fn = __dma_request_a_handler,
                .target_ptr = NULL,
        }};
    err = vsf_dma_channel_config(dma, req_a_chn, &req_a_cfg);
    if (err != VSF_ERR_NONE) {
        VSF_ASSERT(0);
        vsf_dma_channel_release(dma, req_a_chn);
        return -1;
    }
        
    int req_b_chn = vsf_dma_channel_request(dma);
    if (req_b_chn < 0) {
        VSF_ASSERT(0);
        return -1;
    }
    vsf_dma_channel_cfg_t req_b_cfg = {
        .mode = VSF_DMA_MEMORY_TO_MEMORY | VSF_DMA_SRC_ADDR_INCREMENT |
                VSF_DMA_DST_ADDR_INCREMENT | VSF_DMA_SRC_WIDTH_BYTES_4 |
                VSF_DMA_DST_WIDTH_BYTES_4 | VSF_DMA_SRC_BURST_LENGTH_4 |
                VSF_DMA_DST_BURST_LENGTH_4,
        .src_idx = 0,
        .dst_idx = 0,
        .isr     = {
                .handler_fn = __dma_request_b_handler,
                .target_ptr = NULL,
        }};
    err = vsf_dma_channel_config(dma, req_b_chn, &req_b_cfg);
    if (err != VSF_ERR_NONE) {
        VSF_ASSERT(0);
        vsf_dma_channel_release(dma, req_b_chn);
        return -1;
    }    

    err = vsf_dma_channel_start(dma, req_a_chn, (uint32_t)test_a.src_arry, (uint32_t)test_a.dst_arry, dimof(test_a.src_arry));
    if (err != VSF_ERR_NONE) {
        VSF_ASSERT(0);
        vsf_dma_channel_release(dma, req_a_chn);
        return -1;
    }
    //while (!__dma_request_a );

    err = vsf_dma_channel_start(dma, req_b_chn, (uint32_t)test_b.src_arry, (uint32_t)test_b.dst_arry, dimof(test_b.src_arry));
    if (err != VSF_ERR_NONE) {
        VSF_ASSERT(0);
        vsf_dma_channel_release(dma, req_b_chn);
        return -1;
    }

    //while (!__dma_request_b);
    
    while (!__dma_request_a || !__dma_request_b);

    if (memcmp(test_a.src_arry, test_a.dst_arry, sizeof(test_a.src_arry)) != 0) {
        VSF_ASSERT(0);
        return 0;
    }
    
    if (memcmp(test_b.src_arry, test_b.dst_arry, sizeof(test_b.src_arry)) != 0) {
        VSF_ASSERT(0);
        return 0;
    }

    vsf_dma_fini(dma);
    printf("test pass\n");

    return 1;
}
#endif