#include "qe_log.h"
#include "qe_list.h"
#include "qe_macros.h"
#include "qe_assert.h"
#include "qe_memory.h"
#include "qe_driver.h"
#include "qe_dmaengine.h"



/* Log domain tag: all qe_error()/qe_info() calls in this file are
 * attributed to "axi-dma". */
QELOG_DOMAIN("axi-dma");



/* Number of hardware SG descriptors preallocated per channel. */
#define AXIDMA_NUM_DESCS       (64)
/* Number of user application words in each hardware descriptor
 * (APP0..APP4 in the descriptor layout below). */
#define AXIDMA_NUM_APP_WORDS	(5)


/*
 * In-memory scatter-gather descriptor consumed by the DMA engine.
 * Fields are volatile because the hardware reads and updates them
 * behind the CPU's back; the struct is 64-byte aligned to satisfy the
 * engine's descriptor alignment requirement.
 * NOTE(review): layout presumably matches the Xilinx AXI DMA SG
 * descriptor format (PG021) — confirm against the IP product guide.
 */
typedef struct
{
    volatile qe_u32 next_desc;      /* lower 32 bits of next descriptor address */
    volatile qe_u32 next_desc_msb;  /* upper 32 bits of next descriptor address */
    volatile qe_u32 buf_addr;       /* lower 32 bits of data buffer address */
    volatile qe_u32 buf_addr_msb;   /* upper 32 bits of data buffer address */
    volatile qe_u32 mcdma_control;  /* multichannel DMA control (unused in plain mode?) — TODO confirm */
    volatile qe_u32 vsize_stride;   /* 2D transfer vsize/stride — TODO confirm semantics */
    volatile qe_u32 control;        /* transfer control (length, SOF/EOF flags) — TODO confirm */
    volatile qe_u32 status;         /* completion/error status written by hardware */
    volatile qe_u32 apps[AXIDMA_NUM_APP_WORDS]; /* user application words APP0..APP4 */
} qe_aligned(64) axidma_hw_desc;

/*
 * One software-managed segment: the hardware descriptor plus the list
 * node used to thread it onto a channel's segment lists.  Kept 64-byte
 * aligned so the embedded `hw` descriptor (first member) lands on the
 * alignment the engine requires.
 */
typedef struct
{
    axidma_hw_desc hw;   /* must stay first: its address is handed to hardware */
    qe_list list;        /* node on free_seg_list / a descriptor's segment list */
} qe_aligned(64) axidma_tx_segment;

/*
 * Software transaction descriptor: one logical DMA transfer, built
 * from one or more hardware segments.
 */
typedef struct
{
    qe_dma_tx_desc async_tx;  /* generic DMA-engine transaction handle */
    qe_list segments;         /* list of axidma_tx_segment making up this transfer */
    qe_list node;             /* node on the channel's pending/active/done lists */
    qe_bool cyclic;           /* transfer repeats indefinitely (cyclic mode) */
} axidma_tx_descriptor;

struct axidma_device;

/*
 * Per-channel driver state.
 *
 * Fix: `free_seg_list` was declared twice (duplicate struct member —
 * a compile error); the duplicate has been removed.
 */
typedef struct
{
    struct axidma_device *dev;      /* owning device */

    qe_dma_chan chan;               /* embedded generic channel; to_axidma_chan() maps back */

    /* Descriptor lifecycle lists — presumably pending -> active -> done;
     * TODO confirm once issue/complete paths are implemented. */
    qe_list pending_list;
    qe_list active_list;
    qe_list done_list;
    qe_list free_seg_list;          /* unused hardware segments available for new transfers */

    axidma_tx_segment *segs;        /* backing array of AXIDMA_NUM_DESCS segments */
    axidma_tx_segment *cyclic_seg;  /* extra terminator segment for cyclic mode */

    qe_uint id;                     /* channel index within the device */
    qe_uint irq;                    /* interrupt line for this channel */
    qe_bool has_sg;                 /* hardware supports scatter-gather */
    qe_bool cyclic;                 /* channel configured for cyclic transfers */
} axidma_chan;

/*
 * Memory-mapped register block for one DMA direction (offsets match
 * the MM2S register map: control at 0x00 through length at 0x28).
 */
typedef struct
{
    volatile qe_u32 control;        /* 0x00 MM2SDMA Control register */
    volatile qe_u32 status;         /* 0x04 MM2SDMA Status register */
    volatile qe_u32 reserved0[4];    /* 0x08~0x14 */
    volatile qe_u32 address;        /* 0x18 MM2S Source Address. Lower 32 bits of address. */
    volatile qe_u32 address_hi;     /* 0x1C MM2S Source Address. Upper 32 bits of address. */
    volatile qe_u32 reserved1[2];   /* 0x20~0x24 */
    volatile qe_u32 length;         /* 0x28 MM2S transfer length in bytes; writing it
                                     * presumably starts a simple-mode transfer — TODO confirm */
} axidma_reg;

/*
 * Per-device driver state: the generic DMA device plus the register
 * window and the channel array.
 */
typedef struct axidma_device
{
    qe_dma_dev dma;                 /* embedded generic DMA device */

    axidma_reg *reg;                /* mapped register block */

    qe_intc_dev *intc;              /* parent interrupt controller */

    axidma_chan *channels;          /* array of num_channels channels */
    qe_uint num_channels;
} axidma_dev;

/* Map an embedded qe_dma_chan pointer back to its enclosing axidma_chan. */
#define to_axidma_chan(dchan) \
	qe_container_of(dchan, axidma_chan, chan)



/*
 * Look up channel `id` on the DMA device.
 *
 * Returns a pointer to the generic channel handle, or QE_NULL if the
 * id is out of range.
 */
static qe_dma_chan *axidma_get_chan(qe_dma_dev *dma, qe_int id)
{
    axidma_dev *dev = (axidma_dev *)dma->dev.priv;

    /* `id` is signed while num_channels is unsigned: reject negatives
     * explicitly instead of relying on implicit signed->unsigned
     * promotion in the comparison. */
    if (id < 0 || (qe_uint)id >= dev->num_channels) {
        qe_error("chan id %d out of range", id);
        return QE_NULL;
    }

    return &dev->channels[id].chan;
}

/*
 * Allocate the channel's descriptor resources: the segment array
 * (chained into a ring via next_desc/next_desc_msb and parked on
 * free_seg_list) and, for cyclic channels, one extra segment.
 *
 * Fixes vs. original:
 *  - next_desc address: `chan->segs + sizeof(...) * k` double-scaled
 *    the offset (pointer arithmetic already multiplies by the element
 *    size), so the links pointed far past the array.
 *  - `addr >> 32` is undefined if qe_ubase is 32 bits wide; split the
 *    shift into two 16-bit steps.
 *  - A failed cyclic-segment allocation used to log and fall through
 *    to `return qe_ok` with cyclic_seg == NULL, leaking `segs`; it now
 *    releases the array and returns qe_err_mem.
 */
static qe_ret axidma_alloc_chan(qe_dma_chan *dchan)
{
    int i;
    qe_ubase addr;
    axidma_chan *chan = to_axidma_chan(dchan);

    /* <todo: alloc dma coherent memory> */
    chan->segs = qe_malloc(sizeof(axidma_tx_segment) * AXIDMA_NUM_DESCS);
    if (!chan->segs) {
        qe_error("alloc desc segments failed");
        return qe_err_mem;
    }

    /* Link the descriptors into a ring: each one points at the next,
     * the last wraps back to the first. */
    for (i = 0; i < AXIDMA_NUM_DESCS; i++) {
        addr = (qe_ubase)&chan->segs[(i + 1) % AXIDMA_NUM_DESCS];
        chan->segs[i].hw.next_desc = (qe_u32)(addr & 0xFFFFFFFF);
        /* Two 16-bit shifts: yields the high word on 64-bit qe_ubase,
         * zero on 32-bit, without an undefined >>32. */
        chan->segs[i].hw.next_desc_msb = (qe_u32)((addr >> 16) >> 16);
        qe_list_append(&chan->segs[i].list, &chan->free_seg_list);
    }

    if (chan->cyclic) {
        /* <todo: alloc dma coherent memory> */
        chan->cyclic_seg = qe_malloc(sizeof(axidma_tx_segment));
        if (!chan->cyclic_seg) {
            qe_error("chan[%d] alloc desc segment for cyclic failed", chan->id);
            qe_free(chan->segs);
            chan->segs = QE_NULL;
            return qe_err_mem;
        }
    }

    return qe_ok;
}

/*
 * Release the channel resources allocated by axidma_alloc_chan().
 *
 * Fix vs. original: this function was copy-paste residue from a
 * "dma_test" driver — it referenced dma_test_chan, to_dma_test_chan,
 * free_descriptors and desc_pool, none of which exist in this file,
 * and its name did not match the `axidma_free_chan` entry in the ops
 * table.  Rewritten against the real axidma_chan fields.
 */
static void axidma_free_chan(qe_dma_chan *dchan)
{
    axidma_chan *chan = to_axidma_chan(dchan);

    /* <todo: free dma coherent memory once alloc side uses it> */
    qe_free(chan->segs);
    chan->segs = QE_NULL;

    if (chan->cyclic_seg) {
        qe_free(chan->cyclic_seg);
        chan->cyclic_seg = QE_NULL;
    }
}

/*
 * DMA core callbacks for this driver.  Renamed from `dma_test_ops`:
 * every other symbol in this file uses the axidma_ prefix.
 * NOTE(review): axidma_issue_pending is not defined in this file —
 * confirm it exists elsewhere or implement it before this table is
 * registered.
 */
static const qe_dma_ops axidma_ops = {
    .get_chan = axidma_get_chan,
    .alloc_chan = axidma_alloc_chan,
    .free_chan = axidma_free_chan,
    .device_issue_pending = axidma_issue_pending,
};

/*
 * Probe one "xlnx,axi-dma" device-tree node.
 *
 * Currently a skeleton that succeeds unconditionally.  The original
 * declared five locals (len, name, name_intc, intc, dev) that were
 * never used; they have been removed until the implementation lands.
 */
static qe_ret axidma_probe(const void *fdt, int offset)
{
    /* TODO: parse reg/interrupt properties from `fdt` at `offset`,
     * allocate and populate an axidma_dev, map the register block,
     * resolve the parent interrupt controller, and register the DMA
     * device with the core. */
    (void)fdt;
    (void)offset;

    return qe_ok;
}

/* Device-tree match table: binds this driver to "xlnx,axi-dma" nodes.
 * Terminated by the empty sentinel entry. */
static const qe_device_id axidma_ids[] = {
    {.compatible="xlnx,axi-dma"},
    {}
};

/* Register the driver with the qe driver core. */
QE_DRIVER(axi_dma) = {
    .name = "axi-dma",
    .of_match = axidma_ids,
    .probe = axidma_probe,
};

/* Presumably forces the linker to retain the driver object even when
 * nothing references it directly — TODO confirm macro semantics. */
QE_DRIVER_FORCE_EXPORT(axi_dma);