/**
 * Copyright (C) 2021 - 2031 O-Cubes Co., Ltd.
 */

/****************************************************************
 *  @file    dma.c
 *  @brief   DMA engine source file
 *  @version v1.0
 *  @date    03. Apr. 2023
 ****************************************************************/
#include <string.h>
#include <assert.h>

#include "printf.h"
#include "dma.h"
#include "syscounter.h"
#include "core.h"
#include "common.h"

/* Give up waiting for a busy channel after this long (5 us worth of ticks). */
#define CHAN_ACTIVE_TIMEOUT                     (5 * TIME_US_VAL)
/* Statically allocated descriptors (LLIs) per channel. */
#define CHAN_MAX_DESC_NR                        64

/* Accessors over the static per-channel descriptor table glb_chan_desc. */
#define GET_CHAN_DESC_PTR(chan, id)             ((chan_desc_t_ptr)(&glb_chan_desc[chan][id]))
#define GET_CHAN_DESC_FIRST_LLP(chan)           ((UINTPTR)glb_chan_desc[chan])
#define GET_CHAN_DESC_SIZE(chan)                (sizeof(glb_chan_desc[chan]))

#define GET_CHAN_NEXT_DESC_UINTPTR(chan, next)  ((UINTPTR)&glb_chan_desc[chan][next])
#define SET_CHAN_DESC_SADDR(chan, id, saddr)    (GET_CHAN_DESC_PTR(chan, id)->sAddr = saddr)
#define SET_CHAN_DESC_DADDR(chan, id, daddr)    (GET_CHAN_DESC_PTR(chan, id)->dAddr = daddr)
#define SET_CHAN_DESC_BUFFSIZE(chan, id, size)  (GET_CHAN_DESC_PTR(chan, id)->buffSize = size)
#define SET_CHAN_DESC_NEXTADDR(chan, id, next)  (GET_CHAN_DESC_PTR(chan, id)->nextAddr = next)

#define GET_CHAN_DESC_SADDR(chan, id)           (GET_CHAN_DESC_PTR(chan, id)->sAddr)
#define GET_CHAN_DESC_DADDR(chan, id)           (GET_CHAN_DESC_PTR(chan, id)->dAddr)
#define GET_CHAN_DESC_BUFFSIZE(chan, id)        (GET_CHAN_DESC_PTR(chan, id)->buffSize)
#define GET_CHAN_DESC_NEXTADDR(chan, id)        (GET_CHAN_DESC_PTR(chan, id)->nextAddr)

/* Tagged debug print; uses GNU named-variadic-macro syntax (args...). */
#define DMA_DEBUG(info, args ...)               printf("[DMA]: "info, ##args)

/* Hardware-walked descriptor chains; 16-byte aligned for the DMA engine. */
static __align(16) struct chan_desc glb_chan_desc[DMA_CHAN_NR_MAX][CHAN_MAX_DESC_NR];

/* Optional per-channel transfer-complete callbacks (NULL = none). */
static chan_xfer_cb_t glb_chan_xfer_cbs[DMA_CHAN_NR_MAX] = { NULL };

/* Start/end counter stamps used by dma_print_xfer_rate(). */
static u64 glb_s_timestamp[DMA_CHAN_NR_MAX];
static u64 glb_e_timestamp[DMA_CHAN_NR_MAX];

/* Store the transfer-complete callback for @chan (no NULL check; callers guard). */
static inline void register_cb(enum dma_chan_nr chan, chan_xfer_cb_t cb)
{
	glb_chan_xfer_cbs[chan] = cb;
}

/* Invoke the channel's registered callback, if any, with the IRQ status. */
static inline void xfer_cb(enum dma_chan_nr chan, u32 status)
{
	chan_xfer_cb_t cb = glb_chan_xfer_cbs[chan];

	if (cb != NULL)
		cb(chan, status);
}

/* Bring the engine to a known state before configuration. */
static void dma_reset(void)
{
	/**
	 * DMA has no software reset, abort all channels to reset dma
	 */
	DMA_CHIP_REG_PTR()->abort = DMA_ABORT;
}

/*
 * One-time chip-wide setup: maximum bus timeout, maximum starvation
 * threshold, and low-power mode disabled.
 */
static void dma_chip_init(void)
{
	u32 val = DMA_CHIP_REG_PTR()->int_delay;

	/* NOTE(review): 0xFFFF looks like the field maximum — confirm against
	 * the controller datasheet. */
	DMA_CHIP_REG_PTR()->timeout = 0xFFFF;

	val = SET_REG_BITS(val, STARVATION_THRLD_POS, STARVATION_THRLD_LEN, 0xFFFF);
	val &= ~DMA_LP_ENABLE;
	DMA_CHIP_REG_PTR()->int_delay = val;
}

static void dma_chan_priority(enum dma_chan_nr chan, enum chan_priority priority)
{
	u32 pos = 0;
	u32 val = DMA_CHIP_REG_PTR()->priority;

	pos = chan * 2;

	val = SET_REG_BITS(val, pos, CH_PRIOR_LEN, priority);
	DMA_CHIP_REG_PTR()->priority = val;
}

/* Program the arbitration weight for @chan. Two channels share one
 * 32-bit weight register: even channel in the low half, odd in the high. */
static void dma_chan_weight(enum dma_chan_nr chan, u16 weight)
{
	u32 idx = chan / 2;
	u32 shift = (chan % 2) * 16;
	u32 reg = DMA_CHIP_REG_PTR()->weight[idx];

	reg = SET_REG_BITS(reg, shift, CH_WEIGHT_LEN, weight);
	DMA_CHIP_REG_PTR()->weight[idx] = reg;
}

/* Latch the programmed channel weights into the arbiter. */
static inline void dma_chan_weight_update(void)
{
	DMA_CHIP_REG_PTR()->wght_update = WEIGHT_UPDATE;
}

/* Route peripheral request @line to the write side of @chan. Four
 * channels share one register, 8 bits per channel. */
static void dma_chan_wrperiph_num(enum dma_chan_nr chan, u32 line)
{
	u32 idx = chan / 4;
	u32 shift = (chan % 4) * 8;
	u32 reg = DMA_CHIP_REG_PTR()->wr_periph[idx];

	reg = SET_REG_BITS(reg, shift, PERIPH_LINE_LEN, line);
	DMA_CHIP_REG_PTR()->wr_periph[idx] = reg;
}

/* Route peripheral request @line to the read side of @chan. Four
 * channels share one register, 8 bits per channel. */
static void dma_chan_rdperiph_num(enum dma_chan_nr chan, u32 line)
{
	u32 idx = chan / 4;
	u32 shift = (chan % 4) * 8;
	u32 reg = DMA_CHIP_REG_PTR()->rd_periph[idx];

	reg = SET_REG_BITS(reg, shift, PERIPH_LINE_LEN, line);
	DMA_CHIP_REG_PTR()->rd_periph[idx] = reg;
}

/* Enable the channel (required before a start can take effect). */
static inline void dma_chan_enable(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->ch_en = CHAN_ENABLE;
}

/* Disable the channel (read-modify-write clears only the enable bit). */
static inline void dma_chan_disable(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->ch_en &= ~CHAN_ENABLE;
}

/* Kick off the transfer on a single enabled channel. */
static inline void dma_chan_start(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->ch_start = CHAN_START;
}

/* Start several channels at once; bit N of @mask selects channel N. */
static inline void dma_multi_chan_start(u8 mask)
{
	u32 val = DMA_CHIP_REG_PTR()->chan_start;

	val = SET_REG_BITS(val, MULTI_CHAN_START_POS, MULTI_CHAN_START_LEN, mask);

	DMA_CHIP_REG_PTR()->chan_start = val;
}

/* Abort any in-flight transfer on the channel. */
static inline void dma_chan_abort(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->ch_abort = CHAN_ABORT;
}

/* Request the channel to pause; see dma_chan_has_suspend() for completion. */
static inline void dma_chan_suspend(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->ch_suspend = CHAN_SUSPEND;
}

/* Clear the suspend request so the channel resumes. */
static inline void dma_chan_resume(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->ch_suspend &= ~CHAN_SUSPEND;
}

/* Software-trigger the given interrupt bits (test/debug aid). */
__maybe_unused static inline void dma_chan_irq_set(enum dma_chan_nr chan, u32 mask)
{
	DMA_CHAN_REG_PTR(chan)->int_set = (mask & CHAN_IRQ_MASK);
}

/* Acknowledge (clear) the given pending interrupt bits. */
static inline void dma_chan_irq_clr(enum dma_chan_nr chan, u32 mask)
{
	DMA_CHAN_REG_PTR(chan)->int_clr = (mask & CHAN_IRQ_MASK);
}

/* Clear the given bits in the channel's int_mask register.
 * NOTE(review): despite the name this only ever CLEARS mask bits;
 * calling it with mask == 0 changes nothing (int_mask &= ~0x0).
 * Confirm against the datasheet whether a set operation is needed
 * to (re)enable interrupts — several callers pass 0 expecting that. */
static inline void dma_chan_irq_mask(enum dma_chan_nr chan, u32 mask)
{
	DMA_CHAN_REG_PTR(chan)->int_mask &= ~(mask & CHAN_IRQ_MASK);
}

/* Raw pending-interrupt status of one channel. */
static inline u32 dma_chan_irq_status(enum dma_chan_nr chan)
{
	return DMA_CHAN_REG_PTR(chan)->int_stat;
}

/* Chip-level status: bit N set when channel N has an interrupt pending. */
static inline u32 dma_chip_irq_status(void)
{
	return DMA_CHIP_REG_PTR()->status;
}

/* TRUE while either the read or the write side of the channel is busy. */
static inline BOOL dma_chan_is_hw_active(enum dma_chan_nr chan)
{
	u32 busy_bits = CHAN_RD_ACTIVE | CHAN_WR_ACTIVE;

	return (DMA_CHAN_REG_PTR(chan)->ch_active & busy_bits) != 0;
}

static inline BOOL dma_chan_has_suspend(enum dma_chan_nr chan)
{
	u32 val = DMA_CHAN_REG_PTR(chan)->ch_active;
	u32 suspend = (CHAN_RD_SUSPENDED | CHAN_RD_SUSPENDED);

	return (val & suspend) == suspend;
}

/*
 * Poll the channel until it goes idle or CHAN_ACTIVE_TIMEOUT expires.
 * Returns TRUE when the wait timed out (channel still active).
 */
static BOOL dma_chan_release_timeout(enum dma_chan_nr chan)
{
	BOOL busy;
	u32 elapsed;
	u64 start = syscnt_read_counter();

	do {
		busy = dma_chan_is_hw_active(chan);
		elapsed = syscnt_tick_to_usec(syscnt_read_counter() - start);
	} while (busy && (elapsed < CHAN_ACTIVE_TIMEOUT));

	return busy ? TRUE : FALSE;
}

/* Descriptors needed for @xferLen bytes: one per DMA_XFER_BUFFER_SIZE chunk. */
static inline u32 cal_desc_nr(u32 xferLen)
{
	return DIV_ROUND_UP(xferLen, DMA_XFER_BUFFER_SIZE);
}

/* Debug aid: print every descriptor of the channel's chain, stopping
 * after the one flagged CMD_LAST. */
__maybe_unused static void dump_dma_chan_desc(enum dma_chan_nr chan)
{
	u32 id;

	for (id = 0; id < CHAN_MAX_DESC_NR; id++) {
		DMA_DEBUG("sAddr: 0x%08lx, dAddr: 0x%08lx, buffSize: 0x%08lx, nextAddr: 0x%08lx\n",
			GET_CHAN_DESC_SADDR(chan, id),
			GET_CHAN_DESC_DADDR(chan, id),
			GET_CHAN_DESC_BUFFSIZE(chan, id),
			GET_CHAN_DESC_NEXTADDR(chan, id));

		if (GET_CHAN_DESC_NEXTADDR(chan, id) & CMD_LAST)
			break;
	}
}

/* Assert the transfer fits in the per-channel descriptor table. */
static inline void check_desc_capacity(u32 xferLen)
{
	u32 descNR = cal_desc_nr(xferLen);

	assert(descNR <= CHAN_MAX_DESC_NR);	/* at most CHAN_MAX_DESC_NR descriptors (original comment: "max 64K") */
}

static inline void clr_desc_content(enum dma_chan_nr chan)
{
	//TODO, memset
}

/*
 * Total bytes described by the channel's chain: sums buffSize over each
 * LLI (Linked List Item — one hardware descriptor; this answers the
 * original comment's question about the abbreviation), up to and
 * including the descriptor flagged CMD_LAST.
 */
static u32 cal_lli_bytes(enum dma_chan_nr chan)
{
	u32 total = 0;
	u32 id;

	for (id = 0; id < CHAN_MAX_DESC_NR; id++) {
		total += GET_CHAN_DESC_BUFFSIZE(chan, id);
		if (GET_CHAN_DESC_NEXTADDR(chan, id) & CMD_LAST)
			break;
	}

	return total;
}

/* Point the channel at the head of its descriptor chain; the inline
 * address/length registers are zeroed so the transfer is fully
 * chain-driven. */
static void dma_cfg_first_lli(enum dma_chan_nr chan)
{
	DMA_CHAN_REG_PTR(chan)->rd_addr   = 0;
	DMA_CHAN_REG_PTR(chan)->wr_addr   = 0;
	DMA_CHAN_REG_PTR(chan)->xfer_len  = 0;

	/* Write first LLP */
	DMA_CHAN_REG_PTR(chan)->next_addr = GET_CHAN_DESC_FIRST_LLP(chan);
}

/* Arm the channel: program the first LLI and enable it (start is issued
 * separately by the caller). */
static void dma_issue_pending(enum dma_chan_nr chan)
{
	dma_cfg_first_lli(chan);

	/* NOTE(review): dma_chan_irq_mask(chan, 0) clears no bits and looks
	 * like a no-op — confirm the intended interrupt-enable sequence. */
	dma_chan_irq_mask(chan, 0);
	dma_chan_enable(chan);
}

/* Busy-wait until the channel raises its END interrupt, then ack every
 * status bit observed. */
static void dma_periph_xfer_wait(enum dma_chan_nr chan)
{
	u32 status;

	for (;;) {
		status = dma_chan_irq_status(chan);
		if ((status & CHAN_IRQ_END) == CHAN_IRQ_END)
			break;
	}

	dma_chan_irq_clr(chan, status);
}

/*
 * Wait (bounded by CHAN_ACTIVE_TIMEOUT) for the channel to drain, then
 * record completion time or log the failure.
 *
 * @status: error bits from the channel IRQ status; 0 means success.
 * NOTE(review): when status == 0 but the channel never went idle, this
 * still takes the success branch and stamps an end time — confirm that
 * is the intended behavior.
 */
static void dma_chan_release(enum dma_chan_nr chan, u32 status)
{
	BOOL active;

	active = dma_chan_release_timeout(chan);

	if (!status) {
		glb_e_timestamp[chan] = syscnt_read_counter();
	} else if (active) {
		DMA_DEBUG("CHAN(%d) TRANSFER TIMEOUT ...\n", chan);
	} else {
		DMA_DEBUG("CHAN(%d) TRANSFER ERROR(0x%08lx) ...\n", chan, status);
	}
}

/* Completion path used by the interrupt handler: release the channel,
 * then notify the user callback (if registered). */
static void dma_chan_xfer_cb(enum dma_chan_nr chan, u32 status)
{
	dma_chan_release(chan, status);

	xfer_cb(chan, status);
}

/*
 * Global DMA engine init: abort any in-flight activity, apply chip-wide
 * settings, and clear each channel's pending interrupt state.
 */
void dma_init(void)
{
	int chan;

	dma_reset();
	dma_chip_init();

	for (chan = DMA_CHAN_NR_0; chan < DMA_CHAN_NR_MAX; chan++) {
		dma_chan_irq_mask(chan, 0);
		dma_chan_irq_clr(chan, CHAN_IRQ_ALL);
	}
}

/* Allow the controller to enter its low-power mode (undone by dma_chip_init). */
inline void dma_enter_lpmode(void)
{
	DMA_CHIP_REG_PTR()->int_delay |= DMA_LP_ENABLE;
}

/*
 * Request a DMA channel.
 *
 * Returns @chan when it is valid and idle; otherwise returns the first
 * idle channel found, or -1 when @chan is out of range or every channel
 * is busy.
 */
int dma_request_chan(int chan)
{
	int index;

	if (chan < 0 || chan >= DMA_CHAN_NR_MAX)
		return -1;

	if (!dma_chan_is_hw_active(chan))
		return chan;

	/* Preferred channel is busy: fall back to the first idle one.
	 * (Bug fix: the scan previously re-tested 'chan' instead of 'index'
	 * — the very question raised in the original comment — so it could
	 * never find an alternative channel.) */
	for (index = DMA_CHAN_NR_0; index < DMA_CHAN_NR_MAX; index++) {
		if (!dma_chan_is_hw_active(index))
			return index;
	}

	return -1;
}

/* Clean (write back) the channel's descriptor table so the DMA engine
 * sees the CPU-built chain. */
inline void dma_flush_lli_dcache(int chan)
{
	dcache_clean_range((uint32_t *)GET_CHAN_DESC_FIRST_LLP(chan), GET_CHAN_DESC_SIZE(chan));
}

/* Clean (write back) a data range so a DMA read observes CPU writes.
 * A zero length is a no-op. */
inline void dma_flush_data_dcache(u32 addr, u32 len)
{
	if (!len)
		return;

	dcache_clean_range((uint32_t *)addr, len);
}

/* Invalidate a data range so the CPU re-reads DMA-written memory.
 * A zero length is a no-op. */
inline void dma_invalidate_data_dcache(u32 addr, u32 len)
{
	if (!len)
		return;

	dcache_invalid_range((uint32_t *)addr, len);
}

/*
 * Build the channel's descriptor chain for one transfer of @len bytes,
 * splitting it into DMA_XFER_BUFFER_SIZE chunks.
 *
 * @dir decides which address advances per chunk: a peripheral (FIFO)
 * side stays fixed while the memory side increments. The final
 * descriptor's nextAddr is set to CMD_LAST to terminate the chain.
 * Asserts (via check_desc_capacity) that @len fits in the table.
 * Returns 0.
 */
int dma_xfer_block_add(int chan, enum xfer_dir dir, u32 srcaddr, u32 dstaddr, u32 len)
{
	const u32 buffsize = DMA_XFER_BUFFER_SIZE;
	u32 xferLen;
	u32 id = 0;

	check_desc_capacity(len);
	clr_desc_content(chan);

	while (len) {
		xferLen = (len > buffsize) ? buffsize : len;

		SET_CHAN_DESC_SADDR(chan, id, srcaddr);
		SET_CHAN_DESC_DADDR(chan, id, dstaddr);
		SET_CHAN_DESC_BUFFSIZE(chan, id, xferLen);
		SET_CHAN_DESC_NEXTADDR(chan, id, GET_CHAN_NEXT_DESC_UINTPTR(chan, id + 1));

		len -= xferLen;

		/* Advance only the memory-side address(es); removed the dead
		 * self-assignments (dstaddr = dstaddr; etc.) of the original. */
		switch (dir) {
		case XFER_DIR_MEM2DEV:
			srcaddr += xferLen;
			break;
		case XFER_DIR_DEV2MEM:
			dstaddr += xferLen;
			break;
		case XFER_DIR_MEM2MEM:
		default:
			srcaddr += xferLen;
			dstaddr += xferLen;
			break;
		}

		if (len)
			id++;
	}

	/* Mark the final descriptor so the hardware stops walking the chain. */
	SET_CHAN_DESC_NEXTADDR(chan, id, CMD_LAST);

	return 0;
}

/* Public API: register a transfer-complete callback for @chan.
 * A NULL @cb is ignored (the existing callback is kept). */
void dma_register_cb(int chan, chan_xfer_cb_t cb)
{
	if (cb)
		register_cb(chan, cb);
}

/*
 * Configure a channel from a slave-transfer description.
 *
 * Builds the rd_burst/wr_burst control words from the transfer direction
 * and source/destination bus widths, routes the peripheral request line
 * when one side is a device, programs the TX/RX ack byte counts, and
 * applies the channel priority.
 */
void dma_cfg_slave(int chan, slave_cfg_t_ptr cfg)
{
	u32 reqnum;
	u32 srcWidth, dstWidth;
	u32 ackBytes;
	u32 priority;

	u32 rd_burst = 0, wr_burst = 0;

	reqnum   = cfg->slaveId;
	srcWidth = cfg->srcAddrWidth;
	dstWidth = cfg->dstAddrWidth;
	priority = cfg->priority;

	/* Default to 1-byte bus width when the caller left a width unset. */
	srcWidth  = !srcWidth ? SLAVE_BUSWIDTH_1_BYTE : srcWidth;
	dstWidth  = !dstWidth ? SLAVE_BUSWIDTH_1_BYTE : dstWidth;

	switch (cfg->dir)
	{
	case XFER_DIR_MEM2MEM:
	{
		/* Memory on both sides: incrementing read and write addresses. */
		rd_burst = SET_REG_BITS(rd_burst, RD_BURST_SIZE_POS, RD_BURST_SIZE_LEN, srcWidth);
		rd_burst |= RD_INCR;

		wr_burst = SET_REG_BITS(wr_burst, WR_BURST_SIZE_POS, WR_BURST_SIZE_LEN, dstWidth);
		wr_burst |= WR_INCR;
	}
	break;
	case XFER_DIR_MEM2DEV:
	{
		/* Read increments through memory; write targets a peripheral
		 * driven by request line reqnum. */
		rd_burst = SET_REG_BITS(rd_burst, RD_BURST_SIZE_POS, RD_BURST_SIZE_LEN, srcWidth);
		rd_burst |= RD_INCR;

		wr_burst = SET_REG_BITS(wr_burst, WR_BURST_SIZE_POS, WR_BURST_SIZE_LEN, dstWidth);
		wr_burst |= DST_IS_PERIPH;
		wr_burst = SET_REG_BITS(wr_burst, TX_SINGLE_BYTES_POS, TX_SINGLE_BYTES_LEN, dstWidth - 1);

		dma_chan_wrperiph_num(chan, reqnum);
	}
	break;
	case XFER_DIR_DEV2MEM:
	{
		/* Read side is a peripheral on request line reqnum; write
		 * increments through memory. */
		rd_burst = SET_REG_BITS(rd_burst, RD_BURST_SIZE_POS, RD_BURST_SIZE_LEN, srcWidth);
		rd_burst |= SRC_IS_PERIPH;
		rd_burst = SET_REG_BITS(rd_burst, RX_SINGLE_BYTES_POS, RX_SINGLE_BYTES_LEN, srcWidth - 1);

		wr_burst = SET_REG_BITS(wr_burst, WR_BURST_SIZE_POS, WR_BURST_SIZE_LEN, dstWidth);
		wr_burst |= WR_INCR;

		dma_chan_rdperiph_num(chan, reqnum);
	}
	break;
	}

	/* ackBytes is the current register value with only the TX and RX
	 * ack-bytes fields replaced by cfg->txAck / cfg->rxAck (this answers
	 * the question in the original comment). */
	ackBytes = DMA_CHAN_REG_PTR(chan)->ack_bytes;
	ackBytes = SET_REG_BITS(ackBytes, TX_ACK_BYTES_POS, TX_ACK_BYTES_LEN, cfg->txAck);
	ackBytes = SET_REG_BITS(ackBytes, RX_ACK_BYTES_POS, RX_ACK_BYTES_LEN, cfg->rxAck);
	DMA_CHAN_REG_PTR(chan)->rd_burst  = rd_burst;
	DMA_CHAN_REG_PTR(chan)->wr_burst  = wr_burst;
	DMA_CHAN_REG_PTR(chan)->ack_bytes = ackBytes;

	dma_chan_priority(chan, priority);
}

/* Arm the channel, stamp the transfer start time, and trigger it. */
void dma_xfer_start(int chan)
{
	dma_issue_pending(chan);

	glb_s_timestamp[chan] = syscnt_read_counter();

	dma_chan_start(chan);
}

/* Arm the channel without starting it; pair with
 * dma_xfer_multi_chan_start() to kick several channels together. */
inline void dma_xfer_pending(int chan)
{
	dma_issue_pending(chan);
}

/* Stamp the start time of every channel selected in @mask (bit N =
 * channel N), then kick them all off with a single register write. */
void dma_xfer_multi_chan_start(u8 mask)
{
	int chan;
	u32 chan_mask = mask;

	for (chan = DMA_CHAN_NR_0; chan < DMA_CHAN_NR_MAX; chan++) {
		if (chan_mask & BIT(chan))
			glb_s_timestamp[chan] = syscnt_read_counter();
	}

	dma_multi_chan_start(mask);
}

/* Block until the channel's END interrupt fires (polling; no timeout). */
inline void dma_periph_xfer_wait_finish(int chan)
{
	dma_periph_xfer_wait(chan);
}

/* Abort any in-flight transfer on @chan. */
inline void dma_xfer_abort(int chan)
{
	dma_chan_abort(chan);
}

/* Request @chan to pause its transfer. */
inline void dma_xfer_suspend(int chan)
{
	dma_chan_suspend(chan);
}

/* Resume a previously suspended transfer on @chan. */
inline void dma_xfer_unsuspend(int chan)
{
	dma_chan_resume(chan);
}

/* Disable and release @chan. Returns 0 on success, -1 when the channel
 * is still transferring (it is left enabled in that case). */
int  dma_free_chan(int chan)
{
	if (dma_chan_is_hw_active(chan))
		return -1;

	dma_chan_disable(chan);

	return 0;
}

/*
 * Log the last transfer's byte count, elapsed time and throughput for
 * @chan, from the timestamps recorded at start and completion. Prints a
 * zero rate when the end stamp is missing or not after the start stamp.
 * NOTE(review): "%lu" with a u32 assumes u32 == unsigned long on this
 * platform — confirm against the toolchain's type definitions.
 */
void dma_print_xfer_rate(int chan)
{
	float tick = 0;
	u32 bytes;
	float KBytesPerSec = 0;

	tick = (glb_e_timestamp[chan] > glb_s_timestamp[chan]) ?
				syscnt_tick_to_usec(glb_e_timestamp[chan] - glb_s_timestamp[chan])
				: 0;
	bytes = cal_lli_bytes(chan);
	if (tick)
		KBytesPerSec = (float)(bytes >> 10) * TIME_US_VAL / tick;

	DMA_DEBUG("CHAN(%d), TRANSFER RATE, len: %lu, elapsed: %0.2fms, rate: %0.2fKBytes/s...\n",
				chan, bytes, tick/1000, KBytesPerSec);
}

/*
 * Chip-level DMA interrupt handler: for each channel flagged in the
 * chip status register, acknowledge its interrupts and dispatch the
 * completion/error status to the registered callback.
 *
 * @arg: unused (installed-handler signature).
 */
void dma_interrupt_handler(void *arg)
{
	int chan;
	u32 status;
	u32 int0_status = dma_chip_irq_status();

	(void)arg;
	for (chan = DMA_CHAN_NR_0; chan < DMA_CHAN_NR_MAX; chan++) {
		/* which channel generated interrupt */
		if (!(int0_status & BIT(chan)))
			continue;

		status = dma_chan_irq_status(chan);

		/* Disable channel interrupt */
		dma_chan_irq_mask(chan, CHAN_IRQ_ALL);
		dma_chan_irq_clr(chan, status);

		/* Only error bits are forwarded; 0 means success. */
		dma_chan_xfer_cb(chan, status & CHAN_IRQ_ERR_MASK);

		/* Reenable channel interrupt.
		 * NOTE(review): dma_chan_irq_mask(chan, 0) computes
		 * int_mask &= ~0x0 and so clears nothing — this looks like a
		 * no-op; confirm how interrupts are meant to be re-enabled. */
		dma_chan_irq_mask(chan, 0);
	}
}

