#include <common.h>
#include <malloc.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <fh_axi_dma.h>
#include <linux/compiler.h>

//#define AXI_DMA_DEBUG 
#if defined(AXI_DMA_DEBUG)
/*
 * Debug trace macro. The call sites already supply the terminating
 * semicolon, so the expansion must not add one of its own: the old
 * trailing ';' produced an empty extra statement, which breaks
 * un-braced if/else bodies around a trace call.
 */
#define FH_AXI_DMA_DEBUG(fmt, args...) printf(fmt, ##args)
#else
#define FH_AXI_DMA_DEBUG(fmt, args...)
#endif


/*
 * Halt-on-failure assertion: print the failing function and line, then
 * spin forever (there is no way to recover in the boot loader context).
 * Wrapped in do { } while (0) so the macro behaves as exactly one
 * statement — the old bare-if form swallowed a following `else`.
 */
#define FH_DMA_ASSERT(expr) do { \
        if (!(expr)) { \
                printf("Assertion failed! %s:line %d\n", \
                       __func__, __LINE__); \
                while (1) \
                        ; \
        } \
} while (0)

/* Number of DMAC instances this driver manages. */
#define DMA_CONTROLLER_NUMBER (1)
/* Max elements one LLI block transfer may carry (BLOCK_TS hardware limit). */
#define FH_CHANNEL_MAX_TRANSFER_SIZE (2048)

#ifndef BIT
#define BIT(x) (1 << (x))
#endif
#ifndef GENMASK
/* Mask with bits h..l (inclusive) set — same as the Linux kernel helper. */
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))
#endif
#ifndef GENMASK_ULL
#define GENMASK_ULL(h, l)                                                      \
	(((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))
#endif

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/* DMAC_CFG: global enable and global interrupt enable bits. */
#define DMAC_EN_POS			0
#define DMAC_EN_MASK		BIT(DMAC_EN_POS)
#define INT_EN_POS			1
#define INT_EN_MASK			BIT(INT_EN_POS)

/* CH_CTL_H — shifts are within the upper 32-bit half of the 64-bit CTL
 * register; the trailing comments give the absolute CTL bit position. */

//axi bus read
#define CH_CTL_H_ARLEN_EN(n) ((n) << 6)	/* 32+6 = 38 */
#define CH_CTL_H_ARLEN(n) ((n) << 7)	/* 32+7 = 39 */

// axi bus write 
#define CH_CTL_H_AWLEN_EN(n) ((n) << 15) /* 32+15 = 47 */
#define CH_CTL_H_AWLEN(n) ((n) << 16)	/* 32+16 = 48 */


#define CH_CTL_H_IOC_BLKTFR		BIT(26)
#define CH_CTL_H_LLI_LAST		BIT(30)
#define CH_CTL_H_LLI_VALID		BIT(31)


/* AXI AxLEN encodings: value = burst length - 1. */
enum {
	DWAXIDMAC_ARWLEN_1		= 0,
	DWAXIDMAC_ARWLEN_2		= 1,
	DWAXIDMAC_ARWLEN_4		= 3,
	DWAXIDMAC_ARWLEN_8		= 7,
	DWAXIDMAC_ARWLEN_16		= 15,
	DWAXIDMAC_ARWLEN_32		= 31,
	DWAXIDMAC_ARWLEN_64		= 63,
	DWAXIDMAC_ARWLEN_128		= 127,
	DWAXIDMAC_ARWLEN_256		= 255,
	DWAXIDMAC_ARWLEN_MIN		= DWAXIDMAC_ARWLEN_1,
	DWAXIDMAC_ARWLEN_MAX		= DWAXIDMAC_ARWLEN_256
};



/* CH_CTL_L */
#define CH_CTL_L_LAST_WRITE_EN		BIT(30)

#define CH_CTL_L_AR_CACHE(n) ((n) << 22)
#define CH_CTL_L_AW_CACHE(n) ((n) << 26)

#define CH_CTL_L_DST_WIDTH_POS		11
#define CH_CTL_L_SRC_WIDTH_POS		8
#define CH_CTL_L_DST_INC_POS		6
#define CH_CTL_L_SRC_INC_POS		4
/* Burst (MSIZE) encodings for CTL_L SRC/DST_MSIZE fields. */
enum {
	DWAXIDMAC_BURST_TRANS_LEN_1	= 0,
	DWAXIDMAC_BURST_TRANS_LEN_4,
	DWAXIDMAC_BURST_TRANS_LEN_8,
	DWAXIDMAC_BURST_TRANS_LEN_16,
	DWAXIDMAC_BURST_TRANS_LEN_32,
	DWAXIDMAC_BURST_TRANS_LEN_64,
	DWAXIDMAC_BURST_TRANS_LEN_128,
	DWAXIDMAC_BURST_TRANS_LEN_256,
	DWAXIDMAC_BURST_TRANS_LEN_512,
	DWAXIDMAC_BURST_TRANS_LEN_1024
};


/* Address update mode: 0 = increment after each transfer, 1 = fixed. */
enum {
	DWAXIDMAC_CH_CTL_L_INC		= 0,
	DWAXIDMAC_CH_CTL_L_NOINC
};

#define CH_CTL_L_DST_MAST		BIT(2)
#define CH_CTL_L_SRC_MAST		BIT(0)

/* CH_CFG_H */
#define CH_CFG_H_PRIORITY_POS		17
#define CH_CFG_H_HS_SEL_DST_POS		4
#define CH_CFG_H_HS_SEL_SRC_POS		3
#define CH_CFG_H_TT_FC_POS		0

#define CH_CFG_H_DST_OSR_LMT(n) ((n) << 27)
#define CH_CFG_H_SRC_OSR_LMT(n) ((n) << 23)
/* Handshake select: hardware (0) or software (1). */
enum {
	DWAXIDMAC_HS_SEL_HW		= 0,
	DWAXIDMAC_HS_SEL_SW
};

/* NOTE(review): duplicate of the identical definition above — harmless. */
#define CH_CFG_H_TT_FC_POS		0
/* Transfer type / flow controller encodings (CFG_H TT_FC field). */
enum {
	DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC	= 0,
	DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC,
	DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC,
	DWAXIDMAC_TT_FC_PER_TO_PER_DMAC,
	DWAXIDMAC_TT_FC_PER_TO_MEM_SRC,
	DWAXIDMAC_TT_FC_PER_TO_PER_SRC,
	DWAXIDMAC_TT_FC_MEM_TO_PER_DST,
	DWAXIDMAC_TT_FC_PER_TO_PER_DST
};

/* CH_CFG_L */
#define CH_CFG_L_DST_MULTBLK_TYPE_POS	2
#define CH_CFG_L_SRC_MULTBLK_TYPE_POS	0
/* Multi-block transfer mechanisms; this driver uses the linked-list type. */
enum {
	DWAXIDMAC_MBLK_TYPE_CONTIGUOUS	= 0,
	DWAXIDMAC_MBLK_TYPE_RELOAD,
	DWAXIDMAC_MBLK_TYPE_SHADOW_REG,
	DWAXIDMAC_MBLK_TYPE_LL
};

/**
 * DW AXI DMA channel interrupts
 *
 * @DWAXIDMAC_IRQ_NONE: Bitmask of no one interrupt
 * @DWAXIDMAC_IRQ_BLOCK_TRF: Block transfer complete
 * @DWAXIDMAC_IRQ_DMA_TRF: Dma transfer complete
 * @DWAXIDMAC_IRQ_SRC_TRAN: Source transaction complete
 * @DWAXIDMAC_IRQ_DST_TRAN: Destination transaction complete
 * @DWAXIDMAC_IRQ_SRC_DEC_ERR: Source decode error
 * @DWAXIDMAC_IRQ_DST_DEC_ERR: Destination decode error
 * @DWAXIDMAC_IRQ_SRC_SLV_ERR: Source slave error
 * @DWAXIDMAC_IRQ_DST_SLV_ERR: Destination slave error
 * @DWAXIDMAC_IRQ_LLI_RD_DEC_ERR: LLI read decode error
 * @DWAXIDMAC_IRQ_LLI_WR_DEC_ERR: LLI write decode error
 * @DWAXIDMAC_IRQ_LLI_RD_SLV_ERR: LLI read slave error
 * @DWAXIDMAC_IRQ_LLI_WR_SLV_ERR: LLI write slave error
 * @DWAXIDMAC_IRQ_INVALID_ERR: LLI invalid error or Shadow register error
 * @DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR: Slave Interface Multiblock type error
 * @DWAXIDMAC_IRQ_DEC_ERR: Slave Interface decode error
 * @DWAXIDMAC_IRQ_WR2RO_ERR: Slave Interface write to read only error
 * @DWAXIDMAC_IRQ_RD2RWO_ERR: Slave Interface read to write only error
 * @DWAXIDMAC_IRQ_WRONCHEN_ERR: Slave Interface write to channel error
 * @DWAXIDMAC_IRQ_SHADOWREG_ERR: Slave Interface shadow reg error
 * @DWAXIDMAC_IRQ_WRONHOLD_ERR: Slave Interface hold error
 * @DWAXIDMAC_IRQ_LOCK_CLEARED: Lock Cleared Status
 * @DWAXIDMAC_IRQ_SRC_SUSPENDED: Source Suspended Status
 * @DWAXIDMAC_IRQ_SUSPENDED: Channel Suspended Status
 * @DWAXIDMAC_IRQ_DISABLED: Channel Disabled Status
 * @DWAXIDMAC_IRQ_ABORTED: Channel Aborted Status
 * @DWAXIDMAC_IRQ_ALL_ERR: Bitmask of all error interrupts
 * @DWAXIDMAC_IRQ_ALL: Bitmask of all interrupts
 */
enum {
	DWAXIDMAC_IRQ_NONE		= 0,
	DWAXIDMAC_IRQ_BLOCK_TRF		= BIT(0),
	DWAXIDMAC_IRQ_DMA_TRF		= BIT(1),
	DWAXIDMAC_IRQ_SRC_TRAN		= BIT(3),
	DWAXIDMAC_IRQ_DST_TRAN		= BIT(4),
	DWAXIDMAC_IRQ_SRC_DEC_ERR	= BIT(5),
	DWAXIDMAC_IRQ_DST_DEC_ERR	= BIT(6),
	DWAXIDMAC_IRQ_SRC_SLV_ERR	= BIT(7),
	DWAXIDMAC_IRQ_DST_SLV_ERR	= BIT(8),
	DWAXIDMAC_IRQ_LLI_RD_DEC_ERR	= BIT(9),
	DWAXIDMAC_IRQ_LLI_WR_DEC_ERR	= BIT(10),
	DWAXIDMAC_IRQ_LLI_RD_SLV_ERR	= BIT(11),
	DWAXIDMAC_IRQ_LLI_WR_SLV_ERR	= BIT(12),
	DWAXIDMAC_IRQ_INVALID_ERR	= BIT(13),
	DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR	= BIT(14),
	DWAXIDMAC_IRQ_DEC_ERR		= BIT(16),
	DWAXIDMAC_IRQ_WR2RO_ERR		= BIT(17),
	DWAXIDMAC_IRQ_RD2RWO_ERR	= BIT(18),
	DWAXIDMAC_IRQ_WRONCHEN_ERR	= BIT(19),
	DWAXIDMAC_IRQ_SHADOWREG_ERR	= BIT(20),
	DWAXIDMAC_IRQ_WRONHOLD_ERR	= BIT(21),
	DWAXIDMAC_IRQ_LOCK_CLEARED	= BIT(27),
	DWAXIDMAC_IRQ_SRC_SUSPENDED	= BIT(28),
	DWAXIDMAC_IRQ_SUSPENDED		= BIT(29),
	DWAXIDMAC_IRQ_DISABLED		= BIT(30),
	DWAXIDMAC_IRQ_ABORTED		= BIT(31),
	DWAXIDMAC_IRQ_ALL_ERR		= (GENMASK(21, 16) | GENMASK(14, 5)),
	DWAXIDMAC_IRQ_ALL		= GENMASK(31, 0)
};

/* CTL_LO field builders used when filling LLI descriptors. */
#define AXI_DMA_CTLL_DST_WIDTH(n) ((n) << 11) /* bytes per element */
#define AXI_DMA_CTLL_SRC_WIDTH(n) ((n) << 8)

/* 0 = increment address, 1 = fixed address (see DWAXIDMAC_CH_CTL_L_*). */
#define AXI_DMA_CTLL_DST_INC_MODE(n) ((n) << 6)
#define AXI_DMA_CTLL_SRC_INC_MODE(n) ((n) << 4)

#define AXI_DMA_CTLL_DST_MSIZE(n) ((n) << 18)
#define AXI_DMA_CTLL_SRC_MSIZE(n) ((n) << 14)


/* Master-interface select for destination (DMS) and source (SMS). */
#define AXI_DMA_CTLL_DMS(n) ((n) << 2)
#define AXI_DMA_CTLL_SMS(n) ((n) << 0)

//caution ,diff with ahb dma
#define AXI_DMA_CFGH_FC(n) ((n) << 0)

#define AXI_DMA_CFGH_DST_PER(n) ((n) << 3)

/*********************************
 *
 * Hardware register map below
 *
 *********************************/
/* this is the ip reg offset....don't change!!!!!!! */
#define DW_DMA_MAX_NR_CHANNELS 8

/*
 * Redefine this macro to handle differences between 32- and 64-bit
 * addressing, big vs. little endian, etc.
 * Expands to a 32-bit register plus 32 bits of padding, so each DW_REG
 * field occupies one naturally aligned 64-bit register slot.
 */
#define DW_REG(name)  \
    u32 name; \
    u32 __pad_##name

/* Hardware register definitions. */
/*
 * Per-channel register block (mirrors the hardware layout exactly —
 * field order and padding must not change). Offsets are relative to
 * the channel's base within struct dw_axi_dma_regs.CHAN[].
 */
struct dw_axi_dma_chan_regs
{
	DW_REG(SAR);			/* 0x0 ~ 0x7: source address */
    DW_REG(DAR);			/* 0x8 ~ 0xf: destination address */
	DW_REG(BLOCK_TS);		/* 0x10 ~ 0x17: block transfer size */
    u32 CTL_LO;				
    u32 CTL_HI;				/* 0x18 ~ 0x1f: transfer control */
    u32 CFG_LO;
    u32 CFG_HI;				/* 0x20 ~ 0x27: channel configuration */
    DW_REG(LLP);			/* 0x28 ~ 0x2f: linked-list pointer */
    u32 STATUS_LO;
    u32 STATUS_HI;			/* 0x30 ~ 0x37*/
    DW_REG(SWHS_SRC);		/* 0x38 ~ 0x3f: software handshake, source */
	DW_REG(SWHS_DST);		/* 0x40 ~ 0x47: software handshake, destination */
	DW_REG(BLK_TFR_RESU);
	DW_REG(ID);
	DW_REG(QOS);
	DW_REG(SSTAT);
	DW_REG(DSTAT);
	DW_REG(SSTATAR);
	DW_REG(DSTATAR);
	u32 INTSTATUS_EN_LO;
	u32 INTSTATUS_EN_HI;
	u32 INTSTATUS_LO;
	u32 INTSTATUS_HI;
	u32 INTSIGNAL_LO;
	u32 INTSIGNAL_HI;
	u32 INTCLEAR_LO;
	u32 INTCLEAR_HI;
	u32 rev[24];			/* pad channel block to its full stride */
};

/*
 * Whole-controller register map: common registers at 0x0, followed by
 * the per-channel blocks starting at offset 0x100. Layout mirrors the
 * hardware — do not reorder fields.
 */
struct dw_axi_dma_regs
{
	DW_REG(ID);					/* 0x0 */
	DW_REG(COMPVER);			/* 0x8 */
	DW_REG(CFG);				/* 0x10: global enable / interrupt enable */
	u32 CHEN_LO;				/* 0x18: channel enable (with write-enable bits) */
	u32 CHEN_HI;				/* 0x1c */
	DW_REG(reserved_20_27);		/* 0x20 */
	DW_REG(reserved_28_2f);		/* 0x28 */
	DW_REG(INTSTATUS);			/* 0x30 */
	DW_REG(COM_INTCLEAR);		/* 0x38 */
	DW_REG(COM_INTSTATUS_EN);	/* 0x40 */
	DW_REG(COM_INTSIGNAL_EN);	/* 0x48 */
	DW_REG(COM_INTSTATUS);		/* 0x50 */
	DW_REG(RESET);				/* 0x58: self-clearing soft reset */
	u32 reserved[40];			/* 0x60 */
	struct dw_axi_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];/* 0x100 */
};

/* Driver handle: just the MMIO base address of one DMAC instance. */
struct dw_axi_dma
{
    void *regs;
};


/* Raw volatile MMIO accessors (byte / halfword / word). */
#define __dma_raw_writeb(v, a) (*(volatile unsigned char *)(a) = (v))
#define __dma_raw_writew(v, a) (*(volatile unsigned short *)(a) = (v))
#define __dma_raw_writel(v, a) (*(volatile unsigned int *)(a) = (v))

#define __dma_raw_readb(a) (*(volatile unsigned char *)(a))
#define __dma_raw_readw(a) (*(volatile unsigned short *)(a))
#define __dma_raw_readl(a) (*(volatile unsigned int *)(a))

/* Named-register accessors: resolve `name` against struct dw_axi_dma_regs
 * so register offsets come from the struct layout, not hand-coded values. */
#define dw_readl(dw, name) \
    __dma_raw_readl(&(((struct dw_axi_dma_regs *)dw->regs)->name))
#define dw_writel(dw, name, val) \
    __dma_raw_writel((val), &(((struct dw_axi_dma_regs *)dw->regs)->name))
#define dw_readw(dw, name) \
    __dma_raw_readw(&(((struct dw_axi_dma_regs *)dw->regs)->name))
#define dw_writew(dw, name, val) \
    __dma_raw_writew((val), &(((struct dw_axi_dma_regs *)dw->regs)->name))


/* Capacity (in descriptors) of each static LLI pool below. */
#define DMA_LLI_SIZE        4096
/* Static descriptor pools, 64-byte aligned for the DMA engine's LLI fetch. */
static __attribute__((aligned(64))) struct axi_dma_lli g_dma_rx_lli[DMA_LLI_SIZE];
static __attribute__((aligned(64))) struct axi_dma_lli g_dma_tx_lli[DMA_LLI_SIZE];

/* The single controller instance (base set in dma_init()). */
struct dw_axi_dma g_axi_dma_obj;

struct axi_dma_lli *p_axi_dma_tx_lli = 0;
struct axi_dma_lli *p_axi_dma_rx_lli = 0;



/* CHEN-style registers pair each enable bit with a write-enable bit at
 * (bit + 8); writing mask<<8 selects which enable bits the write touches. */
#define channel_set_bit(dw, reg, mask) \
    dw_writel(dw, reg, ((mask) << 8) | (mask))
#define channel_clear_bit(dw, reg, mask) dw_writel(dw, reg, ((mask) << 8) | 0)


/*
 * Soft-reset the whole controller and busy-wait until the hardware
 * clears the self-clearing RESET bit.
 */
static void axi_dma_reset(struct dw_axi_dma *axi_dma_obj)
{
	dw_writel(axi_dma_obj, RESET, 1);
	while (dw_readl(axi_dma_obj, RESET) != 0)
		;
}

static axi_dma_enable(struct dw_axi_dma *axi_dma_obj)
{
	u32 ret;
	ret = dw_readl(axi_dma_obj, CFG);
	ret |= BIT(DMAC_EN_POS);
	dw_writel(axi_dma_obj, CFG, ret);
}

/*
 * Set the global interrupt-enable bit (DMAC_CFG.INT_EN) to @enable
 * (0 = masked, non-zero = enabled; read-modify-write).
 * Fixes: missing return type (implicit int — invalid since C99), and the
 * raw `enable << INT_EN_POS` could smear stray bits into CFG when a
 * caller passes any value other than 0/1, so normalize it first.
 */
static void axi_dma_global_isr_set(struct dw_axi_dma *axi_dma_obj, u32 enable)
{
	u32 cfg;

	cfg = dw_readl(axi_dma_obj, CFG);
	cfg &= ~INT_EN_MASK;
	cfg |= (enable ? 1u : 0u) << INT_EN_POS;
	dw_writel(axi_dma_obj, CFG, cfg);
}

/*
 * One-time driver initialization: record the controller MMIO base, point
 * the TX/RX LLI heads at their static pools, reset the controller, mask
 * the global interrupt (this driver polls), and enable the DMAC.
 * Fix: the pool addresses were cast to `struct dw_lli *`, a type that is
 * not declared anywhere here and does not match the pointers being
 * assigned (they are `struct axi_dma_lli *`); drop the bogus casts.
 */
void dma_init(void)
{
	g_axi_dma_obj.regs = (void *)DMAC0_REG_BASE;
	p_axi_dma_tx_lli = &g_dma_tx_lli[0];
	p_axi_dma_rx_lli = &g_dma_rx_lli[0];
	axi_dma_reset(&g_axi_dma_obj);
	axi_dma_global_isr_set(&g_axi_dma_obj, 0);
	axi_dma_enable(&g_axi_dma_obj);
}

/* Return the start of the static TX LLI descriptor pool. */
void *get_dma_tx_lli_head(void)
{
	return p_axi_dma_tx_lli;
}

/* Return the start of the static RX LLI descriptor pool. */
void *get_dma_rx_lli_head(void)
{
	return p_axi_dma_rx_lli;
}

/*
 * Busy-poll channel @channel until its DMA-transfer-complete status bit
 * (DWAXIDMAC_IRQ_DMA_TRF) is raised, then acknowledge by writing the
 * observed status word to the channel's interrupt-clear register.
 */
void wait_dma_xfer_done(u32 channel)
{
	struct dw_axi_dma *dma = &g_axi_dma_obj;
	u32 status;

	for (;;) {
		status = dw_readl(dma, CHAN[channel].INTSTATUS_LO);
		if (status & DWAXIDMAC_IRQ_DMA_TRF)
			break;
	}
	dw_writel(dma, CHAN[channel].INTCLEAR_LO, status);
}

/* Debug helper: print one LLI descriptor's fields.
 * Compiles to a no-op unless AXI_DMA_DEBUG is defined. */
static void dump_lli(struct axi_dma_lli *p_lli)
{
	FH_AXI_DMA_DEBUG("SAR: 0x%08x DAR: 0x%08x LLP: 0x%08x BTS 0x%08x CTL: 0x%08x:%08x\n",
		(p_lli->sar_lo),
		(p_lli->dar_lo),
		(p_lli->llp_lo),
		(p_lli->block_ts_lo),
		(p_lli->ctl_hi),
		(p_lli->ctl_lo));
}

/*
 * Number of linked-list items required for one transfer: the total
 * length divided by the per-item capacity, rounded up. Capacity is the
 * hardware block limit (FH_CHANNEL_MAX_TRANSFER_SIZE), further capped by
 * period_len for periodic transfers.
 */
static u32 cal_lli_size(struct dma_transfer *p_transfer)
{
	u32 per_lli = FH_CHANNEL_MAX_TRANSFER_SIZE;
	u32 count;

	if (p_transfer->period_len != 0)
		per_lli = MIN(p_transfer->period_len, per_lli);

	count = p_transfer->trans_len / per_lli;
	if (p_transfer->trans_len % per_lli)
		count++;

	FH_AXI_DMA_DEBUG("cal lli size is %x\n", count);
	return count;
}

/*
 * Hook for tuning per-descriptor CTL bits (AXI cache attributes and
 * burst lengths). Currently a deliberate no-op — the commented lines
 * show the intended tuning, left disabled.
 */
void axi_dma_ctl_init(u32 *low, u32 *high){
    //cache
  //  *low |= CH_CTL_L_AR_CACHE(3) | CH_CTL_L_AW_CACHE(0);
    //burst len
  //  *high |= CH_CTL_H_ARLEN_EN(1) | CH_CTL_H_ARLEN(0xf) | CH_CTL_H_AWLEN_EN(0) | CH_CTL_H_AWLEN(0xf);
}

/*
 * Hook for tuning channel CFG bits (outstanding-request limits).
 * Currently a deliberate no-op — the commented line shows the intended
 * tuning, left disabled.
 */
void axi_dma_cfg_init(u32 *low, u32 *high){
    //out standing
  //  *high |= CH_CFG_H_DST_OSR_LMT(7) | CH_CFG_H_SRC_OSR_LMT(7);
}

/*
 * Build a linked-list (LLI) descriptor chain for the transfer described
 * by @p_transfer into the caller-supplied buffer @p_lli_in, program the
 * channel's CFG/LLP/interrupt registers, and start the channel.
 *
 * The transfer is split into cal_lli_size() blocks of at most
 * FH_CHANNEL_MAX_TRANSFER_SIZE elements (further capped by period_len
 * when periodic). Per-block addresses advance, retreat, or stay fixed
 * according to the src/dst inc modes, unless the matching ADDR_RELOAD
 * flag pins every block to the start address.
 *
 * NOTE(review): descriptor and chain addresses are stored into 32-bit
 * llp_lo / LLP fields, so this assumes 32-bit addressing — confirm for
 * this SoC.
 */
void handle_single_transfer(struct dma_transfer *p_transfer,void *p_lli_in){

    u32 i;
    u32 lli_cal_size;
	u32 reg; /* NOTE(review): unused */
    volatile u32 ret_status;
    struct dw_axi_dma *temp_dwc;
    u32 temp_src_add = 0;
    u32 temp_dst_add = 0;
    u32 trans_total_len = 0;
    u32 temp_trans_size = 0;
    u32 max_trans_size = 0;;
	u32 src_inc_mode;
	u32 dst_inc_mode;
	u32 cfg_low;
    u32 cfg_high;
    temp_dwc = &g_axi_dma_obj;
    struct axi_dma_lli *p_lli = 0;
    struct dma_transfer *dma_trans_desc = p_transfer;
    trans_total_len = p_transfer->trans_len;
    lli_cal_size = cal_lli_size(p_transfer);
    /* the chain must fit inside the caller's static descriptor pool */
    FH_DMA_ASSERT(lli_cal_size <= DMA_LLI_SIZE);

    memset((void *)p_lli_in, 0,lli_cal_size * sizeof(struct axi_dma_lli));
    p_lli = (struct axi_dma_lli *)p_lli_in;
    max_trans_size = FH_CHANNEL_MAX_TRANSFER_SIZE;


    /* periodic transfers cap each block at the period length */
    if(p_transfer->period_len != 0){
        max_trans_size = MIN(max_trans_size, p_transfer->period_len);
    }

    /* Fill one descriptor per block. */
    for (i = 0; i < lli_cal_size; i++)
    {
        /* parse trans para... */
        /* para add: per-block address, scaled by element width in bytes.
         * NOTE(review): both branches scale by src_width — presumably
         * because the block size is counted in source-width elements;
         * confirm against the BLOCK_TS definition for this IP. */
        switch (dma_trans_desc->dst_inc_mode)
        {
        case DW_DMA_SLAVE_INC:
            temp_dst_add =
                dma_trans_desc->dst_add +
                i * max_trans_size * (1 << dma_trans_desc->src_width);
            break;
        case DW_DMA_SLAVE_DEC:
            temp_dst_add =
                dma_trans_desc->dst_add -
                i * max_trans_size * (1 << dma_trans_desc->src_width);
            break;
        case DW_DMA_SLAVE_FIX:
            temp_dst_add = dma_trans_desc->dst_add;
            break;
        }

        switch (dma_trans_desc->src_inc_mode)
        {
        case DW_DMA_SLAVE_INC:
            temp_src_add =
                dma_trans_desc->src_add +
                i * max_trans_size * (1 << dma_trans_desc->src_width);
            break;
        case DW_DMA_SLAVE_DEC:
            temp_src_add =
                dma_trans_desc->src_add -
                i * max_trans_size * (1 << dma_trans_desc->src_width);
            break;
        case DW_DMA_SLAVE_FIX:
            temp_src_add = dma_trans_desc->src_add;
            break;
        }
        /* reload flags override the inc mode: every block restarts at the
         * original address */
        if(p_transfer->src_reload_flag == ADDR_RELOAD){
            temp_src_add = dma_trans_desc->src_add;
        }
        if(p_transfer->dst_reload_flag == ADDR_RELOAD){
            temp_dst_add = dma_trans_desc->dst_add;
        }
        p_lli[i].sar_lo = temp_src_add;
        p_lli[i].dar_lo = temp_dst_add;
        /* para ctl: full block if at least one whole block remains,
         * otherwise the tail remainder */
        temp_trans_size = (trans_total_len / max_trans_size)
                                ? max_trans_size
                                : (trans_total_len % max_trans_size);
        trans_total_len -= temp_trans_size;

        /* block size , axi dma should - 1*/
        p_lli[i].block_ts_lo = temp_trans_size - 1;
		/* lli is vaild */
		p_lli[i].ctl_hi = CH_CTL_H_LLI_VALID;
		//p_lli[i].ctl_hi |= CH_CTL_H_IOC_BLKTFR;
		/* axi dma inc mode 0 = inc.  1 = fix  */
		dst_inc_mode = (dma_trans_desc->dst_inc_mode == DW_DMA_SLAVE_INC) ? 0 : 1;
		src_inc_mode = (dma_trans_desc->src_inc_mode == DW_DMA_SLAVE_INC) ? 0 : 1;

		/* widths, burst sizes, master selects and inc modes for this block */
		p_lli[i].ctl_lo = AXI_DMA_CTLL_DST_WIDTH(dma_trans_desc->dst_width) |
		AXI_DMA_CTLL_SRC_WIDTH(dma_trans_desc->src_width) |
		AXI_DMA_CTLL_DST_MSIZE(dma_trans_desc->dst_msize) |
		AXI_DMA_CTLL_SRC_MSIZE(dma_trans_desc->src_msize) |
		AXI_DMA_CTLL_DMS(dma_trans_desc->dst_master_sel) |
		AXI_DMA_CTLL_SMS(dma_trans_desc->src_master_sel) |
		AXI_DMA_CTLL_DST_INC_MODE(dst_inc_mode) |
		AXI_DMA_CTLL_SRC_INC_MODE(src_inc_mode);
		axi_dma_ctl_init(&p_lli[i].ctl_lo, &p_lli[i].ctl_hi);
        /* link to the next descriptor, or mark the tail of the chain */
        if (trans_total_len > 0)
			p_lli[i].llp_lo = &p_lli[i + 1];
		else
			p_lli[i].ctl_hi |= CH_CTL_H_LLI_LAST;

		FH_AXI_DMA_DEBUG("plli add is %x\n",(int)&p_lli[i]);
		dump_lli(&p_lli[i]);
		
    }


	/* enable link list... (multi-block type 3 = linked-list for src+dst) */
    dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_LO,
	3 << CH_CFG_L_DST_MULTBLK_TYPE_POS | 3<< CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	/* flow control */
    dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI,
	AXI_DMA_CFGH_FC(dma_trans_desc->fc_mode));


    /* Peripheral transfers: select hardware handshaking and program the
     * handshake interface number.
     * NOTE(review): ~0x18 clears CFG_HI bits 3..4 (HS_SEL_SRC/DST → HW),
     * and the <<12 / <<7 positions for dst_per/src_per look specific to
     * this IP configuration — confirm against the SoC's DMAC databook. */
    switch (dma_trans_desc->fc_mode)
    {
    case DMA_M2M:

        break;
    case DMA_M2P:
        ret_status = dw_readl(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI);
		ret_status &= ~0x18;
		ret_status |= dma_trans_desc->dst_per << 12;
		dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI, ret_status);

		
        break;
    case DMA_P2M:
        ret_status = dw_readl(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI);
		ret_status &= ~0x18;
		ret_status |= dma_trans_desc->src_per << 7;
		dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI, ret_status);

        break;

    default:
        break;
    }
	/* point the channel at the head of the descriptor chain */
	dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].LLP,  &p_lli[0]);
	//clean isr status.
 	dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].INTCLEAR_LO, 0xffffffff);
	dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].INTCLEAR_HI, 0xffffffff);
	//open transfer done isr.
	dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].INTSTATUS_EN_LO, DWAXIDMAC_IRQ_DMA_TRF);

    /* rewrite cfg.. (apply optional tuning hook on top of what was set) */
    cfg_low  = dw_readl(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_LO);
    cfg_high = dw_readl(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI);
    axi_dma_cfg_init(&cfg_low, &cfg_high);
    dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_LO, cfg_low);
    dw_writel(temp_dwc, CHAN[dma_trans_desc->channel_number].CFG_HI, cfg_high);

    /* start the channel: CHEN write with the paired write-enable bit.
     * NOTE(review): lift_shift_bit_num() is defined elsewhere — presumably
     * returns BIT(channel); verify. */
    channel_set_bit(temp_dwc, CHEN_LO,
                    lift_shift_bit_num(dma_trans_desc->channel_number));

	dump_lli(&p_lli[0]);
}

