/*
 * Copyright (C) 2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and /or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version
 */
#ifdef CONFIG_GENERIC_BUG
#undef CONFIG_GENERIC_BUG
#endif
#ifdef CONFIG_BUG
#undef CONFIG_BUG
#endif

#ifdef CONFIG_DEBUG_BUGVERBOSE
#undef CONFIG_DEBUG_BUGVERBOSE
#endif
#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "devdrv_dma.h"
#include <linux/timex.h>
#include <linux/rtc.h>

#define DEVDRV_DMA_AXI_WRITE 0x100

/* dma channel sq submit interface */
void devdrv_dma_ch_sq_submit(struct devdrv_dma_channel *dma_chan)
{
    devdrv_set_dma_sq_tail(dma_chan->io_base, dma_chan->sq_tail);
}

/* Extract the SQ/CQ side (local/remote) bit from the channel flag word. */
u32 devdrv_get_dma_sqcq_side(struct devdrv_dma_channel *dma_chan)
{
    u32 shifted = dma_chan->flag >> DEVDRV_DMA_SQCQ_SIDE_BIT;

    return shifted & 0x1;
}

/* not all sq bds will respond to cq, and multiple sq may be merged. Need to consider the merged sq */
/*
 * Complete every SQ entry from the software sq_head up to and including
 * cq_sqhd (the SQ head echoed by the completion entry). @status belongs
 * only to the entry that actually generated the CQ; merged predecessors
 * complete with status 0. Caller holds the channel's cq_lock (see
 * devdrv_dma_done_task).
 */
void devdrv_dma_done_sq_proc(struct devdrv_dma_channel *dma_chan, u32 cq_sqhd, u32 status)
{
    struct devdrv_dma_soft_bd *soft_bd = NULL;
    u32 sq_index;
    u32 cur_status;
    /* number of entries covered by this completion, inclusive, with ring wrap-around */
    u32 sq_cnt = (cq_sqhd + dma_chan->sq_depth - dma_chan->sq_head + 1) % dma_chan->sq_depth;
    u32 val = 0;

    for (sq_index = 0; sq_index < sq_cnt; sq_index++) {
        /* merged cq status is ok */
        cur_status = (dma_chan->sq_head == cq_sqhd) ? status : 0;

        soft_bd = dma_chan->dma_soft_bd + dma_chan->sq_head;

        /* do not pay attention to soft bd, like the second bd sent by small packet */
        if (soft_bd->valid == DEVDRV_DISABLE) {
            goto next;
        }
        /* order the valid check before reading the rest of the soft bd fields */
        rmb();
        /* the front bd in chain copy */
        if (soft_bd->owner_bd >= 0) {
            /* status error needs to be set to the last bd */
            if (cur_status != 0) {
                soft_bd = dma_chan->dma_soft_bd + soft_bd->owner_bd;
                soft_bd->status = cur_status;
            }
        } else {
            /* if there is no error in front of bd, assign the status of the last sq. */
            if (soft_bd->status == -1) {
                soft_bd->status = cur_status;
            }

            /* NOTE(review): an AXI-write/BAR exception in the queue int status overrides
             * the completion status with -2 — presumably a link-level fault; confirm. */
            devdrv_dma_reg_rd(dma_chan->io_base, DEVDRV_DMA_QUEUE_INT_STS, &val);
            if ((val == DEVDRV_DMA_AXI_WRITE) || (val == BAR_EXCEP_VAL_32_BIT)) {
                soft_bd->status = -2;
            }
            /* publish status before waking/invalidating so the waiter reads a settled value */
            wmb();
            if (soft_bd->wait_type != DEVDRV_DMA_WAIT_INTR) {
                soft_bd->valid = DEVDRV_DISABLE;
                goto next;
            }

            /* synchronous mode release semaphore wake-up waiting task */
            if (soft_bd->copy_type == DEVDRV_DMA_SYNC) {
                up(&soft_bd->sync_sem);
            }
            /* asynchronous mode callback completion function */
            else {
                if (soft_bd->callback_func) {
                    soft_bd->callback_func(soft_bd->priv, soft_bd->trans_id, soft_bd->status);
                }
            }
        }
        soft_bd->valid = DEVDRV_DISABLE;
    next:
        /* consume this SQ slot, wrapping at sq_depth */
        dma_chan->sq_head = (dma_chan->sq_head + 1) % dma_chan->sq_depth;
    }

    if (status) {
        devdrv_err("%s, dma copy failed, sq:%d, error code: 0x%x\n", dev_driver_string(dma_chan->dev), cq_sqhd, status);
    }
}

/*
 * Completion tasklet: drain valid CQ entries under cq_lock, completing the
 * SQ entries each one covers, then write the consumed CQ head back to
 * hardware. Processes at most DMA_DONE_BUDGET entries per run and
 * reschedules itself if more remain.
 */
void devdrv_dma_done_task(unsigned long data)
{
    struct devdrv_dma_cq_node *p_cur_last_cq = NULL;
    struct devdrv_dma_channel *dma_chan = (struct devdrv_dma_channel *)((uintptr_t)data);
    u32 head;
    int cnt = 0;

    spin_lock_bh(&dma_chan->cq_lock);

    while (1) {
        /* peek at the next CQ slot (one past the consumed head) */
        head = (dma_chan->cq_head + 1) % (dma_chan->cq_depth);
        p_cur_last_cq = dma_chan->cq_desc_base + head;

        /* invalid cq, break */
        if (!devdrv_dma_get_cq_valid(p_cur_last_cq, dma_chan->rounds)) {
            break;
        }

        /* order the valid check before reading the CQ entry's payload */
        rmb();

        /* Reach the threshold and schedule out */
        if (cnt >= DMA_DONE_BUDGET) {
            tasklet_schedule(&dma_chan->dma_done_task);
            break;
        }

        devdrv_dma_done_sq_proc(dma_chan, devdrv_dma_get_cq_sqhd(p_cur_last_cq),
            devdrv_dma_get_cq_status(p_cur_last_cq));

        devdrv_dma_set_cq_invalid(p_cur_last_cq);
        dma_chan->cq_head = head;

        /* the valid-phase flips each time the ring wraps; track it via rounds */
        if (dma_chan->cq_head == (dma_chan->cq_depth - 1)) {
            dma_chan->rounds++;
        }

        cnt++;
    }
    /* settle all soft bd / ring updates before telling hardware the new CQ head */
    mb();
    if (cnt > 0) {
        devdrv_set_dma_cq_head(dma_chan->io_base, dma_chan->cq_head);
    }

    spin_unlock_bh(&dma_chan->cq_lock);
}

/* "copy done" hard-IRQ handler: defer CQ draining to the channel tasklet. */
irqreturn_t devdrv_dma_done_interrupt(int irq, void *data)
{
    struct devdrv_dma_channel *chan = (struct devdrv_dma_channel *)data;

    if (chan->status != DEVDRV_DISABLE) {
        rmb();
        tasklet_schedule(&chan->dma_done_task);
    } else {
        devdrv_err("dma done interrupt: dma chan %d disable\n", chan->chan_id);
    }

    return IRQ_HANDLED;
}

/* Dump a channel's software/hardware bookkeeping on error and report it to the black box. */
void devdrv_dma_err_proc(struct devdrv_dma_channel *dma_chan)
{
    u32 sts = 0;

    devdrv_err("err chan id: %d\n", dma_chan->chan_id);

    /* snapshot the DXF registers first; sts carries the queue init status */
    devdrv_record_dma_dxf_info(dma_chan->io_base, &sts);

    devdrv_err("sq vir base addr is 0x%pK\n", dma_chan->sq_desc_base);
    devdrv_err("cq vir base addr is 0x%pK\n", dma_chan->cq_desc_base);
    devdrv_err("software sq head %d\n", dma_chan->sq_head);
    devdrv_err("software sq tail %d\n", dma_chan->sq_tail);
    devdrv_err("software cq head %d\n", dma_chan->cq_head);

    devdrv_dfx_dma_report_to_bbox(dma_chan, sts);
}

/* Workqueue handler: recover the owning channel from the work item and dump its error state. */
void devdrv_dma_err_task(struct work_struct *p_work)
{
    struct devdrv_dma_channel *chan = container_of(p_work, struct devdrv_dma_channel, err_work);

    devdrv_dma_err_proc(chan);
}


/* DMA error hard-IRQ handler: defer diagnostics to the channel's error work item. */
irqreturn_t devdrv_dma_err_interrupt(int irq, void *data)
{
    struct devdrv_dma_channel *chan = (struct devdrv_dma_channel *)data;

    if (chan->status != DEVDRV_DISABLE) {
        rmb();
        /* start work queue */
        schedule_work(&chan->err_work);
    } else {
        devdrv_err("dma err interrupt: dma chan %d disable\n", chan->chan_id);
    }

    return IRQ_HANDLED;
}

/* Avoid cq merge cause int loss when the previous sq's ldie==1, and current sq's ldie==0 & rdie==1, set ldie=1. */
u32 devdrv_dma_get_amended_ldie(struct devdrv_dma_channel *dma_chan, u32 ldie, u32 rdie)
{
    u32 fixed = ldie;

    if ((dma_chan->last_irq_type == DEVDRV_DMA_SQ_LDIE_ENABEL) && (ldie == 0) && (rdie == 1)) {
        fixed = 1;
    }

    /* record the raw (unamended) irq type for the next SQ entry's decision */
    dma_chan->last_irq_type = ldie + (rdie << 1);

    return fixed;
}

/*
 * Populate one hardware SQ descriptor: opcode from the copy direction,
 * ordering/barrier attributes from para_info flags, and local/remote
 * interrupt enables. @intr_flag==1 means the caller wants an interrupt on
 * completion (wait_type == DEVDRV_DMA_WAIT_INTR). Mutates dma_chan's
 * remote_irq_cnt and last_irq_type bookkeeping.
 */
void devdrv_dma_fill_sq_desc(struct devdrv_dma_channel *dma_chan, struct devdrv_dma_sq_node *sq_desc,
    struct devdrv_dma_node *dma_node, struct devdrv_asyn_dma_para_info *para_info, int intr_flag)
{
    u32 rdie = 0;
    u32 ldie = 0;
    u32 msi = 0;
    u32 attr = 0;
    u32 wd_barrier = 0;
    u32 rd_barrier = 0;
    u32 opcode;

    /* device-to-host is a hardware "write"; host-to-device a "read" */
    if (dma_node->direction == DEVDRV_DMA_DEVICE_TO_HOST) {
        opcode = DEVDRV_DMA_WRITE;
    } else {
        opcode = DEVDRV_DMA_READ;
    }

    if (para_info != NULL) {
        /* relaxed ordering is the default unless the caller asked for strict attr */
        if (!(para_info->interrupt_and_attr_flag & DEVDRV_ATTR_FLAG)) {
            attr = DEVDRV_DMA_RO_RELEX_ORDER;
        }
        if (para_info->interrupt_and_attr_flag & DEVDRV_WD_BARRIER_FLAG) {
            wd_barrier = 1;
        }
        if (para_info->interrupt_and_attr_flag & DEVDRV_RD_BARRIER_FLAG) {
            rd_barrier = 1;
        }
    }

    if (1 == intr_flag) {
        if (devdrv_get_dma_sqcq_side(dma_chan) == DEVDRV_DMA_REMOTE_SIDE) {
            /* remote-side SQ/CQ: signal completion via the remote done irq */
            rdie = 1;
            msi = dma_chan->done_irq;
        } else {
            if (para_info != NULL) {
                if (para_info->interrupt_and_attr_flag & DEVDRV_REMOTE_IRQ_FLAG) {
                    rdie = 1;
                    msi = para_info->remote_msi_vector;
                    dma_chan->remote_irq_cnt++;

                    /* add a local irq to update local SQ head and tail */
                    if (dma_chan->remote_irq_cnt == DEVDRV_DMA_MAX_REMOTE_IRQ) {
                        ldie = 1;
                        dma_chan->remote_irq_cnt = 0;
                    }
                }
                if (para_info->interrupt_and_attr_flag & DEVDRV_LOCAL_IRQ_FLAG)
                    ldie = 1;
            } else {
                /* no para_info: plain local completion interrupt */
                ldie = 1;
            }
        }
    }

    /* compensate for CQ merging that could otherwise swallow a local irq */
    ldie = devdrv_dma_get_amended_ldie(dma_chan, ldie, rdie);

    /* fill addr */
    devdrv_dma_set_sq_addr_info(sq_desc, dma_node->src_addr, dma_node->dst_addr, dma_node->size);

    /* fill attr */
    devdrv_dma_set_sq_attr(sq_desc, opcode, attr, DEVDRV_PF_NUM, wd_barrier, rd_barrier);

    /* fill interrupt info */
    devdrv_dma_set_sq_irq(sq_desc, rdie, ldie, msi);
}

/*
 * Initialise the software bd that tracks a submitted SQ entry: callback
 * info (async), fresh semaphore (sync), and sentinel status/owner values.
 */
void devdrv_dma_fill_soft_bd(int wait_type, int copy_type, struct devdrv_dma_soft_bd *soft_bd,
    struct devdrv_asyn_dma_para_info *para_info)
{
    if (para_info == NULL) {
        soft_bd->priv = NULL;
        soft_bd->trans_id = 0;
        soft_bd->callback_func = NULL;
    } else {
        soft_bd->priv = para_info->priv;
        soft_bd->trans_id = para_info->trans_id;
        soft_bd->callback_func = para_info->finish_notify;
    }

    soft_bd->copy_type = copy_type;
    soft_bd->wait_type = wait_type;
    soft_bd->owner_bd = -1; /* -1: this bd completes its own chain */
    soft_bd->status = -1;   /* -1: not yet completed */
    sema_init(&soft_bd->sync_sem, 0);
    soft_bd->valid = DEVDRV_ENABLE;
}

/*
 * Validate the common parameters of a DMA copy request: data type range,
 * non-empty node list, per-node size/direction, and (for async copies)
 * a coherent para_info. Returns 0 on success, -EINVAL otherwise.
 */
int devdrv_dma_para_check(u32 dev_id, enum devdrv_dma_data_type type, struct devdrv_dma_node *dma_node, u32 node_cnt,
    int copy_type, struct devdrv_asyn_dma_para_info *para_info)
{
    int type_val = (int)type;
    u32 idx;

    if ((type_val < DEVDRV_DMA_DATA_COMMON) || (type_val >= DEVDRV_DMA_DATA_TYPE_MAX)) {
        devdrv_err("type %d is invalid.\n", type_val);
        return -EINVAL;
    }

    if (node_cnt == 0) {
        devdrv_err("node_cnt is 0.\n");
        return -EINVAL;
    }

    if (dma_node == NULL) {
        devdrv_err("dma_node is null.\n");
        return -EINVAL;
    }

    for (idx = 0; idx < node_cnt; idx++) {
        struct devdrv_dma_node *node = &dma_node[idx];

        if (node->size == 0) {
            devdrv_err("dma_node %d: src_addr, dst_addr, size %x is error.\n", idx, node->size);
            return -EINVAL;
        }

        if ((node->direction != DEVDRV_DMA_DEVICE_TO_HOST) &&
            (node->direction != DEVDRV_DMA_HOST_TO_DEVICE)) {
            devdrv_err("dma_node %d: direction %d is error.\n", idx, node->direction);
            return -EINVAL;
        }
    }

    if (copy_type != DEVDRV_DMA_ASYNC) {
        return 0;
    }

    if (para_info == NULL) {
        devdrv_err("async mode para_info is null.\n");
        return -EINVAL;
    }

    /* a finish callback without any irq flag set would never be invoked */
    if (!(para_info->interrupt_and_attr_flag & DEVDRV_LOCAL_REMOTE_IRQ_FLAG) && (para_info->finish_notify != NULL)) {
        devdrv_info("para_info para is error\n");
        return -EINVAL;
    }

    return 0;
}

/*
 * Round-robin selection of the next channel serving @type.
 * NOTE(review): assumes data_chan->chan_num is non-zero for every valid
 * type — confirm this is guaranteed at init time.
 */
struct devdrv_dma_channel *devdrv_dma_get_chan(u32 dev_id, enum devdrv_dma_data_type type)
{
    struct devdrv_dma_dev *dma_dev = NULL;
    struct data_type_chan *data_chan = NULL;
    int next_id;

    if (type >= DEVDRV_DMA_DATA_TYPE_MAX) {
        return NULL;
    }

    dma_dev = devdrv_get_dma_dev(dev_id);
    if (dma_dev == NULL) {
        devdrv_err("dma_dev(dev_id %u) is NULL\n", dev_id);
        return NULL;
    }

    data_chan = &dma_dev->data_chan[type];

    /* step one past the channel used last time, wrapping within this type's range */
    next_id = data_chan->chan_start_id + ((data_chan->last_use_chan + 1) % data_chan->chan_num);
    data_chan->last_use_chan = next_id;

    return &dma_dev->dma_chan[next_id];
}

/*
 * Number of free SQ descriptors available to callers, after subtracting
 * the DEVDRV_DMA_CH_SQ_DESC_RSV reserved slots. May be negative when
 * fewer than the reserve is free; callers compare against a signed count.
 *
 * The previous version performed the subtraction in u32 and relied on
 * unsigned wraparound plus the implementation-defined u32->int conversion
 * to produce the negative result; do the arithmetic in signed ints so the
 * intent is explicit and well-defined.
 */
int devdrv_dma_get_sq_idle_bd_cnt(struct devdrv_dma_channel *dma_chan)
{
    u32 sq_tail = dma_chan->sq_tail;
    u32 sq_head = dma_chan->sq_head;
    u32 sq_depth = dma_chan->sq_depth;
    /* entries currently in flight on the ring */
    u32 used = (sq_tail + sq_depth - sq_head) % sq_depth;

    return (int)sq_depth - (int)used - DEVDRV_DMA_CH_SQ_DESC_RSV;
}

/* Return the soft bd slot corresponding to the current SQ tail. */
struct devdrv_dma_soft_bd *devdrv_get_soft_bd(struct devdrv_dma_channel *dma_chan)
{
    return &dma_chan->dma_soft_bd[dma_chan->sq_tail];
}

/*
 * Busy-wait until the channel SQ has at least @node_cnt free descriptors.
 *
 * Lock protocol (asymmetric, by design): appears to be entered with
 * dma_chan->lock held — TODO confirm at call sites. Returns 0 with the
 * lock STILL HELD; returns -ENOSPC with the lock RELEASED (both the
 * no-wait path and the timeout path return after spin_unlock_bh without
 * re-acquiring).
 */
int devdrv_dma_wait_for_space_enough(u32 dev_id, struct devdrv_dma_channel *dma_chan, int wait_flag, u32 node_cnt)
{
    /* 10s */
    u32 dma_wait_chan_avail_timeout = DEVDRV_DMA_WAIT_CHAN_AVAIL_TIMEOUT;
    int sq_idle_bd_cnt = 0;
    do {
        sq_idle_bd_cnt = devdrv_dma_get_sq_idle_bd_cnt(dma_chan);
        if (sq_idle_bd_cnt >= (int)node_cnt) {
            break;
        }

        /* drop the lock while waiting so completion processing can free slots */
        spin_unlock_bh(&dma_chan->lock);

        if (wait_flag == DEVDRV_DISABLE) {
            return -ENOSPC;
        }

        dma_wait_chan_avail_timeout--;
        if (dma_wait_chan_avail_timeout == 0) {
            devdrv_err("devid %u dma channel[%d] sq space(tail:%d,head:%d,depth:%d,"
                "left:%d) not enough, need bd num %d\n",
                dev_id, dma_chan->chan_id, dma_chan->sq_tail, dma_chan->sq_head, dma_chan->sq_depth, sq_idle_bd_cnt,
                node_cnt);

            return -ENOSPC;
        }
        /* ~1ms per retry; timeout constant sized so the total is about 10s */
        udelay(1000);
        spin_lock_bh(&dma_chan->lock);
    } while (dma_wait_chan_avail_timeout > 0);

    return 0;
}

/* Diagnostic dump: hardware vs software ring pointers for one channel. */
void devdrv_dma_chan_dfx_print(struct devdrv_dma_channel *dma_chan)
{
    u32 hw_sq_tail = 0;
    u32 hw_cq_head = 0;
    u32 hw_cq_tail = 0;

    devdrv_dma_reg_rd(dma_chan->io_base, DEVDRV_DMA_QUEUE_SQ_TAIL, &hw_sq_tail);
    devdrv_dma_reg_rd(dma_chan->io_base, DEVDRV_DMA_QUEUE_CQ_HEAD, &hw_cq_head);
    devdrv_dma_reg_rd(dma_chan->io_base, DEVDRV_DMA_QUEUE_CQ_TAIL, &hw_cq_tail);
    devdrv_err("hardware sq tail:%d, cq head:%d, cq tail:%d; software sq tail:%d, sq head:%d, cq head:%d\n",
        hw_sq_tail, hw_cq_head, hw_cq_tail, dma_chan->sq_tail, dma_chan->sq_head, dma_chan->cq_head);
}

/*
 * Wait for a submitted synchronous copy (tracked by @soft_bd) to finish.
 * Interrupt mode blocks on the soft bd's semaphore with a timeout and, on
 * timeout, polls the completion path once more before declaring failure.
 * Query mode spins on the soft bd, driving the done task itself.
 * Returns 0 on success, -ETIMEDOUT/-ENOSPC on wait failure, -EINVAL when
 * the copy completed with a non-zero status.
 */
int devdrv_dma_chan_sync_wait(u32 dev_id, struct devdrv_dma_channel *dma_chan, struct devdrv_dma_soft_bd *soft_bd)
{
    int ret;
    int wait_cnt;
    struct timespec64 start_time;
    struct timespec64 end_time;


    if (soft_bd->wait_type == DEVDRV_DMA_WAIT_INTR) {
        ktime_get_real_ts64(&(start_time));
        ret = down_timeout(&soft_bd->sync_sem, DEVDRV_DMA_COPY_TIMEOUT);
        ktime_get_real_ts64(&(end_time));
        if (ret) {
            /* call done task if timeout */
            ret = 0;
            devdrv_dma_done_task((unsigned long)(uintptr_t)dma_chan);
            /* check soft_bd valid */
            if (soft_bd->valid == DEVDRV_ENABLE) {
                /* still pending after draining the CQ: a genuine timeout */
                ret = -ETIMEDOUT;
                devdrv_dma_chan_dfx_print(dma_chan);
                devdrv_err("devid %u dma channel[%d] time out ret = %d, wait time:%lld, \n", dev_id, dma_chan->chan_id,
                    ret,
                    (((u64)(end_time.tv_sec)) * 1000000000 + end_time.tv_nsec) -
                    (((u64)(start_time.tv_sec)) * 1000000000 + start_time.tv_nsec));
            }
        }
    } else {
        /* query mode */
        ret = 0;
        wait_cnt = 0;
        do {
            /* check cq status,update soft_bd */
            devdrv_dma_done_task((unsigned long)(uintptr_t)dma_chan);
            /* check soft_bd valid */
            if (soft_bd->valid == DEVDRV_DISABLE) {
                break;
            }

            /* order the valid check against the status read below */
            rmb();

            if (wait_cnt++ > DEVDRV_DMA_QUERY_MAX_WAIT_TIME) {
                ret = -ENOSPC;
                devdrv_err("devid %u dma channel[%d] wait timeout\n", dev_id, dma_chan->chan_id);
                break;
            }

            usleep_range(1, 2);
        } while (1);
    }

    /* settle before inspecting the final status written by the completion path */
    mb();
    if (soft_bd->status != 0) {
        devdrv_err("devid %u dma channel[%d] dma copy failed, status %d\n", dev_id, dma_chan->chan_id, soft_bd->status);
        ret = -EINVAL;
    }

    return ret;
}

/*
 * Submit @node_cnt copy descriptors to one channel and, for synchronous
 * copies, wait for completion. All but the last descriptor are chained to
 * the final one via soft_bd->owner_bd; only the last carries the
 * interrupt/soft-bd completion state. Returns 0 on success, -ENOSPC when
 * the SQ lacks room, -EINVAL/-EIO on internal errors.
 */
int devdrv_dma_chan_copy(u32 dev_id, struct devdrv_dma_channel *dma_chan, struct devdrv_dma_node *dma_node,
    u32 node_cnt, int wait_type, int copy_type, struct devdrv_asyn_dma_para_info *para_info)
{
    struct devdrv_dma_soft_bd *soft_bd = NULL;
    struct devdrv_dma_sq_node *sq_desc = NULL;
    u32 chan_id = 0;
    u32 last_sq_id;
    u32 sq_index;
    int sq_idle_bd_cnt;
    int ret = 0;
    int intr_flag = (wait_type == DEVDRV_DMA_WAIT_INTR) ? 1 : 0;

    /* wait till chan space enough */
    spin_lock_bh(&dma_chan->lock);
    chan_id = dma_chan->chan_id;
    sq_idle_bd_cnt = devdrv_dma_get_sq_idle_bd_cnt(dma_chan);
    if (sq_idle_bd_cnt < (int)node_cnt) {
        spin_unlock_bh(&dma_chan->lock);
        devdrv_warn("devid %d chan id %d no space, idle bd %d need %d\n", dev_id, chan_id, sq_idle_bd_cnt, node_cnt);
        return -ENOSPC;
    }

    /* ring index of the final descriptor; earlier bds point at it via owner_bd */
    last_sq_id = (dma_chan->sq_tail + node_cnt - 1) % dma_chan->sq_depth;

    for (sq_index = 0; sq_index < node_cnt; sq_index++) {
        sq_desc = dma_chan->sq_desc_base + dma_chan->sq_tail;
        soft_bd = devdrv_get_soft_bd(dma_chan);
        if (soft_bd == NULL) {
            spin_unlock_bh(&dma_chan->lock);
            devdrv_err("dev_id %u dma channel[%d] soft_bd is NULL, sq_index:%d, sq_tail:%d\n", dev_id, chan_id,
                sq_index, dma_chan->sq_tail);
            return -EINVAL;
        }

        if (memset_s((void *)sq_desc, DEVDRV_DMA_SQ_DESC_SIZE, 0, DEVDRV_DMA_SQ_DESC_SIZE) != 0) {
            spin_unlock_bh(&dma_chan->lock);
            devdrv_err("memset_s failed\n");
            return -EIO;
        }
        if (sq_index < node_cnt - 1) {
            /* intermediate bd: no interrupt, chained to the last descriptor */
            devdrv_dma_fill_sq_desc(dma_chan, sq_desc, &dma_node[sq_index], para_info, 0);
            soft_bd->owner_bd = last_sq_id;
            soft_bd->valid = DEVDRV_ENABLE;
        } else {
            /* final bd: carries the interrupt request and the full soft bd state */
            devdrv_dma_fill_sq_desc(dma_chan, sq_desc, &dma_node[sq_index], para_info, intr_flag);
            devdrv_dma_fill_soft_bd(wait_type, copy_type, soft_bd, para_info);
        }

        dma_chan->sq_tail = (dma_chan->sq_tail + 1) % dma_chan->sq_depth;
    }
    /* descriptors must be visible before the doorbell write */
    wmb();
    devdrv_dma_ch_sq_submit(dma_chan);

    spin_unlock_bh(&dma_chan->lock);

    /* soft_bd here is the final descriptor's soft bd (loop runs at least once) */
    if ((copy_type == DEVDRV_DMA_SYNC) && (soft_bd != NULL)) {
        ret = devdrv_dma_chan_sync_wait(dev_id, dma_chan, soft_bd);
    }

    return ret;
}
/*
 * Top-level copy entry: splits the node list into batches of at most
 * DEVDRV_MAX_DMA_CH_SQ_DEPTH/2 entries and submits each batch to a
 * channel. With DEVDRV_INVALID_INSTANCE, tries every channel of the data
 * type round-robin, failing over on -ENOSPC; otherwise the channel is
 * picked deterministically from @instance. Returns 0 on success or the
 * first fatal error.
 */
int devdrv_dma_copy(u32 dev_id, enum devdrv_dma_data_type type, int instance, struct devdrv_dma_node *dma_node,
    u32 node_cnt, int wait_type, int copy_type, struct devdrv_asyn_dma_para_info *para_info)
{
    struct devdrv_dma_dev *dma_dev = NULL;
    struct devdrv_dma_channel *dma_chan = NULL;
    struct data_type_chan *data_chan = NULL;
    struct devdrv_dma_node *dma_pos = NULL;
    u32 proc_cnt = 0;
    u32 left_cnt = 0;
    int chan_id, i;
    int ret = 0;

    dma_dev = devdrv_get_dma_dev(dev_id);
    if (dma_dev == NULL || type >= DEVDRV_DMA_DATA_TYPE_MAX) {
        devdrv_err("dma_dev(dev_id %u) is NULL\n", dev_id);
        return -EINVAL;
    }
    devdrv_debug("type %x, instance %d, node_cnt %x, copy_type %d\n", type, instance, node_cnt, copy_type);
    data_chan = &dma_dev->data_chan[type];
    left_cnt = node_cnt;
    dma_pos = dma_node;
    while (left_cnt > 0) {
        /* To fix node_out out of sq range,send (DEVDRV_MAX_DMA_CH_SQ_DEPTH / 2) packets at a time */
        proc_cnt = (left_cnt >= (DEVDRV_MAX_DMA_CH_SQ_DEPTH / 2)) ? (DEVDRV_MAX_DMA_CH_SQ_DEPTH / 2) : left_cnt;
        if (instance == DEVDRV_INVALID_INSTANCE) {
            /* round-robin over the type's channels, skipping full ones (-ENOSPC) */
            for (i = 0; i < data_chan->chan_num; i++) {
                chan_id = data_chan->chan_start_id + ((i + data_chan->last_use_chan + 1) % data_chan->chan_num);
                dma_chan = &dma_dev->dma_chan[chan_id];
                ret = devdrv_dma_chan_copy(dev_id, dma_chan, dma_pos, proc_cnt, wait_type, copy_type, para_info);
                if (ret != -ENOSPC) {
                    data_chan->last_use_chan = chan_id;
                    break;
                }
            }
            /* ret is still -ENOSPC here if every channel was full */
            if (ret != 0) {
                devdrv_err("dev_id %u type %x, instance %d, node_cnt %x, copy_type %d dma copy fail\n",
                    dev_id, type, instance, node_cnt, copy_type);
                return ret;
            }
        } else {
            /* caller pinned an instance: map it onto the type's channel range */
            chan_id = data_chan->chan_start_id + ((u32)instance % (u32)data_chan->chan_num);
            dma_chan = &dma_dev->dma_chan[chan_id];

            ret = devdrv_dma_chan_copy(dev_id, dma_chan, dma_pos, proc_cnt, wait_type, copy_type, para_info);
            if (ret != 0) {
                return ret;
            }
        }
        left_cnt -= proc_cnt;
        dma_pos = dma_pos + proc_cnt;
    }
    return ret;
}

/*
 * Inline ("small packet") copy: the payload itself is written into a
 * second SQ descriptor instead of being DMA'd from host memory. Uses two
 * SQ slots — a DEVDRV_DMA_SMALL_PACKET header bd plus a payload bd — and
 * always waits synchronously (semaphore + local interrupt) on the first
 * bd's soft bd. Returns 0 on success, a negative errno or the bd status
 * on failure.
 */
int devdrv_dma_copy_sml_pkt(u32 dev_id, enum devdrv_dma_data_type type, dma_addr_t dst, const void *data, u32 size)
{
    struct devdrv_dma_channel *dma_chan = NULL;
    struct devdrv_dma_sq_node *sq_desc = NULL;
    struct devdrv_dma_soft_bd *soft_bd = NULL;
    int ret = 0;

    dma_chan = devdrv_dma_get_chan(dev_id, type);
    if (NULL == dma_chan) {
        devdrv_err("call devdrv_dma_get_chan failed, dev_id %d.\n", dev_id);
        return -EINVAL;
    }

    if (((dma_chan->flag >> DEVDRV_DMA_SML_PKT_BIT) & 1) == DEVDRV_DISABLE) {
        devdrv_err("devid %u this channel not support small packet\n", dev_id);
        return -EINVAL;
    }

    if (size > DEVDRV_DMA_SML_PKT_DATA_SIZE) {
        devdrv_err("devid %u pkt size %d is too big.\n", dev_id, size);
        return -EINVAL;
    }

    spin_lock_bh(&dma_chan->lock);


    if (devdrv_dma_get_sq_idle_bd_cnt(dma_chan) < DEVDRV_DMA_SML_PKT_SQ_DESC_NUM) {
        spin_unlock_bh(&dma_chan->lock);
        devdrv_err("devid %u dma channel[%d] sq space not enough in small pkt.\n", dev_id, dma_chan->chan_id);
        return -ENOSPC;
    }

    sq_desc = dma_chan->sq_desc_base + dma_chan->sq_tail;

    if (memset_s((void *)sq_desc, DEVDRV_DMA_SQ_DESC_SIZE, 0, DEVDRV_DMA_SQ_DESC_SIZE) != 0) {
        spin_unlock_bh(&dma_chan->lock);
        devdrv_err("memset_s failed\n");
        return -EINVAL;
    }

    /* fill addr */
    devdrv_dma_set_sq_addr_info(sq_desc, 0, dst, size);

    /* fill attr */
    devdrv_dma_set_sq_attr(sq_desc, DEVDRV_DMA_SMALL_PACKET, 0, DEVDRV_PF_NUM, 1, 1);

    /* fill interrupt info */
    devdrv_dma_set_sq_irq(sq_desc, 0, 1, 0);

    /* the first bd carries the completion state; the second (payload) bd stays invalid */
    soft_bd = dma_chan->dma_soft_bd + dma_chan->sq_tail;
    devdrv_dma_fill_soft_bd(DEVDRV_DMA_WAIT_INTR, DEVDRV_DMA_SYNC, soft_bd, NULL);

    dma_chan->sq_tail = (dma_chan->sq_tail + 1) % dma_chan->sq_depth;
    sq_desc = dma_chan->sq_desc_base + dma_chan->sq_tail;

    /* NOTE(review): destsz here is the payload size, not the descriptor capacity
     * (DEVDRV_DMA_SQ_DESC_SIZE), so the memcpy_s bounds check is vacuous — confirm. */
    if (memcpy_s((void *)sq_desc, size, data, size) != 0) {
        spin_unlock_bh(&dma_chan->lock);
        devdrv_err("memcpy_s failed\n");
        return -EINVAL;
    }
    dma_chan->sq_tail = (dma_chan->sq_tail + 1) % dma_chan->sq_depth;

    /* descriptors must be visible before the doorbell write */
    wmb();
    devdrv_dma_ch_sq_submit(dma_chan);

    spin_unlock_bh(&dma_chan->lock);

    /* NOTE(review): unlike devdrv_dma_chan_sync_wait, the timeout path does not
     * re-drive the done task or recheck soft_bd->valid before giving up — confirm intended. */
    ret = down_timeout(&soft_bd->sync_sem, DEVDRV_DMA_COPY_TIMEOUT);
    if (ret) {
        devdrv_err("devid %u dma channel[%d] time out, small pkt\n", dev_id, dma_chan->chan_id);
    }
    if (soft_bd->status != 0) {
        devdrv_err("devid %u dma channel[%d] dma copy failed, small pkt, status %x\n", dev_id, dma_chan->chan_id,
            soft_bd->status);
        ret = soft_bd->status;
    }

    return ret;
}

/*
 * Synchronous single-node copy on a caller-chosen channel instance.
 * Validates parameters, then submits one interrupt-mode sync copy.
 */
int devdrv_dma_sync_copy_plus(u32 dev_id, enum devdrv_dma_data_type type, int instance, u64 src, u64 dst, u32 size,
    enum devdrv_dma_direction direction)
{
    struct devdrv_dma_node dma_node = {
        .src_addr = src,
        .dst_addr = dst,
        .size = size,
        .direction = direction,
    };
    int ret;

    ret = devdrv_dma_para_check(dev_id, type, &dma_node, 1, DEVDRV_DMA_SYNC, NULL);
    if (ret != 0) {
        devdrv_err("devdrv_dma_sync_copy, para check failed\n");
        return ret;
    }

    ret = devdrv_dma_copy(dev_id, type, instance, &dma_node, 1, DEVDRV_DMA_WAIT_INTR, DEVDRV_DMA_SYNC, NULL);
    if (ret == -ENOSPC) {
        devdrv_warn("dev_id : 0x%x dma sync copy fail, ret = NOSPC\n", dev_id);
    } else if (ret != 0) {
        devdrv_err("dev_id : 0x%x dma sync copy fail\n", dev_id);
    }

    return ret;
}
EXPORT_SYMBOL(devdrv_dma_sync_copy_plus);

/* Synchronous single-node copy with automatic channel selection. */
int devdrv_dma_sync_copy(u32 dev_id, enum devdrv_dma_data_type type, u64 src, u64 dst, u32 size,
    enum devdrv_dma_direction direction)
{
    return devdrv_dma_sync_copy_plus(dev_id, type, DEVDRV_INVALID_INSTANCE, src, dst, size, direction);
}
EXPORT_SYMBOL(devdrv_dma_sync_copy);

/*
 * Asynchronous single-node copy on a caller-chosen channel instance.
 * Validates parameters (including para_info), then submits one
 * interrupt-mode async copy; completion is reported via para_info's
 * finish callback.
 */
int devdrv_dma_async_copy_plus(u32 dev_id, enum devdrv_dma_data_type type, int instance, u64 src, u64 dst, u32 size,
    enum devdrv_dma_direction direction, struct devdrv_asyn_dma_para_info *para_info)
{
    struct devdrv_dma_node dma_node = {
        .src_addr = src,
        .dst_addr = dst,
        .size = size,
        .direction = direction,
    };
    int ret;

    ret = devdrv_dma_para_check(dev_id, type, &dma_node, 1, DEVDRV_DMA_ASYNC, para_info);
    if (ret != 0) {
        devdrv_err("devdrv_dma_async_copy, para check failed\n");
        return ret;
    }

    ret = devdrv_dma_copy(dev_id, type, instance, &dma_node, 1, DEVDRV_DMA_WAIT_INTR, DEVDRV_DMA_ASYNC, para_info);
    if (ret == -ENOSPC) {
        devdrv_warn("dev_id : 0x%x dma async copy fail, ret = NOSPC\n", dev_id);
    } else if (ret != 0) {
        devdrv_err("dev_id : 0x%x dma async copy fail\n", dev_id);
    }

    return ret;
}
EXPORT_SYMBOL(devdrv_dma_async_copy_plus);

/* Asynchronous single-node copy with automatic channel selection. */
int devdrv_dma_async_copy(u32 dev_id, enum devdrv_dma_data_type type, u64 src, u64 dst, u32 size,
    enum devdrv_dma_direction direction, struct devdrv_asyn_dma_para_info *para_info)
{
    return devdrv_dma_async_copy_plus(dev_id, type, DEVDRV_INVALID_INSTANCE, src, dst, size, direction, para_info);
}
EXPORT_SYMBOL(devdrv_dma_async_copy);

/*
 * Synchronous multi-node ("link") copy on a caller-chosen channel
 * instance; @wait_type selects interrupt or query completion.
 */
int devdrv_dma_sync_link_copy_plus(u32 dev_id, enum devdrv_dma_data_type type, int wait_type, int instance,
    struct devdrv_dma_node *dma_node, u32 node_cnt)
{
    int ret = devdrv_dma_para_check(dev_id, type, dma_node, node_cnt, DEVDRV_DMA_SYNC, NULL);

    if (ret != 0) {
        devdrv_err("devdrv_dma_sync_link_copy, para check failed\n");
        return ret;
    }

    ret = devdrv_dma_copy(dev_id, type, instance, dma_node, node_cnt, wait_type, DEVDRV_DMA_SYNC, NULL);
    if (ret == -ENOSPC) {
        devdrv_warn("dev_id : 0x%x dma sync link copy fail, ret = NOSPC\n", dev_id);
    } else if (ret != 0) {
        devdrv_err("dev_id : 0x%x dma sync link copy fail\n", dev_id);
    }

    return ret;
}
EXPORT_SYMBOL(devdrv_dma_sync_link_copy_plus);

/* Synchronous multi-node copy with automatic channel selection. */
int devdrv_dma_sync_link_copy(u32 dev_id, enum devdrv_dma_data_type type, int wait_type,
    struct devdrv_dma_node *dma_node, u32 node_cnt)
{
    return devdrv_dma_sync_link_copy_plus(dev_id, type, wait_type, DEVDRV_INVALID_INSTANCE, dma_node, node_cnt);
}
EXPORT_SYMBOL(devdrv_dma_sync_link_copy);

/*
 * Asynchronous multi-node ("link") copy on a caller-chosen channel
 * instance; completion is reported via para_info's finish callback.
 */
int devdrv_dma_async_link_copy_plus(u32 dev_id, enum devdrv_dma_data_type type, int instance,
    struct devdrv_dma_node *dma_node, u32 node_cnt, struct devdrv_asyn_dma_para_info *para_info)
{
    int ret = devdrv_dma_para_check(dev_id, type, dma_node, node_cnt, DEVDRV_DMA_ASYNC, para_info);

    if (ret != 0) {
        devdrv_err("devdrv_dma_async_link_copy, para check failed\n");
        return ret;
    }

    ret =
        devdrv_dma_copy(dev_id, type, instance, dma_node, node_cnt, DEVDRV_DMA_WAIT_INTR, DEVDRV_DMA_ASYNC, para_info);
    if (ret == -ENOSPC) {
        devdrv_warn("dev_id : 0x%x dma async link copy fail, ret = NOSPC\n", dev_id);
    } else if (ret != 0) {
        devdrv_err("dev_id : 0x%x dma async link copy fail\n", dev_id);
    }

    return ret;
}
EXPORT_SYMBOL(devdrv_dma_async_link_copy_plus);

/* Asynchronous multi-node copy with automatic channel selection. */
int devdrv_dma_async_link_copy(u32 dev_id, enum devdrv_dma_data_type type, struct devdrv_dma_node *dma_node,
    u32 node_cnt, struct devdrv_asyn_dma_para_info *para_info)
{
    return devdrv_dma_async_link_copy_plus(dev_id, type, DEVDRV_INVALID_INSTANCE, dma_node, node_cnt, para_info);
}
EXPORT_SYMBOL(devdrv_dma_async_link_copy);

/*
 * Tear down a channel's SQ/CQ descriptor rings and soft bd array.
 * Marks the channel disabled first so the irq/tasklet paths bail out
 * before the backing memory disappears.
 */
STATIC void devdrv_free_dma_sq_cq(struct devdrv_dma_channel *dma_chan)
{
    u64 sq_size, cq_size;

    dma_chan->status = DEVDRV_DISABLE;
    /* make the disable visible before the descriptor memory is freed */
    wmb();

    if (dma_chan->sq_desc_base) {
        sq_size = ((u64)sizeof(struct devdrv_dma_sq_node)) * dma_chan->sq_depth;

        dma_free_coherent(dma_chan->dev, sq_size, dma_chan->sq_desc_base, dma_chan->sq_desc_dma);

        dma_chan->sq_desc_base = NULL;
    }

    if (dma_chan->cq_desc_base) {
        cq_size = ((u64)sizeof(struct devdrv_dma_cq_node)) * dma_chan->cq_depth;

        dma_free_coherent(dma_chan->dev, cq_size, dma_chan->cq_desc_base, dma_chan->cq_desc_dma);

        dma_chan->cq_desc_base = NULL;
    }

    /* kfree(NULL) is a no-op, so no guard is needed */
    kfree(dma_chan->dma_soft_bd);
    dma_chan->dma_soft_bd = NULL;
}

STATIC int devdrv_alloc_dma_sq_cq(struct devdrv_dma_channel *dma_chan)
{
    struct devdrv_dma_soft_bd *soft_virt_addr = NULL;
    void *sq_virt_addr = NULL;
    void *cq_virt_addr = NULL;
    struct device *dev = NULL;
    u64 soft_size;
    u64 sq_size;
    u64 cq_size;
    int i;

    dev = dma_chan->dev;
    sq_size = DEVDRV_DMA_SQ_DESC_SIZE * DEVDRV_MAX_DMA_CH_SQ_DEPTH;
    cq_size = DEVDRV_DMA_CQ_DESC_SIZE * DEVDRV_MAX_DMA_CH_CQ_DEPTH;
    soft_size = sizeof(struct devdrv_dma_soft_bd) * DEVDRV_MAX_DMA_CH_CQ_DEPTH;

    sq_virt_addr = dma_alloc_coherent(dev, sq_size, &dma_chan->sq_desc_dma, GFP_KERNEL);
    if (sq_virt_addr == NULL) {
        devdrv_err("%s, dma channel[%d] sq alloc fail\n", dev_driver_string(dev), dma_chan->chan_id);
        return -ENOMEM;
    }
    dma_chan->sq_desc_base = (struct devdrv_dma_sq_node *)sq_virt_addr;
    dma_chan->sq_depth = DEVDRV_MAX_DMA_CH_SQ_DEPTH;

    cq_virt_addr = dma_alloc_coherent(dev, cq_size, &dma_chan->cq_desc_dma, GFP_KERNEL);
    if (cq_virt_addr == NULL) {
        devdrv_err("%s, dma channel[%d] cq alloc fail\n", dev_driver_string(dev), dma_chan->chan_id);
        devdrv_free_dma_sq_cq(dma_chan);
        return -ENOMEM;
    }
    dma_chan->cq_desc_base = (struct devdrv_dma_cq_node *)cq_virt_addr;
    dma_chan->cq_depth = DEVDRV_MAX_DMA_CH_CQ_DEPTH;

    /* DMA_QUEUE_SQ_BASE/DMA_QUEUE_CQ_BASE Note:the address must be 64Bytes aligned. */
    if (((dma_chan->sq_desc_dma % DEVDRV_DMA_REG_ALIGN_SIZE) != 0) ||
        ((dma_chan->cq_desc_dma % DEVDRV_DMA_REG_ALIGN_SIZE) != 0)) {
        devdrv_err("%s, dma channel[%d] address dont aligned with 64B.\n",
            dev_driver_string(dev), dma_chan->chan_id);
        devdrv_free_dma_sq_cq(dma_chan);
        return -EFAULT;
    }

    devdrv_debug("dma channel[%d]\n", dma_chan->chan_id);
    soft_virt_addr = (struct devdrv_dma_soft_bd *)kzalloc(soft_size, GFP_KERNEL);
    if (soft_virt_addr == NULL) {
        devdrv_err("%s, dma channel[%d] cq alloc fail\n", dev_driver_string(dev), dma_chan->chan_id);
        devdrv_free_dma_sq_cq(dma_chan);
        return -ENOMEM;
    }
    dma_chan->dma_soft_bd = soft_virt_addr;

    for (i = 0; i < DEVDRV_MAX_DMA_CH_CQ_DEPTH; i++) {
        soft_virt_addr->valid = DEVDRV_DISABLE;
    }

    return 0;
}

/*
 * Initialize a single DMA channel: allocate its SQ/CQ descriptor rings,
 * locate its register window, reset and configure the hardware, then wire
 * up the completion tasklet and (optionally) the error workqueue handler.
 *
 * @dma_dev:      owning DMA device
 * @channel_id:   logical index into dma_dev->dma_chan[]
 * @dma_chan_id:  hardware channel id; selects the per-channel register window
 * @done_irq:     irq vector for transfer-done notifications
 * @err_irq:      irq vector for error notifications
 * @err_irq_flag: non-zero => register a dedicated error irq for this channel
 *
 * Returns 0 on success, or a negative errno from ring allocation / channel
 * reset. NOTE(review): on reset failure the SQ/CQ rings allocated above are
 * not freed here; devdrv_dma_init() releases them via devdrv_dma_exit() on
 * the error path -- confirm any other caller does the same.
 */
int devdrv_dma_init_chan(struct devdrv_dma_dev *dma_dev, int channel_id, int dma_chan_id, u32 done_irq, u32 err_irq,
    int err_irq_flag)
{
    int ret;

    struct devdrv_dma_channel *dma_chan = &dma_dev->dma_chan[channel_id];
    /* dev != NULL doubles as the "channel initialized" marker checked by
     * devdrv_dma_chan_disable()/devdrv_dma_exit(). */
    dma_chan->dev = dma_dev->dev;
    ret = devdrv_alloc_dma_sq_cq(dma_chan);
    if (ret) {
        devdrv_err("%s, alloc_dma_sq_cq failed\n", dev_driver_string(dma_dev->dev));
        return ret;
    }
    dma_chan->local_id = (u32)channel_id;
    dma_chan->chan_id = (u32)dma_chan_id;
    /*lint -e571 */
    /* per-channel register window: base + hw-channel-id * window size */
    dma_chan->io_base = dma_dev->io_base + (u64)dma_chan_id * DEVDRV_DMA_CHAN_OFFSET;
    /*lint +e571 */
    dma_chan->sq_tail = 0;
    dma_chan->sq_head = 0;
    /* cq_head starts at the last slot, so the first completion lands at
     * index 0 -- presumably matches the hardware's initial CQ pointer;
     * see the (cq_head + 1) % cq_depth polling in devdrv_dma_guard_work(). */
    dma_chan->cq_head = dma_chan->cq_depth - 1;
    /* -1 is the "irq not registered" sentinel used by the teardown paths */
    dma_chan->done_irq = -1;
    dma_chan->err_irq = -1;
    dma_chan->rounds = 0;

    if (dma_dev->sq_cq_side == DEVDRV_DMA_REMOTE_SIDE) {
        /* flags of DMA chan used in host */
        dma_chan->flag =
            (DEVDRV_DMA_REMOTE_SIDE << DEVDRV_DMA_SQCQ_SIDE_BIT) | (DEVDRV_DISABLE << DEVDRV_DMA_SML_PKT_BIT);
    } else {
        /* flags of DMA chan used in device */
        dma_chan->flag =
            (DEVDRV_DMA_LOCAL_SIDE << DEVDRV_DMA_SQCQ_SIDE_BIT) | (DEVDRV_ENABLE << DEVDRV_DMA_SML_PKT_BIT);
    }

    /* reset DMA channel before init */
    ret = devdrv_dma_ch_cfg_reset(dma_chan->io_base);
    if (ret) {
        devdrv_err("%s, dma_ch_cfg_reset fail\n", dev_driver_string(dma_dev->dev));
        return ret;
    }

    /* program ring base addresses/depths into the channel registers */
    devdrv_dma_ch_cfg_init(dma_chan->io_base, dma_chan->sq_desc_dma, dma_chan->cq_desc_dma, dma_chan->sq_depth,
        dma_chan->cq_depth, DEVDRV_PF_NUM, devdrv_get_dma_sqcq_side(dma_chan));

    /* wait for controller SRAM init before enabling the channel */
    devdrv_dma_check_sram_init_status(dma_dev->io_base, DEVDRV_DMA_TIMEOUT);
    /* enable DMA channel */
    devdrv_set_dma_chan_en(dma_chan->io_base, 1);

    /* completion handling runs in tasklet (atomic) context */
    dma_chan->done_irq = (int)done_irq;
    tasklet_init(&dma_chan->dma_done_task, devdrv_dma_done_task, (uintptr_t)dma_chan);
    (void)devdrv_register_irq_func(dma_dev->drvdata, done_irq, devdrv_dma_done_interrupt, dma_chan, "dma_done_irq");

    if (err_irq_flag) {
        /* err interrupt we do some dfx words, so use wordqueue which can sleep */
        dma_chan->err_irq = (int)err_irq;
        INIT_WORK(&dma_chan->err_work, devdrv_dma_err_task);
        (void)devdrv_register_irq_func(dma_dev->drvdata, err_irq, devdrv_dma_err_interrupt, dma_chan, "dma_err_irq");
    } else {
        dma_chan->err_irq = -1;
    }

    spin_lock_init(&dma_chan->lock);
    spin_lock_init(&dma_chan->cq_lock);

    if (DEVDRV_DMA_REMOTE_SIDE == dma_dev->sq_cq_side) {
        /* tell the remote end which irq to raise on errors; failure is
         * logged but deliberately non-fatal (ret still returned below) */
        ret = devdrv_notify_dma_err_irq(dma_dev->drvdata, dma_chan_id, err_irq);
        if (ret) {
            devdrv_err("%s, chan(%d) use dma chan(%d) notify err irq failed\n", dev_driver_string(dma_dev->dev),
                channel_id, dma_chan_id);
        }
    }

    dma_chan->status = DEVDRV_ENABLE;
    return ret;
}

/*
 * Periodic watchdog: once per second, look at the next-expected CQ entry of
 * every channel and, if the hardware has marked it valid, kick that
 * channel's completion tasklet.  Acts as a safety net in case a done
 * interrupt was missed; then re-arms itself.
 */
void devdrv_dma_guard_work(struct work_struct *p_work)
{
    struct delayed_work *dwork = container_of(p_work, struct delayed_work, work);
    struct devdrv_dma_dev *dma_dev = container_of(dwork, struct devdrv_dma_dev, guard_work);
    u32 idx;

    for (idx = 0; idx < dma_dev->chan_count; idx++) {
        struct devdrv_dma_channel *chan = &dma_dev->dma_chan[idx];
        /* slot following cq_head is where the next completion lands */
        u32 next = (chan->cq_head + 1) % chan->cq_depth;
        struct devdrv_dma_cq_node *pending_cq = chan->cq_desc_base + next;

        if (devdrv_dma_get_cq_valid(pending_cq, chan->rounds)) {
            tasklet_schedule(&chan->dma_done_task);
        }
    }

    /* re-arm for the next 1s tick */
    schedule_delayed_work(dwork, 1 * HZ);
}

/*
 * Quiesce all DMA channels: stop the guard watchdog, mark every channel
 * disabled and tear down its interrupt handlers and deferred work.
 * Descriptor ring memory is NOT freed here; see devdrv_dma_exit().
 *
 * Fix: the done_irq teardown tested "> 0" while every other site in this
 * file uses -1 as the "not registered" sentinel and tests ">= 0"
 * (cf. the err_irq branch just above).  A registered irq number of 0 would
 * have been skipped and leaked its registration; use >= 0 consistently.
 */
void devdrv_dma_chan_disable(struct devdrv_dma_dev *dma_dev)
{
    u32 i;
    struct devdrv_dma_channel *dma_chan = NULL;

    /* work.func is only non-NULL after INIT_DELAYED_WORK has run */
    if (dma_dev->guard_work.work.func != NULL) {
        cancel_delayed_work_sync(&dma_dev->guard_work);
    }

    /* disable dma chan, unregister irq */
    for (i = 0; i < dma_dev->chan_count; i++) {
        dma_chan = &dma_dev->dma_chan[i];

        dma_chan->status = DEVDRV_DISABLE;

        /* chan has not init */
        if (dma_chan->dev == NULL) {
            continue;
        }
        if (dma_chan->err_irq >= 0) {
            devdrv_unregister_irq_func(dma_dev->drvdata, dma_chan->err_irq, dma_chan);
            dma_chan->err_irq = -1;
            cancel_work_sync(&dma_chan->err_work);
        }
        if (dma_chan->done_irq >= 0) {
            devdrv_unregister_irq_func(dma_dev->drvdata, dma_chan->done_irq, dma_chan);
            dma_chan->done_irq = -1;
            tasklet_kill(&dma_chan->dma_done_task);
        }
    }
}

/*
 * Allocate and initialize a DMA device with @chan_count channels chosen by
 * @role ("remote" = host side, anything else = device side), set up the
 * data-type -> channel-range map, bring up every channel and start the
 * guard watchdog.
 *
 * Returns the new devdrv_dma_dev on success, NULL on any failure (all
 * partially-initialized state is released via devdrv_dma_exit()).
 *
 * Fix: the resource check only validated done_irq_num even though the error
 * message also reports err_irq_num, and the init loop below consumes one
 * err irq per channel whenever err_irq_flag is set.  Validate both counts.
 */
struct devdrv_dma_dev *devdrv_dma_init(const char *role, void *drvdata, struct device *dev, void __iomem *io_base,
    int done_irq_base, int done_irq_num, int err_irq_base, int err_irq_num)
{
    struct devdrv_dma_dev *dma_dev = NULL;
    u32 size;
    int i;
    int done_irq = done_irq_base;
    int err_irq = err_irq_base;
    /* a single err irq means "no dedicated per-channel error irq" --
     * presumably a shared/legacy mode; TODO confirm against callers */
    int err_irq_flag = (err_irq_num == 1) ? 0 : 1;
    int channel_id;
    int chan_count;
    u32 sq_cq_side;

    if (!strcmp(role, "remote")) {
        channel_id = DMA_CHAN_REMOTE_USED_START_INDEX;
        chan_count = DMA_CHAN_REMOTE_USED_NUM;
        sq_cq_side = DEVDRV_DMA_REMOTE_SIDE;
    } else {
        channel_id = DMA_CHAN_LOCAL_USED_START_INDEX;
        chan_count = DMA_CHAN_LOCAL_USED_NUM;
        sq_cq_side = DEVDRV_DMA_LOCAL_SIDE;
    }

    /* each channel needs its own done irq, and its own err irq when
     * dedicated err irqs are in use */
    if ((chan_count > done_irq_num) || (err_irq_flag && (chan_count > err_irq_num))) {
        devdrv_err("%s, done irq num %d or err irq num %d is not enough for chan num %d\n", dev_driver_string(dev),
            done_irq_num, err_irq_num, chan_count);
        return NULL;
    }

    /* devdrv_dma_dev ends with a flexible channel array */
    size = sizeof(struct devdrv_dma_dev) + sizeof(struct devdrv_dma_channel) * chan_count;

    dma_dev = (struct devdrv_dma_dev *)kzalloc(size, GFP_KERNEL);
    if (dma_dev == NULL) {
        devdrv_err("%s, dma dev alloc failed!\n", dev_driver_string(dev));
        return NULL;
    }

    dma_dev->dev = dev;
    dma_dev->io_base = io_base;
    dma_dev->chan_count = chan_count;
    dma_dev->drvdata = drvdata;

    /* init data type to dma chan map: one channel for COMMON, one for
     * PCIE_MSG, the remaining chan_count - 2 for TRAFFIC
     * (NOTE(review): assumes chan_count >= 2 -- confirm the
     * DMA_CHAN_*_USED_NUM constants guarantee this) */
    dma_dev->data_chan[DEVDRV_DMA_DATA_COMMON].chan_start_id = 0;
    dma_dev->data_chan[DEVDRV_DMA_DATA_COMMON].chan_num = 1;
    dma_dev->data_chan[DEVDRV_DMA_DATA_COMMON].last_use_chan = dma_dev->data_chan[DEVDRV_DMA_DATA_COMMON].chan_start_id;

    dma_dev->data_chan[DEVDRV_DMA_DATA_PCIE_MSG].chan_start_id =
        dma_dev->data_chan[DEVDRV_DMA_DATA_COMMON].chan_start_id + dma_dev->data_chan[DEVDRV_DMA_DATA_COMMON].chan_num;
    dma_dev->data_chan[DEVDRV_DMA_DATA_PCIE_MSG].chan_num = 1;
    dma_dev->data_chan[DEVDRV_DMA_DATA_PCIE_MSG].last_use_chan =
        dma_dev->data_chan[DEVDRV_DMA_DATA_PCIE_MSG].chan_start_id;

    dma_dev->data_chan[DEVDRV_DMA_DATA_TRAFFIC].chan_start_id =
        dma_dev->data_chan[DEVDRV_DMA_DATA_PCIE_MSG].chan_start_id +
        dma_dev->data_chan[DEVDRV_DMA_DATA_PCIE_MSG].chan_num;
    dma_dev->data_chan[DEVDRV_DMA_DATA_TRAFFIC].chan_num = chan_count - 2;
    dma_dev->data_chan[DEVDRV_DMA_DATA_TRAFFIC].last_use_chan =
        dma_dev->data_chan[DEVDRV_DMA_DATA_TRAFFIC].chan_start_id;

    dma_dev->sq_cq_side = sq_cq_side;

    if (devdrv_check_dl_dlcmsm_state(drvdata) != 0) {
        devdrv_err("%s, check dlcmsm state fail\n", dev_driver_string(dev));
        kfree(dma_dev);
        dma_dev = NULL;
        return NULL;
    }

    for (i = 0; i < chan_count; i++) {
        if (0 != devdrv_dma_init_chan(dma_dev, i, channel_id++, done_irq++, err_irq++, err_irq_flag)) {
            devdrv_err("%s, dma init chan %d fail\n", dev_driver_string(dev), i);
            /* releases all channels initialized so far, then dma_dev */
            devdrv_dma_exit(dma_dev);
            return NULL;
        }
    }

    INIT_DELAYED_WORK(&dma_dev->guard_work, devdrv_dma_guard_work);
    schedule_delayed_work(&dma_dev->guard_work, 1 * HZ);

    return dma_dev;
}

/*
 * Full teardown of a DMA device: stop the watchdog, unregister every
 * channel's interrupts, kill deferred work, reset the channel hardware,
 * free the SQ/CQ rings, and finally release the device structure itself.
 * Safe to call with NULL and with partially-initialized devices (channels
 * whose ->dev is still NULL are skipped).
 *
 * Fixes: the done_irq test used "> 0" although -1 is the "not registered"
 * sentinel throughout this file (the err_irq branch above tests ">= 0");
 * irq 0 would have been skipped.  Also dropped the dead "dma_dev = NULL"
 * store -- assigning a by-value parameter after kfree() has no effect.
 */
void devdrv_dma_exit(struct devdrv_dma_dev *dma_dev)
{
    u32 i;
    struct devdrv_dma_channel *dma_chan = NULL;

    if (dma_dev == NULL) {
        return;
    }

    /* work.func is only non-NULL after INIT_DELAYED_WORK has run */
    if (dma_dev->guard_work.work.func != NULL) {
        cancel_delayed_work_sync(&dma_dev->guard_work);
    }

    for (i = 0; i < dma_dev->chan_count; i++) {
        dma_chan = &dma_dev->dma_chan[i];

        /* chan has not init */
        if (dma_chan->dev == NULL) {
            continue;
        }

        if (dma_chan->err_irq >= 0) {
            devdrv_unregister_irq_func(dma_dev->drvdata, dma_chan->err_irq, dma_chan);
            dma_chan->err_irq = -1;
            cancel_work_sync(&dma_chan->err_work);
        }

        if (dma_chan->done_irq >= 0) {
            devdrv_unregister_irq_func(dma_dev->drvdata, dma_chan->done_irq, dma_chan);
            dma_chan->done_irq = -1;
            tasklet_kill(&dma_chan->dma_done_task);
        }

        /* quiesce the hardware before releasing its descriptor memory */
        (void)devdrv_dma_ch_cfg_reset(dma_chan->io_base);

        devdrv_free_dma_sq_cq(dma_chan);
    }
    kfree(dma_dev);
}
