#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/device/class.h>

/* Size in bytes of each coherent DMA buffer (source and destination). */
#define DMA_BUFFER_LEN 100
/*
 * Per-driver state. A single global instance (g_pd) is shared by all
 * opens of the device node, so concurrent opens would race on these
 * fields — NOTE(review): no locking is visible here; confirm single-open
 * usage or add a mutex.
 */
struct private_data {
    int major;                 /* char-device major number from register_chrdev() */
    struct class *cls;         /* device class for /sys/class and /dev node creation */
    struct device *dev;        /* device used for DMA mapping and dev_* logging */
    struct dma_chan *channel;  /* mem2mem DMA channel acquired in dma_open() */
    void *src;                 /* CPU address of the DMA source buffer */
    void *dest;                /* CPU address of the DMA destination buffer */
    dma_addr_t src_addr;       /* bus/DMA address of src */
    dma_addr_t dest_addr;      /* bus/DMA address of dest */
};

/* Single global driver instance; stored in file->private_data on open. */
static struct private_data g_pd;

static int dma_open(struct inode *inode, struct file *file)
{
    int ret = 0;
    struct dma_chan *chan;
    dma_cap_mask_t mask;
    file->private_data = &g_pd;

    g_pd.src = dma_alloc_coherent(g_pd.dev, DMA_BUFFER_LEN, &g_pd.src_addr, GFP_KERNEL);
    if (!g_pd.src) {
        dev_err(g_pd.dev, "allocate dma src memory failed\n");
        ret = -ENOMEM;
        goto ALLOCATE_SRC_FAILED;
    }
    g_pd.dest = dma_alloc_coherent(g_pd.dev, DMA_BUFFER_LEN, &g_pd.dest_addr, GFP_KERNEL);
    if (!g_pd.dest) {
        dev_err(g_pd.dev, "allocate dma dest memory failed\n");
        ret = -ENOMEM;
        goto ALLOCATE_DEST_FAILED;
    }

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask); // mem2mem

    chan = dma_request_channel(mask, NULL, NULL);
    if (IS_ERR(chan)) {
        dev_err(g_pd.dev, "dma request channel error");
        goto DMA_REQUEST_CHANNEL_ERR;
    }
    g_pd.channel = chan;

    return 0;
DMA_REQUEST_CHANNEL_ERR:
    dma_free_coherent(g_pd.dev, DMA_BUFFER_LEN, g_pd.dest, g_pd.dest_addr);
ALLOCATE_DEST_FAILED:
    dma_free_coherent(g_pd.dev, DMA_BUFFER_LEN, g_pd.src, g_pd.src_addr);
ALLOCATE_SRC_FAILED:
    return ret;
}

/*
 * Release: undo everything dma_open() acquired — give back the DMA
 * channel, then free both coherent buffers. Always succeeds.
 */
static int dma_release(struct inode *inode, struct file *file)
{
    struct private_data *pd = file->private_data;

    /* Return the channel first so no transfer can touch the buffers below. */
    dma_release_channel(pd->channel);

    dma_free_coherent(pd->dev, DMA_BUFFER_LEN, pd->dest, pd->dest_addr);
    dma_free_coherent(pd->dev, DMA_BUFFER_LEN, pd->src, pd->src_addr);

    return 0;
}

/*
 * Read: copy the DMA destination buffer out to userspace.
 * Returns the number of bytes actually copied.
 */
static ssize_t dma_read(struct file *file, char __user *data, size_t len, loff_t *pos)
{
    struct private_data *p = file->private_data;
    unsigned long not_copied;

    /* Never read past the end of the 100-byte DMA buffer. */
    if (len > DMA_BUFFER_LEN)
        len = DMA_BUFFER_LEN;

    /*
     * copy_to_user(to, from, n): the USER pointer is the destination.
     * The original call had the arguments swapped, copying from the
     * user pointer into the kernel buffer (and would fault/corrupt).
     */
    not_copied = copy_to_user(data, p->dest, len);

    return len - not_copied;
}

/*
 * Completion callback invoked by the dmaengine core when the memcpy
 * transfer finishes; logs the destination buffer contents as a string.
 */
static void dma_callback(void *data)
{
    struct private_data *pd = data;

    dev_info(pd->dev, "dma transfer done.dest=%s\n", (char*)pd->dest);
}

/*
 * Write: copy user data into the DMA source buffer, then kick off a
 * mem2mem DMA transfer to the destination buffer. Completion is
 * reported asynchronously via dma_callback(). Returns the number of
 * bytes accepted, or a negative errno.
 */
static ssize_t dma_write(struct file *file, const char __user *data, size_t len, loff_t *pos)
{
    struct private_data *p = file->private_data;
    unsigned long size_no_copy;
    struct dma_async_tx_descriptor *txd;
    dma_cookie_t cookie;

    if (len > DMA_BUFFER_LEN) {
        dev_err(p->dev, "write %ld bytes are larger than DMA BUFFER LENGTH %d\n", len, DMA_BUFFER_LEN);
        /* Oversized request is invalid input, not an allocation failure. */
        return -EINVAL;
    }
    size_no_copy = copy_from_user(p->src, data, len);
    if (size_no_copy) {
        dev_err(p->dev, "there are %lu bytes wasn't be copied\n", size_no_copy);
    }
    txd = dmaengine_prep_dma_memcpy(p->channel, p->dest_addr, p->src_addr, DMA_BUFFER_LEN, DMA_PREP_INTERRUPT |
        DMA_CTRL_ACK);
    /*
     * prep can return NULL (busy channel, unsupported op); the original
     * code dereferenced txd unconditionally and would oops here.
     */
    if (!txd) {
        dev_err(p->dev, "failed to prepare dma memcpy descriptor\n");
        return -EIO;
    }
    txd->callback = dma_callback;
    txd->callback_param = p;

    cookie = dmaengine_submit(txd);
    if (dma_submit_error(cookie)) {
        dev_err(p->dev, "dma submit failed\n");
        return -EIO;
    }
    dma_async_issue_pending(p->channel);
    return len - size_no_copy;
}

static struct file_operations g_fops = {
    .open = dma_open,
    .release = dma_release,
    .read = dma_read,
    .write = dma_write,
};

/*
 * Module init: register the char device, create the class and device
 * node (/dev/my_dma), and configure the device's DMA mask so coherent
 * allocations in dma_open() can succeed. Returns 0 or a negative errno.
 */
static int __init rk3588_dma_init(void)
{
    int ret = 0;
    struct class *cls;
    struct device *dev;

    ret = register_chrdev(0, "DMA", &g_fops);
    if (ret < 0) {
        printk("[%s][%d]register char device failed.ret=%d\n", __func__, __LINE__, ret);
        goto REGISTER_CHAR_DEV_ERR;
    }
    g_pd.major = ret;

    /*
     * class_create()/device_create() return ERR_PTR() on failure, never
     * NULL, so the original '!ptr' checks could never detect an error —
     * and 'ret' still held the positive major number, which would have
     * been returned as "success". Use IS_ERR/PTR_ERR and set ret.
     */
    cls = class_create(THIS_MODULE, "DMA");
    if (IS_ERR(cls)) {
        printk("[%s][%d]create class failed.\n", __func__, __LINE__);
        ret = PTR_ERR(cls);
        goto CLASS_CREATE_ERR;
    }
    g_pd.cls = cls;

    dev = device_create(cls, NULL, MKDEV(g_pd.major, 0), &g_pd, "my_dma");
    if (IS_ERR(dev)) {
        printk("[%s][%d]create device failed\n", __func__, __LINE__);
        ret = PTR_ERR(dev);
        goto DEVICE_CREATE_ERR;
    }
    /*
     * A DMA mask must be set before DMA is usable on this device,
     * otherwise coherent DMA allocations will fail. Prefer 64-bit,
     * fall back to 32-bit.
     */
    if (!dev->dma_mask) {
        dev->dma_mask = &dev->coherent_dma_mask;
    }
    if (dma_set_mask(dev, DMA_BIT_MASK(64))) {
        if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
            dev_err(dev, "No suitable DMA mask available\n");
            ret = -EIO;
            /* device was created — must be destroyed, not just the class */
            goto SET_DMA_MASK_ERR;
        }
    }
    g_pd.dev = dev;

    return 0;
SET_DMA_MASK_ERR:
    device_destroy(cls, MKDEV(g_pd.major, 0));
DEVICE_CREATE_ERR:
    class_destroy(cls);
CLASS_CREATE_ERR:
    unregister_chrdev(g_pd.major, "DMA");
REGISTER_CHAR_DEV_ERR:
    return ret;
}

/*
 * Module exit: tear down in the reverse order of rk3588_dma_init() —
 * remove the device node, destroy the class, then drop the major number.
 */
static void __exit rk3588_dma_exit(void)
{
    dev_t devno = MKDEV(g_pd.major, 0);

    device_destroy(g_pd.cls, devno);
    class_destroy(g_pd.cls);
    unregister_chrdev(g_pd.major, "DMA");
}

module_init(rk3588_dma_init);
module_exit(rk3588_dma_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yujie Guo");
MODULE_DESCRIPTION("rk3588-dma driver for Learning");
