// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2019, LomboTech Co.Ltd.
 * Author: lomboswer <lomboswer@lombotech.com>
 *
 * lb util driver - Driver of LomboTech lombo util
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of_reserved_mem.h>
#include <asm/cacheflush.h>
#include "lombo_util.h"

/* for arm64 & riscv-64, the funcs should be implemented later */
#ifndef CONFIG_64BIT
#include "dma.h"
#else
#ifndef CONFIG_ARCH_LOMBO_N5
#define dmac_map_area			__dma_map_area
#define dmac_unmap_area			__dma_unmap_area
#define flush_cache_all()		do { } while (0)
#else
#define dmac_map_area(addr, size, dir)   do { } while (0)
#define dmac_unmap_area(addr, size, dir) do { } while (0)
#define flush_cache_all()		 do { } while (0)
#endif
#endif

/* name of the misc device node exposed under /dev */
#define LB_MISC_NAME "lb_util"

/*
 * Platform device captured in probe; its struct device is used for all
 * DMA-coherent allocations and dma-buf attachments. NULL until probe runs.
 */
static struct platform_device *pdev_util;

/* Bookkeeping for one DMA-coherent allocation exported as a dma-buf. */
typedef struct {
	void			*virt_addr;	/* kernel virtual address */
	dma_addr_t		phy_addr;	/* device/DMA address */
	size_t			size;		/* allocation size in bytes */
} coherent_buf_t;

/*
 * Single buffer shared by all ioctls: each command copies its own view
 * in and out, sized by _IOC_SIZE(cmd).
 */
union lb_ioctl_arg {
	struct util_fd2phy fd2phy;
	struct util_sync sync;
	struct util_alloc_data alloc_data;
	struct util_virt_data virt_data;
};

/*
 * dma-buf .map_dma_buf callback — intentionally a stub: this exporter
 * does not support attachment mapping (returns NULL, which importers
 * must treat as failure). Only the trace message runs.
 */
static struct sg_table *util_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	pr_debug("func %s, line %d\n", __func__, __LINE__);
	return NULL;
}

/*
 * dma-buf .unmap_dma_buf callback — no-op counterpart of the stubbed
 * map above; nothing was mapped, so there is nothing to tear down.
 */
static void util_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	pr_debug("func %s, line %d\n", __func__, __LINE__);
}

static int util_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	coherent_buf_t *buffer = dmabuf->priv;
	unsigned long pfn_start = (buffer->phy_addr >> PAGE_SHIFT) +
					vma->vm_pgoff;
	unsigned long size = vma->vm_end - vma->vm_start;
	int ret = 0;

	ret = remap_pfn_range(vma, vma->vm_start, pfn_start,
				size, vma->vm_page_prot);
	if (ret)
		pr_err("%s: remap_pfn_range failed at [0x%lx  0x%lx]\n",
			__func__, vma->vm_start, vma->vm_end);
	else
		pr_debug("map %pad to 0x%lx, size: 0x%lx\n", &buffer->phy_addr,
			vma->vm_start, size);

	return ret;
}

/*
 * Release a coherent allocation made by util_coherent_alloc(): return
 * the DMA memory to the device and free the bookkeeping structure.
 * Safe to call with NULL or with a descriptor whose allocation failed.
 */
static void util_coherent_free(coherent_buf_t *cbuf)
{
	if (cbuf == NULL || cbuf->virt_addr == NULL)
		return;

	pr_debug("free coherent phy %pad\n", &cbuf->phy_addr);
	dma_free_coherent(&pdev_util->dev, cbuf->size,
		cbuf->virt_addr, cbuf->phy_addr);
	kfree(cbuf);
}

static void util_dma_buf_release(struct dma_buf *dmabuf)
{
	coherent_buf_t *buffer = dmabuf->priv;

	util_coherent_free(buffer);
}

/*
 * dma-buf .map (kmap) callback — kernel mapping of individual pages is
 * not supported by this exporter; always returns NULL.
 */
static void *util_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	return NULL;
}

/*
 * Exporter operations for buffers created by util_coherent_alloc().
 * Only mmap and release are functional; map/unmap are stubs, so this
 * dma-buf is intended for userspace mmap access rather than device
 * attachment by other drivers.
 */
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = util_map_dma_buf,
	.unmap_dma_buf = util_unmap_dma_buf,
	.mmap = util_mmap,
	.map = util_dma_buf_kmap,
	.release = util_dma_buf_release,
};

/*
 * util_coherent_alloc - allocate a DMA-coherent buffer and export it to
 * userspace as a dma-buf file descriptor.
 * @data: ioctl payload; @data->len is the requested size (in), while
 *        @data->fd and @data->phy_addr are filled in on success (out).
 *
 * Returns 0 on success or a negative errno; on any failure path no
 * memory is leaked and no fd is installed.
 */
static int util_coherent_alloc(struct util_alloc_data *data)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf = NULL;
	coherent_buf_t *coherent_buf = NULL;

	if (data == NULL || data->len <= 0) {
		pr_err("alloc data params err!!\n");
		return -EINVAL;
	}

	/* kzalloc() returns NULL on failure, never an ERR_PTR() */
	coherent_buf = kzalloc(sizeof(*coherent_buf), GFP_KERNEL);
	if (!coherent_buf) {
		pr_err("alloc struct coherent buf err!!\n");
		return -ENOMEM;
	}

	coherent_buf->size = data->len;
	coherent_buf->virt_addr = dma_alloc_coherent(&pdev_util->dev,
		coherent_buf->size, &coherent_buf->phy_addr, GFP_KERNEL);
	if (!coherent_buf->virt_addr) {
		pr_err("alloc coherent mem err!!\n");
		kfree(coherent_buf);	/* was leaked here previously */
		return -ENOMEM;
	}
	pr_debug("alloc virt %p, phy %pad, size %zu\n",
		coherent_buf->virt_addr, &coherent_buf->phy_addr,
		coherent_buf->size);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = coherent_buf->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = coherent_buf;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		util_coherent_free(coherent_buf);
		pr_err("dma buf export err!!\n");
		return -EFAULT;
	}

	/*
	 * If no fd can be installed, dropping the dma_buf reference frees
	 * the buffer via util_dma_buf_release(). Propagate the error
	 * instead of reporting success with a negative "fd".
	 */
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		return fd;
	}

	data->fd = fd;
	data->phy_addr = coherent_buf->phy_addr;

	return 0;
}

static long lb_misc_ioctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	unsigned int dir;
	union lb_ioctl_arg data;

	if (pdev_util == NULL) {
		pr_err("misc dev not registered!\n");
		return -EFAULT;
	}

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	dir = _IOC_DIR(cmd);

	/*
	 * The copy_from_user is unconditional here for both read and write
	 * to do the validate. If there is no write for the ioctl, the
	 * buffer is cleared
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	if (!(dir & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case LB_UTIL_FD2PHY: {
		struct dma_buf_attachment *attachment;
		struct dma_buf *buf;
		struct sg_table *sg_table;

		buf = dma_buf_get(data.fd2phy.fd);
		if (!buf) {
			pr_err("lbmisc: failed to get dmabuf.\n");
			return -EFAULT;
		}
		attachment = dma_buf_attach(buf, &pdev_util->dev);
		if (!attachment) {
			pr_err("lbmisc: failed attach dmabuf.\n");
			dma_buf_put(buf);
			return -EFAULT;
		}
		sg_table = dma_buf_map_attachment(attachment,
				DMA_BIDIRECTIONAL);
		if (!sg_table) {
			pr_err("lbmisc: failed map dmabuf attachment.\n");
			dma_buf_detach(buf, attachment);
			dma_buf_put(buf);
			return -EFAULT;
		}
		data.fd2phy.phy_addr = sg_phys(sg_table->sgl);
#ifdef CONFIG_ARCH_LOMBO_N5
		data.fd2phy.phy_addr = phys_to_dma(&pdev_util->dev,
						data.fd2phy.phy_addr);
#endif
		dma_buf_unmap_attachment(attachment,
				sg_table, DMA_BIDIRECTIONAL);
		dma_buf_detach(buf, attachment);
		dma_buf_put(buf);
		break;
	}
	case LB_UTIL_SYNC: {
		struct dma_buf_attachment *attachment;
		struct dma_buf *buf;
		struct sg_table *sg_table;

		buf = dma_buf_get(data.sync.fd);
		if (!buf) {
			pr_err("lbmisc: failed to get dmabuf.\n");
			return -EFAULT;
		}
		attachment = dma_buf_attach(buf, &pdev_util->dev);
		if (!attachment) {
			pr_err("lbmisc: failed attach dmabuf.\n");
			dma_buf_put(buf);
			return -EFAULT;
		}
		sg_table = dma_buf_map_attachment(attachment,
			DMA_BIDIRECTIONAL);
		if (!sg_table) {
			pr_err("lbmisc: failed map dmabuf attachment.\n");
			dma_buf_detach(buf, attachment);
			dma_buf_put(buf);
			return -EFAULT;
		}
		dma_buf_unmap_attachment(attachment,
			sg_table, DMA_BIDIRECTIONAL);
		dma_buf_detach(buf, attachment);
		dma_buf_put(buf);
		break;
	}
	case LB_UTIL_ALLOC: {
		ret = util_coherent_alloc(&data.alloc_data);
		if (ret) {
			pr_err("alloc coherent err!!\n");
			return ret;
		}
		break;
	}
	case LB_UTIL_FLUSH_PART: {
		/* sync the cache */
		dmac_map_area(data.virt_data.virt,
			data.virt_data.size, DMA_BIDIRECTIONAL);
		dmac_unmap_area(data.virt_data.virt,
			data.virt_data.size, DMA_BIDIRECTIONAL);
		break;
	}
	case LB_UTIL_FLUSH_ALL: {
		flush_cache_all();
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

/*
 * File operations for /dev/lb_util; the same handler serves both native
 * and compat ioctls since the payload union contains no pointers with
 * ABI-dependent layout visible here — NOTE(review): confirm the structs
 * in lombo_util.h are compat-safe.
 */
static const struct file_operations lb_misc_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = lb_misc_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= lb_misc_ioctl,
#endif
};

/* misc device registered in probe; minor number assigned dynamically */
static struct miscdevice lb_miscdevice = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = LB_MISC_NAME,
	.fops = &lb_misc_fops,
};

static int lb_util_probe(struct platform_device *pdev)
{
	int ret = 0;

	pdev_util = pdev;
	ret = misc_register(&lb_miscdevice);
	if (ret)
		pr_err("lbmisc: failed to register misc device.\n");

#if defined(CONFIG_ARCH_LOMBO_N5V1)
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret < 0)
		dev_warn(&pdev->dev, "bindding dev to coherent mem failed\n");
#endif

	return ret;
}

/*
 * Remove: unregister the misc device created in probe.
 * NOTE(review): non-static unlike its siblings — confirm nothing outside
 * this file references it before adding static.
 */
int lb_util_remove(struct platform_device *pdev)
{
	misc_deregister(&lb_miscdevice);

	return 0;
}

/*
 * Device-tree match table.
 * NOTE(review): the compatible string contains a space after the comma,
 * which is non-standard ("vendor,device" has no space) — it must match
 * the device tree exactly, so do not change one without the other.
 */
static const struct of_device_id lb_util_of_match[] = {
	{ .compatible = "lombo, lbutil", },
	{},
};

/*
 * Platform driver glue; matched against the DT table above.
 * (.owner is also set by platform_driver_register(), so the explicit
 * assignment here is redundant but harmless.)
 */
static struct platform_driver lb_util_driver = {
	.probe		= lb_util_probe,
	.remove		= lb_util_remove,
	.driver		= {
		.name		= "lb_util",
		.of_match_table	= lb_util_of_match,
		.owner		= THIS_MODULE,
	},
};

/*
 * module_platform_driver() expands to exactly the init/exit pair that
 * was hand-written here (register on init, unregister on exit) — the
 * standard idiom for drivers with no extra module-level setup.
 */
module_platform_driver(lb_util_driver);
