#include <linux/module.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <asm-generic/sizes.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Per-device state for the L2MEM pool driver. */
struct l2mem_dev {
	struct device *dev;		/* backing platform device */
	void __iomem *virt_base;	/* CPU mapping of the L2MEM region */

	struct gen_pool *pool;		/* allocator over the mapped region */
	struct clk *clk;		/* "core" clock (kept running on remove) */
	struct clk *pclk;		/* "port" clock (disabled on remove) */
};

static struct gen_pool *l2mem_pool;

/*
 * l2mem_alloc - carve @len bytes out of the L2MEM pool for CPU use.
 * @len: number of bytes requested.
 *
 * Returns a CPU virtual address on success, or NULL when the pool has
 * not been registered yet (probe not run) or cannot satisfy the request.
 */
void *l2mem_alloc(size_t len)
{
	unsigned long addr;

	if (!l2mem_pool)
		return NULL;

	addr = gen_pool_alloc(l2mem_pool, len);

	return addr ? (void *)addr : NULL;
}
EXPORT_SYMBOL(l2mem_alloc);

/*
 * l2mem_dma_alloc - allocate L2MEM usable for DMA.
 * @len: number of bytes requested.
 * @dma: out parameter receiving the bus/DMA address of the allocation.
 *
 * Returns the CPU virtual address, or NULL when the pool is unavailable
 * or exhausted. gen_pool_dma_alloc() already yields NULL on failure, so
 * its result is propagated directly.
 */
void *l2mem_dma_alloc(size_t len, dma_addr_t *dma)
{
	if (!l2mem_pool)
		return NULL;

	return gen_pool_dma_alloc(l2mem_pool, len, dma);
}
EXPORT_SYMBOL(l2mem_dma_alloc);

/*
 * l2mem_free - return a region obtained from l2mem_alloc()/l2mem_dma_alloc().
 * @addr: CPU virtual address previously returned by an l2mem allocator.
 * @len:  size of the original allocation.
 *
 * Unlike the original, this guards against a missing pool (driver not
 * probed, or already removed) and a NULL @addr, matching the NULL-pool
 * checks the allocation paths already perform; gen_pool_free() would
 * otherwise dereference the NULL pool.
 */
void l2mem_free(void *addr, size_t len)
{
	if (!l2mem_pool || !addr)
		return;

	gen_pool_free(l2mem_pool, (unsigned long)addr, len);
}
EXPORT_SYMBOL(l2mem_free);


/*
 * REVISIT This supports CPU and DMA access to/from L2MEM, but it
 * doesn't (yet?) support some other notable uses of L2MEM:  as TCM
 * for data and/or instructions; and holding code needed to enter
 * and exit suspend states (while DRAM can't be used).
 */

static int l2mem_probe(struct platform_device *pdev)
{
	struct l2mem_dev *l2mem;
	struct resource *res;
	size_t size;
	int granularity = 0;
	int ret;
	struct device_node *np = pdev->dev.of_node;

	l2mem = devm_kzalloc(&pdev->dev, sizeof(*l2mem), GFP_KERNEL);
	if (!l2mem)
		return -ENOMEM;

	l2mem->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(l2mem->dev, "found no memory resource\n");
		return -EINVAL;
	}

	size = resource_size(res);

	ret = of_property_read_u32(pdev->dev.of_node, "granularity", &granularity);
	if (ret || granularity < 0) {
		dev_err(l2mem->dev, "parse property granularity error %d\n", ret);
		return ret;
	}

	if (!devm_request_mem_region(l2mem->dev, res->start, size, pdev->name)) {
		dev_err(l2mem->dev, "could not request region for resource\n");
		return -EBUSY;
	}

	if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
		l2mem->virt_base = devm_ioremap(l2mem->dev, res->start, size);
	else
		l2mem->virt_base = devm_ioremap_wc(l2mem->dev, res->start, size);
	if (!l2mem->virt_base)
		return -ENOMEM;

	l2mem->pool = devm_gen_pool_create(l2mem->dev, ilog2(granularity),
					  NUMA_NO_NODE, NULL);
	if (IS_ERR(l2mem->pool))
		return PTR_ERR(l2mem->pool);

	ret = gen_pool_add_virt(l2mem->pool, (unsigned long)l2mem->virt_base,
					res->start, size, -1);

	l2mem_pool = l2mem->pool;

	if (ret < 0) {
		dev_err(l2mem->dev, "failed to register l2mem poll: %d\n", ret);
		return ret;
	}

	l2mem->clk = of_clk_get_by_name(np, "core");
	if (IS_ERR(l2mem->clk)) {
		dev_err(l2mem->dev, "failed to get l2mem clk\n");
		return PTR_ERR(l2mem->clk);
	}

	clk_prepare_enable(l2mem->clk);

	l2mem->pclk = of_clk_get_by_name(np, "port");
	if (IS_ERR(l2mem->pclk)) {
		dev_err(l2mem->dev, "failed to get l2mem pclk\n");
		return PTR_ERR(l2mem->pclk);
	}

	clk_prepare_enable(l2mem->pclk);

	platform_set_drvdata(pdev, l2mem);

	dev_info(l2mem->dev, "L2MEM pool: %zu KiB @ 0x%p\n",
		gen_pool_size(l2mem->pool) / 1024, l2mem->virt_base);

	return 0;
}

/*
 * l2mem_remove - tear down the L2MEM pool driver.
 *
 * Warns (but proceeds) if allocations are still outstanding, since the
 * backing devm mapping and gen_pool are about to go away underneath
 * them. Fixes over the previous version: the "port" clock reference
 * obtained with of_clk_get_by_name() is now released with clk_put()
 * after being disabled, and the guard uses IS_ERR_OR_NULL() -- a plain
 * NULL test is the wrong shape for a clk handle that could in principle
 * hold an error pointer.
 */
static int l2mem_remove(struct platform_device *pdev)
{
	struct l2mem_dev *l2mem = platform_get_drvdata(pdev);

	if (gen_pool_avail(l2mem->pool) < gen_pool_size(l2mem->pool))
		dev_err(l2mem->dev, "removed while L2MEM allocated\n");

	l2mem_pool = NULL;

	/*
	 * The "core" clock is deliberately kept prepared/enabled
	 * (do not disable l2mem_coreclk); only the port clock is torn down.
	 */
	if (!IS_ERR_OR_NULL(l2mem->pclk)) {
		clk_disable_unprepare(l2mem->pclk);
		clk_put(l2mem->pclk);
	}

	return 0;
}


#ifdef CONFIG_OF
/*
 * Device-tree match table.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(of, l2mem_dt_ids) here,
 * so auto-loading as a module would not work -- presumably this driver
 * is builtin-only (it uses postcore_initcall and has no module_exit);
 * confirm before adding one.
 */
static const struct of_device_id l2mem_dt_ids[] = {
	{ .compatible = "fh,fh-l2mem" },
	{}
};
#endif

/* Platform driver glue; matched via DT (fh,fh-l2mem) or the "l2mem" name. */
static struct platform_driver l2mem_driver = {
	.driver = {
		.name = "l2mem",
		.of_match_table = of_match_ptr(l2mem_dt_ids),
	},
	.probe = l2mem_probe,
	.remove = l2mem_remove,
};

/*
 * Register the driver early (postcore) so that L2MEM is available to
 * other drivers' probe routines; module_platform_driver() would run at
 * the later device initcall level, hence the manual initcall here.
 */
static int __init l2mem_init(void)
{
	return platform_driver_register(&l2mem_driver);
}

postcore_initcall(l2mem_init);
