// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2016-2018, LomboTech Co.Ltd.
 * Authors:
 *	lomboswer <lomboswer@lombotech.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>

#include "csp_vgss_top.h"
#include "lombo_drv.h"

#define LOMBO_VGSS_EXT_SCLK_NUM		VGSS_CLK_PARENT_MAX
#define LOMBO_VGSS_SCLK_NUM		VGSS_CLK_PARENT_MAX

/**
 * struct lombo_vgss_sclk - state of one VGSS source clock (sclk).
 * @ext_sclk: external clock vgss_sclk(x)_ext; while NULL this sclk is
 *	unusable (enable/disable return -EINVAL).
 * @ext_psclk: parent clock of @ext_sclk (may be NULL).
 * @req_rate: requested rate in Hz, from the DT "clk_rate" property;
 *	0 means "do not call clk_set_rate".
 * @real_rate: actual rate while enabled; reset to 0 on last disable.
 * @use_adpll: non-zero when the inner ADPLL multiplies @ext_sclk's rate
 *	(set only when both "adpll_tune" cells are present in DT).
 * @adpll_cfg: ADPLL configuration; tune words come from DT, the
 *	multiplier @n is computed at enable time.
 * @slock: serializes enable/disable and protects @en_cnt.
 * @en_cnt: enable reference count.
 */
struct lombo_vgss_sclk {
	struct clk *ext_sclk;
	struct clk *ext_psclk;
	unsigned long req_rate;
	unsigned long real_rate;
	int use_adpll;
	struct vgss_adpll_config adpll_cfg;

	struct mutex slock;
	int en_cnt;
};

/**
 * struct lombo_vgss_clk - per-module VGSS clock handle.
 * @top: owning VGSS top instance (set at creation, never changes).
 * @mod_id: module id, see enum vgss_mod.
 * @sclk_id: index of the source clock in top->sclk[] feeding this module.
 * @req_rate: requested rate in Hz; 0 means "no divider calculation,
 *	use div = 1".
 * @real_rate: the actual rate after enable; reset to 0 on last disable.
 * @div: divider applied to the source clock rate.
 * @ref_cnt: reference count; handle is freed when it drops to zero.
 * @list: node in top->clk_list.
 * @slock: serializes enable/disable and protects @en_cnt.
 * @en_cnt: enable reference count.
 */
struct lombo_vgss_clk {
	struct lombo_vgss_top *top;
	unsigned int mod_id;
	unsigned int sclk_id;
	unsigned long req_rate;
	unsigned long real_rate;
	u32 div;

	struct kref ref_cnt;
	struct list_head list;

	struct mutex slock;
	int en_cnt;
};

/**
 * struct lombo_vgss_top - VGSS top-level controller state.
 * @reg: mapped register base, handed to the csp_vgss_* layer.
 * @ahb_gate: AHB bus gate clock.
 * @ahb_reset: AHB reset clock.
 * @mem_axi_gate: memory AXI gate clock.
 * @ext_sclk: external source clocks parsed from DT ("ext_sclkN");
 *	entries may be NULL when the DT does not provide them.
 * @ext_psclk: optional parents for @ext_sclk ("ext_psclkN").
 * @sclk: inner source-clock state, configured from the "sclkN" DT
 *	child nodes.
 * @clk_list: list of module clock handles (struct lombo_vgss_clk).
 * @slock: serializes top-level register access and protects @en_cnt.
 * @en_cnt: top enable reference count.
 */
struct lombo_vgss_top {
	void __iomem *reg;

	struct clk *ahb_gate;
	struct clk *ahb_reset;
	struct clk *mem_axi_gate;
	struct clk *ext_sclk[LOMBO_VGSS_EXT_SCLK_NUM];
	struct clk *ext_psclk[LOMBO_VGSS_EXT_SCLK_NUM];

	struct lombo_vgss_sclk sclk[LOMBO_VGSS_SCLK_NUM];
	struct list_head clk_list; /* list of module clks */

	/* for vgss top access */
	struct mutex slock;
	int en_cnt;
};

/**
 * lombo_vgss_get_clk - get (or create) the clock handle for a VGSS module.
 * @vgss_top: VGSS top device (its drvdata is struct lombo_vgss_top).
 * @mod: the requesting module's device, used for error reporting.
 * @vgss_mod: module id, see enum vgss_mod (must be <= VGSS_MOD_TOP).
 * @sclk_id: source clock index, must be < LOMBO_VGSS_SCLK_NUM.
 *
 * Returns an existing handle (with its refcount raised) when one is
 * already registered for @vgss_mod, otherwise allocates a new one and
 * links it into the top's clk_list. Returns NULL on any error.
 * Release with lombo_vgss_put_clk().
 */
struct lombo_vgss_clk *lombo_vgss_get_clk(struct device *vgss_top,
	struct device *mod, unsigned int vgss_mod, unsigned int sclk_id)
{
	struct lombo_vgss_top *top = NULL;
	struct lombo_vgss_clk *vgss_clk = NULL;

	if (!mod || !vgss_top) {
		DRM_ERROR("null pointer, mod=%p, vgss_top=%p\n", mod, vgss_top);
		return NULL;
	}
	/* %u: mod/sclk ids are unsigned */
	if (vgss_mod > VGSS_MOD_TOP) {
		DRM_DEV_ERROR(mod, "inval vgss_mod=%u\n", vgss_mod);
		return NULL;
	}
	if (sclk_id >= LOMBO_VGSS_SCLK_NUM) {
		DRM_DEV_ERROR(mod, "inval sclk_id=%u\n", sclk_id);
		return NULL;
	}

	top = dev_get_drvdata(vgss_top);
	if (!top) {
		DRM_DEV_ERROR(vgss_top, "vgss_top is null\n");
		return NULL;
	}

	/*
	 * No lock is taken for clk_list: lombo_vgss_get_clk is expected
	 * to be called only from driver bind, and components are bound
	 * sequentially.
	 */
	list_for_each_entry(vgss_clk, &top->clk_list, list) {
		if (vgss_clk->mod_id == vgss_mod) {
			/* an existing handle must agree on the source clk */
			if (vgss_clk->sclk_id != sclk_id) {
				DRM_DEV_ERROR(mod, "no match sclk_id (%u,%u)\n",
					      vgss_clk->sclk_id, sclk_id);
				return NULL;
			}
			kref_get(&vgss_clk->ref_cnt);
			return vgss_clk;
		}
	}

	vgss_clk = kzalloc(sizeof(*vgss_clk), GFP_KERNEL);
	if (!vgss_clk) {
		DRM_DEV_ERROR(vgss_top, "alloc for vgss_clk struct fail\n");
		return NULL;
	}

	vgss_clk->top = top;
	vgss_clk->mod_id = vgss_mod;
	vgss_clk->sclk_id = sclk_id;
	kref_init(&vgss_clk->ref_cnt);
	mutex_init(&vgss_clk->slock);
	list_add_tail(&vgss_clk->list, &top->clk_list);

	return vgss_clk;
}

/* kref release callback: unlink the handle and free it. */
static void lombo_vgss_clk_free(struct kref *kref)
{
	struct lombo_vgss_clk *vclk;

	vclk = container_of(kref, struct lombo_vgss_clk, ref_cnt);

	/* warn when the last reference is dropped while still enabled */
	if (vclk->en_cnt > 0)
		DRM_ERROR("vgss_mod=%d not disabled\n", vclk->mod_id);

	list_del(&vclk->list);
	kfree(vclk);
}

/**
 * lombo_vgss_put_clk - drop a reference on a VGSS clock handle.
 * @clk: address of the handle pointer; cleared on success so the
 *	caller cannot use a possibly-freed handle.
 *
 * Returns 0 on success, -EINVAL when @clk or *@clk is NULL.
 */
int lombo_vgss_put_clk(struct lombo_vgss_clk **clk)
{
	struct lombo_vgss_clk *vclk;

	if (!clk || !*clk) {
		DRM_ERROR("%d\n", __LINE__);
		return -EINVAL;
	}

	vclk = *clk;
	*clk = NULL;
	kref_put(&vclk->ref_cnt, lombo_vgss_clk_free);

	return 0;
}

/*
 * Return the source clock id of @child, or LOMBO_VGSS_SCLK_NUM
 * (an out-of-range sentinel) when @child is NULL.
 */
uint32_t lombo_vgss_clk_get_parent(
	struct lombo_vgss_clk *child)
{
	if (!child)
		return LOMBO_VGSS_SCLK_NUM;

	return child->sclk_id;
}

/**
 * lombo_vgss_clk_set_parent - select the source clock of a module clock.
 * @child: module clock handle; a NULL handle is silently accepted.
 * @sclk_id: new source clock index, must be < LOMBO_VGSS_SCLK_NUM.
 *
 * Returns 0 on success, -EINVAL for an out-of-range @sclk_id.  The
 * range check matters because sclk_id is later used to index
 * top->sclk[] in the enable/disable paths.
 */
int lombo_vgss_clk_set_parent(
	struct lombo_vgss_clk *child, uint32_t sclk_id)
{
	if (!child)
		return 0;

	if (sclk_id >= LOMBO_VGSS_SCLK_NUM) {
		DRM_ERROR("inval sclk_id=%u\n", sclk_id);
		return -EINVAL;
	}

	child->sclk_id = sclk_id;
	DRM_DEBUG_KMS("child=%d,sclk_id=%d\n", child->mod_id, sclk_id);

	return 0;
}

/* Return the actual rate of @clk (0 when disabled or @clk is NULL). */
unsigned long lombo_vgss_clk_get_rate(struct lombo_vgss_clk *clk)
{
	return clk ? clk->real_rate : 0;
}

/*
 * Record the requested rate; it takes effect on the next enable,
 * where the divider is recomputed.  NULL @clk is a no-op.
 */
int lombo_vgss_clk_set_rate(struct lombo_vgss_clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	clk->req_rate = rate;
	return 0;
}

/*
 * Take one reference on the VGSS top; on the 0 -> 1 transition (and
 * only when @sw is 0) bring up the bus clocks and assert the top.
 * The count is bumped even for @sw != 0 so deassert stays balanced.
 */
static inline void lombo_vgss_top_clk_assert(
	struct lombo_vgss_top *top, int sw)
{
	mutex_lock(&top->slock);
	if (++top->en_cnt == 1 && !sw) {
		clk_prepare_enable(top->ahb_gate);
		clk_prepare_enable(top->ahb_reset);
		clk_prepare_enable(top->mem_axi_gate);
		csp_vgss_clk_assert(VGSS_MOD_TOP);
	}
	mutex_unlock(&top->slock);
}

/*
 * Drop one reference on the VGSS top; on the 1 -> 0 transition (and
 * only when @sw is 0) deassert the top and gate the bus clocks, in
 * reverse order of lombo_vgss_top_clk_assert().
 */
static inline void lombo_vgss_top_clk_deassert(
	struct lombo_vgss_top *top, int sw)
{
	mutex_lock(&top->slock);
	if (--top->en_cnt == 0 && !sw) {
		csp_vgss_clk_deassert(VGSS_MOD_TOP);
		clk_disable_unprepare(top->mem_axi_gate);
		clk_disable_unprepare(top->ahb_reset);
		clk_disable_unprepare(top->ahb_gate);
	}
	mutex_unlock(&top->slock);
}

/**
 * lombo_vgss_enable_sclk - enable the source clock feeding @clk.
 * @top: VGSS top instance.
 * @clk: module clock whose sclk_id selects the source clock.
 * @sw: when non-zero, only do bookkeeping; skip hardware enable.
 *
 * First caller sets the rate, enables the external clock and, when
 * configured, programs the ADPLL multiplier; later callers just bump
 * the refcount.  Returns 0 on success or a negative error code; on
 * error the refcounts taken here are rolled back so a later enable
 * can still succeed.
 */
static int lombo_vgss_enable_sclk(struct lombo_vgss_top *top,
	struct lombo_vgss_clk *clk, int sw)
{
	struct lombo_vgss_sclk *sclk = &top->sclk[clk->sclk_id];
	unsigned long real_rate;
	int en_cnt;
	int ret = 0;

	if (!sclk->ext_sclk) {
		DRM_ERROR("null ext_sclk of sclk%d for vgss_clk%d\n",
			  clk->sclk_id, clk->mod_id);
		return -EINVAL;
	}

	lombo_vgss_top_clk_assert(top, sw);

	mutex_lock(&sclk->slock);
	sclk->en_cnt++;
	en_cnt = sclk->en_cnt;
	if (en_cnt > 1) {
		goto out;
	} else if (en_cnt < 1) {
		ret = -EINVAL;
		goto out;
	}

	if (sclk->req_rate > 0)
		clk_set_rate(sclk->ext_sclk, sclk->req_rate);
	if (!sw)
		clk_prepare_enable(sclk->ext_sclk);
	real_rate = clk_get_rate(sclk->ext_sclk);

	if (sclk->use_adpll) {
		uint64_t tmp;
		struct vgss_adpll_config *cfg = &(sclk->adpll_cfg);

		/* guard the do_div below against a zero base rate */
		if (real_rate == 0) {
			if (!sw)
				clk_disable_unprepare(sclk->ext_sclk);
			sclk->en_cnt--; /* roll back this enable */
			ret = -EINVAL;
			goto out;
		}

		tmp = sclk->req_rate;
		do_div(tmp, real_rate);
		cfg->n = (u32)tmp;
		if (!sw) {
			ret = csp_vgss_adpll_enable(cfg);
			if (ret) {
				clk_disable_unprepare(sclk->ext_sclk);
				/*
				 * Roll back the refcount: leaving it at 1
				 * would make every later enable a no-op
				 * even though the clock is off.
				 */
				sclk->en_cnt--;
				ret = -ETIMEDOUT;
				goto out;
			}
		}

		real_rate *= cfg->n;
	}
	sclk->real_rate = real_rate;

out:
	mutex_unlock(&sclk->slock);

	if (ret) {
		/* undo the top reference taken above */
		lombo_vgss_top_clk_deassert(top, sw);
		DRM_ERROR("ret=%d\n", ret);
		return ret;
	}
	DRM_DEBUG_KMS("vgss_mod[%d];sclk[%d]:real_rate=%lu. en_cnt=%d\n",
		      clk->mod_id, clk->sclk_id, sclk->real_rate, en_cnt);

	return 0;
}

/**
 * lombo_vgss_disable_sclk - drop one enable reference on @clk's source clk.
 * @top: VGSS top instance.
 * @clk: module clock whose sclk_id selects the source clock.
 *
 * Hardware is torn down only on the last reference.  An unbalanced
 * disable (count would go negative) is rejected with -EINVAL, the
 * count is restored and the top refcount is left untouched, so one
 * stray disable cannot unbalance the whole top.
 *
 * NOTE(review): the top deassert below always uses sw=0 while the
 * matching enable passed the caller's sw — looks asymmetric; confirm
 * against the intended sw semantics.
 */
static int lombo_vgss_disable_sclk(
	struct lombo_vgss_top *top, struct lombo_vgss_clk *clk)
{
	struct lombo_vgss_sclk *sclk = &top->sclk[clk->sclk_id];
	int en_cnt;
	int ret = 0;

	if (!sclk->ext_sclk) {
		DRM_ERROR("null-ext_sclk of sclk%d for vgss_clk%d\n",
			  clk->sclk_id, clk->mod_id);
		return -EINVAL;
	}

	mutex_lock(&sclk->slock);
	sclk->en_cnt--;
	en_cnt = sclk->en_cnt;
	if (en_cnt > 0) {
		goto out;
	} else if (en_cnt < 0) {
		/* unbalanced disable: restore the count, report error */
		sclk->en_cnt++;
		ret = -EINVAL;
		goto out;
	}

	if (sclk->use_adpll)
		csp_vgss_adpll_disable();

	clk_disable_unprepare(sclk->ext_sclk);
	sclk->real_rate = 0;

out:
	mutex_unlock(&sclk->slock);

	if (ret) {
		DRM_ERROR("ret=%d\n", ret);
		return ret;
	}

	lombo_vgss_top_clk_deassert(top, 0);

	DRM_DEBUG_KMS("vgss_mod[%d]:sclk[%d],real_rate=%lu. en_cnt=%d\n",
		      clk->mod_id, clk->sclk_id, sclk->real_rate, en_cnt);

	return ret;
}

/*
 * Enable the module clock itself (divider off the already-enabled
 * source clock).  On the first enable: compute the divider from the
 * source clock's real rate and the requested rate, program parent,
 * divider and gate (unless sw), then record the resulting real rate.
 * Later enables only bump the refcount.  Always returns 0.
 *
 * NOTE(review): sclk->real_rate is read here without sclk->slock —
 * presumably safe because the caller has already enabled the sclk,
 * which fixes real_rate; confirm.
 */
static int lombo_vgss_enable_clk(struct lombo_vgss_top *top,
	struct lombo_vgss_clk *clk, int sw)
{
	struct lombo_vgss_sclk *sclk = &(top->sclk[clk->sclk_id]);
	int en_cnt;
	uint64_t tmp;

	mutex_lock(&clk->slock);
	clk->en_cnt++;
	en_cnt = clk->en_cnt;
	if (en_cnt == 1) {
		/* calc div */
		if (clk->req_rate > 0) {
			uint32_t rem;

			/* div = round(sclk_rate / req_rate), min 1;
			 * the timer module has a fixed extra /1024 stage
			 */
			tmp = sclk->real_rate;
			if (clk->mod_id == VGSS_MOD_TIMER)
				do_div(tmp, 1024);
			/* do_div: tmp becomes the quotient, returns rem */
			rem = do_div(tmp, clk->req_rate);
			if ((rem > (uint32_t)((clk->req_rate + 1) >> 1))
				|| (tmp == 0))
				tmp++;
			clk->div = (uint32_t)tmp;
		} else {
			clk->div = 1;
		}

		if (!sw) {
			mutex_lock(&top->slock);
			csp_vgss_clk_assert(clk->mod_id);
			csp_vgss_clk_set_parent(clk->mod_id, clk->sclk_id);
			csp_vgss_clk_set_divider(clk->mod_id, clk->div);
			csp_vgss_clk_enable(clk->mod_id);
			/* CQI implicitly needs the ABT module clock too */
			if (clk->mod_id == VGSS_MOD_CQI) {
				csp_vgss_clk_assert(VGSS_MOD_ABT);
				csp_vgss_clk_enable(VGSS_MOD_ABT);
			}
			mutex_unlock(&top->slock);
		}

		/* real rate = sclk_rate / div (timer: / (div * 1024)) */
		tmp = sclk->real_rate;
		if (clk->mod_id == VGSS_MOD_TIMER)
			do_div(tmp, clk->div * 1024);
		else
			do_div(tmp, clk->div);
		clk->real_rate = (unsigned long)tmp;
	}
	mutex_unlock(&clk->slock);

	DRM_DEBUG_KMS("vgss_mod[%d],en_cnt=%d\n", clk->mod_id, en_cnt);
	DRM_DEBUG_KMS("sclk_rate=%lu, req=%lu,real=%lu,div=%u\n",
		      sclk->real_rate, clk->req_rate, clk->real_rate, clk->div);

	return 0;
}

/*
 * Drop one enable reference on the module clock; on the last
 * reference gate and reset the module (and ABT for CQI) and clear
 * the recorded rate.  Always returns 0.
 */
static int lombo_vgss_disable_clk(
	struct lombo_vgss_top *top, struct lombo_vgss_clk *clk)
{
	int cnt;

	mutex_lock(&clk->slock);
	cnt = --clk->en_cnt;
	if (!cnt) {
		mutex_lock(&top->slock);
		csp_vgss_clk_disable(clk->mod_id);
		csp_vgss_clk_deassert(clk->mod_id);
		if (clk->mod_id == VGSS_MOD_CQI) {
			csp_vgss_clk_disable(VGSS_MOD_ABT);
			csp_vgss_clk_deassert(VGSS_MOD_ABT);
		}
		mutex_unlock(&top->slock);
		clk->real_rate = 0;
	}
	mutex_unlock(&clk->slock);

	DRM_DEBUG_KMS("vgss_mod[%d], en_cnt=%d\n", clk->mod_id, cnt);

	return 0;
}

/**
 * lombo_vgss_clk_prepare_enable - enable a VGSS module clock.
 * @clk: module clock handle from lombo_vgss_get_clk().
 * @sw: when non-zero, bookkeeping only (no hardware writes).
 *
 * Enables the source clock first, then the module clock itself.
 * Returns 0 on success or a negative error code.
 */
int lombo_vgss_clk_prepare_enable(struct lombo_vgss_clk *clk, int sw)
{
	struct lombo_vgss_top *top;
	int ret;

	if (!clk || (clk->sclk_id >= LOMBO_VGSS_SCLK_NUM)) {
		DRM_ERROR("clk%d or invalid sclk_id%d\n",
			  clk ? clk->mod_id : VGSS_MOD_SUB_MAX,
			  clk ? clk->sclk_id : (LOMBO_VGSS_SCLK_NUM + 1));
		return -EINVAL;
	}

	DRM_DEBUG_KMS("clk=%d,sw=%d\n", clk->mod_id, sw);
	top = clk->top;

	ret = lombo_vgss_enable_sclk(top, clk, sw);
	if (ret)
		return ret;

	return lombo_vgss_enable_clk(top, clk, sw);
}

/**
 * lombo_vgss_clk_disable_unprepare - disable a VGSS module clock.
 * @clk: module clock handle from lombo_vgss_get_clk().
 *
 * Disables the module clock first, then drops the source clock
 * reference (reverse of lombo_vgss_clk_prepare_enable()).
 * Returns 0 on success, -EINVAL for a bad handle.
 */
int lombo_vgss_clk_disable_unprepare(struct lombo_vgss_clk *clk)
{
	struct lombo_vgss_top *top;

	if (!clk) {
		DRM_ERROR("clk%p or invalid sclk_id%d\n", clk, 0);
		return -EINVAL;
	}
	if (clk->sclk_id >= LOMBO_VGSS_SCLK_NUM) {
		DRM_ERROR("clk%p or invalid sclk_id%d\n", clk, clk->sclk_id);
		return -EINVAL;
	}

	top = clk->top;
	lombo_vgss_disable_clk(top, clk);
	lombo_vgss_disable_sclk(top, clk);

	return 0;
}

/**
 * lombo_vgss_get_enable_tclk - get and enable the VGSS timer clock.
 * @vgss_top: VGSS top device.
 * @req_rate: requested rate in Hz; 0 keeps the previous request.
 * @real_rate: optional out parameter for the actual rate.
 *
 * Convenience wrapper: acquires the VGSS_MOD_TIMER clock on source
 * clock 1 and enables it.  Returns the handle, or NULL on failure
 * (the handle reference is dropped on enable failure).
 */
struct lombo_vgss_clk *lombo_vgss_get_enable_tclk(struct device *vgss_top,
	unsigned long req_rate, unsigned long *real_rate)
{
	struct lombo_vgss_clk *tclk;

	tclk = lombo_vgss_get_clk(vgss_top, vgss_top, VGSS_MOD_TIMER, 1);
	if (!tclk)
		return NULL;

	if (req_rate)
		lombo_vgss_clk_set_rate(tclk, req_rate);

	if (lombo_vgss_clk_prepare_enable(tclk, 0)) {
		lombo_vgss_put_clk(&tclk);
		return NULL;
	}

	if (real_rate)
		*real_rate = lombo_vgss_clk_get_rate(tclk);

	return tclk;
}

/**
 * lombo_vgss_parse_sclk - parse VGSS bus clocks and source clocks from DT.
 * @dev: VGSS top device.
 * @top: instance to fill in.
 *
 * Gets the mandatory bus clocks, the optional "ext_sclkN"/"ext_psclkN"
 * pairs, and configures each enabled "sclkN" child node (external clock
 * binding, requested rate, optional ADPLL tune words).
 *
 * Returns 0 on success or a negative error code.  Device-node
 * references from of_get_child_by_name() are dropped on every path.
 */
static int lombo_vgss_parse_sclk(struct device *dev,
	struct lombo_vgss_top *top)
{
	struct device_node *np_sclk;
	char name[16] = { 0 };
	char name_1[16] = { 0 };
	int i;
	uint32_t value;

	/* parse bus clks (all mandatory) */
	top->ahb_gate = devm_clk_get(dev, "ahb_gate");
	if (IS_ERR(top->ahb_gate)) {
		DRM_DEV_ERROR(dev, "failed to get vgss ahb_gate\n");
		return -ENODEV;
	}
	top->ahb_reset = devm_clk_get(dev, "ahb_reset");
	if (IS_ERR(top->ahb_reset)) {
		DRM_DEV_ERROR(dev, "failed to get vgss ahb_reset\n");
		return -ENODEV;
	}
	top->mem_axi_gate = devm_clk_get(dev, "mem_axi_gate");
	if (IS_ERR(top->mem_axi_gate)) {
		DRM_DEV_ERROR(dev, "failed to get vgss mem_axi_gate\n");
		return -ENODEV;
	}

	/* parse ext sclk & psclk (each pair is optional) */
	for (i = 0; i < LOMBO_VGSS_EXT_SCLK_NUM; i++) {
		struct clk *clk;
		struct clk *pclk = NULL;

		/* snprintf instead of digit patching: works for i >= 10 */
		snprintf(name, sizeof(name), "ext_sclk%d", i);
		snprintf(name_1, sizeof(name_1), "ext_psclk%d", i);

		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk))
			continue;
		top->ext_sclk[i] = clk;

		pclk = devm_clk_get(dev, name_1);
		if (!IS_ERR(pclk)) {
			clk_set_parent(clk, pclk);
			top->ext_psclk[i] = pclk;
		}
		DRM_DEV_DEBUG_DRIVER(dev, "has %s\n", name);
	}

	/* create inner sclk from "sclkN" child nodes */
	for (i = 0; i < LOMBO_VGSS_SCLK_NUM; i++) {
		struct lombo_vgss_sclk *sclk = &(top->sclk[i]);

		snprintf(name, sizeof(name), "sclk%d", i);
		/* of_get_child_by_name returns NULL (never ERR_PTR) */
		np_sclk = of_get_child_by_name(dev->of_node, name);
		if (!np_sclk)
			continue;
		if (!of_device_is_available(np_sclk)) {
			of_node_put(np_sclk);
			continue;
		}

		if (!of_property_read_u32(np_sclk, "ext_sclk", &value)) {
			DRM_DEV_DEBUG_DRIVER(dev, "%s: ext_sclk=%d\n",
					     name, value);
			if (value < LOMBO_VGSS_EXT_SCLK_NUM) {
				sclk->ext_sclk = top->ext_sclk[value];
				sclk->ext_psclk = top->ext_psclk[value];
			}
		}
		if (!sclk->ext_sclk) {
			DRM_DEV_ERROR(dev, "not ext_sclk for sclk%d\n", i);
			of_node_put(np_sclk);
			return -EINVAL;
		}

		if (!of_property_read_u32(np_sclk, "clk_rate", &value))
			sclk->req_rate = value;

		/* ADPLL is used only when both tune words are given */
		sclk->use_adpll = 0;
		if (!of_property_read_u32_index(np_sclk,
						"adpll_tune", 0, &value)) {
			sclk->adpll_cfg.tune0 = value;
			sclk->use_adpll++;
		}
		if (!of_property_read_u32_index(np_sclk,
						"adpll_tune", 1, &value)) {
			sclk->adpll_cfg.tune1 = value;
			sclk->use_adpll++;
		}
		sclk->use_adpll = (sclk->use_adpll == 2) ? 1 : 0;

		/* drop the reference taken by of_get_child_by_name */
		of_node_put(np_sclk);

		DRM_DEV_DEBUG_DRIVER(dev, "%s:req_rate=%lu, adpll=%d.",
			name, sclk->req_rate, sclk->use_adpll);
		DRM_DEV_DEBUG_DRIVER(dev, "adpll_tune[0x%x,0x%x]\n",
			sclk->adpll_cfg.tune0, sclk->adpll_cfg.tune1);

		mutex_init(&sclk->slock);
	}

	return 0;
}

/* Component bind: publish this device to the DRM driver state. */
static int lombo_vgss_bind(struct device *dev,
	struct device *master, void *master_data)
{
	struct drm_device *drm = master_data;
	struct lombo_drv *drv;

	DRM_DEV_DEBUG_DRIVER(dev, "\n");

	drv = drm->dev_private;
	drv->vgss_top = dev;

	return 0;
}

/* Component unbind: nothing to tear down, just trace. */
static void lombo_vgss_unbind(struct device *dev,
	struct device *master, void *master_data)
{
	DRM_DEV_DEBUG_DRIVER(dev, "\n");
}

/* Component ops registered with the DRM master via component_add(). */
static const struct component_ops vgss_comp_ops = {
	.bind	= lombo_vgss_bind,
	.unbind	= lombo_vgss_unbind,
};

/**
 * lombo_vgss_probe - platform probe for the VGSS top.
 *
 * Maps the register block, parses the clock tree from DT, and
 * registers the device as a component.  All allocations are devm-
 * managed, so error paths need no explicit cleanup.
 */
static int lombo_vgss_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct lombo_vgss_top *top;
	struct resource *res;
	int ret;

	DRM_DEV_DEBUG_DRIVER(dev, "\n");
	top = devm_kzalloc(dev, sizeof(*top), GFP_KERNEL);
	if (!top)
		return -ENOMEM;

	/* fully initialize @top before publishing it via drvdata */
	INIT_LIST_HEAD(&top->clk_list);
	mutex_init(&top->slock);

	/* parse register */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	top->reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(top->reg))
		return PTR_ERR(top->reg);
	csp_vgss_set_register_base((uintptr_t)top->reg);

	ret = lombo_vgss_parse_sclk(dev, top);
	if (ret)
		return ret;

	dev_set_drvdata(dev, top);

	return component_add(&pdev->dev, &vgss_comp_ops);
}

/**
 * lombo_vgss_remove - platform remove for the VGSS top.
 *
 * The component must be deleted first: unbind may still run against
 * this device, so the register base and drvdata are torn down only
 * after component_del() returns.
 */
static int lombo_vgss_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	DRM_DEV_DEBUG_DRIVER(&pdev->dev, "%d\n", __LINE__);
	component_del(&pdev->dev, &vgss_comp_ops);
	csp_vgss_set_register_base(0);
	dev_set_drvdata(dev, NULL);
	return 0;
}

static const struct of_device_id vgss_of_table[] = {
	{ .compatible = "lombo,lombo-n9v1-vgss" },
	{ .compatible = "lombo,lombo-n7v3-vgss" },
	{ .compatible = "lombo,lombo-n7v5-vgss" },
	{ .compatible = "lombo,lombo-n5v1-vgss" },
	{ }
};
MODULE_DEVICE_TABLE(of, vgss_of_table);

static struct platform_driver lombo_vgss_platform_driver = {
	.probe		= lombo_vgss_probe,
	.remove		= lombo_vgss_remove,
	.driver		= {
		/* .owner is set by module_platform_driver(); no need
		 * to assign THIS_MODULE explicitly (checkpatch warns).
		 */
		.name		= "lombo-vgss",
		.of_match_table	= vgss_of_table,
	},
};
module_platform_driver(lombo_vgss_platform_driver);

/* Standard module metadata. */
MODULE_AUTHOR("lomboswer <lomboswer@lombotech.com>");
MODULE_DESCRIPTION("Lombo SoC VGSS TOP Driver");
MODULE_LICENSE("GPL");
