/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Olivier Bideau <olivier.bideau@st.com> for STMicroelectronics.
 * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
 */

#ifdef CONFIG_RTOS_SECURE_CLK_SUPPORT

#include <linux/arm-smccc.h>
#include <linux/mm.h>

/*
 *
 * Security management
 *
 */

/* SMC function ID of the secure-monitor RCC register-access service */
#define STM32_SVC_RCC	0x82001000
/* Sub-commands of the service: plain write, set bits, clear bits */
#define STM32_WRITE	0x1
#define STM32_SET_BITS	0x2
#define STM32_CLR_BITS	0x3
#define CLK_IS_BASIC	BIT(5) /* Basic clk, can't do a to_clk_foo() */

/*
 * Fire-and-forget SMC helper: issue one RCC service call, discarding the
 * result.  NOTE(review): not referenced in this chunk — presumably used
 * elsewhere in the file; verify before removing.
 */
#define SMC(class, op, address, val)\
	({\
	struct arm_smccc_res res;\
	arm_smccc_smc(class, op, address, val,\
			0, 0, 0, 0, &res);\
	})

/*
 * stm32_clk_writel_secure() - write @value to a secure RCC register via SMC.
 *
 * Only the page offset of @reg is passed to the secure monitor, which
 * resolves it against the RCC base on its side.
 *
 * Returns the monitor's status code (res.a0, 0 on success), truncated to u32.
 */
static u32 stm32_clk_writel_secure(u32 value, void __iomem *reg)
{
	struct arm_smccc_res res;
	u32 address = offset_in_page(reg);

	arm_smccc_smc(STM32_SVC_RCC, STM32_WRITE, address, value,
		      0, 0, 0, 0, &res);

	/* res.a0 is unsigned long, so print with %lu (was %ld: -Wformat) */
	if (res.a0)
		pr_warn("%s: Failed to write in secure mode at 0x%x (err = %lu)\n",
			__func__, address, res.a0);

	return res.a0;
}

/*
 * stm32_clk_bit_secure() - set or clear bits in a secure RCC register via SMC.
 * @cmd: STM32_SET_BITS or STM32_CLR_BITS (or STM32_WRITE for a plain write)
 * @value: bit mask to apply
 * @reg: MMIO address of the register; only its page offset is sent
 *
 * Returns the monitor's status code (res.a0, 0 on success), truncated to u32.
 */
static u32 stm32_clk_bit_secure(u32 cmd, u32 value, void __iomem *reg)
{
	struct arm_smccc_res res;
	u32 address = offset_in_page(reg);

	arm_smccc_smc(STM32_SVC_RCC, cmd, address, value,
		      0, 0, 0, 0, &res);

	/* res.a0 is unsigned long, so print with %lu (was %ld: -Wformat) */
	if (res.a0)
		pr_warn("%s: Failed to write in secure mode at 0x%x (err = %lu)\n",
			__func__, address, res.a0);

	return res.a0;
}

/*
 * Thin MMIO accessors under the clk framework's legacy clk_readl/clk_writel
 * names; plain readl/writel on this platform.  Reads stay direct even for
 * secure clocks — only writes go through the SMC service.
 */
static inline u32 clk_readl(u32 __iomem *reg)
{
	return readl(reg);
}

static inline void clk_writel(u32 val, u32 __iomem *reg)
{
	writel(val, reg);
}

/*
 * Common helper for the secure gate enable/disable paths: set or clear the
 * gate bit through the secure monitor, serialized by the gate's spinlock.
 */
static void clk_sgate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	u32 cmd = enable ? STM32_SET_BITS : STM32_CLR_BITS;
	unsigned long irqflags = 0;

	spin_lock_irqsave(gate->lock, irqflags);
	stm32_clk_bit_secure(cmd, BIT(gate->bit_idx), gate->reg);
	spin_unlock_irqrestore(gate->lock, irqflags);
}

/* clk_ops.enable hook: open the gate through the secure service. */
static int clk_sgate_enable(struct clk_hw *hw)
{
	clk_sgate_endisable(hw, 1);

	return 0;
}

/* clk_ops.disable hook: close the gate through the secure service. */
static void clk_sgate_disable(struct clk_hw *hw)
{
	clk_sgate_endisable(hw, 0);
}

/*
 * Secure gate ops: writes go through SMC, while is_enabled reuses the
 * generic helper — the gate register is presumably readable from the
 * non-secure world (verify against the SoC security configuration).
 */
static const struct clk_ops clk_sgate_ops = {
	.enable = clk_sgate_enable,
	.disable = clk_sgate_disable,
	.is_enabled = clk_gate_is_enabled,
};

/* Reading the mux selection needs no SMC; delegate to the generic mux ops. */
static u8 clk_smux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

/*
 * clk_ops.set_parent for secure muxes: mirrors the generic mux set_parent,
 * but the read-modify-write ends with an SMC write instead of clk_writel().
 */
static int clk_smux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	unsigned long irqflags = 0;
	u32 regval;

	/* Translate the logical parent index into the register field value. */
	if (mux->table) {
		index = mux->table[index];
	} else {
		if (mux->flags & CLK_MUX_INDEX_BIT)
			index = 1 << index;
		if (mux->flags & CLK_MUX_INDEX_ONE)
			index++;
	}

	spin_lock_irqsave(mux->lock, irqflags);

	regval = clk_readl(mux->reg);
	regval &= ~(mux->mask << mux->shift);
	regval |= index << mux->shift;
	stm32_clk_writel_secure(regval, mux->reg);

	spin_unlock_irqrestore(mux->lock, irqflags);

	return 0;
}

/* Secure mux ops: parent selection is written through the SMC service. */
static const struct clk_ops clk_smux_ops = {
	.get_parent = clk_smux_get_parent,
	.set_parent = clk_smux_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};

/*
 * clk_hw_register_smux() - allocate and register a mux clock whose selector
 * is written through the secure monitor.
 *
 * Same contract as clk_hw_register_mux(), but wires in clk_smux_ops so that
 * set_parent goes through the SMC write service.
 *
 * Returns the registered clk_hw, or an ERR_PTR on failure (the allocation
 * is freed if registration fails).
 */
static struct clk_hw *clk_hw_register_smux(struct device *dev,
					   const char *name,
					   const char * const *parent_names,
					   u8 num_parents,
					   unsigned long flags,
					   void __iomem *reg, u8 shift,
					   u8 width,
					   u8 clk_mux_flags,
					   spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;
	struct clk_mux *mux;
	struct clk_hw *hw;
	/*
	 * Zero-initialize: clk_init_data has more fields than are assigned
	 * below (e.g. parent_data/parent_hws on recent kernels); leaving
	 * them as stack garbage can crash clk_hw_register().
	 */
	struct clk_init_data init = {};
	int ret;

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_smux_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = NULL;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}

/*
 * Registration dispatcher for mux clocks: when the SoC runs with a secured
 * RCC, use the SMC-backed mux ops; otherwise register a plain generic mux.
 */
static struct clk_hw *
secure_clk_hw_register_mux(struct device *dev,
		      struct clk_hw_onecell_data *clk_data,
		      void __iomem *base, spinlock_t *lock,
		      const struct clock_config *cfg)
{
	struct mux_cfg *mux_cfg = cfg->cfg;

	if (_is_soc_secured(base))
		return clk_hw_register_smux(dev, cfg->name,
					    cfg->parent_names,
					    cfg->num_parents, cfg->flags,
					    mux_cfg->reg_off + base,
					    mux_cfg->shift,
					    mux_cfg->width,
					    mux_cfg->mux_flags,
					    lock);

	return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
				   cfg->num_parents, cfg->flags,
				   mux_cfg->reg_off + base,
				   mux_cfg->shift,
				   mux_cfg->width,
				   mux_cfg->mux_flags,
				   lock);
}

/*
 * Divider wrapper carrying a per-clock "secure" flag.
 * NOTE(review): neither this struct nor to_clk_div_secure() is referenced
 * in this chunk — presumably consumed elsewhere in the file; verify.
 * Also note container_of() below expects its argument to be a
 * struct clk_divider *, not a struct clk_hw *, despite the _hw name.
 */
struct clk_div_secure {
	struct clk_divider div;
	u8 secure;
};

#define to_clk_div_secure(_hw) container_of(_hw, struct clk_div_secure, div)

/*
 * Rate computation involves no register write, so both hooks delegate to
 * the generic divider ops (register reads are direct, not via SMC).
 */
static unsigned long clk_sdivider_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long clk_sdivider_round_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}

/* Field mask for a divider of @width bits (same as generic clk-divider). */
#define div_mask(width) ((1 << (width)) - 1)

/*
 * clk_ops.set_rate for secure dividers: mirrors the generic
 * clk_divider_set_rate() read-modify-write, but the final register write
 * goes through the SMC write service instead of a direct clk_writel().
 * Returns 0 on success or a negative value from divider_get_val().
 */
static int clk_sdivider_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);

	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		/* hiword: write-enable mask in the upper 16 bits, no read */
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= (u32)value << divider->shift;

	stm32_clk_writel_secure(val, divider->reg);

	spin_unlock_irqrestore(divider->lock, flags);

	return 0;
}

/* Secure divider ops: reads are generic, the rate write goes through SMC. */
static const struct clk_ops clk_sdivider_ops = {
	.recalc_rate = clk_sdivider_recalc_rate,
	.round_rate = clk_sdivider_round_rate,
	.set_rate = clk_sdivider_set_rate,
};

/*
 * clk_hw_register_sdivider_table() - allocate and register a divider clock
 * whose rate field is written through the secure monitor.
 *
 * Same contract as clk_hw_register_divider_table(); read-only dividers keep
 * the generic RO ops (no writes, so no SMC needed), writable ones get
 * clk_sdivider_ops.
 *
 * Returns the registered clk_hw, or an ERR_PTR on failure (the allocation
 * is freed if registration fails).
 */
static struct clk_hw *
clk_hw_register_sdivider_table(struct device *dev, const char *name,
			       const char *parent_name,
			       unsigned long flags,
			       void __iomem *reg,
			       u8 shift, u8 width,
			       u8 clk_divider_flags,
			       const struct clk_div_table *table,
			       spinlock_t *lock)
{
	struct clk_divider *div;
	struct clk_hw *hw;
	/*
	 * Zero-initialize: clk_init_data has more fields than are assigned
	 * below (e.g. parent_data/parent_hws on recent kernels); leaving
	 * them as stack garbage can crash clk_hw_register().
	 */
	struct clk_init_data init = {};
	int ret;

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else
		init.ops = &clk_sdivider_ops;

	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct clk_divider assignments */
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	/* register the clock */
	hw = &div->hw;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}

/*
 * Registration dispatcher for divider clocks: when the SoC runs with a
 * secured RCC, use the SMC-backed divider registration; otherwise register
 * a plain generic divider.
 */
static struct clk_hw *
secure_clk_hw_register_divider_table(struct device *dev,
				struct clk_hw_onecell_data *clk_data,
				void __iomem *base, spinlock_t *lock,
				const struct clock_config *cfg)
{
	struct div_cfg *div_cfg = cfg->cfg;

	if (_is_soc_secured(base))
		return clk_hw_register_sdivider_table(dev, cfg->name,
						      cfg->parent_name,
						      cfg->flags,
						      div_cfg->reg_off + base,
						      div_cfg->shift,
						      div_cfg->width,
						      div_cfg->div_flags,
						      div_cfg->table,
						      lock);

	return clk_hw_register_divider_table(dev, cfg->name,
					     cfg->parent_name,
					     cfg->flags,
					     div_cfg->reg_off + base,
					     div_cfg->shift,
					     div_cfg->width,
					     div_cfg->div_flags,
					     div_cfg->table,
					     lock);
}

/*
 * clk_ops.enable for MP1 SET/CLR-register gates: set the enable bit via
 * the secure service, serialized by the gate lock.  Always succeeds.
 */
static int mp1_sgate_clk_enable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long irqflags = 0;

	spin_lock_irqsave(gate->lock, irqflags);
	stm32_clk_bit_secure(STM32_SET_BITS, BIT(gate->bit_idx), gate->reg);
	spin_unlock_irqrestore(gate->lock, irqflags);

	return 0;
}

/*
 * clk_ops.disable for MP1 SET/CLR-register gates: writing the bit into the
 * companion CLR register (reg + RCC_CLR) clears the enable bit, so the
 * command is still STM32_SET_BITS.
 */
static void mp1_sgate_clk_disable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long irqflags = 0;

	spin_lock_irqsave(gate->lock, irqflags);
	stm32_clk_bit_secure(STM32_SET_BITS, BIT(gate->bit_idx),
			     gate->reg + RCC_CLR);
	spin_unlock_irqrestore(gate->lock, irqflags);
}

/* Secure MP1 gate ops: SET/CLR-style writes via SMC, generic read-back. */
static const struct clk_ops mp1_sgate_clk_ops = {
	.enable		= mp1_sgate_clk_enable,
	.disable	= mp1_sgate_clk_disable,
	.is_enabled	= clk_gate_is_enabled,
};

/*
 * clk_ops.enable for secure multi-gates (several clocks sharing one gate
 * bit): mark this user in the shared bookkeeping mask, then open the gate.
 * mp1_sgate_clk_enable() always returns 0, so its result is passed through.
 */
static int mp1_s_mgate_clk_enable(struct clk_hw *hw)
{
	struct stm32_clk_mgate *clk_mgate = to_clk_mgate(to_clk_gate(hw));

	clk_mgate->mgate->flag |= clk_mgate->mask;

	return mp1_sgate_clk_enable(hw);
}

/*
 * clk_ops.disable for secure multi-gates: drop this user from the shared
 * bookkeeping mask and gate the hardware only once no user remains.
 */
static void mp1_s_mgate_clk_disable(struct clk_hw *hw)
{
	struct stm32_clk_mgate *clk_mgate = to_clk_mgate(to_clk_gate(hw));

	clk_mgate->mgate->flag &= ~clk_mgate->mask;

	if (!clk_mgate->mgate->flag)
		mp1_sgate_clk_disable(hw);
}

/*
 * Secure multi-gate ops.  NOTE(review): the shared 'flag' bookkeeping in
 * the enable/disable hooks above is updated outside the gate spinlock —
 * presumably protected by the clk framework's prepare/enable locking;
 * verify before relying on it from other contexts.
 */
static const struct clk_ops mp1_s_mgate_clk_ops = {
	.enable		= mp1_s_mgate_clk_enable,
	.disable	= mp1_s_mgate_clk_disable,
	.is_enabled	= clk_gate_is_enabled,

};

/* Reading the selection needs no SMC; delegate to the secure mux ops. */
static u8 clk_s_mmux_get_parent(struct clk_hw *hw)
{
	return clk_smux_ops.get_parent(hw);
}

/*
 * clk_ops.set_parent for secure "mmux" clocks — several mux clocks sharing
 * a single selector field.  After programming the hardware through the
 * secure mux ops, every sibling clock of the shared mux is reparented so
 * the clock tree stays consistent with the one underlying register field.
 * Returns 0 on success or the error from the secure set_parent.
 */
static int clk_s_mmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
	struct clk_hw *hwp;
	int ret, n;

	ret = clk_smux_ops.set_parent(hw, index);
	if (ret)
		return ret;

	hwp = clk_hw_get_parent(hw);

	/* Propagate the new parent to all siblings except ourselves. */
	for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
		if (clk_mmux->mmux->hws[n] != hw)
			clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);

	return 0;
}

/* Secure multi-mux ops: SMC-backed selection plus sibling reparenting. */
static const struct clk_ops clk_s_mmux_ops = {
	.get_parent = clk_s_mmux_get_parent,
	.set_parent = clk_s_mmux_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};

/* Clock-table entry for a secure mux: registered via secure_clk_hw_register_mux(). */
#define SMUX(_id, _name, _parents, _flags,\
	     _offset, _shift, _width, _mux_flags)\
{\
	.id		= _id,\
	.name		= _name,\
	.parent_names	= _parents,\
	.num_parents	= ARRAY_SIZE(_parents),\
	.flags		= _flags,\
	.cfg =  &(struct mux_cfg) {\
		.reg_off	= _offset,\
		.shift		= _shift,\
		.width		= _width,\
		.mux_flags	= _mux_flags,\
	},\
	.func = secure_clk_hw_register_mux,\
}

/* Clock-table entry for a secure divider with an explicit div table. */
#define SDIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
		   _div_flags, _div_table)\
{\
	.id		= _id,\
	.name		= _name,\
	.parent_name	= _parent,\
	.flags		= _flags,\
	.cfg =  &(struct div_cfg) {\
		.reg_off	= _offset,\
		.shift		= _shift,\
		.width		= _width,\
		.div_flags	= _div_flags,\
		.table		= _div_table,\
	},\
	.func = secure_clk_hw_register_divider_table,\
}

/* Secure divider without a table: power-of-two style generic divider. */
#define SDIV(_id, _name, _parent, _flags, _offset, _shift, _width,\
	     _div_flags)\
	SDIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
		   _div_flags, NULL)

/* Gate component wired to the secure gate ops (simple enable-bit gates). */
#define _S_GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
	_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
		    NULL, NULL, &clk_sgate_ops)

/* Full table entry for a simple secure gate clock. */
#define SGATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
	STM32_GATE(_id, _name, _parent, _flags,\
		   _S_GATE(_offset, _bit_idx, _gate_flags))

/*
 * Gate component for MP1 SET/CLR-register gates: non-secure path uses
 * mp1_gate_clk_ops, secure path uses the SMC-backed mp1_sgate_clk_ops.
 */
#define _S_GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
	_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
		    NULL, &mp1_gate_clk_ops, &mp1_sgate_clk_ops)

/* Full table entry for an MP1 SET/CLR secure gate clock. */
#define SGATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
	STM32_GATE(_id, _name, _parent, _flags,\
		   _S_GATE_MP1(_offset, _bit_idx, _gate_flags))

/* Divider component wired to the secure divider ops. */
#define _S_DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
	_STM32_DIV(_div_offset, _div_shift, _div_width,\
		   _div_flags, _div_table, NULL, &clk_sdivider_ops)

/* Mux component wired to the secure mux ops. */
#define _S_MUX(_offset, _shift, _width, _mux_flags)\
	_STM32_MUX(_offset, _shift, _width, _mux_flags,\
		   NULL, NULL, &clk_smux_ops)
#endif
