// SPDX-License-Identifier: GPL-2.0

/**
 * Copyright (C) 2025 Huawei Technologies Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <https://www.gnu.org/licenses/>.
 *
 * @file cman_drv.c
 * @brief Kunpeng Cache Management kernel driver implementation
 * @details The kernel driver implementation for Kunpeng Cache Management Framework.
 * Contains the following functionalities:
 * - L2 Instruction/Data partitioning
 * - Reset config functionality for all the above features
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/version.h>

#include "cman_drv.h"
#include "platform_info.h"
#include "hw_regs_access.h"

/**
 * L2 I/D partition related definitions.
 *
 * L2_NUM_ALLOWED_CWS(cws): maximum number of cache ways a mask may disable.
 * The "- 2" keeps at least two ways enabled; masking more may hang the L2
 * cache and the CPU (see the checks in cman_drv_l2_check_set_masks()).
 *
 * L2_PART_GET_MASK_FROM_REGVAL(val, shift, mask): extracts a bit-field of
 * width `mask` located at bit offset `shift` from register value `val`.
 */
#define L2_NUM_ALLOWED_CWS(cws)							(cws - 2)
#define L2_PART_GET_MASK_FROM_REGVAL(val, shift, mask)	((val >> shift) & mask)

/** Value written to a HW register to reset its configuration to defaults. */
#define REG_CLEAN						0

/** @struct cman_dev
 *  @brief  Struct containing Cache management Framework control device information.
 *  @var    cman_dev::dev_num
 *  Member 'dev_num' contains internal representation of Cache management Framework control device number.
 *  @var    cman_dev::cdev
 *  Member 'cdev' contains the kernel's internal structure that represents char devices.
 */
struct cman_dev {
	dev_t dev_num;
	struct cdev cdev;
};

/** Static storage for the single control device instance. */
struct cman_dev local;

/** Pointer to the control device instance (set to &local in init). */
struct cman_dev *dev;
/** Device class used to create /dev/cman_dev with the proper permissions. */
struct class *dev_class;

/**
 * ID of process which works with the driver.
 * Valid only while opens_num > 0; protected by cman_lock.
 */
static pid_t process_id;

/**
 * Counter for number of times open method is called for calling process.
 * Protected by cman_lock.
 */
static u32 opens_num = 0;

/**
 * Mutex serializing all driver entry points (open/release/ioctl) of the
 * Kunpeng Cache Management kernel driver.
 */
struct mutex cman_lock;

/**
 * Checks whether provided L2 I/D partition masks are valid.
 *
 * @param  l2_partition_dmask L2 partition data mask.
 * @param  l2_partition_imask L2 partition instruction mask.
 * @return                    0 in case provided masks are valid.
 *                            EINVAL in case provided mask is out of boundaries (exceeds maximum value).
 *                            EPERM  in case provided mask cuts off more instructions or data than allowed by HW.
 */
static noinline s32
cman_drv_l2_check_set_masks(u64 l2_partition_dmask, u64 l2_partition_imask)
{
	const u64 max = hisi_hw_caps.l2.max_mask;
	const u64 allowed = L2_NUM_ALLOWED_CWS(hisi_hw_caps.l2.cws_num);

	/* Range check first: a mask must fit within the HW-supported way mask. */
	if (l2_partition_dmask > max) {
		pr_err("L2 cache way data mask is out of boundaries\n");
		return EINVAL;
	}
	if (l2_partition_imask > max) {
		pr_err("L2 cache way instruction mask is out of boundaries\n");
		return EINVAL;
	}

	/* Disabling too many ways (more than cws_num - 2) may hang the L2
	 * cache and the CPU, so HW requires at least two ways to stay on. */
	if (hweight32(l2_partition_dmask & max) > allowed) {
		pr_err("L2 Data mask - masking more than %llu ways may cause L2 Cache and CPU hang\n",
			allowed);
		return EPERM;
	}
	if (hweight32(l2_partition_imask & max) > allowed) {
		pr_err("L2 Instruction mask - masking more than %llu ways may cause L2 Cache and CPU hang\n",
			allowed);
		return EPERM;
	}

	return 0;
}

/**
 * Retrieves L2 D- and I-masks from the L2 partition register value.
 *
 * Register layout: D-mask occupies the low cws_num bits, the I-mask the
 * following cws_num bits (mirror of cman_drv_l2_part_construct_regval()).
 *
 * @param  l2_partition_regval L2 I/D partition register value.
 * @param  l2_partition_dmask  Pointer to a variable to store mask for data requests.
 * @param  l2_partition_imask  Pointer to a variable to store mask for instructions.
 */
static noinline void
cman_drv_l2_partition_get_masks_from_regval(u64 l2_partition_regval, u64 *l2_partition_dmask, u64 *l2_partition_imask)
{
	const u64 way_mask = hisi_hw_caps.l2.max_mask;

	*l2_partition_dmask = l2_partition_regval & way_mask;
	*l2_partition_imask = (l2_partition_regval >> hisi_hw_caps.l2.cws_num) & way_mask;
}

/**
 * Constructs L2 partition register value from D- and I-masks provided.
 *
 * The D-mask lands in the low cws_num bits and the I-mask in the next
 * cws_num bits; both are clipped to the HW-supported way mask.
 *
 * @param  l2_partition_dmask  Mask for data requests.
 * @param  l2_partition_imask  Mask for instructions.
 * @return                     Value to be written to L2 partition register.
 */
static noinline u64
cman_drv_l2_part_construct_regval(u64 l2_partition_dmask, u64 l2_partition_imask)
{
	u64 dbits = l2_partition_dmask & hisi_hw_caps.l2.max_mask;
	u64 ibits = l2_partition_imask & hisi_hw_caps.l2.max_mask;

	return (ibits << hisi_hw_caps.l2.cws_num) | dbits;
}

/**
 * Accepts userspace data, prepares it and reads L2 I/D partition register.
 *
 * @param  arg Arguments passed from the userspace.
 * @return     0 in case of success.
 *             EBADE         Wrong kernel-space address (fail to copy data between user- and
 *             kernel spaces, etc.).
 *             EINVAL        Some of input parameters is incorrect.
 *             ECANCELED     CPU is not online or migration to it failed.
 */
static noinline s64
cman_drv_l2_partition_get(struct l2_partition_ioctl_args __user *arg)
{
	struct l2_partition_ioctl_args args;
	u64 l2_partition_regval = 0;

	if (copy_from_user(&args, arg, sizeof(struct l2_partition_ioctl_args))) {
		pr_err("Failed to copy data from userspace\n");
		return EBADE;
	}

	if (args.cpu < 0 || args.cpu >= nr_cpu_ids) {
		pr_err("cpu id %d is out of boundaries\n", args.cpu);
		return EINVAL;
	}

	if (!cman_drv_is_cpu_online(args.cpu))
		return ECANCELED;

	/*
	 * Bind to the target CPU before touching the per-CPU register.
	 * The result must be checked: if the CPU went offline between the
	 * online check above and this call, the migration fails and the
	 * register would otherwise silently be read on the wrong core.
	 */
	if (set_cpus_allowed_ptr(current, cpumask_of(args.cpu))) {
		pr_err("Failed to migrate to cpu %d\n", args.cpu);
		return ECANCELED;
	}
	l2_partition_regval = cman_drv_l2_partition_reg_read();
	cman_drv_l2_partition_get_masks_from_regval(l2_partition_regval, &args.l2_partition_dmask, &args.l2_partition_imask);
	pr_debug("L2 Partition read: cpu id: %d, D-mask: 0x%llx, L2 I-mask: 0x%llx\n",
		args.cpu,
		args.l2_partition_dmask,
		args.l2_partition_imask);

	if (copy_to_user(arg, &args, sizeof(struct l2_partition_ioctl_args))) {
		pr_err("Failed to copy data to userspace\n");
		return EBADE;
	}

	return 0;
}

/**
 * Applies L2 I/D partition configuration for the specified CPU.
 *
 * @param cpu CPU id.
 * @param val Value representing L2 I/D partition configuration to be applied.
 * @return    0 in case of successful operation.
 *            ECANCELED  CPU is not online or migration to it failed.
 *            EIO        Error during write to one of HW registers.
 */
static noinline s64
cman_drv_l2_partition_set_cpu(s32 cpu, u64 val)
{
	if (!cman_drv_is_cpu_online(cpu))
		return ECANCELED;

	/*
	 * Bind to the target CPU before the per-CPU register write. Check
	 * the result: if the CPU just went offline, migration fails and the
	 * write would otherwise silently land on the wrong core.
	 */
	if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) {
		pr_err("Failed to migrate to cpu %d\n", cpu);
		return ECANCELED;
	}
	if (!cman_drv_l2_partition_reg_write(val)) {
		pr_err("Could not write to L2 I/D partition register for CPU: %d\n", cpu);
		return EIO;
	}

	return 0;
}

/**
 * Accepts userspace data, prepares it and writes to L2 I/D partition register.
 *
 * @param  arg Arguments passed from the userspace.
 * @return     0 in case of success.
 *             EBADE         Wrong kernel-space address (fail to copy data between user- and
 *             kernel spaces, etc.).
 *             EINVAL        Provided L2 I/D partition mask is out of boundaries (exceeds maximum value).
 *             EPERM         Provided L2 I/D partition mask cuts off more instructions or data than allowed by HW.
 *             EIO           Error during write to one of HW registers.
 *             ECANCELED     CPU is not online.
 */
static noinline s64
cman_drv_l2_partition_set(struct l2_partition_ioctl_args __user *arg)
{
	struct l2_partition_ioctl_args args;
	u64 regval;
	s64 rc;

	if (copy_from_user(&args, arg, sizeof(struct l2_partition_ioctl_args))) {
		pr_err("Failed to copy data from userspace\n");
		return EBADE;
	}

	/* Validate the masks before touching any hardware state. */
	rc = cman_drv_l2_check_set_masks(args.l2_partition_dmask, args.l2_partition_imask);
	if (rc != 0)
		return rc;

	regval = cman_drv_l2_part_construct_regval(args.l2_partition_dmask, args.l2_partition_imask);

	if (args.cpu < 0 || args.cpu >= nr_cpu_ids) {
		pr_err("cpu id %d is out of boundaries\n", args.cpu);
		return EINVAL;
	}

	rc = cman_drv_l2_partition_set_cpu(args.cpu, regval);
	if (rc != 0)
		return rc;

	pr_debug("L2 Partition write success: cpu id: %d, D-mask: 0x%llx, I-mask: 0x%llx\n",
		args.cpu,
		args.l2_partition_dmask,
		args.l2_partition_imask);

	return 0;
}

/**
 * Takes current version of driver and copies it to userspace.
 *
 * @param  arg Arguments passed from the userspace.
 * @return     0 in case of success.
 *             EBADE Wrong kernel-space address (fail to copy data between user- and
 *             kernel spaces, etc.).
 */
static noinline s64
cman_drv_version(s32 __user *arg)
{
	s32 version = CMAN_DRV_VER;

	if (copy_to_user(arg, &version, sizeof(version)) != 0) {
		pr_err("Failed to copy data to userspace\n");
		return EBADE;
	}

	return 0;
}

/**
 * Takes current hardware-dependent constants and copies them to userspace.
 *
 * @param  arg Arguments passed from the userspace.
 * @return     0 in case of success.
 *             EBADE Wrong kernel-space address (fail to copy data between user- and
 *             kernel spaces, etc.).
 */
static noinline s64
cman_drv_get_hw_info(struct hw_info_ioctl_args __user *arg)
{
	/* Designated initializer zeroes any remaining members. */
	struct hw_info_ioctl_args info = {
		.l2_capacity     = hisi_hw_caps.l2.capacity,
		.l2_segment_size = hisi_hw_caps.l2.restrict_granularity,
	};

	if (copy_to_user(arg, &info, sizeof(struct hw_info_ioctl_args)) != 0) {
		pr_err("Failed to copy data to userspace\n");
		return EBADE;
	}

	return 0;
}

/**
 * Resets L2 I/D partition configuration for the specified CPU.
 *
 * @param  cpu  Core ID.
 * @return      0 in case of successful reset operation.
 *              ECANCELED  CPU is not online or migration to it failed.
 *              EIO        Error during write to one of HW registers.
 */
static noinline s64
cman_drv_reset_l2_partition_cpu(s32 cpu)
{
	if (!cman_drv_is_cpu_online(cpu))
		return ECANCELED;

	/*
	 * Bind to the target CPU and check the result: if the CPU went
	 * offline after the check above, the migration fails and the reset
	 * would otherwise silently be applied on the wrong core.
	 */
	if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) {
		pr_err("Failed to migrate to cpu %d\n", cpu);
		return ECANCELED;
	}
	if (!cman_drv_l2_partition_reg_write(REG_CLEAN)) {
		pr_err("L2 I/D partition configuration reset failed for CPU: %d\n", cpu);
		return EIO;
	}

	return 0;
}

/**
 * Accepts userspace data and resets L2 partition configuration.
 * @param  arg  Arguments passed from the userspace.
 * @return      0 in case of successful reset operation.
 *              EBADE         Wrong kernel-space address (fail to copy data between user- and
 *              kernel spaces, etc.).
 *              EINVAL        Some of input parameters is incorrect (e.g. wrong cpu).
 *              ECANCELED     CPU is not online.
 *              EIO           Error during write to one of HW registers.
 */
static noinline s64
cman_drv_reset_l2_partition(s32 __user *arg)
{
	s32 cpu;
	s64 rc;

	if (copy_from_user(&cpu, arg, sizeof(s32))) {
		pr_err("Failed to copy userspace data\n");
		return EBADE;
	}

	/* Reject CPU ids outside [0, nr_cpu_ids). */
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		pr_err("cpu id %d is out of boundaries\n", cpu);
		return EINVAL;
	}

	rc = cman_drv_reset_l2_partition_cpu(cpu);
	if (rc != 0)
		return rc;

	pr_debug("L2 I/D partition reset successful for CPU %d\n", cpu);

	return 0;
}

/**
 * Implementation for ioctl method of Cache Management Framework control device driver.
 * This function is executed at every ioctl call on the Cache Management Framework control device file.
 *
 * Note: this function does not imply exclusive registers access and is supposed
 * to be wrapped around with a lock (see cman_drv_unlocked_ioctl()).
 *
 * @param  cmd  ioctl command that was called from the userspace.
 * @param  arg  Arguments passed from the userspace.
 * @return      0 in case of successful ioctl execution.
 *              EBADE         Wrong kernel-space address (fail to copy data between user- and
 *              kernel spaces, etc.).
 *              EINVAL        Some of input parameters is incorrect (e.g. wrong ccl).
 *              EPERM         Provided L2 I/D partition mask cuts off more instructions or data than allowed by HW.
 *              EIO           Error during write to one of HW registers.
 *              ECANCELED     CPU is not online.
 *              ENOTTY        Unknown ioctl request.
 */
static noinline s64
cman_drv_ioctl_unsafe(u32 cmd, unsigned long arg)
{
	void __user *uarg = (void __user *)arg;
	s64 rc;

	/* Dispatch the request; every handler validates its own arguments. */
	switch (cmd) {
	case CMAN_L2_PART_GET:
		rc = cman_drv_l2_partition_get((struct l2_partition_ioctl_args __user *)uarg);
		break;
	case CMAN_L2_PART_SET:
		rc = cman_drv_l2_partition_set((struct l2_partition_ioctl_args __user *)uarg);
		break;
	case CMAN_VERSION:
		rc = cman_drv_version((s32 __user *)uarg);
		break;
	case CMAN_HW_INFO:
		rc = cman_drv_get_hw_info((struct hw_info_ioctl_args __user *)uarg);
		break;
	case CMAN_RESET_L2_PART:
		rc = cman_drv_reset_l2_partition((s32 __user *)uarg);
		break;
	default:
		rc = ENOTTY;
		break;
	}

	return rc;
}

/**
 * Support for unlocked_ioctl() method of Cache Management Framework control device driver.
 * This function is executed at every ioctl call on the Cache Management Framework control device file.
 *
 * Note: implementation deferred to cman_drv_ioctl_unsafe(); this wrapper
 * only serializes the call under cman_lock.
 *
 * @param  filp File pointer to the file that was passed by the application.
 * @param  cmd  ioctl command that was called from the userspace.
 * @param  arg  Arguments passed from the userspace.
 * @return      0 in case of successful ioctl execution.
 *              EBADE         Wrong kernel-space address (fail to copy data between user- and
 *              kernel spaces, etc.).
 *              EINVAL        Some of input parameters is incorrect (e.g. wrong ccl).
 *              EPERM         Provided L2 I/D partition mask cuts off more instructions or data than allowed by HW.
 *              EIO           Error during write to one of HW registers.
 *              ECANCELED     CPU is not online.
 *              ENOTTY        Unknown ioctl request.
 */
static noinline long
cman_drv_unlocked_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	s64 rc;

	/* All ioctl handlers share HW registers; serialize the whole call. */
	mutex_lock(&cman_lock);
	rc = cman_drv_ioctl_unsafe(cmd, arg);
	mutex_unlock(&cman_lock);

	return rc;
}

/**
 * Implementation for open() method of Cache Management Framework control device driver.
 * It ensures exclusive access to a device to a single PID.
 *
 * @param  inode inode number of the file being worked on.
 * @param  filp  File pointer to the file that was passed by the application.
 * @return       0 in case of successful open.
 *               -EBUSY in case device is already opened by another process.
 *               -EMFILE in case too many file descriptors are opened for the device.
 */
static s32
cman_drv_open(struct inode *inode, struct file *filp)
{
	s32 rc = 0;

	mutex_lock(&cman_lock);
	if (opens_num == 0) {
		/* First opener claims exclusive ownership of the device. */
		process_id = current->pid;
		opens_num = 1;
	} else if (current->pid != process_id) {
		/* Another process already owns the device. */
		rc = -EBUSY;
	} else if (opens_num == U32_MAX) {
		/* Guard the open counter against overflow. */
		rc = -EMFILE;
	} else {
		opens_num++;
	}
	mutex_unlock(&cman_lock);

	return rc;
}

/**
 * Implementation for release() method of Cache Management Framework control device driver.
 *
 * @param  inode inode number of the file being worked on.
 * @param  filp  File pointer to the file that was passed by the application.
 * @return       0 on success, -EINVAL on invalid parameters or counter underflow.
 */
static s32
cman_drv_close(struct inode *inode, struct file *filp)
{
	s32 rc = -EINVAL;

	mutex_lock(&cman_lock);
	if (!inode || !filp) {
		pr_err("file_close: invalid parameters\n");
	} else if (opens_num > 0) {
		/* Drop one reference; ownership is released when it hits 0. */
		opens_num--;
		rc = 0;
	} else {
		pr_err("file_close: opens_num underflow\n");
	}
	mutex_unlock(&cman_lock);

	return rc;
}

/**
 * Structure containing set of functions describing method to implementation mappings.
 * Each field in the structure points to the function in the driver
 * that implements a specific operation (open, release, unlocked_ioctl).
 * All three handlers serialize on cman_lock, so no additional locking is
 * required in the VFS layer.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = cman_drv_open,
	.release = cman_drv_close,
	.unlocked_ioctl = cman_drv_unlocked_ioctl,
};

/**
 * Method of device class structure, sets permissions of Cache Management Framework control device driver.
 *
 * Note: the `struct device *` parameter became const in kernel 6.3, hence
 * the version-gated prototypes below.
 *
 * @param  dev Device structure representing Cache Management Framework control device.
 * @param  env Environment buffer structure.
 * @return     0 in case of successful permissions setting.
 *             -ENOMEM in case of failure (no space was available).
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
static int
cman_drv_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
#else
static int
cman_drv_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
#endif
{
	int rc;

	/* World read/write: userspace tooling opens the device unprivileged. */
	rc = add_uevent_var(env, "DEVMODE=%#o", 0666);
	if (rc != 0)
		pr_err("Failed to set the permissions for device\n");

	return rc;
}

/**
 * Function called at kernel boot time or module insertion.
 * Contains driver initialization code, device creation & configuration.
 *
 * On any failure, everything set up so far is unwound in reverse order via
 * the goto ladder, and the real error code is propagated to the caller.
 *
 * @return  0 in case of successful initialization.
 *          Negative error code in case of failures.
 */
static s32 __init cman_drv_init(void)
{
#ifdef CONFIG_ARCH_HISI
	s32 result;
	struct device *device;

	dev = &local;

	result = alloc_chrdev_region(&dev->dev_num, 0, DEVICE_COUNT, "cman_dev");
	if (result < 0) {
		pr_err("Cannot allocate major number for device\n");
		goto fail_alloc_chrdev_region;
	}

	cdev_init(&dev->cdev, &fops);
	dev->cdev.owner = THIS_MODULE;

	/* NOTE(review): registers 1 minor while DEVICE_COUNT minors were
	 * reserved above — intentional only if DEVICE_COUNT == 1; verify. */
	result = cdev_add(&dev->cdev, dev->dev_num, 1);
	if (result < 0) {
		pr_err("Cannot add major number for device\n");
		goto fail_add_cdev;
	}

	/*
	 * Creating struct class (the THIS_MODULE argument was dropped from
	 * class_create() in kernel 6.4).
	 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
	dev_class = class_create(THIS_MODULE, "cman_drv_class");
#else
	dev_class = class_create("cman_drv_class");
#endif
	if (IS_ERR(dev_class)) {
		/* Propagate the real error instead of a hard-coded -EEXIST. */
		result = PTR_ERR(dev_class);
		pr_err("Cannot create the struct class for device\n");
		goto fail_create_class;
	}
	dev_class->dev_uevent = cman_drv_dev_uevent;

	/*
	 * Creating device node /dev/cman_dev.
	 */
	device = device_create(dev_class, NULL, dev->dev_num, NULL, "cman_dev");
	if (IS_ERR(device)) {
		/* Propagate the real error instead of a hard-coded -EINVAL. */
		result = PTR_ERR(device);
		pr_err("Cannot create the device\n");
		goto fail_create_device;
	}

	process_id = 0;
	opens_num = 0;

	mutex_init(&cman_lock);

	/* Platform probing touches the same state as the ioctl path. */
	mutex_lock(&cman_lock);
	result = cman_drv_platform_info_init();
	mutex_unlock(&cman_lock);
	if (result < 0)
		goto fail_platform_init;
	return 0;

fail_platform_init:
	device_destroy(dev_class, dev->dev_num);
fail_create_device:
	class_destroy(dev_class);
fail_create_class:
	cdev_del(&dev->cdev);
fail_add_cdev:
	unregister_chrdev_region(dev->dev_num, DEVICE_COUNT);
fail_alloc_chrdev_region:
	return result;
#else
	return 0;
#endif
}

/**
 * Function called at module unload from the system.
 * Contains driver clean-up code.
 *
 * Teardown mirrors cman_drv_init() in exact reverse order:
 * device node, class, char device, then the reserved dev_t region.
 */
static void __exit cman_drv_exit(void)
{
#ifdef CONFIG_ARCH_HISI
	device_destroy(dev_class, dev->dev_num);
	class_destroy(dev_class);
	cdev_del(&dev->cdev);
	unregister_chrdev_region(dev->dev_num, DEVICE_COUNT);
#endif
}

/**
 * Cache Management Framework control driver initialization entry point.
 * It calls function to be run at kernel boot time or module insertion.
 */
module_init(cman_drv_init);

/**
 * Cache Management Framework control driver exit point.
 * It calls function to be run when the module is unloaded from the system.
 */
module_exit(cman_drv_exit);
