// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <liblinux/sysfs.h>
#include <linux/kobject.h>
#include <linux/rwsem.h>
#include <linux/llist.h>

#define SYS_OP_CREATE_FILE	0
#define SYS_OP_CREATE_DIR	1
#define SYS_OP_CREATE_LINK	2
#define SYS_OP_DELETE		3

/*
 * One deferred sysfs operation.  Allocated by alloc_work(); executed and
 * then freed by sysfs_work().  String members are owned copies released
 * in free_work().
 */
struct sysfs_work {
	struct llist_node lnode;	/* link on delayed_list while parked */
	struct work_struct work;	/* runs sysfs_work() on sysfs_wq */
	int op;				/* one of SYS_OP_* */
	mode_t mode;
	char *name;			/* target path/name, kstrdup'd */

	/* file create */
	struct liblinux_pal_kernfs_ops *fops;
	void *ctx;

	/* create link */
	char *linkdata;			/* link target, kstrdup'd */
};

/* Guards sysfs_wq/async_delay: readers dispatch, writers flush/tear down. */
static DECLARE_RWSEM(sysfs_lock);
/* While true, new work is parked on delayed_list instead of being queued. */
static bool async_delay = true;
/* Work items accumulated while async_delay is set. */
static LLIST_HEAD(delayed_list);
/* Single-threaded queue running the sysfs operations; NULL => synchronous. */
static struct workqueue_struct *sysfs_wq;
static void sysfs_work(struct work_struct *work);

/*
 * Provided by liblinux; presumably fills allowed_mask with the default
 * affinity restricted to new_mask — TODO confirm against its definition.
 */
extern void liblinux_get_cpus_allowed(struct cpumask *allowed_mask,
				      const struct cpumask *new_mask);

/*
 * Pick (and cache) the CPU that async sysfs work is queued on.
 *
 * Try not to choose default_affinity_cpus and boot_cpu to avoid competing
 * with other kworkers or the main thread for cpu resources.  Selection
 * order: any online CPU outside both the boot CPU and the default-affinity
 * set; else any non-boot online CPU; else the boot CPU itself.
 *
 * NOTE(review): the cached lookup is not protected against concurrent
 * first calls; callers appear to be serialized via sysfs_lock/the single
 * workqueue — confirm if that ever changes.
 */
static int get_sysfs_cpu(void)
{
	/*
	 * Sentinel -1 means "not chosen yet".  Using plain -1 (not -1U)
	 * avoids an implementation-defined unsigned-to-int conversion and
	 * a signed/unsigned comparison below.
	 */
	static int sysfs_cpu = -1;
	struct cpumask non_boot_mask;
	struct cpumask default_affinity_mask;
	struct cpumask mask;
	int boot_cpu;
	int ret;

	if (sysfs_cpu != -1)
		return sysfs_cpu;

	boot_cpu = get_boot_cpu_id();
	cpumask_copy(&non_boot_mask, cpu_online_mask);
	cpumask_clear_cpu(boot_cpu, &non_boot_mask);
	/* Fallback to boot_cpu if non_boot_mask is empty */
	if (cpumask_empty(&non_boot_mask)) {
		sysfs_cpu = boot_cpu;
		return sysfs_cpu;
	}

	liblinux_get_cpus_allowed(&default_affinity_mask, &non_boot_mask);
	ret = cpumask_andnot(&mask, &non_boot_mask, &default_affinity_mask);
	/* Fallback to default_affinity_cpus if mask is empty */
	if (ret == 0)
		sysfs_cpu = cpumask_any(&non_boot_mask);
	else
		sysfs_cpu = cpumask_any(&mask);

	return sysfs_cpu;
}

/*
 * Allocate a zeroed work descriptor for the given SYS_OP_* code.
 * Returns ERR_PTR(-ENOMEM) on allocation failure; the caller owns the
 * result and must release it through free_work() (or let sysfs_work()
 * consume it).
 */
static struct sysfs_work *alloc_work(int op)
{
	struct sysfs_work *work = kzalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return ERR_PTR(-ENOMEM);

	work->op = op;
	INIT_WORK(&work->work, sysfs_work);
	return work;
}

static void free_work(struct sysfs_work *w)
{
	if (w) {
		kfree(w->name);
		kfree(w->linkdata);
		kfree(w);
	}
}

static void do_work(struct sysfs_work *w)
{
	down_read(&sysfs_lock);
	if (sysfs_wq) {
		if (async_delay)
			llist_add(&w->lnode, &delayed_list);
		else
			queue_work_on(get_sysfs_cpu(), sysfs_wq, &w->work);
	} else {
		sysfs_work(&w->work);
	}
	up_read(&sysfs_lock);
}

/*
 * Work handler: perform the recorded sysfs operation via the PAL layer,
 * log any failure, and free the descriptor.  Consumes w unconditionally.
 */
static void sysfs_work(struct work_struct *work)
{
	struct sysfs_work *w = container_of(work, struct sysfs_work, work);
	int ret;

	switch (w->op) {
	case SYS_OP_CREATE_FILE:
		ret = liblinux_pal_sysfs_create_file(w->name, w->mode, w->fops, w->ctx);
		break;
	case SYS_OP_CREATE_DIR:
		ret = liblinux_pal_sysfs_create_dir(w->name, w->mode);
		break;
	case SYS_OP_CREATE_LINK:
		ret = liblinux_pal_sysfs_create_link(w->name, w->mode, w->linkdata);
		break;
	case SYS_OP_DELETE:
		ret = liblinux_pal_sysfs_delete(w->name);
		break;
	default:
		/* panic() does not return, so ret is always set below. */
		panic("unsupported op %d\n", w->op);
	}

	if (ret < 0)
		pr_err("file %s do op %d failed %d\n", w->name, w->op, ret);

	free_work(w);
}

/*
 * Request creation of a sysfs directory (possibly deferred to the async
 * workqueue).  Returns 0 on successful submission, -ENOMEM on allocation
 * failure.
 */
int liblinux_sysfs_create_dir(const char *name, unsigned int mode)
{
	struct sysfs_work *work = alloc_work(SYS_OP_CREATE_DIR);

	if (IS_ERR(work))
		return PTR_ERR(work);

	work->name = kstrdup(name, GFP_KERNEL);
	if (!work->name) {
		free_work(work);
		return -ENOMEM;
	}

	work->mode = mode;
	do_work(work);
	return 0;
}

/*
 * Request creation of a sysfs file backed by @fops/@ctx (possibly deferred
 * to the async workqueue).  @fops and @ctx are stored by reference and must
 * outlive the operation.  Returns 0 on successful submission, -ENOMEM on
 * allocation failure.
 */
int liblinux_sysfs_create_file(const char *name, unsigned int mode,
				struct liblinux_pal_kernfs_ops *fops,
				void *ctx)
{
	struct sysfs_work *work = alloc_work(SYS_OP_CREATE_FILE);

	if (IS_ERR(work))
		return PTR_ERR(work);

	work->name = kstrdup(name, GFP_KERNEL);
	if (!work->name) {
		free_work(work);
		return -ENOMEM;
	}

	work->fops = fops;
	work->ctx = ctx;
	work->mode = mode;
	do_work(work);
	return 0;
}

/*
 * Request creation of a sysfs link pointing at @linkdata (possibly deferred
 * to the async workqueue).  Both strings are copied.  Returns 0 on
 * successful submission, -ENOMEM on allocation failure.
 */
int liblinux_sysfs_create_link(const char *name, unsigned int mode,
				const char *linkdata)
{
	struct sysfs_work *work = alloc_work(SYS_OP_CREATE_LINK);

	if (IS_ERR(work))
		return PTR_ERR(work);

	work->name = kstrdup(name, GFP_KERNEL);
	work->linkdata = kstrdup(linkdata, GFP_KERNEL);
	if (!work->name || !work->linkdata) {
		free_work(work);
		return -ENOMEM;
	}

	work->mode = mode;
	do_work(work);
	return 0;
}

/*
 * Request deletion of a sysfs entry (possibly deferred to the async
 * workqueue).  Returns 0 on successful submission, -ENOMEM on allocation
 * failure.
 */
int liblinux_sysfs_delete(const char *name)
{
	struct sysfs_work *work = alloc_work(SYS_OP_DELETE);

	if (IS_ERR(work))
		return PTR_ERR(work);

	work->name = kstrdup(name, GFP_KERNEL);
	if (!work->name) {
		free_work(work);
		return -ENOMEM;
	}

	do_work(work);
	return 0;
}

/*
 * Stop delaying and push everything parked on delayed_list onto the
 * workqueue.  Must be called with the sysfs_lock write lock held.
 */
static void flush_delayed_sysfs(void)
{
	struct llist_node *pending;
	struct sysfs_work *entry, *next;

	async_delay = false;
	pending = llist_del_all(&delayed_list);
	if (!pending)
		return;

	/* llist stacks entries LIFO; reverse to queue in submission order. */
	pending = llist_reverse_order(pending);
	llist_for_each_entry_safe(entry, next, pending, lnode)
		queue_work_on(get_sysfs_cpu(), sysfs_wq, &entry->work);
}

/* Declare a write-only kobj_attribute named <_name>_attr for store-only files. */
#define SYSFS_ATTR_WO(_name) \
	struct kobj_attribute _name##_attr = __ATTR_WO(_name)

/*
 * Store handler for the "flush" attribute: any write switches the module
 * to synchronous mode.  Under the write lock (which excludes dispatchers
 * in do_work()), all parked work is queued, the workqueue is drained and
 * destroyed, and sysfs_wq is cleared so later do_work() calls run inline.
 * Idempotent: a second write finds sysfs_wq NULL and does nothing.
 */
static ssize_t flush_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	down_write(&sysfs_lock);
	if (sysfs_wq) {
		pr_info("flush async sysfs\n");
		flush_delayed_sysfs();
		flush_workqueue(sysfs_wq);
		pr_info("async sysfs flushed\n");

		destroy_workqueue(sysfs_wq);
		sysfs_wq = NULL;
	}
	up_write(&sysfs_lock);

	return count;
}
SYSFS_ATTR_WO(flush);

/* Attributes exposed under the "sysfs" kobject created in async_init. */
static struct attribute *sysfs_attrs[] = {
	&flush_attr.attr,
	NULL	/* sentinel */
};

static const struct attribute_group sysfs_attr_group = {
	.attrs = sysfs_attrs,
};

void liblinux_sysfs_async_init(void)
{
	int ret;
	struct kobject *sysfs_obj;

	sysfs_obj = kobject_create_and_add("sysfs", fs_kobj);
	if (!sysfs_obj)
		return;

	ret = sysfs_create_group(sysfs_obj, &sysfs_attr_group);
	if (ret < 0) {
		kobject_put(sysfs_obj);
		return;
	}

	if (num_online_cpus() > 1U) {
		down_write(&sysfs_lock);
		sysfs_wq = create_singlethread_workqueue("async_sysfs");
		up_write(&sysfs_lock);
	}
}

/*
 * Earliest initcall: once the system is far enough along, release any work
 * parked on delayed_list and stop delaying (flush_delayed_sysfs() clears
 * async_delay).  No-op when the workqueue was never created.
 */
static int __init liblinux_sysfs_async_start(void)
{
	down_write(&sysfs_lock);
	if (sysfs_wq)
		flush_delayed_sysfs();
	up_write(&sysfs_lock);
	return 0;
}
pure_initcall(liblinux_sysfs_async_start);
