/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019.
 * Create: 2023
 */

#include <linux/sysctl.h>
#include <linux/euleros_mm.h>

/*
 * When non-zero, only pages in VMAs that have been madvise()d as swappable
 * (VM_SWAPABLE) are allowed to be swapped out; see vma_allowed_swap_pages().
 * Settable via the "swap_madvised_only=" boot option and the
 * vm.swap_madvised_only sysctl (0 or 1).
 */
int sysctl_swap_madvised_only;

static int __init setup_swap_madvised_only(char *str)
{
	int ret = 0;
	bool enabled = false;

	ret = kstrtobool(str, &enabled);
	if (!ret)
		sysctl_swap_madvised_only = (int)enabled;
	return (ret == 0);
}
__setup("swap_madvised_only=", setup_swap_madvised_only);

/*
 * Decide whether pages of @vma may be swapped out.
 *
 * With swap_madvised_only disabled, every VMA is eligible.  Otherwise only
 * VMAs carrying the VM_SWAPABLE flag qualify.
 *
 * Returns 1 when swapping is allowed, 0 otherwise.
 */
int vma_allowed_swap_pages(struct vm_area_struct *vma)
{
	if (!sysctl_swap_madvised_only)
		return 1;

	return (vma->vm_flags & VM_SWAPABLE) ? 1 : 0;
}

/*
 * cftype write handler: drop the page cache charged to this memory cgroup.
 *
 * Only the value 1 triggers the drop; anything else is rejected with
 * -EINVAL.  Walks all mounted superblocks, invalidating cached pages that
 * belong to the target cgroup.
 */
int mem_cgroup_drop_caches(struct cgroup_subsys_state *css, struct cftype *cft,
				u64 val)
{
	if (val != 1)
		return -EINVAL;

	pr_info("%s %d: drop cgroup pagecache\n",
			current->comm, task_pid_nr(current));
	iterate_supers(drop_pagecache_sb, mem_cgroup_from_css(css));
	return 0;
}

#ifdef CONFIG_MEMCG
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * When @memcg is non-NULL, pages charged to a different memcg are left
 * alone, so only the target cgroup's page cache is dropped.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int mem_cgroup_invalidate_inode_page(struct page *page, void *memcg)
{
	/* Filtering requested and page belongs to another cgroup: skip it. */
	if (memcg && (page_memcg(page) != memcg))
		return 0;
	return invalidate_inode_page(page);
}
#else
/* !CONFIG_MEMCG: no per-cgroup charge info, so @memcg cannot be honoured. */
int mem_cgroup_invalidate_inode_page(struct page *page, void *memcg)
{
	return invalidate_inode_page(page);
}
#endif

/*
 * Invalidate the pages of @mapping in [@start, @end], restricted to @memcg.
 *
 * Thin wrapper around __invalidate_mapping_pages(); returns the number of
 * pages invalidated, with @nr_pageveci updated by the callee.
 */
unsigned long memcg_invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pageveci, void *memcg)
{
	unsigned long nr_invalidated;

	nr_invalidated = __invalidate_mapping_pages(mapping, start, end,
						    nr_pageveci, memcg);
	return nr_invalidated;
}

#ifdef CONFIG_SYSCTL
/* Default shrink aggressiveness passed to shrink_slab() (see below). */
#define DROP_SLABS_LIMIT 7
/* Hard cap on full memcg-tree scan passes per node. */
#define DROP_SLABS_MAX_SCAN_NUM 100
/* Pause between scan passes, to avoid monopolising the CPU. */
#define DROP_SLABS_SLEEP_TIME_MS 50
/* Stop rescanning once a pass frees this many objects or fewer. */
#define DROP_SLABS_FREED_TARGET 10

/* vm.drop_slabs sysctl: writing 1 triggers a limited slab drop. */
int sysctl_drop_slabs = 1;

/* vm.drop_slabs_limit sysctl: aggressiveness handed to shrink_slab() (0..12). */
unsigned int sysctl_drop_slabs_limit = DROP_SLABS_LIMIT;

/*
 * Repeatedly shrink slab caches of every memcg on node @nid until one pass
 * frees no more than DROP_SLABS_FREED_TARGET objects, the pass budget
 * (DROP_SLABS_MAX_SCAN_NUM) is exhausted, or a signal is pending.  Sleeps
 * DROP_SLABS_SLEEP_TIME_MS between passes.
 *
 * Returns the number of objects freed by the final pass.
 */
static unsigned long drop_slab_node_limit(int nid)
{
	unsigned long freed;
	unsigned int passes = 0;

	do {
		struct mem_cgroup *iter;

		freed = 0;
		/* Walk the whole memcg hierarchy, root included. */
		iter = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, iter,
					     sysctl_drop_slabs_limit);
			iter = mem_cgroup_iter(NULL, iter, NULL);
		} while (iter);

		if (++passes >= DROP_SLABS_MAX_SCAN_NUM)
			break;
		if (signal_pending(current))
			return freed;
		msleep_interruptible(DROP_SLABS_SLEEP_TIME_MS);
	} while (freed > DROP_SLABS_FREED_TARGET);

	return freed;
}

/*
 * Drop slab caches on every online node (bounded per-node scanning) and
 * log the total freed count together with the requesting task.
 */
static void drop_slab_limit(void)
{
	int nid;
	unsigned long nr_dropped = 0;

	for_each_online_node(nid) {
		nr_dropped += drop_slab_node_limit(nid);
	}
	/* %lu: nr_dropped is unsigned long (was %ld, a format mismatch). */
	pr_info("%s %d: dropped slabs %lu.\n",
		current->comm, task_pid_nr(current), nr_dropped);
}

/* Upper bound for vm.drop_slabs_limit — presumably matches the reclaim
 * DEF_PRIORITY (12) used as shrink_slab() aggressiveness; TODO confirm. */
static int twelve = 12;

/*
 * proc handler for vm.drop_slabs: validates the written value via
 * proc_dointvec_minmax (extra1/extra2 pin it to exactly 1), then performs
 * the limited slab drop.  Reads just report the stored value.
 */
static int drop_slabs_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int err = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (err)
		return err;
	if (write)
		drop_slab_limit();
	return 0;
}

/* EulerOS mm knobs registered under /proc/sys/vm/ (see init below). */
static struct ctl_table euleros_mm_sysctls[] = {
	/* Boolean: restrict swapping to madvise()d VMAs. */
	{
		.procname	= "swap_madvised_only",
		.data		= &sysctl_swap_madvised_only,
		.maxlen		= sizeof(sysctl_swap_madvised_only),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	/*
	 * Write-1-to-trigger: extra1 == extra2 == SYSCTL_ONE deliberately
	 * restricts the accepted value to exactly 1; the handler then runs
	 * drop_slab_limit().
	 */
	{
		.procname       = "drop_slabs",
		.data           = &sysctl_drop_slabs,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = drop_slabs_sysctl_handler,
		.extra1         = SYSCTL_ONE,
		.extra2         = SYSCTL_ONE,
	},
	/* Shrink aggressiveness for drop_slabs, clamped to [0, 12]. */
	{
		.procname       = "drop_slabs_limit",
		.data           = &sysctl_drop_slabs_limit,
		.maxlen         = sizeof(sysctl_drop_slabs_limit),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = &twelve,
	},
	{}	/* sentinel */
};

/* Register the table above under "vm"; late_initcall so procfs is ready. */
static int __init euleros_mm_sysctl_init(void)
{
	register_sysctl_init("vm", euleros_mm_sysctls);
	return 0;
}
late_initcall(euleros_mm_sysctl_init);
#endif /* CONFIG_SYSCTL */
