// SPDX-License-Identifier: GPL-2.0
#include <linux/kobject.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/swap_ahead.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/rmap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/sched/cputime.h>
#include <linux/sched/coredump.h>
#include <linux/parser.h>
#include <linux/magic.h>
#include "../../fs/proc/internal.h"

/* Armed the first time any swap opt-out feature is actually used. */
DEFINE_STATIC_KEY_FALSE(enable_swap_ever_used);

/* basic abilities */
/* Per-node worker state; one instance per NUMA node, kept in tasks[]. */
struct swap_ahead_task {
	struct task_struct *task;	/* the per-node swap_ahead kthread */
	atomic_t scans_to_run;		/* scans queued by the daemon */
	s64 last_ktime;			/* wall-clock checkpoint (ns) */
	s64 last_stime;			/* system-time checkpoint (ns) */
	s64 ns_this_round;		/* wall time charged since stime last moved */
	wait_queue_head_t wait;		/* worker sleeps here between rounds */
};
static struct swap_ahead_task *tasks[MAX_NUMNODES];
static DEFINE_MUTEX(swap_ahead_mutex);	/* guards enable state and tasks[] teardown */
static struct task_struct *swap_aheadd_task;	/* the daemon thread */
static DECLARE_WAIT_QUEUE_HEAD(swap_ahead_wait);	/* workers wait for their slot */
#ifdef CONFIG_SYSFS
static bool enable_inherit __read_mostly = true;	/* fork inherits MMF_DISABLE_SWAP */
static bool tmpfs_swap __read_mostly = true;	/* may tmpfs pages be swapped */
#endif
int disable_swap_default;	/* boot-time default for MMF_DISABLE_SWAP */

/* accessors for a node's queued-scan counter */
#define read_scans_to_run(nid) atomic_read(&tasks[nid]->scans_to_run)
#define clear_scans_to_run(nid) atomic_set(&tasks[nid]->scans_to_run, 0)
#define inc_scans_to_run(nid) atomic_inc(&tasks[nid]->scans_to_run)
#define dec_scans_to_run(nid) atomic_dec(&tasks[nid]->scans_to_run)
/* global CPU-time budget shared by all workers; may go negative */
static atomic64_t ns_left_to_run;
static s64 next_charge_ns;	/* next moment the budget is topped up */

/*
 * Allocate and initialise a per-node swap_ahead_task.
 * Returns NULL on allocation failure.
 */
static inline struct swap_ahead_task *alloc_swap_ahead_task(void)
{
	/*
	 * kzalloc instead of kmalloc: the old path left last_ktime and
	 * task uninitialised until first use, which is fragile.
	 */
	struct swap_ahead_task *result = kzalloc(sizeof(*result), GFP_KERNEL);

	if (!result)
		return NULL;

	atomic_set(&result->scans_to_run, 0);
	init_waitqueue_head(&result->wait);

	return result;
}

/* System CPU time consumed by the current task, in nanoseconds. */
static inline s64 get_stime(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return stime;
}

/* Monotonic clock reading, in nanoseconds. */
static inline s64 get_ktime(void)
{
	return ktime_get_ns();
}

#define RATIO_DENOMINATOR 100	/* ratios are expressed in percent */
static bool enabled;	/* daemon running; changed under swap_ahead_mutex */
/* memory-usage percentage that triggers a scan */
static unsigned ratio_threshold __read_mostly = 60;
/* daemon wake-up period */
static int scan_interval_msec __read_mostly = 100;
/* scan length handed to vmscan via get_swap_ahead_scan_len() */
static int scan_len __read_mostly = 6000;
/* worker CPU budget, as a percentage of wall time */
static int cpu_limit __read_mostly = 3;

static inline s64 get_scan_interval_ns(void)
{
	return NSEC_PER_MSEC * (s64)READ_ONCE(scan_interval_msec);
}

static inline bool swap_ahead_should_wake_up(int nid)
{
	if (kthread_should_stop())
		return true;
	return (atomic64_read(&ns_left_to_run) > 0 &&
		read_scans_to_run(nid) > 0);
}

static inline void wait_for_next_round(int nid)
{
	wait_event_interruptible(tasks[nid]->wait,
						swap_ahead_should_wake_up(nid));
}

/* functions provided for vmscan */
/* Current scan length tunable, read without the mutex. */
int get_swap_ahead_scan_len(void)
{
	return READ_ONCE(scan_len);
}

/* Restart the wall-clock checkpoint used by deduct_ns_to_run(). */
static inline void update_time(struct swap_ahead_task *task)
{
	task->last_ktime = get_ktime();
}

/*
 * Charge the time consumed since the last checkpoint against the
 * global budget (ns_left_to_run).
 *
 * While the task's system time has not advanced, wall-clock time is
 * charged and accumulated in ns_this_round.  Once stime moves, the
 * stime delta (minus the wall time already charged this round)
 * replaces that estimate and the round restarts.
 */
static inline void deduct_ns_to_run(struct swap_ahead_task *task)
{
	s64 now = get_ktime();
	s64 stime = get_stime();
	s64 elapsed;

	if (stime == task->last_stime) {
		elapsed = now - task->last_ktime;
		task->ns_this_round += elapsed;
	} else {
		elapsed = stime - task->last_stime - task->ns_this_round;
		task->last_stime = stime;
		task->ns_this_round = 0;
	}
	task->last_ktime = now;

	if (elapsed > 0)
		atomic64_sub(elapsed, &ns_left_to_run);
}

/*
 * Called by a worker between reclaim batches: charge consumed time,
 * then block while the global budget is exhausted.
 *
 * The old trailing "if (kthread_should_stop()) return;" was dead code
 * (the function returned immediately afterwards anyway); the stop
 * check now simply exits the wait loop.
 */
void swap_ahead_might_sleep(int nid)
{
	cond_resched();
	deduct_ns_to_run(tasks[nid]);

	while (atomic64_read(&ns_left_to_run) < 0) {
		if (kthread_should_stop())
			return;
		wait_for_next_round(nid);
		/* budget granted: restart the wall-clock checkpoint */
		update_time(tasks[nid]);
	}
}

/*
 * True when the node's usage ratio exceeds @threshold percent, i.e.
 * used / total > threshold / RATIO_DENOMINATOR.
 * A zero threshold means "always beyond".
 */
static inline bool node_beyond_ratio(pg_data_t *pgdat, u64 threshold)
{
	unsigned long nr_pages = 0;
	unsigned long free_pages = 0;
	struct zone *zone;
	int i;

	if (!threshold)
		return true;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		nr_pages += zone_managed_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * (used / total) > threshold%.  Compare in u64: on 32-bit,
	 * 100 * page counts can overflow unsigned long.
	 */
	return (u64)(nr_pages - free_pages) * RATIO_DENOMINATOR >
	       (u64)nr_pages * threshold;
}

/* Worker start gate: its tasks[] slot is published, or we must stop. */
static inline bool swap_ahead_is_ready(int nid)
{
	return kthread_should_stop() || READ_ONCE(tasks[nid]) != NULL;
}

/* main threads */
/*
 * Per-node worker kthread.  Bound to its node's CPUs, it waits for the
 * daemon to queue scans, then reclaims via swap_ahead_node() while
 * swap_ahead_might_sleep() keeps it inside the global CPU budget.
 */
static int swap_ahead(void *p)
{
	struct task_struct *tsk = current;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	/* p points at our slot inside tasks[]; the offset is the node id */
	int nid = (struct swap_ahead_task **)p - tasks;
	const struct cpumask *cpumask = cpumask_of_node(nid);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_user_nice(current, MAX_NICE);
	tsk->reclaim_state = &reclaim_state;
	/* reclaim like kswapd: may dip into reserves and write swap */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	/* block until start_swap_ahead() has published tasks[nid] */
	wait_event_interruptible(swap_ahead_wait, swap_ahead_is_ready(nid));

	for (; ;) {
		if (kthread_should_stop())
			break;
		update_time(tasks[nid]);

		for (; read_scans_to_run(nid) > 0; dec_scans_to_run(nid)) {
			if (kthread_should_stop())
				break;
			/* stop early once usage drops below the ratio */
			if (!node_beyond_ratio(NODE_DATA(nid),
					       READ_ONCE(ratio_threshold))) {
				clear_scans_to_run(nid);
				break;
			}

			swap_ahead_node(nid);
			swap_ahead_might_sleep(nid);
		}

		wait_for_next_round(nid);
	}

	tsk->reclaim_state = NULL;
	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);

	return 0;
}

/* Racy but harmless peek: is this node's kswapd currently running? */
static bool kswapd_is_running(pg_data_t *pgdat)
{
	struct task_struct *kswapd = pgdat->kswapd;

	return kswapd && kswapd->state == TASK_RUNNING;
}

/*
 * Top up the global budget by (interval * cpu_limit%) nanoseconds,
 * at most once per scan interval, and cap the banked budget so idle
 * periods cannot accumulate unlimited run time.
 */
static inline void charge_run_time(void)
{
	s64 interval = get_scan_interval_ns();
	s64 new_next_charge_ns = ktime_get_ns() + interval;
	s64 ns_to_charge, next_charge_local;

	next_charge_local = READ_ONCE(next_charge_ns);
	if (new_next_charge_ns <= next_charge_local)
		return;

	interval = min(interval, new_next_charge_ns - next_charge_local);
	/*
	 * interval * cpu%.  div_s64 replaces the old CONFIG_ARM
	 * "(s32)interval" workaround, which silently truncated
	 * intervals longer than ~2.1 seconds on 32-bit.
	 */
	ns_to_charge = div_s64(interval * READ_ONCE(cpu_limit),
			       RATIO_DENOMINATOR);
	/* never let the stored budget exceed one charge's worth */
	ns_to_charge = min(ns_to_charge,
			   ns_to_charge - atomic64_read(&ns_left_to_run));
	if (ns_to_charge > 0)
		atomic64_add(ns_to_charge, &ns_left_to_run);

	next_charge_ns = new_next_charge_ns;
}

/* Top up the CPU budget, then kick every node that has scans queued. */
static inline void wake_swap_ahead(void)
{
	int nid;

	charge_run_time();

	for_each_node_state(nid, N_MEMORY) {
		if (read_scans_to_run(nid) == 0)
			continue;
		/* back off while kswapd is already reclaiming this node */
		if (kswapd_is_running(NODE_DATA(nid)))
			clear_scans_to_run(nid);
		else
			wake_up_interruptible(&tasks[nid]->wait);
	}
}

/* Release a node's task descriptor and clear its slot. */
static inline void free_task_by_nid(int nid)
{
	struct swap_ahead_task *t = tasks[nid];

	tasks[nid] = NULL;
	kfree(t);
}

static inline int start_swap_ahead(int nid)
{
	int ret;
	struct swap_ahead_task *t = alloc_swap_ahead_task();

	if (!t)
		return -ENOMEM;

	t->task = kthread_run(swap_ahead, &tasks[nid], "swap_ahead%d", nid);
	if (IS_ERR(t->task)) {
		ret = PTR_ERR(t->task);
		kfree(t);
		pr_err("swap_ahead: kthread_run returns %d\n", ret);
		return ret;
	}

	/* avoid changing tasks[nid] to NULL outside our lock */
	tasks[nid] = t;
	return 0;
}

/*
 * Queue one scan on every memory node whose usage exceeds the
 * configured ratio, creating the per-node worker on first use.
 * Returns true when at least one node was queued.
 */
static inline bool find_node_beyond_thresholds(void)
{
	unsigned int threshold = READ_ONCE(ratio_threshold);
	bool found = false;
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		if (!tasks[nid] && start_swap_ahead(nid))
			continue;	/* could not create a worker here */

		if (!node_beyond_ratio(NODE_DATA(nid), threshold))
			continue;

		/* saturate rather than overflow the counter */
		if (read_scans_to_run(nid) < INT_MAX)
			inc_scans_to_run(nid);
		found = true;
	}

	return found;
}

/*
 * Sleep until @next_scan_ns, waking every 500ms so a stop request is
 * noticed promptly.
 */
static void sleep_to_next_scan(s64 next_scan_ns)
{
	for (; ;) {
		unsigned int ms_to_sleep;
		s64 cur_ns;

		if (kthread_should_stop())
			break;

		cur_ns = ktime_get_ns();
		if (next_scan_ns <= cur_ns)
			break;
		/*
		 * div_s64 replaces the old CONFIG_ARM "(s32)" cast,
		 * which truncated deltas longer than ~2.1 seconds
		 * on 32-bit.
		 */
		ms_to_sleep = div_s64(next_scan_ns - cur_ns,
				      NSEC_PER_MSEC) + 1;
		if (ms_to_sleep <= 1000) {
			msleep_interruptible(ms_to_sleep);
			break;
		}

		msleep_interruptible(500);
	}
}

/*
 * Daemon thread: on every scan interval, queue scans on busy nodes
 * and wake the per-node workers.
 */
static int swap_aheadd(void *unused)
{
	s64 next_scan_ns = ktime_get_ns();

	atomic64_set(&ns_left_to_run, 0);

	while (!kthread_should_stop()) {
		if (find_node_beyond_thresholds())
			wake_swap_ahead();

		next_scan_ns += get_scan_interval_ns();
		sleep_to_next_scan(next_scan_ns);
	}

	return 0;
}

/* functions provided for vmscan */
/*
 * rmap callback: flag the page as non-evictable and abort the walk
 * (return false) when a mapping VMA's mm has opted out of swap, or
 * maps a tmpfs file while tmpfs swapping is disabled.
 */
static bool is_vma_noevict(struct page *page, struct vm_area_struct *vma,
			   unsigned long addr, void *arg)
{
	bool *noevict = arg;

	if (test_bit(MMF_DISABLE_SWAP, &vma->vm_mm->flags)) {
		*noevict = true;
		return false;
	}

	if (vma->vm_file && !is_tmpfs_swapped() &&
	    vma->vm_file->f_inode->i_sb->s_magic == TMPFS_MAGIC) {
		*noevict = true;
		return false;
	}

	return true;
}

/* Walk all mappings of @page; true if any VMA forbids eviction. */
static inline bool is_page_noevict(struct page *page)
{
	bool noevict = false;
	struct rmap_walk_control rwc = {
		.rmap_one = is_vma_noevict,
		.arg = &noevict,
		.anon_lock = page_lock_anon_vma_read,
	};

	rmap_walk(page, &rwc);
	return noevict;
}

/*
 * Entry point for vmscan: should @page be kept off the swap path?
 * Cheap checks first; the rmap walk only runs once any swap opt-out
 * feature has ever been used (static key).
 */
bool mm_noevict_page(struct page *page)
{
	if (unlikely(PageKsm(page)) ||
	    !static_key_enabled(&enable_swap_ever_used))
		return false;

	return is_page_noevict(page);
}

/* sysfs interfaces */
/* Stop and free every per-node worker. */
static void stop_all_tasks(void)
{
	int nid;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (tasks[nid]) {
			kthread_stop(tasks[nid]->task);
			free_task_by_nid(nid);
		}
	}
}

/*
 * Start the daemon; no-op when already enabled.  Called with
 * swap_ahead_mutex held.  Returns 0 or a negative errno.
 */
static int swap_ahead_enable(void)
{
	struct task_struct *task;

	if (enabled)
		return 0;

	task = kthread_run(swap_aheadd, NULL, "swap_aheadd");
	if (IS_ERR(task))
		return PTR_ERR(task);

	/*
	 * Only publish on success: the old code stored the ERR_PTR in
	 * swap_aheadd_task, leaving a poisoned pointer behind.
	 */
	swap_aheadd_task = task;
	enabled = true;
	return 0;
}

/*
 * Stop the daemon and all workers; no-op when already disabled.
 * Called with swap_ahead_mutex held.
 */
static void swap_ahead_disable(void)
{
	if (!enabled)
		return;

	kthread_stop(swap_aheadd_task);
	swap_aheadd_task = NULL;
	stop_all_tasks();
	enabled = false;
}

static ssize_t sysfs_enabled_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", enabled ? 1 : 0);
}

/* Accept "1" to start the daemon, "0" to stop it. */
static ssize_t sysfs_enabled_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;

	mutex_lock(&swap_ahead_mutex);
	if (sysfs_streq(buf, "0")) {
		swap_ahead_disable();
		err = 0;
	} else if (sysfs_streq(buf, "1")) {
		err = swap_ahead_enable();
	} else {
		err = -EINVAL;
	}
	mutex_unlock(&swap_ahead_mutex);

	return err ? err : count;
}
struct kobj_attribute sysfs_enabled_attr =
	__ATTR(enabled, 0600, sysfs_enabled_show, sysfs_enabled_store);

static ssize_t
sysfs_ratio_threshold_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", READ_ONCE(ratio_threshold));
}

/* Set the scan-trigger ratio; must be within 0..RATIO_DENOMINATOR. */
static ssize_t
sysfs_ratio_threshold_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	int err;
	unsigned int value;

	err = kstrtouint(buf, 0, &value);
	if (err)
		return err;
	if (value > RATIO_DENOMINATOR)
		return -EINVAL;

	/* pairs with the READ_ONCE(ratio_threshold) readers */
	WRITE_ONCE(ratio_threshold, value);
	return count;
}
struct kobj_attribute sysfs_ratio_threshold_attr =
	__ATTR(scan_trigger_ratio, 0600, sysfs_ratio_threshold_show,
	       sysfs_ratio_threshold_store);

static ssize_t sysfs_scan_interval_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(scan_interval_msec));
}

/*
 * Set the daemon scan interval (milliseconds, >= 1) and reset the
 * budget bookkeeping so the new interval takes effect immediately.
 */
static ssize_t
sysfs_scan_interval_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	int err, value;

	err = kstrtoint(buf, 0, &value);
	if (err)
		return err;
	if (value < 1)
		return -EINVAL;

	/* pairs with READ_ONCE() in get_scan_interval_ns() */
	WRITE_ONCE(scan_interval_msec, value);
	/* pairs with READ_ONCE() in charge_run_time() */
	WRITE_ONCE(next_charge_ns, 0);
	atomic64_set(&ns_left_to_run, 0);

	return count;
}
struct kobj_attribute sysfs_scan_interval_attr =
	__ATTR(scan_interval_msec, 0600, sysfs_scan_interval_show,
	       sysfs_scan_interval_store);

static ssize_t sysfs_scan_len_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(scan_len));
}

/*
 * Set the scan length (>= 1).  On change, reset every node's queued
 * scan counter so pending counts based on the old length do not
 * carry over.  The compare-and-update now happens under the mutex
 * (the old code raced with itself) and uses WRITE_ONCE to pair with
 * the READ_ONCE in get_swap_ahead_scan_len().
 */
static ssize_t
sysfs_scan_len_store(struct kobject *kobj, struct kobj_attribute *attr,
		     const char *buf, size_t count)
{
	int err, value, nid;

	err = kstrtoint(buf, 0, &value);
	if (err)
		return err;
	if (value < 1)
		return -EINVAL;

	mutex_lock(&swap_ahead_mutex);
	if (READ_ONCE(scan_len) != value) {
		WRITE_ONCE(scan_len, value);
		if (enabled)
			for (nid = 0; nid < MAX_NUMNODES; nid++) {
				if (tasks[nid])
					clear_scans_to_run(nid);
			}
	}
	mutex_unlock(&swap_ahead_mutex);

	return count;
}
struct kobj_attribute sysfs_scan_len_attr =
	__ATTR(scan_len_ratio, 0600, sysfs_scan_len_show,
	       sysfs_scan_len_store);

static ssize_t sysfs_cpu_limit_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(cpu_limit));
}

/*
 * Set the worker CPU budget in percent (>= 0) and drop any budget
 * accumulated under the old limit.
 */
static ssize_t
sysfs_cpu_limit_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	int err, value;

	err = kstrtoint(buf, 0, &value);
	if (err)
		return err;
	if (value < 0)
		return -EINVAL;
	if (cpu_limit == value)
		return count;

	/* pairs with READ_ONCE() in charge_run_time() */
	WRITE_ONCE(cpu_limit, value);
	atomic64_set(&ns_left_to_run, 0);

	return count;
}
struct kobj_attribute sysfs_cpu_limit_attr =
	__ATTR(scan_cpu_limit, 0600, sysfs_cpu_limit_show, sysfs_cpu_limit_store);

/* attributes exported under the swap_ahead sysfs directory */
static struct attribute *sysfs_attrs[] = {
	&sysfs_enabled_attr.attr,
	&sysfs_ratio_threshold_attr.attr,
	&sysfs_scan_interval_attr.attr,
	&sysfs_scan_len_attr.attr,
	&sysfs_cpu_limit_attr.attr,
	NULL,
};

static const struct attribute_group sysfs_attr_group = {
	.attrs = sysfs_attrs,
};

/*
 * Create the "swap_ahead" kobject under @swap_kobj and register its
 * attribute group.  Returns 0 or a negative errno; the kobject is
 * dropped on failure.
 */
int swap_ahead_sysfs_init(struct kobject *swap_kobj)
{
	struct kobject *swap_ahead_root;
	int err;

	swap_ahead_root = kobject_create_and_add("swap_ahead", swap_kobj);
	if (!swap_ahead_root) {
		pr_err("failed to create swap_ahead kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(swap_ahead_root, &sysfs_attr_group);
	if (!err)
		return 0;

	pr_err("failed to register swap_ahead group\n");
	kobject_put(swap_ahead_root);
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Boot parameter "enable_swap_default=0": new mms start with swap
 * disabled, and the enable_swap_ever_used static key is armed.
 */
static int setup_enable_swap(char *p)
{
	disable_swap_default = 0;
	if (p && !strcmp(p, "0")) {
		disable_swap_default = 1;
		static_branch_enable(&enable_swap_ever_used);
	}
	return 0;
}
early_param("enable_swap_default", setup_enable_swap);

static ssize_t
sysfs_enable_inherit_store(struct kobject *kobj, struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	bool val;

	if (sysfs_streq(buf, "1"))
		val = true;
	else if (sysfs_streq(buf, "0"))
		val = false;
	else
		return -EINVAL;

	enable_inherit = val;
	return count;
}

static ssize_t
sysfs_enable_inherit_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", enable_inherit ? 1 : 0);
}
struct kobj_attribute sysfs_enable_inherit_attr =
	__ATTR(enable_inherit, 0600, sysfs_enable_inherit_show, sysfs_enable_inherit_store);


/* Should a child mm inherit the parent's swap-disable flag at fork? */
bool swap_enable_inherit(void)
{
	return enable_inherit;
}

static ssize_t
sysfs_tmpfs_swap_store(struct kobject *kobj, struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	if (sysfs_streq(buf, "0")) {
		tmpfs_swap = false;
		/* the tmpfs no-evict check becomes reachable from now on */
		static_branch_enable(&enable_swap_ever_used);
		return count;
	}
	if (sysfs_streq(buf, "1")) {
		tmpfs_swap = true;
		return count;
	}
	return -EINVAL;
}

static ssize_t
sysfs_tmpfs_swap_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", tmpfs_swap ? 1 : 0);
}
struct kobj_attribute sysfs_tmpfs_swap_attr =
	__ATTR(tmpfs_swap, 0600, sysfs_tmpfs_swap_show, sysfs_tmpfs_swap_store);


/* May tmpfs pages be swapped out by swap_ahead? */
bool is_tmpfs_swapped(void)
{
	return tmpfs_swap;
}
#endif /* CONFIG_SYSFS */

/*
 * /proc per-task read: "1\n" when the task's mm allows swap, "0\n"
 * when MMF_DISABLE_SWAP is set.  Tasks without an mm read as empty.
 */
static ssize_t proc_enable_swap_read(struct file *file, char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	ssize_t ret = 0;
	size_t len;

	if (!task)
		return -ESRCH;

	mm = get_task_mm(task);
	if (mm) {
		int enable_swap = !test_bit(MMF_DISABLE_SWAP, &mm->flags);

		len = snprintf(buffer, sizeof(buffer), "%d\n", enable_swap);
		mmput(mm);
		ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
	}

	put_task_struct(task);
	return ret;
}

/*
 * /proc per-task write: "1" re-enables swap for the target mm, "0"
 * disables it (and arms the static key).  Any other value is -EINVAL.
 */
static ssize_t proc_enable_swap_write(struct file *file, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned int val;
	int ret;

	ret = kstrtouint_from_user(buf, count, 0, &val);
	if (ret < 0)
		return ret;
	if (val > 1)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;

	ret = -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		if (val) {
			clear_bit(MMF_DISABLE_SWAP, &mm->flags);
		} else {
			set_bit(MMF_DISABLE_SWAP, &mm->flags);
			static_branch_enable(&enable_swap_ever_used);
		}
		mmput(mm);
		ret = 0;
	}

	put_task_struct(task);
	return ret < 0 ? ret : count;
}

/* proc file hooks for the per-task enable_swap control */
const struct file_operations proc_enable_swap_operations = {
	.write	= proc_enable_swap_write,
	.read	= proc_enable_swap_read,
	.llseek	= generic_file_llseek,
};

/*
 * Initialise a child mm's flags at fork.  With inheritance enabled,
 * the parent's MMF_DISABLE_SWAP bit is carried over alongside the
 * MMF_INIT_MASK bits; otherwise the boot-time default decides it.
 * NOTE(review): assumes MMF_DISABLE_SWAP_MASK covers exactly the
 * MMF_DISABLE_SWAP bit — defined elsewhere, confirm.
 */
void swap_ahead_fork_mm(struct mm_struct *mm)
{
	if (swap_enable_inherit()) {
		mm->flags = current->mm->flags &
			(MMF_INIT_MASK | MMF_DISABLE_SWAP_MASK);
	} else {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->flags = mm->flags | (disable_swap_default << MMF_DISABLE_SWAP);
	}
}

/*
 * Initialise a fresh mm's flags: the caller-supplied dump filter plus
 * the boot-time swap-disable default in the MMF_DISABLE_SWAP bit.
 */
void swap_ahead_init_mm(struct mm_struct *mm, unsigned long default_dump_filter)
{
	mm->flags = (default_dump_filter | (disable_swap_default << MMF_DISABLE_SWAP));
}
