/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2021.
 * Description: support tasklock feature
 * Author: fanglinxu <fanglinxu@huawei.com>
 * Create: 2018-08-29
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/time.h>
#include <linux/tasklock.h>
#include <linux/task_struct_extend.h>
#include <linux/miscdevice.h>
#include "sched/sched.h"

/* Real-time signal delivered to a task whose lock-hold time expires */
#define SIGNAL_TASKLOCK_TIMEOUT		(SIGRTMIN + 16)
/* Shift for approximate ns -> ms conversion (>> 20 == divide by 1048576) */
#define NS_TO_MS_OFFSET_BITS		20

#define tasklock_err(err_info, ...)		pr_err("tasklock: " err_info, ##__VA_ARGS__)
#define tasklock_info(info, ...)		pr_info("tasklock: " info, ##__VA_ARGS__)

/* Bounds for the sched_preempt_disable_timeout sysctl (milliseconds) */
static unsigned long min_timeout_ms;
static unsigned long max_timeout_ms = HZ > 1000 ? ULONG_MAX / HZ * 1000 : ULONG_MAX;
/* Process-wide timeout applied when mode == TASKLOCK_ENABLE_PROC_TIMEOUT */
static unsigned long preempt_disable_timeout_ms;
/* Feature switch (sysctl sched_preempt_disable): 1 = tasklock off (default) */
static int no_tasklock = 1;

#ifdef CONFIG_RTOS_TASKLOCK_NOTIFIERS
/* Notifier chain fired on every return to userspace (see ret_to_user_hook) */
BLOCKING_NOTIFIER_HEAD(return_user_notifier);
EXPORT_SYMBOL(return_user_notifier);

/* Arch return-to-user hook: invoke all registered return_user notifiers. */
void ret_to_user_hook(void)
{
	blocking_notifier_call_chain(&return_user_notifier, 0, NULL);
}
#endif

struct tasklock_info *get_tasklock_page(struct task_struct *tsk)
{
	struct rtos_task_struct *rtos_task;

	rtos_task = task_to_rtos_task(tsk);
	return rtos_task->tasklock.tasklock_ctrl_page;
}
EXPORT_SYMBOL(get_tasklock_page);

void set_tasklock_page(struct task_struct *tsk, struct tasklock_info *page)
{
	struct rtos_task_struct *rtos_task;

	rtos_task = task_to_rtos_task(tsk);
	rtos_task->tasklock.tasklock_ctrl_page = page;
}

/*
 * tasklock_get_timeout_threshold() - pick the timeout that applies to @tsk
 *
 * In TASKLOCK_ENABLE_PROC_TIMEOUT mode the sysctl-controlled process-wide
 * value wins; otherwise the task's own custom timeout is used.
 */
static inline unsigned long long tasklock_get_timeout_threshold(struct task_struct *tsk)
{
	if (tasklock_mode(tsk) == TASKLOCK_ENABLE_PROC_TIMEOUT)
		return preempt_disable_timeout_ms;

	return tasklock_custom_timeout(tsk);
}

/*
 * tasklock_timeout_check() - enforce the preempt-disable timeout on a task
 * @tsk:     task currently holding the userspace task lock
 * @timeout: allowed hold time in (approximate) milliseconds
 *
 * Compares elapsed wall-clock time since tasklock_locktime() against
 * @timeout. On expiry the lock is force-released (mode set to
 * TASKLOCK_DISABLE, timestamp cleared), the task's registers are dumped
 * and SIGNAL_TASKLOCK_TIMEOUT is sent to it.
 *
 * Returns 1 when the timeout fired, 0 otherwise.
 *
 * NOTE(review): the ns->ms conversion is >> 20 (divide by 1048576), so
 * the effective timeout is ~4.8% longer than the nominal millisecond
 * value — presumably an intentional fast-path approximation; confirm.
 * NOTE(review): ktime_get_real_ts64() is wall-clock time; a backwards
 * clock step makes "now - disable_start" wrap to a huge unsigned value
 * and fire the timeout immediately — confirm monotonic time was not
 * intended.
 */
static int tasklock_timeout_check(struct task_struct *tsk, unsigned long long timeout)
{
	struct timespec64 current_time;
	unsigned long long now, offset, disable_start;
	struct pt_regs *regs = NULL;

	disable_start = tasklock_locktime(tsk);
	ktime_get_real_ts64(&current_time);
	now = (unsigned long long)timespec64_to_ns(&current_time);
	/* Approximate ns -> ms via right shift (see NOTE above). */
	offset = (now - disable_start) >> NS_TO_MS_OFFSET_BITS;

	if (offset >= timeout) {
		/* Force-release the lock and flag the offending task. */
		tasklock_locktime(tsk) = 0;
		tasklock_mode(tsk) = TASKLOCK_DISABLE;
		tasklock_info("PREEMPT_DISABLE_TIMEOUT comm:%s  pid:%d  tgid:%d\n",
					tsk->comm, tsk->pid, tsk->tgid);
		tasklock_info("disable_start:%llu now:%llu offset:%llu\n", disable_start, now, offset);
		regs = task_pt_regs(tsk);
		show_regs(regs);
		send_sig(SIGNAL_TASKLOCK_TIMEOUT, tsk, 1);
		return 1;
	}

	return 0;
}

/*
 * tasklock_task_fork() - give a newly forked task its own tasklock page
 * @tsk: child task being set up
 *
 * If the thread-group leader already has a tasklock control page, the
 * child gets one of its own with every field cleared.
 *
 * Allocates with get_zeroed_page() (matching the sched_ctrl_us_mmap()
 * allocation path) instead of __get_free_page(): the previous code only
 * cleared six named fields, leaving the rest of the page as stale kernel
 * memory even though this is the same kind of control page userspace
 * maps via /dev/sched_ctrl.
 */
void tasklock_task_fork(struct task_struct *tsk)
{
	unsigned long page;

	if (no_tasklock)
		return;

	if (get_tasklock_page(tsk->group_leader) == NULL)
		return;

	page = get_zeroed_page(GFP_KERNEL);
	if (unlikely(!page))
		return;

	SetPageReserved(virt_to_page(page));
	/* Page is zero-filled, so mode/locktime/custom_timeout/set_tif/
	 * task_status_schedule/nivcsw all start out cleared. */
	set_tasklock_page(tsk, (struct tasklock_info *)page);
}

void tasklock_task_exit(struct task_struct *tsk)
{
	void *page;

	page = (void *)get_tasklock_page(tsk);
	if (page != NULL) {
		set_tasklock_page(tsk, NULL);
		ClearPageReserved(virt_to_page(page));
		free_page((uintptr_t)page);
	}
}

void tasklock_sched_tick(void)
{
	struct task_struct *current_tsk;
	unsigned long long timeout;

	if (no_tasklock)
		return;

	current_tsk = current;
	if (get_tasklock_page(current_tsk) == NULL)
		return;

	if (tasklock_mode(current_tsk) == TASKLOCK_DISABLE &&
		tasklock_set_tif(current_tsk) == 1) {
			tasklock_set_tif(current_tsk) = 0;
			set_thread_flag(TIF_NEED_RESCHED);
	}

	if (tasklock_mode(current_tsk) != TASKLOCK_DISABLE) {
		timeout = tasklock_get_timeout_threshold(current_tsk);
		if (timeout && tasklock_locktime(current_tsk))
			tasklock_timeout_check(current_tsk, timeout);
	}
}

/*
 * tasklock_skip_sched() - decide whether current may skip a schedule
 * @preempt: true when called from the preemption (involuntary) path
 *
 * Returns true when the running, non-exiting task holds the userspace
 * task lock (mode != TASKLOCK_DISABLE) and the reschedule should be
 * suppressed. In that case a pending TIF_NEED_RESCHED is remembered in
 * the control page so tasklock_sched_tick() can replay it after the
 * lock is dropped. Returns false when scheduling must proceed normally.
 *
 * Runs with preemption and local interrupts disabled around the whole
 * decision so the flag juggling cannot race with an interrupt.
 */
bool tasklock_skip_sched(bool preempt)
{
	unsigned long flags;
	struct rq *rq = NULL;
	long current_state;
	struct task_struct *current_tsk;
	unsigned long long timeout;

	if (no_tasklock)
		return false;

	preempt_disable();
	local_irq_save(flags);

	current_tsk = current;
	if (get_tasklock_page(current_tsk) == NULL)
		goto us_ctrl_need_resched;

	current_state = current_tsk->state;
	if ((current_tsk->exit_state == 0) &&
		(tasklock_mode(current_tsk) != TASKLOCK_DISABLE) &&
		((current_state == TASK_RUNNING) || preempt)) {
		timeout = tasklock_get_timeout_threshold(current_tsk);
		if (timeout != 0) {
			/* No lock timestamp yet: nothing to time out against. */
			if (!(tasklock_locktime(current_tsk)))
				goto save_task_status;
			/* Held too long: lock was force-released, don't skip. */
			if (tasklock_timeout_check(current_tsk, timeout))
				goto clear_tif_flag;
		}

		/* Defer a pending reschedule instead of honoring it now. */
		if (tif_need_resched()) {
			clear_thread_flag(TIF_NEED_RESCHED);
			tasklock_set_tif(current_tsk) = 1;
		} else {
			tasklock_set_tif(current_tsk) = 0;
		}

		rq = this_rq();
		rq->clock_update_flags &= ~(RQCF_ACT_SKIP | RQCF_REQ_SKIP);
		local_irq_restore(flags);
		preempt_enable_no_resched();
		return true;
	}

clear_tif_flag:
	tasklock_set_tif(current_tsk) = 0;
save_task_status:
	/*
	 * if task was not skipped, record the state for distinguish the switch
	 * counts in tasklock_switch_count_collect
	 */
	tasklock_task_status_schedule(current_tsk) = current_state;
us_ctrl_need_resched:
	local_irq_restore(flags);
	preempt_enable_no_resched();

	/* Was "return 0" on a bool function; use false for consistency. */
	return false;
}

/*
 * tasklock_switch_count_collect() - record voluntary switch on nonvoluntary_ctxt_switches
 */
void tasklock_switch_count_collect(unsigned long **addr, struct task_struct *tsk)
{
	struct task_struct *current_tsk;

	if (no_tasklock)
		return;

	current_tsk = current;
	if (get_tasklock_page(current_tsk) == NULL)
		return;

	if (tasklock_mode(current_tsk) != TASKLOCK_DISABLE &&
		(*addr == &tsk->nivcsw) &&
		(tasklock_task_status_schedule(current_tsk) != TASK_RUNNING)) {
		tasklock_nivcsw(tsk) += 1;
		tasklock_task_status_schedule(current_tsk) = 0;
	}
}

void tasklock_update_sched(void)
{
	struct task_struct *current_tsk;
	unsigned long long timeout;
	if (no_tasklock)
		return;

	current_tsk = current;
	if ((get_tasklock_page(current_tsk) != NULL) &&
		(current_tsk->exit_state == 0) &&
		(current_tsk->state == TASK_RUNNING) &&
		(tasklock_mode(current_tsk) != TASKLOCK_DISABLE)) {
		timeout = tasklock_get_timeout_threshold(current_tsk);
		if (timeout && tasklock_locktime(current_tsk))
			tasklock_timeout_check(current_tsk, timeout);
	}
}

/* /proc/sys/kernel entries controlling the tasklock feature */
static struct ctl_table tasklock_sysctls[] = {
	{
		/* Process-wide hold timeout in ms; 0 skips timeout checking */
		.procname	= "sched_preempt_disable_timeout",
		.data		= &preempt_disable_timeout_ms,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0640,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= (unsigned long *)&min_timeout_ms,
		.extra2		= (unsigned long *)&max_timeout_ms,
	},
	{
		/* Feature switch backing no_tasklock: 1 disables tasklock */
		.procname       = "sched_preempt_disable",
		.data           = &no_tasklock,
		.maxlen         = sizeof(int),
		.mode           = 0640,
		.proc_handler	= proc_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
	{}
};

/* Handle returned by register_sysctl() for the /proc/sys/kernel entries */
static struct ctl_table_header *sysctls_root_table;

/*
 * tasklock_proc_init() - init proc interface
 */
/*
 * tasklock_proc_init() - register the tasklock sysctl entries
 *
 * Returns 0 on success, -ENOMEM when registration fails.
 */
static int tasklock_proc_init(void)
{
	sysctls_root_table = register_sysctl("kernel", tasklock_sysctls);

	return sysctls_root_table ? 0 : -ENOMEM;
}

/*
 * sched_ctrl_us_mmap() - mmap handler for /dev/sched_ctrl
 * @filp: open file (unused)
 * @vma:  requested userspace mapping; must be exactly one page
 *
 * Lazily allocates the calling task's tasklock control page (plus the
 * thread-group leader's, if missing, so future forks inherit the
 * feature — see tasklock_task_fork()) and maps the caller's page into
 * the VMA.
 *
 * Returns 0 on success, -EACCES when tasklock is disabled, -ENXIO for a
 * bad mapping size, -ENOMEM on allocation failure, -EAGAIN on remap
 * failure.
 */
static int sched_ctrl_us_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long kernel_addr;
	unsigned long pfn;
	unsigned long vmsize;
	struct task_struct *current_tsk;

	if (no_tasklock) {
		pr_warn("unable to map while tasklock disabled.\n");
		return -EACCES;
	}

	/*
	 * Validate the size before touching any state: checking after
	 * allocation (as before) tore down an already-established control
	 * page on an invalid request.
	 */
	vmsize = vma->vm_end - vma->vm_start;
	if (vmsize != PAGE_SIZE) {
		tasklock_err("mmap failed(vmsize != PAGE_SIZE)\n");
		return -ENXIO;
	}

	current_tsk = current;
	if (get_tasklock_page(current_tsk) == NULL) {
		kernel_addr = get_zeroed_page(GFP_KERNEL);
		if (!kernel_addr) {
			tasklock_err("Allocate memory failed\n");
			tasklock_task_exit(current_tsk);
			return -ENOMEM;
		}
		SetPageReserved(virt_to_page(kernel_addr));
		set_tasklock_page(current_tsk, (struct tasklock_info *)kernel_addr);

		if (get_tasklock_page(current_tsk->group_leader) == NULL) {
			kernel_addr = get_zeroed_page(GFP_KERNEL);
			if (!kernel_addr) {
				tasklock_err("Allocate memory failed\n");
				tasklock_task_exit(current_tsk);
				return -ENOMEM;
			}
			SetPageReserved(virt_to_page(kernel_addr));
			set_tasklock_page(current_tsk->group_leader, (struct tasklock_info *)kernel_addr);
		}
	}

	pfn = virt_to_pfn(get_tasklock_page(current_tsk));
	if (remap_pfn_range(vma, vma->vm_start, pfn, vmsize, vma->vm_page_prot)) {
		tasklock_task_exit(current_tsk);
		tasklock_err("mmap failed(remap_pfn_range memory failed)\n");
		return -EAGAIN;
	}

	return 0;
}

/*
 * proc_pid_tasklock_status() - /proc/<pid> view of a task's tasklock state
 * @m:    seq_file to print into
 * @ns:   pid namespace (unused, required by the proc interface)
 * @pid:  pid handle (unused, required by the proc interface)
 * @task: task whose tasklock state is reported
 *
 * Tasks without a control page report defaults: lock disabled, zero
 * timestamp, timeout and switch count. Note "preempt_status: enabled"
 * means preemption is enabled, i.e. the task lock is NOT held.
 */
int proc_pid_tasklock_status(struct seq_file *m,
	struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
{
	unsigned long long status = TASKLOCK_DISABLE;
	unsigned long long timestamp = 0;
	unsigned long long nivcsw = 0;
	unsigned long long custom_timeout = 0;

	if (get_tasklock_page(task) != NULL) {
		status = tasklock_mode(task);
		timestamp = tasklock_locktime(task);
		nivcsw = tasklock_nivcsw(task);
		custom_timeout = tasklock_custom_timeout(task);
	}

	seq_printf(m, "preempt_status: \t%s\n"
			"start_time(ns): \t%llu\n"
			"custom_timeout(ms): \t%llu\n"
			"tasklock_nivcsw: \t%llu\n",
			(status == TASKLOCK_DISABLE) ? "enabled" : "disabled",
			timestamp,
			custom_timeout,
			nivcsw);

	return 0;
}

/*
 * File operations for /dev/sched_ctrl: only mmap() is supported, which
 * hands the calling task its tasklock control page.
 * NOTE(review): no EXPORT_SYMBOL and the only visible user is
 * sched_ctrl_misc_dev below — this could likely be `static const`;
 * confirm no external extern declarations before changing linkage.
 */
struct file_operations sched_ctrl_us_fops = {
	.owner = THIS_MODULE,
	.mmap = sched_ctrl_us_mmap,
};

/* Misc character device /dev/sched_ctrl (dynamically assigned minor) */
static struct miscdevice sched_ctrl_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sched_ctrl",
	.fops = &sched_ctrl_us_fops,
};

/*
 * sched_ctrl_us_dev_init() - register the sched_ctrl device and sysctls
 *
 * Registers the misc device first, then the sysctl entries; on sysctl
 * failure the device is deregistered so the module leaves no half-set-up
 * state behind. Returns 0 on success or a negative errno.
 */
static int __init sched_ctrl_us_dev_init(void)
{
	int ret = misc_register(&sched_ctrl_misc_dev);

	if (ret < 0) {
		tasklock_err("sched_ctrl create failed.\n");
		return ret;
	}
	tasklock_info("sched_ctrl created.\n");

	ret = tasklock_proc_init();
	if (ret < 0) {
		tasklock_err("tasklock_proc_init fail, Error: %d\n", ret);
		misc_deregister(&sched_ctrl_misc_dev);
	}

	return ret;
}

late_initcall(sched_ctrl_us_dev_init);
