/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2020.
 * Description: support softlockup enhance feature
 * Author: chenshiyan <chenshiyan@huawei.com>
 * Create: 2018-08-29
 */
#include <linux/nmi.h>
#include <linux/sched/debug.h>

#ifdef CONFIG_RTOS_KBOX
#include <linux/set_kbox_region.h>
#endif
#include <linux/task_struct_extend.h>

#ifdef CONFIG_RTOS_DEBUG_SOFTLOCKUP
#ifdef CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME
#include <linux/jiffies.h>
#endif

#ifdef CONFIG_RTOS_DEBUG_LOCK_MEM_DUMP
#include <asm/traps.h>
#include <linux/hal/fiq_glue.h>
#include <linux/rtos_dump.h>
#endif

#ifdef CONFIG_RTOS_KBOX
static unsigned int deadlock_region_size;
int deadlock_region_id;
#endif

#define DEFAULT_THRESH_TIME_S	60
#define DEFAULT_THRESH_TIME_MS	0

#define RETURN_BREAK 			1

#ifdef CONFIG_RTOS_KBOX
#define MAX_UINT		(~0U)
#define MAX_LOG_SIZE		(MAX_UINT / 1024)
#endif

/* Per-CPU nesting counter: > 0 means softlockup checks are suppressed on this CPU. */
static DEFINE_PER_CPU(int, softlockup_check_disabled);
/* Notifier chain invoked from show_lock_info_with_call() on a softlockup report. */
static ATOMIC_NOTIFIER_HEAD(rtos_softlockup_chain);

/* Current softlockup threshold in seconds (MAX_THRESH_TIME_S comes from a shared header). */
int __read_mostly softlockup_thresh = MAX_THRESH_TIME_S;

/* { seconds, milliseconds } pair exposed through the watchdog_thresh sysctl below. */
int softlockup_thresh_time[RTOS_SOFTLOCKUP_THRESH_TIME_SIZE] = {
	DEFAULT_THRESH_TIME_S,
	DEFAULT_THRESH_TIME_MS
};

#define WATCHDOG_THRESH_HIGH	999
/* Bounds handed to proc_dointvec_minmax via .extra1/.extra2: [0, 999]. */
static int thresh_low;
static int thresh_high = WATCHDOG_THRESH_HIGH;

/*
 * softlockup_locking_init - reset the per-task lock-wait tracking state.
 *
 * Marks @task as currently waiting on no spinlock and no rwlock.
 */
void softlockup_locking_init(struct task_struct *task)
{
	struct rtos_task_struct *ext = task_to_rtos_task(task);

	ext->rtos_softlockup.rw_locking = NULL;
	ext->rtos_softlockup.spin_locking = NULL;
}

/*
 * softlockup_check_disable - suppress softlockup checking on this CPU.
 *
 * Disables preemption first so the per-CPU counter is incremented on the
 * CPU we stay pinned to; paired with softlockup_check_enable().  Nestable.
 */
void softlockup_check_disable(void)
{
	preempt_disable();
	__this_cpu_inc(softlockup_check_disabled);
}

/*
 * softlockup_check_enable - re-enable softlockup checking on this CPU.
 *
 * Mirror of softlockup_check_disable(): decrement while still pinned,
 * then drop the preemption disable taken there.
 */
void softlockup_check_enable(void)
{
	__this_cpu_dec(softlockup_check_disabled);
	preempt_enable();
}

/*
 * softlockup_check_ignore - return non-zero if softlockup checks are
 * currently suppressed on this CPU (disable/enable calls are nested).
 */
int softlockup_check_ignore(void)
{
	return __this_cpu_read(softlockup_check_disabled) > 0;
}

#ifdef CONFIG_RTOS_KBOX
static int proc_deadlock_region_size(struct ctl_table *table, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int tmpid;
	int ret;
	unsigned int old_deadlock_region_size;
	unsigned int region_size;

	if (!write)
		return proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	old_deadlock_region_size = deadlock_region_size;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (deadlock_region_size == 0) {
		deadlock_region_size = old_deadlock_region_size;
		return -EINVAL;
	}
	if (deadlock_region_size > MAX_LOG_SIZE) {
		pr_err("input softlockup_log_size too large!\n");
		deadlock_region_size = old_deadlock_region_size;
		return -EINVAL;
	}

	region_size = deadlock_region_size * 1024;
	tmpid = kern_kbox_register_region("deadlock", region_size, NULL);
	if (tmpid < 0) {
		deadlock_region_size = old_deadlock_region_size;
		pr_err("register deadlock region fail!\n");
		return tmpid;
	}

	deadlock_region_id = tmpid;
	return 0;
}
#endif

/*
 * rtos_softlockup_notifier_register - add @nb to the softlockup chain.
 *
 * Rejects a NULL block or a block without a callback with -EINVAL;
 * otherwise returns the result of atomic_notifier_chain_register().
 */
int rtos_softlockup_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_err("Invalid parameter, softlockup notifier register fail\n");
		return -EINVAL;
	}

	if (!nb->notifier_call) {
		pr_err("notifier_call is NULL, softlockup notifier register fail\n");
		return -EINVAL;
	}

	return atomic_notifier_chain_register(&rtos_softlockup_chain, nb);
}
EXPORT_SYMBOL_NS(rtos_softlockup_notifier_register, HW_RTOS_NS);

/*
 * rtos_softlockup_notifier_unregister - remove @nb from the softlockup
 * chain.  A NULL block is rejected with -EINVAL.
 */
int rtos_softlockup_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_err("softlockup notifier unregister fail\n");
		return -EINVAL;
	}

	return atomic_notifier_chain_unregister(&rtos_softlockup_chain, nb);
}
EXPORT_SYMBOL_NS(rtos_softlockup_notifier_unregister, HW_RTOS_NS);

/* Run the registered softlockup notifiers, passing the hung task @tsk. */
static int rtos_softlockup_call_chain(struct task_struct *tsk)
{
	return atomic_notifier_call_chain(&rtos_softlockup_chain, 0, tsk);
}

/*
 * show_spinlock_info - report which task owns the spinlock that *pp waits on.
 *
 * Prints the waiter/owner relation and the waiter's stack, then advances
 * *pp to the owner so the caller can walk the ownership chain.  Returns
 * RETURN_BREAK when the chain cannot be followed (no owner recorded, or
 * the recorded owner appears to have exited); 0 otherwise.
 *
 * NOTE(review): owner fields are read without taking a reference on the
 * owner task; the pid cross-check below mitigates but does not fully close
 * the exit race -- acceptable for best-effort debug output, verify if reused.
 */
static int show_spinlock_info(raw_spinlock_t *lock, rwlock_t *rwlock, struct task_struct **pp)
{
	struct task_struct *p = *pp;
	struct task_struct *owner = NULL;
	struct task_struct *tmptsk = NULL;

	owner = lock->spinlock_owner.owner_task;
	if (!owner) {
		pr_info("[CPU#%d] ERROR:the lock's owner is NULL!\n", task_cpu(p));
		sched_show_task(p);
		return RETURN_BREAK;
	}

	/* owner_pid == 0: owner recorded before its pid was known; trust owner_task. */
	if (lock->spinlock_owner.owner_pid == 0) {
		pr_info("[CPU#%d] the task [%s] is waiting for lock: [0x%p] (owner_pid=0), ",
			task_cpu(p), p->comm, lock);
		pr_info("this lock's owner task is : pid=%d ,comm=%s\n",
			owner->pid, owner->comm);
		sched_show_task(p);
		*pp = owner;
		if (rwlock)
			pr_info("[CPU#%d] INFO:the rwlock info may recorded by ksnapshot\n",
				task_cpu(p));
		return 0;
	}

	/* Validate that the recorded pid still names the recorded owner task. */
	rcu_read_lock();
	tmptsk = find_task_by_vpid(lock->spinlock_owner.owner_pid);
	if (!tmptsk || tmptsk->pid != lock->spinlock_owner.owner_pid ||
		lock->spinlock_owner.owner_pid != owner->pid) {
		rcu_read_unlock();
		pr_info("[CPU#%d] ERROR:the task [%s] is waiting for lock: [0x%p], ",
			task_cpu(p), p->comm, lock);
		pr_info("but the lock's owner task maybe have exited!\n");
		sched_show_task(p);
		return RETURN_BREAK;
	}
	rcu_read_unlock();
	pr_info("[CPU#%d] the task [%s] is waiting for lock: [0x%p], ",
		task_cpu(p), p->comm, lock);
	pr_info("this lock's owner task is : pid=%d ,comm=%s\n", owner->pid,
		owner->comm);

	sched_show_task(p);
	/* Continue the chain walk from the owner. */
	*pp = owner;

	if (rwlock)
		pr_info("[CPU#%d] INFO:the rwlock info may recorded by ksnapshot\n",
			task_cpu(p));
	return 0;
}

/*
 * show_rwlock_info - rwlock counterpart of show_spinlock_info().
 *
 * Prints the waiter/owner relation for the rwlock *pp waits on, dumps the
 * waiter's stack, and advances *pp to the owner.  Returns RETURN_BREAK when
 * the chain cannot be followed; 0 otherwise.  Same owner-exit race caveat
 * as show_spinlock_info().
 */
static int show_rwlock_info(rwlock_t *rwlock, struct task_struct **pp)
{
	struct task_struct *tmptsk = NULL;
	struct task_struct *owner = NULL;
	struct task_struct *p = *pp;

	owner = rwlock->rwlock_owner.owner_task;
	if (!owner) {
		pr_info("[CPU#%d] ERROR:the rwlock's owner is NULL!\n",
			task_cpu(p));
		sched_show_task(p);
		return RETURN_BREAK;
	}

	/* owner_pid == 0: owner recorded before its pid was known; trust owner_task. */
	if (rwlock->rwlock_owner.owner_pid == 0) {
		pr_info("[CPU#%d] the task [%s] is waiting for rwlock: [0x%p] (owner_pid=0), ",
			task_cpu(p), p->comm, rwlock);
		pr_info("this rwlock's owner task is : pid=%d ,comm=%s\n",
			owner->pid, owner->comm);
		sched_show_task(p);
		*pp = owner;
		return 0;
	}

	/* Validate that the recorded pid still names the recorded owner task. */
	rcu_read_lock();
	tmptsk = find_task_by_vpid(rwlock->rwlock_owner.owner_pid);
	if (!tmptsk || tmptsk->pid != rwlock->rwlock_owner.owner_pid ||
	    rwlock->rwlock_owner.owner_pid != owner->pid) {
		rcu_read_unlock();
		pr_info("[CPU#%d] ERROR:the task [%s] is waiting for rwlock: [0x%p], ",
			task_cpu(p), p->comm, rwlock);
		pr_info("but the rwlock's owner task maybe have exited!\n");
		sched_show_task(p);
		return RETURN_BREAK;
	}
	rcu_read_unlock();
	pr_info("[CPU#%d] the task [%s] is waiting for rwlock: [0x%p], ",
		task_cpu(p), p->comm, rwlock);
	pr_info("this rwlock's owner task is : pid=%d ,comm=%s\n", owner->pid,
		owner->comm);

	sched_show_task(p);
	/* Continue the chain walk from the owner. */
	*pp = owner;

	return 0;
}

/*
 * softlockup_dump_lock_mem - dump the raw memory of the lock(s) a task is
 * waiting on.  Only active when CONFIG_RTOS_DEBUG_LOCK_MEM_DUMP is set and
 * the caller indicates a panic-path dump (@is_panic non-zero); otherwise a
 * no-op.
 */
static void softlockup_dump_lock_mem(unsigned int is_panic, raw_spinlock_t *lock, rwlock_t *rwlock)
{
#ifdef CONFIG_RTOS_DEBUG_LOCK_MEM_DUMP
	if (!is_panic)
		return;

	if (lock) {
		pr_info("spinlock addr:0x%p\n", &lock->raw_lock);
		rtos_dump_lock_mem((uintptr_t)&lock->raw_lock);
	}
	if (rwlock) {
		pr_info("rwlock addr:0x%p\n", &rwlock->raw_lock);
		rtos_dump_lock_mem((uintptr_t)&rwlock->raw_lock);
	}
#endif
}

/*
 * show_lock_info - walk and print the lock ownership chain starting at @tsk.
 *
 * Starting from the hung task, repeatedly prints which lock the current
 * task waits on and hops to that lock's owner.  The walk is bounded by
 * num_online_cpus() iterations (a longer dependency chain than CPUs implies
 * a cycle or stale data) and stops early when a task waits on no lock or
 * the chain breaks.  Runs under tasklist_lock to keep tasks from vanishing.
 * @is_panic is forwarded to softlockup_dump_lock_mem().
 */
void show_lock_info(struct task_struct *tsk, unsigned int is_panic)
{
	int i;
	int cpunum = num_online_cpus();
	struct task_struct *p = NULL;
	raw_spinlock_t *lock = NULL;
	rwlock_t *rwlock = NULL;
	int ret;

	p = tsk;
	/* An exiting task's tracking state is unreliable; skip the walk. */
	if (p->flags & PF_EXITING)
		return;

	read_lock(&tasklist_lock);
	for (i = 0; i < cpunum; i++) {
		lock = task_to_rtos_task(p)->rtos_softlockup.spin_locking;
		rwlock = task_to_rtos_task(p)->rtos_softlockup.rw_locking;

		pr_info("------------------CPU#%d-----------------------------------\n",
			task_cpu(p));
		softlockup_dump_lock_mem(is_panic, lock, rwlock);
		if (!lock && !rwlock) {
			pr_info("[CPU#%d] the task [%s] is not waiting for a lock,maybe a delay or deadcircle!\n",
				task_cpu(p), p->comm);
			sched_show_task(p);
			break;
		}

		/*
		 * If spinlock exist dead lock, we will only handle spinlock.
		 * Otherwise we handle rwlock.
		 */
		if (lock)
			ret = show_spinlock_info(lock, rwlock, &p);
		else
			ret = show_rwlock_info(rwlock, &p);

		if (ret == RETURN_BREAK)
			break;
	}
	read_unlock(&tasklist_lock);
}

/*
 * show_lock_info_with_call - full softlockup report for hung task @tsk.
 *
 * Prints the lock ownership chain (see show_lock_info()) and then runs the
 * registered softlockup notifier chain.  @is_panic is forwarded to the lock
 * memory dump.  Any notifier result other than NOTIFY_DONE is logged as a
 * failure.
 */
void show_lock_info_with_call(struct task_struct *tsk, unsigned int is_panic)
{
	int ret;

	pr_info("=====================SOFTLOCKUP INFO BEGIN=======================\n");
	show_lock_info(tsk, is_panic);
	pr_info("=====================SOFTLOCKUP CALL CHAIN START=================\n");
	ret = rtos_softlockup_call_chain(tsk);
	if (ret != NOTIFY_DONE)
		/* Fix: the format string was missing its trailing newline. */
		pr_err("run rtos_softlockup_call_chain fail, return value:[0x%x]\n", ret);
	pr_info("=====================SOFTLOCKUP CALL CHAIN END===================\n");
	pr_info("=====================SOFTLOCKUP INFO END=========================\n");
}

/*
 * show_softlockup_info - one-line summary of the lock(s) task @t waits on.
 *
 * For each of the task's tracked spinlock and rwlock, prints the waiter,
 * the lock address, the recorded owner, and (with
 * CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME) how long the owner has held it in ms.
 * NULL @t is rejected with an error message.
 */
void show_softlockup_info(struct task_struct *t)
{
	struct task_struct *task = NULL;
	struct rtos_task_struct *rtos_tsk = NULL;
	raw_spinlock_t *lock = NULL;
	rwlock_t *rwlock = NULL;

	if (!t) {
		pr_err("%s\n", "show_softlockup_info: task is NULL!");
		return;
	}

	rtos_tsk = task_to_rtos_task(t);
	lock = rtos_tsk->rtos_softlockup.spin_locking;
	rwlock = rtos_tsk->rtos_softlockup.rw_locking;

	if (lock) {
		task = lock->spinlock_owner.owner_task;
#ifdef CONFIG_RTOS_DEBUG_LOCK_MEM_DUMP
		/* Dump raw lock memory only when called from FIQ context. */
		if (entry_fiq_mode == true) {
			pr_info("spinlock addr:0x%p\n", &lock->raw_lock);
			rtos_dump_lock_mem((uintptr_t)&lock->raw_lock);
		}
#endif
	}

	if (lock != NULL && task != NULL) {
#ifdef CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME
		pr_info("%s %d %p %s %d %u\t\n", t->comm, t->pid, lock, task->comm, task->pid,
			(jiffies_to_msecs(jiffies) - jiffies_to_msecs(lock->spinlock_owner.owner_time)));
#else
		pr_info("%s %d %p %s %d\t\n", t->comm, t->pid, lock, task->comm, task->pid);
#endif
	}

	task = NULL;
	if (rwlock)
		task = rwlock->rwlock_owner.owner_task;

	if (rwlock != NULL && task != NULL) {
		/* Fix: print the rwlock pointer here; this branch used to print
		 * the (possibly NULL) spinlock pointer by copy-paste mistake.
		 */
#ifdef CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME
		pr_info("rwlock info: %s %d %p %s %d %u\t\n", t->comm, t->pid, rwlock, task->comm,
			task->pid, (jiffies_to_msecs(jiffies) - jiffies_to_msecs(rwlock->rwlock_owner.owner_time)));
#else
		pr_info("rwlock info: %s %d %p %s %d\t\n", t->comm, t->pid, rwlock, task->comm,
			task->pid);
#endif
	}
}
EXPORT_SYMBOL_GPL(show_softlockup_info);

/*
 * set_current_locking - record that current is now waiting on spinlock @lock.
 *
 * Returns the spinlock the task was previously waiting on so the caller can
 * restore it later (see clear_current_locking()).
 */
raw_spinlock_t *__lockfunc_attr set_current_locking(raw_spinlock_t *lock)
{
	struct rtos_task_struct *ext = task_to_rtos_task(current);
	raw_spinlock_t *prev = ext->rtos_softlockup.spin_locking;

	ext->rtos_softlockup.spin_locking = lock;
	return prev;
}
EXPORT_SYMBOL(set_current_locking);

#ifdef CONFIG_SMP
/*
 * check_owner - verify that the task releasing @lock is its recorded owner.
 *
 * Emits a BUG report with both tasks' identities and a stack dump when the
 * releaser does not match.  Fix: the recorded owner may legitimately be
 * NULL (the rest of this file handles that state explicitly), so guard the
 * owner->comm dereference instead of crashing inside the diagnostic.
 */
inline void __lockfunc_attr check_owner(raw_spinlock_t *lock)
{
	struct task_struct *owner = lock->spinlock_owner.owner_task;

	if (current != owner) {
		pr_emerg("BUG: spinlock 0x%p on CPU#%d, %s/%d, owner %s/%d\n",
			 lock, raw_smp_processor_id(), current->comm, task_pid_nr(current),
			 owner ? owner->comm : "<null>", lock->spinlock_owner.owner_pid);
		dump_stack();
	}
}
EXPORT_SYMBOL(check_owner);
#endif

/*
 * clear_current_locking - current has acquired @lock; stop tracking the wait.
 *
 * Records current as the owner of @lock and restores @prelock (the lock the
 * task was waiting on before, as returned by set_current_locking()) as the
 * task's tracked wait target.
 */
inline void __lockfunc_attr clear_current_locking(raw_spinlock_t *lock, raw_spinlock_t *prelock)
{
	struct rtos_task_struct *ext = task_to_rtos_task(current);

	/* Mark current as the new owner of @lock. */
	lock->spinlock_owner.owner_task = current;
	lock->spinlock_owner.owner_pid = current->pid;
#ifdef CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME
	lock->spinlock_owner.owner_time = jiffies;
#endif
	/* Restore the outer wait target saved by set_current_locking(). */
	ext->rtos_softlockup.spin_locking = prelock;
}
EXPORT_SYMBOL(clear_current_locking);

/*
 * set_lock_owner - record current as the owner of spinlock @lock
 * (task, pid, and — when enabled — the acquisition time in jiffies).
 */
inline void __lockfunc_attr set_lock_owner(raw_spinlock_t *lock)
{
	lock->spinlock_owner.owner_task = current;
	lock->spinlock_owner.owner_pid = current->pid;
#ifdef CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME
	lock->spinlock_owner.owner_time = jiffies;
#endif
}
EXPORT_SYMBOL(set_lock_owner);

/*
 * set_current_rw_locking - record that current is now waiting on rwlock
 * @lock.  Returns the previously tracked rwlock for later restoration via
 * clear_current_rw_locking().
 */
rwlock_t *__lockfunc_attr set_current_rw_locking(rwlock_t *lock)
{
	struct rtos_task_struct *ext = task_to_rtos_task(current);
	rwlock_t *prev = ext->rtos_softlockup.rw_locking;

	ext->rtos_softlockup.rw_locking = lock;
	return prev;
}
EXPORT_SYMBOL(set_current_rw_locking);

/*
 * clear_current_rw_locking - current stopped waiting on an rwlock; restore
 * @prelock (as returned by set_current_rw_locking()) as the tracked target.
 */
inline void __lockfunc_attr clear_current_rw_locking(rwlock_t *prelock)
{
	struct rtos_task_struct *rtos_tsk = task_to_rtos_task(current);

	rtos_tsk->rtos_softlockup.rw_locking = prelock;
}
EXPORT_SYMBOL(clear_current_rw_locking);

/*
 * set_rw_lock_owner - record current as the owner of rwlock @lock
 * (task, pid, and — when enabled — the acquisition time in jiffies).
 */
inline void __lockfunc_attr set_rw_lock_owner(rwlock_t *lock)
{
	lock->rwlock_owner.owner_task = current;
	lock->rwlock_owner.owner_pid = current->pid;
#ifdef CONFIG_RTOS_SOFTLOCKUP_OWNER_TIME
	lock->rwlock_owner.owner_time = jiffies;
#endif
}
EXPORT_SYMBOL(set_rw_lock_owner);

/* watchdog_thresh carries a {seconds, milliseconds} pair, hence length 2. */
#define MAX_NTFS_SYSCTL_LEN 2
static struct ctl_table_header *sysctls_root_table;
/* Entries created under /proc/sys/kernel/. */
static struct ctl_table softlockup_tables[] = {
	{
		/* Softlockup threshold; validated by proc_softlockup_thresh
		 * (declared elsewhere) against [thresh_low, thresh_high]. */
		.procname   = "watchdog_thresh",
		.data       = softlockup_thresh_time,
		.maxlen     = MAX_NTFS_SYSCTL_LEN * sizeof(int),
		.mode       = 0640,
		.proc_handler = proc_softlockup_thresh,
		.extra1     = &thresh_low,
		.extra2     = &thresh_high,
	},
#ifdef CONFIG_RTOS_KBOX
	{
		/* Deadlock-log kbox region size in KiB; see
		 * proc_deadlock_region_size() above. */
		.procname       = "softlockup_log_size",
		.data           = &deadlock_region_size,
		.maxlen         = sizeof(int),
		.mode           = 0640,
		.proc_handler   = proc_deadlock_region_size,
		.extra1         = &thresh_low,
	},
#endif
	{}
};

/* Define the parent directory /proc/sys/kernel.
 * NOTE(review): the ctl_table .child mechanism was removed in recent
 * mainline kernels — confirm the target kernel version still supports it.
 */
static struct ctl_table sysctls_root[] = {
	{
		.procname   = "kernel",
		.mode       = 0555,
		.child      = softlockup_tables
	},
	{}
};

/*
 * softlockup_proc_init - module init: register the sysctl tree and, when
 * grading is configured, its proc interface.  Returns -ENOMEM if the sysctl
 * registration fails.
 */
static int __init softlockup_proc_init(void)
{
	sysctls_root_table = register_sysctl_table(sysctls_root);
	if (sysctls_root_table == NULL)
		return -ENOMEM;

#ifdef CONFIG_RTOS_DEBUG_SOFTLOCKUP_GRADING
	softlockup_grading_proc_init();
#endif
	return 0;
}

MODULE_LICENSE("GPL");
module_init(softlockup_proc_init);
#else
/*
 * CONFIG_RTOS_DEBUG_SOFTLOCKUP disabled: keep the exported symbol so
 * external callers still link, but make it a no-op.
 */
void show_softlockup_info(struct task_struct *t)
{
}
EXPORT_SYMBOL_GPL(show_softlockup_info);
#endif
