/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2020.
 * Description: support softlockup enhance feature
 * Author: chenshiyan <chenshiyan@huawei.com>
 * Create: 2018-08-29
 */
#ifndef __RTOS_SOFTLOCKUP_H__
#define __RTOS_SOFTLOCKUP_H__

#ifdef CONFIG_RTOS_DEBUG_SOFTLOCKUP
#include <linux/threads.h>
#include <linux/rwlock.h>

#ifdef CONFIG_RTOS_DEBUG_SOFTLOCKUP_GRADING
#include <linux/rtos_softlockup_grading.h>
#endif

/* Bounds for the softlockup threshold (tuned via proc_softlockup_thresh).
 * NOTE(review): MIN is suffixed _MS (100 ms) while MAX is suffixed _S yet
 * holds 60000 — this looks like 60 s expressed in milliseconds; confirm
 * the units at the point of use. */
#define MIN_THRESH_TIME_MS	100
#define MAX_THRESH_TIME_S	60000

/*
 * __lockfunc_attr tags the lock-tracking helpers by placing them in the
 * .spinlock.text section; in the SMP + OPTIMIZE configuration it expands
 * to nothing (and <asm/current.h> is pulled in for the 'current' fast
 * path used in do_raw_spin_unlock()).
 */
#if defined(CONFIG_RTOS_DEBUG_SOFTLOCKUP_OPTIMIZE) && defined(CONFIG_SMP)
#include <asm/current.h>
#define __lockfunc_attr
#else
#define __lockfunc_attr __attribute__((section(".spinlock.text")))
#endif

/*
 * Record of the locks a task is currently trying to acquire, used by the
 * softlockup detector to report what a stuck task is waiting on.
 * Presumably embedded per-task (see softlockup_locking_init()) — confirm
 * at the definition site.
 */
struct rtos_softlockup_t {
	raw_spinlock_t *spin_locking;	/* raw spinlock being acquired, if any */
	rwlock_t *rw_locking;		/* rwlock being acquired, if any */
};

#ifdef CONFIG_RTOS_KBOX
/* Kbox region id for deadlock records. NOTE(review): semantics defined
 * elsewhere — confirm against the kbox subsystem. */
extern int deadlock_region_id;
#endif
/* Two-entry threshold array — presumably a pair related to
 * MIN_THRESH_TIME_MS / MAX_THRESH_TIME_S; verify at the definition. */
#define RTOS_SOFTLOCKUP_THRESH_TIME_SIZE	2
extern int softlockup_thresh;
extern int softlockup_thresh_time[RTOS_SOFTLOCKUP_THRESH_TIME_SIZE];
/* Dump lock diagnostic info for @tsk; @is_panic selects panic-path output. */
extern void show_lock_info(struct task_struct *tsk, unsigned int is_panic);
extern void show_lock_info_with_call(struct task_struct *tsk, unsigned int is_panic);
/* Publish @lock as the lock the current task is spinning on; returns the
 * previously recorded lock so nested acquisitions can be restored (see
 * do_raw_spin_lock() below). */
extern raw_spinlock_t *__lockfunc_attr set_current_locking(raw_spinlock_t *lock);
#ifdef CONFIG_SMP
/* Owner sanity check run on unlock (see do_raw_spin_unlock()). */
extern void  __lockfunc_attr check_owner(raw_spinlock_t *lock);
#endif
/* Undo set_current_locking(): restore @prelock as the in-progress lock. */
extern void __lockfunc_attr clear_current_locking(raw_spinlock_t *lock, raw_spinlock_t *prelock);
/* Record the current task as owner of @lock once it is held. */
extern void __lockfunc_attr set_lock_owner(raw_spinlock_t *lock);
/* rwlock counterparts of the raw-spinlock tracking hooks above. */
extern rwlock_t *__lockfunc_attr set_current_rw_locking(rwlock_t *lock);
extern void __lockfunc_attr clear_current_rw_locking(rwlock_t *prelock);
extern void __lockfunc_attr set_rw_lock_owner(rwlock_t *lock);
extern void show_softlockup_info(struct task_struct *t);
/* Temporarily disable/enable softlockup checking; _ignore reads the state. */
extern void softlockup_check_disable(void);
extern void softlockup_check_enable(void);
extern int softlockup_check_ignore(void);
/* sysctl handler for tuning the softlockup threshold. */
extern int proc_softlockup_thresh(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);

/* Initialize per-task lock tracking state for @task. */
extern void softlockup_locking_init(struct task_struct *task);

/*
 * Reset a raw spinlock's ownership record to "no owner".
 * PID_MAX_LIMIT + 1 acts as an out-of-range sentinel pid.
 */
static inline void softlockup_spinlock_owner_init(raw_spinlock_t *lock)
{
	lock->spinlock_owner.owner_pid = PID_MAX_LIMIT + 1;
	lock->spinlock_owner.owner_task = NULL;
}

/*
 * Reset an rwlock's ownership record to "no owner".
 * PID_MAX_LIMIT + 1 acts as an out-of-range sentinel pid.
 */
static inline void softlockup_rwlock_owner_init(rwlock_t *lock)
{
	lock->rwlock_owner.owner_pid = PID_MAX_LIMIT + 1;
	lock->rwlock_owner.owner_task = NULL;
}

#ifndef CONFIG_DEBUG_SPINLOCK
/*
 * Acquire a raw spinlock with softlockup bookkeeping: publish the lock
 * being contended before spinning so the watchdog can report who is
 * stuck, then restore the previously recorded lock once it is held.
 */
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	raw_spinlock_t *saved;

	saved = set_current_locking(lock);
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	clear_current_locking(lock, saved);
}

/*
 * Variant of do_raw_spin_lock() that hands the saved irq flags to the
 * arch spin routine; the softlockup bookkeeping is identical.
 */
static inline void do_raw_spin_lock_flags(raw_spinlock_t *lock,
	unsigned long *flags) __acquires(lock)
{
	raw_spinlock_t *saved;

	saved = set_current_locking(lock);
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	clear_current_locking(lock, saved);
}

/*
 * Non-blocking acquire. A trylock never spins, so no "currently
 * locking" record is published; on success the caller becomes the
 * recorded owner. Returns nonzero on success.
 */
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int got_it = arch_spin_trylock(&lock->raw_lock);

	if (got_it)
		set_lock_owner(lock);
	return got_it;
}

/*
 * Release a raw spinlock, clearing its ownership record first so a new
 * acquirer never observes stale owner data.
 */
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
#ifdef CONFIG_SMP
#ifdef CONFIG_RTOS_DEBUG_SOFTLOCKUP_OPTIMIZE
	/* This check is moved out from check_owner for better performance */
	/* Fast path: only call check_owner() when the unlocking task is
	 * not the recorded owner. */
	if (unlikely(current != lock->spinlock_owner.owner_task))
		check_owner(lock);
#else
	check_owner(lock);
#endif
#endif
	/* Reset to the "no owner" sentinel before the arch unlock. */
	lock->spinlock_owner.owner_task = NULL;
	lock->spinlock_owner.owner_pid = PID_MAX_LIMIT + 1;
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}

/*
 * Acquire a read lock, publishing it via the softlockup tracker while
 * we may be spinning, then clearing the record once it is held.
 */
static inline void do_raw_read_lock(rwlock_t *rwlock) __acquires(lock)
{
	rwlock_t *saved;

	saved = set_current_rw_locking(rwlock);
	__acquire(lock);
	arch_read_lock(&rwlock->raw_lock);
	clear_current_rw_locking(saved);
}

/*
 * Variant of do_raw_read_lock() that hands the saved irq flags to the
 * arch read-lock routine; bookkeeping is identical.
 */
static inline void do_raw_read_lock_flags(rwlock_t *rwlock, unsigned long *flags) __acquires(lock)
{
	rwlock_t *saved;

	saved = set_current_rw_locking(rwlock);
	__acquire(lock);
	arch_read_lock_flags(&rwlock->raw_lock, *flags);
	clear_current_rw_locking(saved);
}

/*
 * Non-blocking read acquire; a trylock never spins, and unlike the
 * write trylock no exclusive owner is recorded for readers.
 */
static inline int do_raw_read_trylock(rwlock_t *rwlock)
{
	int got_it = arch_read_trylock(&rwlock->raw_lock);

	return got_it;
}

/* Release a read lock. Readers carry no exclusive ownership record, so
 * there is nothing to clear here (compare do_raw_write_unlock()). */
static inline void do_raw_read_unlock(rwlock_t *rwlock) __releases(lock)
{
	arch_read_unlock(&rwlock->raw_lock);
	__release(lock);
}

/*
 * Acquire a write lock: publish the contended lock while spinning,
 * clear the record once held, then register the current task as owner.
 */
static inline void do_raw_write_lock(rwlock_t *rwlock) __acquires(lock)
{
	rwlock_t *saved;

	saved = set_current_rw_locking(rwlock);
	__acquire(lock);
	arch_write_lock(&rwlock->raw_lock);
	clear_current_rw_locking(saved);
	set_rw_lock_owner(rwlock);
}

/*
 * Variant of do_raw_write_lock() that hands the saved irq flags to the
 * arch write-lock routine; bookkeeping is identical.
 */
static inline void do_raw_write_lock_flags(rwlock_t *rwlock, unsigned long *flags) __acquires(lock)
{
	rwlock_t *saved;

	saved = set_current_rw_locking(rwlock);
	__acquire(lock);
	arch_write_lock_flags(&rwlock->raw_lock, *flags);
	clear_current_rw_locking(saved);
	set_rw_lock_owner(rwlock);
}

/*
 * Non-blocking write acquire. On success the current task is recorded
 * as the lock's owner for softlockup reporting. Returns nonzero on
 * success.
 */
static inline int do_raw_write_trylock(rwlock_t *rwlock)
{
	int got_it = arch_write_trylock(&rwlock->raw_lock);

	if (got_it)
		set_rw_lock_owner(rwlock);
	return got_it;
}

/*
 * Release a write lock, resetting the ownership record to the
 * "no owner" sentinel before the arch unlock so a later acquirer
 * never sees stale owner data.
 */
static inline void do_raw_write_unlock(rwlock_t *rwlock) __releases(lock)
{
	rwlock->rwlock_owner.owner_task = NULL;
	rwlock->rwlock_owner.owner_pid = PID_MAX_LIMIT + 1;
	arch_write_unlock(&rwlock->raw_lock);
	__release(lock);
}
#endif
#else
/* Feature disabled: empty placeholder type, plus the one symbol that
 * presumably remains referenced by generic code — confirm at callers. */
struct rtos_softlockup_t {};
extern void show_softlockup_info(struct task_struct *t);
#endif /* CONFIG_RTOS_DEBUG_SOFTLOCKUP */
#endif /* __RTOS_SOFTLOCKUP_H__ */
