// SPDX-License-Identifier: GPL-2.0-or-later
 /*
 * ocfs2_lockglue.c
 *
 * Glue to the underlying lock stack.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_LOCK_GLUE
#include "cluster/masklog.h"
#include "cluster/tcp.h"

#include "ocfs2.h"
#include "ocfs2_lockingver.h"

#include "alloc.h"
#include "dcache.h"
#include "adl/adlapi.h"
#include "lockglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "quota.h"
#include "uptodate.h"
#include "refcounttree.h"

#include "dlmglue.h"
#include "adlglue.h"
#include "adl/adlcommon.h"

static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);

/*
 * Format the canonical fixed-width lock name for @type/@blkno/@generation
 * into @name (a buffer of at least OCFS2_LOCK_ID_MAX_LEN bytes).
 */
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int written;

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	written = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
			   ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
			   (long long)blkno, generation);

	/* The format above always yields exactly OCFS2_LOCK_ID_MAX_LEN - 1
	 * characters; anything else is a programming error. */
	BUG_ON(written != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);
}

/* Protects every ocfs2_lock_debug's d_lockres_tracking list. */
static DEFINE_SPINLOCK(ocfs2_lock_tracking_lock);

/* Hook @res into @lock_debug's debugfs tracking list. */
void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				struct ocfs2_lock_debug *lock_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_lock_tracking_lock);
	list_add(&res->l_debug_list, &lock_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_lock_tracking_lock);
}

/* Protects every ocfs2_deadlock_debug's d_deadlock_tracking list. */
static DEFINE_SPINLOCK(ocfs2_deadlock_tracking_lock);

/*
 * Move @res onto @deadlock_debug's tracking list (list_move, so a lockres
 * already on a deadlock list is simply re-homed).  Taken with irqsave
 * because this can be reached from the hung-check work path.
 */
void ocfs2_add_lockres_deadlock_tracking(struct ocfs2_lock_res *res,
				struct ocfs2_deadlock_debug *deadlock_debug)
{
	unsigned long flags;

	mlog(0, "Add deadlock_tracking for lockres %s\n", res->l_name);
	spin_lock_irqsave(&ocfs2_deadlock_tracking_lock, flags);
	list_move(&res->l_deadlock_list, &deadlock_debug->d_deadlock_tracking);
	spin_unlock_irqrestore(&ocfs2_deadlock_tracking_lock, flags);
}

/* Detach @res from any deadlock-tracking list it may be on; safe to call
 * even if the lockres was never added. */
void ocfs2_remove_lockres_deadlock_tracking(struct ocfs2_lock_res *res)
{
	unsigned long flags;

	spin_lock_irqsave(&ocfs2_deadlock_tracking_lock, flags);
	if (!list_empty(&res->l_deadlock_list))
		list_del_init(&res->l_deadlock_list);
	spin_unlock_irqrestore(&ocfs2_deadlock_tracking_lock, flags);
}

/* Detach @res from the debugfs tracking list; safe if never added. */
void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_lock_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_lock_tracking_lock);
}

#ifdef CONFIG_OCFS2_FS_STATS
/* Record when a mask waiter starts waiting, for lock statistics. */
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
	mw->mw_lock_start = ktime_get();
}
#else
/* Stats disabled: compiles away to nothing. */
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif

/*
 * Delayed work fired when a lock appears to have been held past the
 * hung-check interval: log it, record it for deadlock debugging, and
 * either invalidate the fs or re-arm the check.
 *
 * NOTE(review): "woker" looks like a typo for "worker"; renaming would
 * also require updating the INIT_DELAYED_WORK site, so it is left as-is.
 */
void ocfs2_lockres_hung_check_woker(struct work_struct *work)
{
	struct ocfs2_super *osb;
	struct ocfs2_lock_res *res =
			container_of(work, struct ocfs2_lock_res,
				l_hung_check_work.work);
	unsigned long flags;

	/* Lockres may already have been torn down -- nothing to report. */
	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	osb = ocfs2_get_lockres_osb(res);

	mlog(ML_ERROR, "%s: %s had locked for %u s due to blocking IO of block device!\n",
			osb->uuid_str, res->l_name,
			jiffies_to_msecs(jiffies - res->diff.adlres.l_lock_time) / 1000);

	/* On disk-lock stacks, dump the on-disk state and track the hang. */
	if (ocfs2_disk_lock(osb)) {
		print_disk_lockres(osb, res);
		ocfs2_add_lockres_deadlock_tracking(res, osb->osb_deadlock_debug);
	}

	if (res->l_hung_invalid) {
		/* A hang on this lock type invalidates the filesystem. */
		if (!ocfs2_test_invalid_fs(osb))
			o2hb_handle_invalid(osb->uuid_str);
	} else {
		/* Still held?  Re-arm the check for another interval. */
		spin_lock_irqsave(&res->l_lock, flags);
		if (res->l_ro_holders || res->l_ex_holders)
			schedule_delayed_work(&res->l_hung_check_work,
					ocfs2_lock_hung_check_ms(osb));
		spin_unlock_irqrestore(&res->l_lock, flags);
	}
}

/*
 * One-time initialization of a lock resource.  Zeroes the whole
 * structure (which also clears the lock status block) and sets up
 * every embedded lock, waitqueue, list head and work item.
 */
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	memset(res, 0, sizeof(*res));

	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_DELAYED_WORK(&res->l_hung_check_work,
			ocfs2_lockres_hung_check_woker);

	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_deadlock_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
	INIT_LIST_HEAD(&res->l_holders);
}

/* Configure hung-lock policy: whether to check at all, and whether a
 * detected hang should invalidate the filesystem. */
void ocfs2_lockres_set_hung_info(struct ocfs2_lock_res *res,
		bool hung_check, bool hung_invalid)
{
	res->l_hung_check = hung_check;
	res->l_hung_invalid = hung_invalid;
}

/*
 * Initialize an inode-attached lock resource: name it after the inode's
 * block number and generation, record the hung-check policy, then let
 * the active lock stack finish stack-specific setup.
 */
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       bool hung_check,
			       bool hung_invalid,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u64 blkno = OCFS2_I(inode)->ip_blkno;

	ocfs2_build_lock_name(type, blkno, generation, res->l_name);
	ocfs2_lockres_set_hung_info(res, hung_check, hung_invalid);

	osb->active_lockstack->l_ops->inode_lock_res_init(res,
			type, generation, inode);
}

struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

/* Map a quota-info lockres back to its ocfs2_super. */
struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mem_dqinfo *info = lockres->l_priv;
	struct super_block *sb = info->dqi_gi.dqi_sb;

	return OCFS2_SB(sb);
}

struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_file_private *fp = lockres->l_priv;

	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}

/*
 * Extract the inode block number that was stuffed (big-endian, binary)
 * into the tail of a dentry lock name by ocfs2_dentry_lock_res_init().
 */
__u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 blkno_be;

	memcpy(&blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(blkno_be));

	return be64_to_cpu(blkno_be);
}

struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}

/*
 * Initialize a dentry lock resource for the dentry whose parent directory
 * lives at @parent and whose inode is @inode.
 */
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;
	__be64 blkno_be = cpu_to_be64(OCFS2_I(inode)->ip_blkno);
	int written;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * The standard lock naming scheme can't hold both 16-byte values we
	 * need here, so the inode number is appended in raw binary form.  A
	 * NUL byte is left in front of it so error prints show something
	 * sensible without garbling the display.  A future OCFS2 version
	 * will likely use all-binary lock names; the stringified names have
	 * been a great debugging aid, but the debugfs interface can mangle
	 * things there if need be.
	 *
	 * NOTE: The standard "pad" value is also dropped; the total lock
	 * name size stays the same though -- the last part is all zeros
	 * thanks to the memset in ocfs2_lock_res_init_once().
	 */
	written = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
			   "%c%016llx",
			   ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
			   (long long)parent);
	BUG_ON(written != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &blkno_be,
	       sizeof(blkno_be));

	osb->active_lockstack->l_ops->dentry_lock_res_init(lockres, dl, inode);
}

static void ocfs2_super_block_lock_res_init(struct ocfs2_lock_res *res,
		struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_lockres_set_hung_info(res, 1, 0);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER_BLOCK, OCFS2_SUPER_BLOCK_BLKNO,
			0, res->l_name);
	osb->active_lockstack->l_ops->super_block_lock_res_init(res, osb);
}

static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_lockres_set_hung_info(res, 1, 1);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			0, res->l_name);
	osb->active_lockstack->l_ops->super_lock_res_init(res, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_lockres_set_hung_info(res, 1, 0);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	osb->active_lockstack->l_ops->rename_lock_res_init(res, osb);
}

static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
					 struct ocfs2_super *osb)
{
	/* nfs_sync lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_lockres_set_hung_info(res, 1, 0);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
	osb->active_lockstack->l_ops->nfs_sync_lock_res_init(res, osb);
}

static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
					    struct ocfs2_super *osb)
{
	ocfs2_lock_res_init_once(res);
	ocfs2_lockres_set_hung_info(res, 1, 0);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
	osb->active_lockstack->l_ops->orphan_scan_lock_res_init(res, osb);
}

void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
			      struct ocfs2_file_private *fp)
{
	struct inode *inode = fp->fp_file->f_mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
			      inode->i_generation, lockres->l_name);
	osb->active_lockstack->l_ops->file_lock_res_init(lockres, fp);
}

/* Set up the quota-info lock resource; named by the quota type. */
void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
			       struct ocfs2_mem_dqinfo *info)
{
	struct ocfs2_super *osb = OCFS2_SB(info->dqi_gi.dqi_sb);
	int qtype = info->dqi_gi.dqi_type;

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, qtype,
			      0, lockres->l_name);
	osb->active_lockstack->l_ops->qinfo_lock_res_init(lockres, info);
}

/* Set up a refcount-tree lock resource for the tree at @ref_blkno. */
void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
				  struct ocfs2_super *osb, u64 ref_blkno,
				  unsigned int generation)
{
	ocfs2_lock_res_init_once(lockres);

	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
			      generation, lockres->l_name);

	osb->active_lockstack->l_ops->refcount_lock_res_init(lockres, osb,
			ref_blkno, generation);
}

/*
 * Tear down a lock resource.  Asserts that the lockres is fully idle
 * (no blocked work, no waiters, no holders, lock not held) before
 * handing it to the stack-specific free hook and clearing its flags.
 */
void ocfs2_lock_res_free(struct ocfs2_super *osb, struct ocfs2_lock_res *res)
{
	/* Never initialized (or already freed) -- nothing to do. */
	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);
	ocfs2_remove_lockres_deadlock_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Stack-specific teardown is optional. */
	if (osb->active_lockstack->l_ops->lock_res_free)
		osb->active_lockstack->l_ops->lock_res_free(res);

	res->l_flags = 0UL;
}

/* Emit the last recorded RO and EX holder pids of @lockres to @m. */
void ocfs2_print_lock_holder_pids(struct seq_file *m, struct ocfs2_lock_res *lockres)
{
	seq_printf(m, "%d\t%d\t", lockres->l_ro_holder_pid,
			lockres->l_ex_holder_pid);
}

/*
 * Install @newflags on @lockres (l_lock must be held by the caller) and
 * wake every mask waiter whose goal is now satisfied.
 */
void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *waiter, *next;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(waiter, next, &lockres->l_mask_waiters,
				 mw_item) {
		if ((lockres->l_flags & waiter->mw_mask) != waiter->mw_goal)
			continue;

		/* Goal reached: detach the waiter and complete it. */
		list_del_init(&waiter->mw_item);
		waiter->mw_status = 0;
		complete(&waiter->mw_complete);
	}
}

/* Set the bits in @or on @lockres; caller holds l_lock. */
void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}

/* Clear the bits in @clear on @lockres; caller holds l_lock. */
void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}

/* Prepare a mask waiter for use (list node, completion, stats clock). */
void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
	ocfs2_init_start_time(mw);
}

/* Block until the waiter is completed; returns its status code. */
int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	reinit_completion(&mw->mw_complete);
	return mw->mw_status;
}

/*
 * Queue @mw on @lockres waiting for (l_flags & @mask) == @goal.
 * Caller holds l_lock; @mw must not already be queued.
 */
void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	mw->mw_mask = mask;
	mw->mw_goal = goal;
	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
}

/*
 * Remove @mw from @lockres under l_lock (caller holds it).  Returns 0
 * if the waiter's goal had already been satisfied, -EBUSY if the mask
 * still hadn't reached its goal, and 0 when @mw was not queued at all.
 */
int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	int ret;

	assert_spin_locked(&lockres->l_lock);

	if (list_empty(&mw->mw_item))
		return 0;

	ret = ((lockres->l_flags & mw->mw_mask) == mw->mw_goal) ? 0 : -EBUSY;

	list_del_init(&mw->mw_item);
	init_completion(&mw->mw_complete);

	return ret;
}

/* Locked wrapper around __lockres_remove_mask_waiter(). */
int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = __lockres_remove_mask_waiter(lockres, mw);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

/*
 * Interruptible wait on a mask waiter.  On signal the waiter is pulled
 * off the lockres; otherwise the waiter's status is returned.  The
 * completion is re-armed either way so it can be reused.
 */
int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{
	int ret = wait_for_completion_interruptible(&mw->mw_complete);

	if (!ret)
		ret = mw->mw_status;
	else
		lockres_remove_mask_waiter(lockres, mw);

	/* Re-arm so the same waiter can be waited on again. */
	reinit_completion(&mw->mw_complete);

	return ret;
}

/*
 * Force-complete every waiter on @lockres with status @error,
 * regardless of whether its mask goal was reached.
 */
void lockres_complete_mask_waiter(struct ocfs2_lock_res *lockres,
						int error)
{
	struct ocfs2_mask_waiter *waiter, *next;
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	list_for_each_entry_safe(waiter, next, &lockres->l_mask_waiters,
				 mw_item) {
		mlog(ML_NOTICE, "Complete lockres name=%s, level=%d\n",
				lockres->l_name, lockres->l_level);
		list_del_init(&waiter->mw_item);
		waiter->mw_status = error;
		complete(&waiter->mw_complete);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

/*
 * Heartbeat callback: complete every pending mask waiter on this mount
 * with -ESHUTDOWN so blocked tasks can bail out.  Walks all inode
 * lockres' and everything on the blocked lock list.
 */
void ocfs2_complete_all_lockres(struct o2nm_node *node, int node_num,
			void *data, u64 hb_generation)
{
	struct ocfs2_super *osb = data;	/* no cast needed from void * */
	struct ocfs2_inode_info *oi = NULL, *tmp_oi = NULL;
	struct ocfs2_lock_res *lockres = NULL;
	unsigned long flags;

	mlog(ML_BASTS, "%s: begin complete all lockres!\n", osb->uuid_str);

	/* The active stack may not support forced completion at all. */
	if (!osb->active_lockstack->l_ops->lockres_complete_mask_waiter)
		return;

	/* inode lockres */
	spin_lock(&osb->osb_inode_list_lock);
	list_for_each_entry_safe(oi, tmp_oi, &osb->osb_inode_list, ip_osb_list) {
		/* Cast to match %llu portably (u64 may be unsigned long). */
		mlog(ML_BASTS, "%s: complete inode mask waiter, ip_blkno = %llu\n",
				osb->uuid_str, (unsigned long long)oi->ip_blkno);
		osb->active_lockstack->l_ops->lockres_complete_mask_waiter(
				&oi->ip_rw_lockres, -ESHUTDOWN);
		osb->active_lockstack->l_ops->lockres_complete_mask_waiter(
				&oi->ip_inode_lockres, -ESHUTDOWN);
		osb->active_lockstack->l_ops->lockres_complete_mask_waiter(
				&oi->ip_open_lockres, -ESHUTDOWN);
	}
	spin_unlock(&osb->osb_inode_list_lock);

	/* Then everything queued for downconvert processing. */
	spin_lock_irqsave(&osb->blocked_list_lock, flags);
	list_for_each_entry(lockres, &osb->blocked_lock_list, l_blocked_list) {
		mlog(ML_BASTS, "%s: complete lockres mask waiter in blocked lock list, name=%s\n",
			osb->uuid_str, lockres->l_name);
		osb->active_lockstack->l_ops->lockres_complete_mask_waiter(
				lockres, -ESHUTDOWN);
	}
	spin_unlock_irqrestore(&osb->blocked_list_lock, flags);
}

/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
/* See the block comment above: EX-grants locks for a brand-new inode
 * that no other node can see yet, skipping the normal cluster lookup. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* Only valid on inodes not yet hashed/visible to other nodes. */
	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	return osb->active_lockstack->l_ops->create_inode_locks(inode);
}

/*
 * Take the cluster rw lock on @inode if the active lock stack provides
 * one; stacks without an rw_lock op treat this as a successful no-op.
 */
int ocfs2_rw_lock(struct inode *inode, int write)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (!osb->active_lockstack->l_ops->rw_lock)
		return 0;

	return osb->active_lockstack->l_ops->rw_lock(inode, write);
}

/* Drop the cluster rw lock on @inode; no-op if the stack lacks the op. */
void ocfs2_rw_unlock(struct inode *inode, int write)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (!osb->active_lockstack->l_ops->rw_unlock)
		return;

	osb->active_lockstack->l_ops->rw_unlock(inode, write);
}

/*
 * ocfs2_open_lock always get PR mode lock.
 */
/* Take the open lock on @inode (always PR; see comment above). */
int ocfs2_open_lock(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	return osb->active_lockstack->l_ops->open_lock(inode);
}

/* Non-blocking attempt at the open lock; EX when @write is set. */
int ocfs2_try_open_lock(struct inode *inode, int write)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	return osb->active_lockstack->l_ops->try_open_lock(inode, write);
}

/*
 * ocfs2_open_unlock unlock PR and EX mode open locks.
 */
/* Drop both PR and EX open locks on @inode (see comment above). */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	osb->active_lockstack->l_ops->open_unlock(inode);
}

/*
 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
 * flock() calls. The locking approach this requires is sufficiently
 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 *
 * - No optimization of lock levels is done - we take at exactly
 *   what's been requested.
 *
 * - No lock caching is employed. We immediately downconvert to
 *   no-lock at unlock time. This also means flock locks never go on
 *   the blocking list).
 *
 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving applications flock()
 *   request.
 *
 * - Access to any flock lockres doesn't require concurrency, so we
 *   can simplify the code by requiring the caller to guarantee
 *   serialization of dlmglue flock calls.
 */
/* flock() entry point; semantics described in the block comment above. */
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);

	return osb->active_lockstack->l_ops->file_lock(file, ex, trylock);
}

/* flock() release; immediately downconverts to no-lock (see above). */
void ocfs2_file_unlock(struct file *file)
{
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);

	osb->active_lockstack->l_ops->file_unlock(file);
}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 *   0 means no refresh needed.
 *
 *   > 0 means you need to refresh this and you MUST call
 *   ocfs2_complete_lock_res_refresh afterwards. */
int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int status = 0;

refresh_check:
	spin_lock_irqsave(&lockres->l_lock, flags);
	/* Nothing to refresh -- fast path out. */
	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	/* Someone else is refreshing: wait for them, then re-evaluate
	 * from the top -- the refresh may have failed or a new refresh
	 * may be needed by the time we get back. */
	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_refreshing_lock(lockres);
		goto refresh_check;
	}

	/* Ok, I'll be the one to refresh this lock. */
	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = 1;
bail:
	mlog(0, "status %d\n", status);
	return status;
}

/*
 * Hand back an inode buffer head in *@ret_bh: reuse @passed_bh (with an
 * extra reference) when the caller already has one, otherwise read the
 * inode block from disk.  Returns 0 or a negative errno.
 */
int ocfs2_assign_bh(struct inode *inode,
			   struct buffer_head **ret_bh,
			   struct buffer_head *passed_bh)
{
	int status;

	if (passed_bh) {
		/* The update already went to disk for us -- just take a
		 * reference on the bh we were handed. */
		get_bh(passed_bh);
		*ret_bh = passed_bh;
		return 0;
	}

	status = ocfs2_read_inode_block(inode, ret_bh);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
/*
 * Cluster-lock an inode via the active lock stack.  When the caller
 * restricts the call to the *other* stack (OCFS2_LOCK_DLM_ONLY on a
 * disk-lock mount, or OCFS2_LOCK_DISK_ONLY on a DLM mount), no lock is
 * taken and only the inode block is read (if @ret_bh was supplied).
 *
 * Returns < 0 if the callback will never be called; otherwise the lock
 * result is communicated via the callback.
 */
int ocfs2_inode_lock_full_nested(struct inode *inode,
				 struct buffer_head **ret_bh,
				 int ex,
				 int arg_flags,
				 int subclass)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int disk = ocfs2_disk_lock(osb);
	int ret;

	if ((disk && (arg_flags & OCFS2_LOCK_DLM_ONLY)) ||
	    (!disk && (arg_flags & OCFS2_LOCK_DISK_ONLY))) {
		if (!ret_bh)
			return 0;
		ret = ocfs2_read_inode_block(inode, ret_bh);
		if (ret < 0)
			mlog_errno(ret);
		return ret;
	}

	return osb->active_lockstack->l_ops->inode_lock(inode,
			ret_bh, ex, arg_flags, subclass);
}

/*
 * Allocate a lock-holder record for the current task and queue it on
 * @lockres->l_holders.  On success *@oh points at the new holder; on
 * -ENOMEM *@oh is NULL.
 */
int ocfs2_add_holder(struct ocfs2_lock_res *lockres, struct ocfs2_lock_holder **oh,
		int ex, struct buffer_head *ret_bh)
{
	struct ocfs2_lock_holder *holder;
	unsigned long flags;

	*oh = holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder)
		return -ENOMEM;

	holder->bh = ret_bh;
	holder->oh_ex = ex;
	INIT_LIST_HEAD(&holder->oh_list);
	holder->oh_owner_pid = get_pid(task_pid(current));

	spin_lock_irqsave(&lockres->l_lock, flags);
	list_add_tail(&holder->oh_list, &lockres->l_holders);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return 0;
}

/* Find the holder record for @pid on @lockres, or NULL if none. */
static struct ocfs2_lock_holder *
ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
		struct pid *pid)
{
	struct ocfs2_lock_holder *oh, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	list_for_each_entry(oh, &lockres->l_holders, oh_list) {
		if (oh->oh_owner_pid == pid) {
			found = oh;
			break;
		}
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return found;
}

/*
 * Release the holder record taken by the tracker lock path.  When
 * @had_lock is set the call was a recursive acquire that owns no
 * holder record, so there is nothing to undo.
 */
void ocfs2_remove_holder(struct inode *inode,
		struct ocfs2_lock_holder **oh, int had_lock)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	unsigned long flags;

	if (had_lock)
		return;

	spin_lock_irqsave(&lockres->l_lock, flags);
	list_del(&(*oh)->oh_list);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	put_pid((*oh)->oh_owner_pid);
	kfree(*oh);
	*oh = NULL;
}

/*
 * return < 0 on error, return = 0 if there's no lock holder on the stack
 * before this call, return = 1 if this call would be a recursive locking
 */
int ocfs2_inode_lock_tracker_full_nested(struct inode *inode,
				struct buffer_head **ret_bh,
				int ex,
				struct ocfs2_lock_holder **oh,
				int arg_flags,
				int subclass)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_lock_res *lockres;
	struct ocfs2_lock_holder *tmp_oh;
	struct pid *pid = task_pid(current);

	if (ocfs2_disk_lock(osb) && (arg_flags & OCFS2_LOCK_DLM_ONLY))
		goto read_inode;
	if (!ocfs2_disk_lock(osb) && (arg_flags & OCFS2_LOCK_DISK_ONLY))
		goto read_inode;

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	tmp_oh = ocfs2_pid_holder(lockres, pid);
	if (!tmp_oh) {
		/* First (non-recursive) acquire by this task. */
		ret = osb->active_lockstack->l_ops->inode_lock(inode,
				ret_bh, ex, arg_flags, subclass);
		if (ret < 0) {
			mlog_errno(ret);
			return ret;
		}

		/*
		 * ret_bh may legitimately be NULL (callers that don't want
		 * the inode block -- the other paths in this function guard
		 * against it); don't dereference it unconditionally here.
		 */
		ret = ocfs2_add_holder(lockres, oh, ex,
				       ret_bh ? *ret_bh : NULL);
		if (ret) {
			/* Holder bookkeeping failed: undo the lock and
			 * release any bh the lock path handed back. */
			osb->active_lockstack->l_ops->inode_unlock(inode, ex);
			if (ret_bh && *ret_bh) {
				brelse(*ret_bh);
				*ret_bh = NULL;
			}
			mlog_errno(ret);
			return ret;
		}
		return 0;
	}

	/* Recursive acquire: reuse the bh cached on the first acquire. */
	if (ret_bh)
		*ret_bh = tmp_oh->bh;

	/* old lock is pr, but new lock is ex */
	if (unlikely(ex && !tmp_oh->oh_ex)) {
		mlog(ML_ERROR,
			"Recursive locking is not permitted to upgrade to EX level from PR level.\n");
		dump_stack();
		return -EINVAL;
	}

	return 1;

read_inode:
	/* Wrong stack for the caller's restriction: just read the block. */
	if (ret_bh) {
		ret = ocfs2_read_inode_block(inode, ret_bh);
		if (ret < 0)
			mlog_errno(ret);
	}
	return ret;
}

/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the downconvert thread which
 * blocks dlm lock acquiry while acquiring page locks.
 *
 * ** These _with_page variantes are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread.  In that case we unlock
 * our page so the downconvert thread can make progress.  Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS who will then
 * immediately retry the aop call.
 *
 * We do a blocking lock and immediate unlock before returning, though, so that
 * the lock has a great chance of being cached on this node by the time the VFS
 * calls back to retry the aop.    This has a potential to livelock as nodes
 * ping locks back and forth, but that's a risk we're willing to take to avoid
 * the lock inversion simply.
 */
/* See the block comment above for the lock-inversion rationale. */
int ocfs2_inode_lock_with_page(struct inode *inode,
			      struct buffer_head **ret_bh,
			      int ex,
			      struct page *page)
{
	int ret;

	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
	if (ret != -EAGAIN)
		return ret;

	/* The downconvert thread needs our page lock to make progress;
	 * release it, warm the lock cache, and have the aop retried. */
	unlock_page(page);
	if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
		ocfs2_inode_unlock(inode, ex);

	return AOP_TRUNCATED_PAGE;
}

/*
 * Lock @inode for a read path, upgrading to EX and updating atime when
 * the mount options require it.  *@level reports the level actually
 * held on success (0 = PR, 1 = EX) so the caller can unlock correctly.
 */
int ocfs2_inode_lock_atime(struct inode *inode,
			  struct vfsmount *vfsmnt,
			  int *level)
{
	struct buffer_head *bh = NULL;
	int ret;

	ret = ocfs2_inode_lock(inode, NULL, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* No atime update needed: stay at PR and we're done. */
	if (!ocfs2_should_update_atime(inode, vfsmnt)) {
		*level = 0;
		return ret;
	}

	/* Trade the PR lock for an EX lock so atime can be written. */
	ocfs2_inode_unlock(inode, 0);
	ret = ocfs2_inode_lock(inode, &bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	*level = 1;

	/* Re-check: another node may have updated atime while we
	 * dropped and re-took the lock. */
	if (ocfs2_should_update_atime(inode, vfsmnt))
		ocfs2_update_inode_atime(inode, bh);
	brelse(bh);

	return ret;
}

/*
 * Drop the inode cluster lock, honoring the same stack-restriction
 * flags as the lock side: a call restricted to the inactive stack is
 * a no-op.
 */
void ocfs2_inode_unlock_full(struct inode *inode, int ex, int flags)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int disk = ocfs2_disk_lock(osb);

	if ((disk && (flags & OCFS2_LOCK_DLM_ONLY)) ||
	    (!disk && (flags & OCFS2_LOCK_DISK_ONLY)))
		return;

	osb->active_lockstack->l_ops->inode_unlock(inode, ex);
}

/*
 * Unlock counterpart of ocfs2_inode_lock_tracker_full_nested().  For a
 * recursive acquire (@had_lock set) the outermost caller owns the lock,
 * so nothing is dropped here.
 */
void ocfs2_inode_unlock_tracker_full(struct inode *inode, int ex,
				struct ocfs2_lock_holder **oh, int had_lock, int flags)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	int disk = ocfs2_disk_lock(osb);

	(void)lockres;	/* holder removal happens via ocfs2_remove_holder */

	if ((disk && (flags & OCFS2_LOCK_DLM_ONLY)) ||
	    (!disk && (flags & OCFS2_LOCK_DISK_ONLY)))
		return;

	if (had_lock)
		return;

	osb->active_lockstack->l_ops->inode_unlock(inode, ex);
	ocfs2_remove_holder(inode, oh, had_lock);
}

/* Take the orphan-scan cluster lock; the stack fills in *@seqno. */
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
	return osb->active_lockstack->l_ops->orphan_scan_lock(osb, seqno);
}

/* Drop the orphan-scan cluster lock taken with sequence @seqno. */
void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{
	osb->active_lockstack->l_ops->orphan_scan_unlock(osb, seqno);
}

/* Take the superblock-block cluster lock (EX when @ex is set). */
int ocfs2_super_block_lock(struct ocfs2_super *osb, int ex)
{
	return osb->active_lockstack->l_ops->super_block_lock(osb, ex);
}

/* Drop the superblock-block cluster lock. */
void ocfs2_super_block_unlock(struct ocfs2_super *osb, int ex)
{
	osb->active_lockstack->l_ops->super_block_unlock(osb, ex);
}

/* Take the superblock cluster lock (EX when @ex is set). */
int ocfs2_super_lock(struct ocfs2_super *osb, int ex)
{
	return osb->active_lockstack->l_ops->super_lock(osb, ex);
}

/* Drop the superblock cluster lock. */
void ocfs2_super_unlock(struct ocfs2_super *osb, int ex)
{
	osb->active_lockstack->l_ops->super_unlock(osb, ex);
}

/* Take the global rename cluster lock. */
int ocfs2_rename_lock(struct ocfs2_super *osb)
{
	return osb->active_lockstack->l_ops->rename_lock(osb);
}

/* Drop the global rename cluster lock. */
void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
	osb->active_lockstack->l_ops->rename_unlock(osb);
}

/* Take the NFS-sync cluster lock (EX when @ex is set). */
int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{
	return osb->active_lockstack->l_ops->nfs_sync_lock(osb, ex);
}

/* Drop the NFS-sync cluster lock. */
void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{
	osb->active_lockstack->l_ops->nfs_sync_unlock(osb, ex);
}

/* Take the dentry cluster lock (EX when @ex is set). */
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	return osb->active_lockstack->l_ops->dentry_lock(dentry, ex);
}

/* Drop the dentry cluster lock. */
void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	osb->active_lockstack->l_ops->dentry_unlock(dentry, ex);
}

/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can live on after a mount, so
 * we can't rely on the ocfs2_super to always exist. */
/* kref release callback: free the lock debug structure. */
static void ocfs2_lock_debug_free(struct kref *kref)
{
	kfree(container_of(kref, struct ocfs2_lock_debug, d_refcnt));
}

/* Reference counting of the deadlock debug structure. We want
 * this because open references on the debug inodes can live on
 * after a mount, so we can't rely on the ocfs2_super to always exist */
/* kref release callback: free the deadlock debug structure. */
static void ocfs2_deadlock_debug_free(struct kref *kref)
{
	kfree(container_of(kref, struct ocfs2_deadlock_debug, d_refcnt));
}

/* Drop a reference on @lock_debug (NULL-tolerant); frees on last put. */
void ocfs2_put_lock_debug(struct ocfs2_lock_debug *lock_debug)
{
	if (lock_debug)
		kref_put(&lock_debug->d_refcnt, ocfs2_lock_debug_free);
}

/* Drop a reference on @deadlock_debug (NULL-tolerant). */
void ocfs2_put_deadlock_debug(struct ocfs2_deadlock_debug *deadlock_debug)
{
	if (deadlock_debug)
		kref_put(&deadlock_debug->d_refcnt, ocfs2_deadlock_debug_free);
}

/* Take an extra reference on @debug; caller must not pass NULL. */
void ocfs2_get_lock_debug(struct ocfs2_lock_debug *debug)
{
	kref_get(&debug->d_refcnt);
}

/* Take an extra reference on @deadlock_debug; caller must not pass NULL. */
void ocfs2_get_deadlock_debug(struct ocfs2_deadlock_debug *deadlock_debug)
{
	kref_get(&deadlock_debug->d_refcnt);
}

/*
 * Allocate and initialize a lock debug structure (refcount of 1).
 * Returns NULL on allocation failure.  Structured to match its sibling
 * ocfs2_new_deadlock_debug(): a single exit path needs no goto.
 */
struct ocfs2_lock_debug *ocfs2_new_lock_debug(void)
{
	struct ocfs2_lock_debug *lock_debug;

	lock_debug = kmalloc(sizeof(struct ocfs2_lock_debug), GFP_KERNEL);
	if (!lock_debug) {
		mlog_errno(-ENOMEM);
		return NULL;
	}

	kref_init(&lock_debug->d_refcnt);
	INIT_LIST_HEAD(&lock_debug->d_lockres_tracking);
	lock_debug->d_locking_state = NULL;

	return lock_debug;
}

/*
 * Allocate and initialize a deadlock debug structure (refcount of 1).
 * Returns NULL on allocation failure.
 */
struct ocfs2_deadlock_debug *ocfs2_new_deadlock_debug(void)
{
	struct ocfs2_deadlock_debug *deadlock_debug;

	deadlock_debug = kmalloc(sizeof(*deadlock_debug), GFP_KERNEL);
	if (!deadlock_debug) {
		mlog_errno(-ENOMEM);
		return NULL;
	}

	kref_init(&deadlock_debug->d_refcnt);
	INIT_LIST_HEAD(&deadlock_debug->d_deadlock_tracking);
	deadlock_debug->d_deadlock_state = NULL;

	return deadlock_debug;
}

/* Access to this is arbitrated for us via seq_file->sem. */
struct ocfs2_lock_seq_priv {
	struct ocfs2_lock_debug *p_lock_debug;	/* pinned debug object */
	struct ocfs2_lock_res p_iter_res;	/* dummy iteration cursor */
	struct ocfs2_lock_res p_tmp_res;	/* snapshot handed to ->show */
};

/*
 * Return the next real lockres after @start on the tracking list, or
 * NULL when the end is reached.  Dummy iteration cursors (NULL l_ops)
 * are skipped.  Called with ocfs2_lock_tracking_lock held.
 */
static struct ocfs2_lock_res *ocfs2_lock_next_res(struct ocfs2_lock_res *start,
				struct ocfs2_lock_seq_priv *priv)
{
	struct ocfs2_lock_res *iter, *ret = NULL;
	struct ocfs2_lock_debug *lock_debug = priv->p_lock_debug;

	assert_spin_locked(&ocfs2_lock_tracking_lock);

	/* Iteration starts mid-list at @start, so the list head shows up
	 * as an ordinary entry and must be detected explicitly. */
	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
		/* discover the head of the list */
		if (&iter->l_debug_list == &lock_debug->d_lockres_tracking) {
			mlog(0, "End of list found, %p\n", ret);
			break;
		}

		/* We track our "dummy" iteration lockres' by a NULL
		 * l_ops field. */
		if (iter->l_ops != NULL) {
			ret = iter;
			break;
		}
	}

	return ret;
}

/*
 * seq_file ->start: return the first real lockres after the dummy cursor,
 * or NULL if the tracking list is empty.  @pos is deliberately ignored;
 * position is carried by the dummy lockres p_iter_res instead.
 */
static void *ocfs2_lock_seq_start(struct seq_file *m, loff_t *pos)
{
	struct ocfs2_lock_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter;

	spin_lock(&ocfs2_lock_tracking_lock);
	iter = ocfs2_lock_next_res(&priv->p_iter_res, priv);
	if (iter) {
		/* Since lockres' have the lifetime of their container
		 * (which can be inodes, ocfs2_supers, etc) we want to
		 * copy this out to a temporary lockres while still
		 * under the spinlock. Obviously after this we can't
		 * trust any pointers on the copy returned, but that's
		 * ok as the information we want isn't typically held
		 * in them. */
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_lock_tracking_lock);

	return iter;
}

/* seq_file ->stop: nothing to undo -- start()/next() release the tracking
 * lock before returning, so no state is held across iterations. */
static void ocfs2_lock_seq_stop(struct seq_file *m, void *v)
{
}

/*
 * seq_file ->next: advance to the lockres after @v and re-anchor the
 * dummy cursor behind it.
 *
 * @v is the stable copy (p_tmp_res) made by the previous start()/next();
 * its embedded list pointers are a snapshot of the original element's
 * neighbours at copy time.  NOTE(review): walking from that snapshot
 * assumes those neighbours are still on the list when we reacquire the
 * tracking lock -- verify against the matching upstream dlmglue iterator.
 */
static void *ocfs2_lock_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ocfs2_lock_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter = v;
	struct ocfs2_lock_res *dummy = &priv->p_iter_res;

	spin_lock(&ocfs2_lock_tracking_lock);
	iter = ocfs2_lock_next_res(iter, priv);
	list_del_init(&dummy->l_debug_list);
	if (iter) {
		/* keep the dummy adjacent to the new current element and
		 * hand back a stable copy, as in start() */
		list_add(&dummy->l_debug_list, &iter->l_debug_list);
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_lock_tracking_lock);

	return iter;
}

/* seq_file ->show: delegate to the lockres' own show callback. */
static int ocfs2_lock_seq_show(struct seq_file *m, void *v)
{
	struct ocfs2_lock_res *lockres = v;

	return lockres ? lockres->lock_seq_show(m, v) : -EINVAL;
}

/* Iterator over the per-osb lockres tracking list ("locking_state"). */
static const struct seq_operations ocfs2_lock_seq_ops = {
	.start =	ocfs2_lock_seq_start,
	.stop =		ocfs2_lock_seq_stop,
	.next =		ocfs2_lock_seq_next,
	.show =		ocfs2_lock_seq_show,
};

/*
 * debugfs release for "locking_state": unhook the dummy cursor before the
 * private area goes away, then drop the debug reference taken at open.
 */
static int ocfs2_lock_debug_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ocfs2_lock_seq_priv *priv = seq->private;

	ocfs2_remove_lockres_tracking(&priv->p_iter_res);
	ocfs2_put_lock_debug(priv->p_lock_debug);

	return seq_release_private(inode, file);
}

/*
 * debugfs open for "locking_state".
 *
 * Allocates the per-open iterator state, pins the per-osb lock debug
 * object and inserts the dummy cursor lockres onto the tracking list.
 *
 * Uses __seq_open_private() (mirroring ocfs2_deadlock_debug_open()) so
 * the seq_file open and the zeroed private allocation happen in one
 * step.  The previous kzalloc()+seq_open() sequence took the
 * ocfs2_get_lock_debug() reference *before* seq_open() and only freed
 * priv on failure, leaking one debug reference every time seq_open()
 * returned an error.
 */
static int ocfs2_lock_debug_open(struct inode *inode, struct file *file)
{
	struct ocfs2_lock_seq_priv *priv;
	struct ocfs2_super *osb;

	/* freed by seq_release_private() in ocfs2_lock_debug_release() */
	priv = __seq_open_private(file, &ocfs2_lock_seq_ops, sizeof(*priv));
	if (!priv) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	osb = inode->i_private;
	ocfs2_get_lock_debug(osb->osb_lock_debug);
	priv->p_lock_debug = osb->osb_lock_debug;

	/* dummy cursor used by the seq iterator */
	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
	ocfs2_add_lockres_tracking(&priv->p_iter_res, priv->p_lock_debug);

	return 0;
}

/* File operations backing the "locking_state" debugfs file. */
static const struct file_operations ocfs2_lock_debug_fops = {
	.open =		ocfs2_lock_debug_open,
	.release =	ocfs2_lock_debug_release,
	.read =		seq_read,
	.llseek =	seq_lseek,
};

/*
 * Fill @lockres_info->host (and its IPv4 address, when resolvable) with
 * the node holding the lock: the local node when we hold ro/ex locally,
 * otherwise the first valid holder recorded in the on-disk lock.
 * O2NM_INVALID_NODE_NUM means no holder was found.
 */
static void ocfs2_copy_lock_host_to_p_res_info(struct ocfs2_lock_res *lockres,
		struct ocfs2_deadlock_lock_res_info *lockres_info)
{
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_adl_lock *lock = (struct ocfs2_adl_lock *) (page_address(adlres->l_lksb.page));
	struct o2nm_node *node;
	int index;

	lockres_info->host = O2NM_INVALID_NODE_NUM;

	/* local holders win: if we hold the lock, report ourselves */
	if (lockres->l_ro_holders || lockres->l_ex_holders) {
		lockres_info->host = osb->node_num;
	} else {
		/* otherwise report the first valid holder on disk */
		for (index = 0; index < LOCK_MAX_HOLDERS; index++) {
			if (host_id(lock, index) != O2NM_INVALID_NODE_NUM) {
				lockres_info->host = host_id(lock, index);
				break;
			}
		}
	}
	if (lockres_info->host != O2NM_INVALID_NODE_NUM) {
		/* NOTE(review): o2nm_get_node_by_num() usually takes a node
		 * reference; confirm whether an o2nm_node_put() is required
		 * here (same pattern repeats below in this file). */
		node = o2nm_get_node_by_num(lockres_info->host);
		if (node)
			lockres_info->host_ipv4_address = node->nd_ipv4_address;
	}
}

/*
 * Fill @lockres_info->pending_host (and its IPv4 address, when
 * resolvable) from the on-disk pending-request slot, or leave it as
 * O2NM_INVALID_NODE_NUM when no request is pending.
 */
static void ocfs2_copy_pending_host_to_p_res_info(struct ocfs2_lock_res *lockres,
		struct ocfs2_deadlock_lock_res_info *lockres_info)
{
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	struct ocfs2_adl_cluster_connection *conn = osb->cconn;
	struct adl_ctxt *adl = conn->cc_lockspace;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_adl_lock *lock = (struct ocfs2_adl_lock *) (page_address(adlres->l_lksb.page));
	struct o2nm_node *node;

	lockres_info->pending_host = O2NM_INVALID_NODE_NUM;

	if (adl_lock_pending_exist(adl, lock)) {
		lockres_info->pending_host = pending_host_id(lock);
		/* NOTE(review): o2nm_get_node_by_num() usually takes a node
		 * reference; confirm whether an o2nm_node_put() is needed. */
		node = o2nm_get_node_by_num(pending_host_id(lock));
		if (node)
			lockres_info->pending_host_ipv4_address = node->nd_ipv4_address;
	}
}

static void copy_iter_to_p_res_info(struct ocfs2_lock_res *iter,
						struct ocfs2_deadlock_lock_res_info *p_res_info)
{
	struct ocfs2_deadlock_lock_res_info *lockres_info = p_res_info;
	struct ocfs2_lock_res *lockres = iter;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct inode *inode = NULL;

	memset(lockres_info, 0, sizeof(struct ocfs2_deadlock_lock_res_info));

	if (!adlres->l_lksb.page)
		return;

	strcpy(lockres_info->l_name, lockres->l_name);

	ocfs2_copy_lock_host_to_p_res_info(lockres, lockres_info);

	ocfs2_copy_pending_host_to_p_res_info(lockres, lockres_info);

	if (lockres->l_ro_holders) {
		lockres_info->l_ro_holders = lockres->l_ro_holders;
		lockres_info->l_ro_holder_pid = lockres->l_ro_holder_pid;
	}

	if (lockres->l_ex_holders) {
		lockres_info->l_ex_holders = lockres->l_ex_holders;
		lockres_info->l_ex_holder_pid = lockres->l_ex_holder_pid;
	}

	if (ocfs2_is_inode_lock(lockres)) {
		inode = ocfs2_lock_res_inode(lockres);
		lockres_info->inode_no = inode->i_ino;
	}

	lockres_info->l_blkno = adlres->blkno;
	lockres_info->l_sector_offset = adlres->sector_offset;
	lockres_info->l_type = lockres->l_type;
	lockres_info->l_level = lockres->l_level;
}

/*
 * Return the next "real" lockres after @start on the deadlock tracking
 * list, or NULL once the list head is reached.  Dummy iteration markers
 * (NULL ->l_ops) are skipped.
 *
 * Caller must hold ocfs2_deadlock_tracking_lock.
 */
static struct ocfs2_lock_res *ocfs2_lock_next_deadlock_res(struct ocfs2_lock_res *start,
		struct ocfs2_deadlock_seq_priv *priv)
{
	struct ocfs2_lock_res *iter, *ret = NULL;
	struct ocfs2_deadlock_debug *deadlock_debug = priv->p_deadlock_debug;

	assert_spin_locked(&ocfs2_deadlock_tracking_lock);

	list_for_each_entry(iter, &start->l_deadlock_list, l_deadlock_list) {
		/* discover the head of the list */
		if (&iter->l_deadlock_list == &deadlock_debug->d_deadlock_tracking) {
			mlog(0, "End of list found, %p\n", ret);
			break;
		}

		/* We track our "dummy" iteration lockres' by a NULL l_ops field. */
		if (iter->l_ops != NULL) {
			ret = iter;
			break;
		}
	}

	return ret;
}

/*
 * seq_file ->start: return the first real lockres after the dummy cursor,
 * or NULL if the tracking list is empty.  @pos is deliberately ignored;
 * position is carried by the dummy lockres p_iter_res.  Also snapshots
 * the printable fields into p_res_info while still under the lock.
 */
static void *ocfs2_deadlock_seq_start(struct seq_file *m, loff_t *pos)
{
	struct ocfs2_deadlock_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter;
	unsigned long flags;

	spin_lock_irqsave(&ocfs2_deadlock_tracking_lock, flags);
	iter = ocfs2_lock_next_deadlock_res(&priv->p_iter_res, priv);
	if (iter) {
		/* Since lockres' have the lifetime of their container
		 * (which can be inodes, ocfs2_supers, etc) we want to
		 * copy this out to a temporary lockres while still
		 * under the spinlock. Obviously after this we can't
		 * trust any pointers on the copy returned, but that's
		 * ok as the information we want isn't typically held
		 * in them. */
		copy_iter_to_p_res_info(iter, &priv->p_res_info);
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock_irqrestore(&ocfs2_deadlock_tracking_lock, flags);

	return iter;
}

/* seq_file ->stop: nothing to undo -- the tracking lock is released
 * before start()/next() return. */
static void ocfs2_deadlock_seq_stop(struct seq_file *m, void *v)
{
}

/*
 * seq_file ->next: advance the deadlock iterator to the lockres after the
 * current one.
 *
 * The dummy cursor (p_iter_res) is kept immediately *before* the current
 * element: the first lookup from the dummy re-finds the current lockres
 * and the second lookup yields its successor.  The dummy must then be
 * re-anchored immediately before the new current element -- hence
 * list_add_tail(), which inserts before @iter.  The previous list_add()
 * parked the dummy *after* the new current element, so the next call's
 * first lookup landed one entry too far and every other lockres was
 * silently skipped.
 */
static void *ocfs2_deadlock_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ocfs2_deadlock_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter = v;
	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
	unsigned long flags;

	spin_lock_irqsave(&ocfs2_deadlock_tracking_lock, flags);
	iter = ocfs2_lock_next_deadlock_res(dummy, priv);
	if (iter) {
		iter = ocfs2_lock_next_deadlock_res(iter, priv);
		list_del_init(&dummy->l_deadlock_list);
		if (iter) {
			/* keep the dummy cursor just before the new current */
			list_add_tail(&dummy->l_deadlock_list,
				      &iter->l_deadlock_list);
			copy_iter_to_p_res_info(iter, &priv->p_res_info);
			priv->p_tmp_res = *iter;
			iter = &priv->p_tmp_res;
		}
	}
	spin_unlock_irqrestore(&ocfs2_deadlock_tracking_lock, flags);

	return iter;
}

/* seq_file ->show: delegate to the lockres' own deadlock show callback. */
static int ocfs2_deadlock_seq_show(struct seq_file *m, void *v)
{
	struct ocfs2_lock_res *lockres = v;

	return lockres ? lockres->deadlock_seq_show(m, v) : -EINVAL;
}

/* Iterator over the per-osb deadlock tracking list ("deadlock_state"). */
static const struct seq_operations ocfs2_deadlock_seq_ops = {
	.start =    ocfs2_deadlock_seq_start,
	.stop =     ocfs2_deadlock_seq_stop,
	.next =     ocfs2_deadlock_seq_next,
	.show =     ocfs2_deadlock_seq_show,
};

/*
 * debugfs release for "deadlock_state": unhook the dummy cursor before
 * the private area goes away, then drop the reference taken at open.
 */
static int ocfs2_deadlock_debug_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ocfs2_deadlock_seq_priv *priv = seq->private;

	ocfs2_remove_lockres_deadlock_tracking(&priv->p_iter_res);
	ocfs2_put_deadlock_debug(priv->p_deadlock_debug);

	return seq_release_private(inode, file);
}

static int ocfs2_deadlock_debug_open(struct inode *inode, struct file *file)
{
	struct ocfs2_deadlock_seq_priv *priv;
	struct ocfs2_super *osb;

	/* priv's space alloc by '__seq_open_private', release by 'seq_release_private' */
	priv = __seq_open_private(file, &ocfs2_deadlock_seq_ops, sizeof(*priv));
	if (!priv) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	osb = inode->i_private;
	ocfs2_get_deadlock_debug(osb->osb_deadlock_debug);
	priv->p_deadlock_debug = osb->osb_deadlock_debug;

	/* dummy res */
	INIT_LIST_HEAD(&priv->p_iter_res.l_deadlock_list);
	ocfs2_add_lockres_deadlock_tracking(&priv->p_iter_res, priv->p_deadlock_debug);

	return 0;
}

/* File operations backing the "deadlock_state" debugfs file. */
static const struct file_operations ocfs2_deadlock_debug_fops = {
	.open =        ocfs2_deadlock_debug_open,
	.release =     ocfs2_deadlock_debug_release,
	.read =        seq_read,
	.llseek =      seq_lseek,
};

static int ocfs2_lock_init_debug(struct ocfs2_super *osb)
{
	int ret = 0;
	struct ocfs2_lock_debug *lock_debug = osb->osb_lock_debug;
	struct ocfs2_deadlock_debug *deadlock_debug = osb->osb_deadlock_debug;

	lock_debug->d_locking_state = debugfs_create_file("locking_state",
							 S_IFREG|S_IRUSR,
							 osb->osb_debug_root,
							 osb,
							 &ocfs2_lock_debug_fops);
	if (!lock_debug->d_locking_state) {
		ret = -EINVAL;
		mlog(ML_ERROR,
		     "%s: Unable to create locking state debugfs file.\n",
		     osb->uuid_str);
		goto out;
	}

	deadlock_debug->d_deadlock_state =
			debugfs_create_file("deadlock_state",
					0400,
					osb->osb_debug_root,
					osb,
					&ocfs2_deadlock_debug_fops);
	if (!deadlock_debug->d_deadlock_state) {
		ret = -EINVAL;
		mlog(ML_ERROR,
		     "%s: Unable to create deadlock state debugfs file.\n",
		     osb->uuid_str);
		return ret;
	}

	ocfs2_get_lock_debug(lock_debug);
	ocfs2_get_deadlock_debug(deadlock_debug);

out:
	return ret;
}

/*
 * Remove the debugfs files and drop the references taken by
 * ocfs2_lock_init_debug().  Either debug pointer may be NULL
 * (debugfs_remove() and the put helpers tolerate it).
 */
static void ocfs2_lock_shutdown_debug(struct ocfs2_super *osb)
{
	struct ocfs2_lock_debug *lock_debug = osb->osb_lock_debug;
	struct ocfs2_deadlock_debug *deadlock_debug = osb->osb_deadlock_debug;

	if (lock_debug) {
		debugfs_remove(lock_debug->d_locking_state);
		ocfs2_put_lock_debug(lock_debug);
	}

	if (deadlock_debug) {
		debugfs_remove(deadlock_debug->d_deadlock_state);
		ocfs2_put_deadlock_debug(deadlock_debug);
	}
}

/*
 * Bring up cluster locking for @osb.
 *
 * Local (non-clustered) and RO-recovery mounts skip the debugfs setup
 * and the stack's lock_init() entirely; the osb-global lockres' are
 * initialized in both cases.  If the stack's lock_init() fails, the
 * debugfs state created above is torn down at 'bail'.  Returns 0 or a
 * negative error.
 */
int ocfs2_lock_init(struct ocfs2_super *osb)
{
	int status = 0;

	/* neither mode talks to a cluster stack; node 0 is the local id */
	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb)) {
		osb->node_num = 0;
		goto local;
	}

	status = ocfs2_lock_init_debug(osb);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	status = osb->active_lockstack->l_ops->lock_init(osb);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

local:
	ocfs2_super_block_lock_res_init(&osb->osb_super_block_lockres, osb);
	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);

bail:
	/* only reached with status < 0 via the lock_init() failure above */
	if (status < 0)
		ocfs2_lock_shutdown_debug(osb);

	return status;
}

/*
 * Tear down cluster locking for @osb: drop and free the osb-global
 * lockres', shut down the active lock stack and remove the debugfs
 * state.  Inverse of ocfs2_lock_init().
 */
void ocfs2_lock_exit(struct ocfs2_super *osb)
{
	ocfs2_drop_osb_locks(osb);

	ocfs2_lock_res_free(osb, &osb->osb_super_block_lockres);
	ocfs2_lock_res_free(osb, &osb->osb_super_lockres);
	ocfs2_lock_res_free(osb, &osb->osb_rename_lockres);
	ocfs2_lock_res_free(osb, &osb->osb_nfs_sync_lockres);
	ocfs2_lock_res_free(osb, &osb->osb_orphan_scan.os_lockres);

	osb->active_lockstack->l_ops->lock_exit(osb);
	osb->cconn = NULL;

	ocfs2_lock_shutdown_debug(osb);
}

/* Dispatch lockres teardown to the active lock stack's drop_lock op. */
static int ocfs2_drop_lock(struct ocfs2_super *osb,
			   struct ocfs2_lock_res *lockres)
{
	return osb->active_lockstack->l_ops->drop_lock(osb, lockres);
}

/* Dispatch "lockres is about to be freed" to the active lock stack. */
void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{
	osb->active_lockstack->l_ops->mark_lockres_freeing(osb, lockres);
}

/*
 * Mark @lockres as going away and drop it.  A failed drop is logged but
 * otherwise ignored -- callers are tearing the lockres down regardless.
 */
void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
			       struct ocfs2_lock_res *lockres)
{
	int status;

	ocfs2_mark_lockres_freeing(osb, lockres);

	status = ocfs2_drop_lock(osb, lockres);
	if (status)
		mlog_errno(status);
}

/* Drop every osb-global lockres; called once on unmount from
 * ocfs2_lock_exit(). */
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{
	ocfs2_simple_drop_lockres(osb, &osb->osb_super_block_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
}

/*
 * Drop the open, inode and rw lockres' of @inode.  All three drops are
 * attempted even if an earlier one fails; the first error encountered
 * is returned.
 */
int ocfs2_drop_inode_locks(struct inode *inode)
{
	int status, err;

	/* No need to call ocfs2_mark_lockres_freeing here -
	 * ocfs2_clear_inode has done it for us. */
	mlog(0, "inode %lu, ip_flags %x\n",
			inode->i_ino, OCFS2_I(inode)->ip_flags);

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_open_lockres);
	if (err < 0)
		mlog_errno(err);

	/* seed status with the first result (0 on success) */
	status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_inode_lockres);
	if (err < 0)
		mlog_errno(err);
	if (err < 0 && !status)
		status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_rw_lockres);
	if (err < 0)
		mlog_errno(err);
	if (err < 0 && !status)
		status = err;

	return status;
}

/* Dispatch quota-info unlock to the active lock stack.  @ex: was the
 * lock taken exclusive? */
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);

	osb->active_lockstack->l_ops->qinfo_unlock(oinfo, ex);
}

/* Dispatch quota-info lock to the active lock stack.  @ex: take the
 * lock exclusive.  Returns the stack op's status. */
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);

	return osb->active_lockstack->l_ops->qinfo_lock(oinfo, ex);
}

/* Dispatch refcount-tree lock to the active lock stack.  @ex: take the
 * lock exclusive.  Returns the stack op's status. */
int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
	struct ocfs2_super *osb = OCFS2_SB(ref_tree->rf_sb);

	return osb->active_lockstack->l_ops->refcount_lock(ref_tree, ex);
}

/* Dispatch refcount-tree unlock to the active lock stack.  @ex: was the
 * lock taken exclusive? */
void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
	struct ocfs2_super *osb = OCFS2_SB(ref_tree->rf_sb);

	osb->active_lockstack->l_ops->refcount_unlock(ref_tree, ex);
}

/*
 * Tell the active lock stack that @inode's three lockres' (inode, open,
 * rw) have been freed.  The hook is optional; stacks that don't track
 * freed lockres' simply skip it.
 */
void ocfs2_mark_inode_res_freed(struct ocfs2_super *osb,
				struct inode *inode)
{
	struct ocfs2_lock_res *resv[] = {
		&OCFS2_I(inode)->ip_inode_lockres,
		&OCFS2_I(inode)->ip_open_lockres,
		&OCFS2_I(inode)->ip_rw_lockres,
	};
	int i;

	if (!osb->active_lockstack->l_ops->mark_lockres_freed)
		return;

	for (i = 0; i < ARRAY_SIZE(resv); i++)
		osb->active_lockstack->l_ops->mark_lockres_freed(resv[i]);
}

/* Clear @node_num from the stack's low-level recovery map, when the
 * active lock stack implements the (optional) hook. */
void ocfs2_clear_lowlevel_recovery_map(struct ocfs2_super *osb,
		unsigned int node_num)
{
	if (osb->active_lockstack->l_ops->clear_recovery_map)
		osb->active_lockstack->l_ops->clear_recovery_map(osb, node_num);
}

/* Set @node_num in the stack's low-level recovery map, when the active
 * lock stack implements the (optional) hook. */
void ocfs2_set_lowlevel_recovery_map(struct ocfs2_super *osb,
		unsigned int node_num)
{
	if (osb->active_lockstack->l_ops->set_recovery_map)
		osb->active_lockstack->l_ops->set_recovery_map(osb, node_num);
}

int ocfs2_lockstack_init(struct ocfs2_super *osb,
				enum ocfs2_lock_class class)
{
	BUG_ON(class != OCFS2_LOCK_DLM && class != OCFS2_LOCK_ADL);
	osb->active_lockstack = kzalloc(sizeof(struct ocfs2_lockglue_stack),
			GFP_KERNEL);
	if (!osb->active_lockstack)
		return -ENOMEM;

	if (class == OCFS2_LOCK_ADL) {
		osb->active_lockstack->stack_name = LOCK_CLASS_NAME_ADL;
		osb->active_lockstack->l_ops = &adl_lock_ops;
	} else {
#ifdef ENABLE_DLM
		osb->active_lockstack->stack_name = LOCK_CLASS_NAME_DLM;
		osb->active_lockstack->l_ops = &dlm_lock_ops;
#else
		mlog(ML_ERROR, "%s: device does not support disk lock\n", osb->uuid_str);
		return -EPERM;
#endif
	}
	return 0;
}

/*
 * Restart the hung-lock check timer for @inode's inode lockres.
 *
 * If a hung check is armed and its delayed work could still be
 * cancelled, the lockres is pulled off the deadlock tracking list and
 * the work is requeued for a fresh full timeout.  When cancel fails the
 * work is already running, so nothing is touched.  Runs under l_lock
 * with IRQs disabled.
 */
void ocfs2_reschedule_hung_check_work(struct ocfs2_super *osb,
		struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_hung_check &&
			cancel_delayed_work(&lockres->l_hung_check_work)) {
		ocfs2_remove_lockres_deadlock_tracking(lockres);
		mlog(0, "%s: inode %lu\n", osb->uuid_str, inode->i_ino);
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

/*
 * Log a one-line, human-readable summary of @lockres (holders, remote
 * host, pending requester, block/offset, type and level) to the kernel
 * log at ML_NOTICE.  Lockres' without an on-disk lock page are silently
 * skipped, as is the whole dump if a scratch page can't be allocated.
 *
 * All formatting uses scnprintf() rather than snprintf(): snprintf()
 * returns the would-be length, so on an overlong line 'out' could grow
 * past 'len' and 'len - out' would underflow into a huge size_t passed
 * as the buffer size.  scnprintf() returns the characters actually
 * written and keeps the accumulation bounded.
 */
void print_disk_lockres(struct ocfs2_super *osb,
		struct ocfs2_lock_res *lockres)
{
	struct ocfs2_adl_cluster_connection *conn = osb->cconn;
	struct adl_ctxt *adl = conn->cc_lockspace;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_adl_lock *lock;
	struct inode *inode = NULL;
	struct o2nm_node *node;
	char *buf;
	int index, out = 0, len = PAGE_SIZE - 1;
	u16 host = O2NM_INVALID_NODE_NUM;
	unsigned long flags;

	if (!adlres->l_lksb.page)
		return;

	lock = (struct ocfs2_adl_lock *)(page_address(adlres->l_lksb.page));

	buf = (char *)get_zeroed_page(GFP_NOFS);
	if (!buf)
		return;

	out += scnprintf(buf + out, len - out, "%s: lockres %s: ",
			osb->uuid_str, lockres->l_name);

	/* local holders win; otherwise report the first on-disk holder */
	if (lockres->l_ro_holders || lockres->l_ex_holders) {
		host = osb->node_num;
	} else {
		/* find a valid host id */
		for (index = 0; index < LOCK_MAX_HOLDERS; index++) {
			if (host_id(lock, index) != O2NM_INVALID_NODE_NUM) {
				host = host_id(lock, index);
				break;
			}
		}
	}

	if (host == O2NM_INVALID_NODE_NUM) {
		out += scnprintf(buf + out, len - out, "no host, ");
	}
	else {
		out += scnprintf(buf + out, len - out, "lock host %u, ", host);
		/* NOTE(review): o2nm_get_node_by_num() usually takes a node
		 * reference; confirm whether an o2nm_node_put() is needed. */
		node = o2nm_get_node_by_num(host);
		if (node)
			out += scnprintf(buf + out, len - out, "IP %pI4, ",
					&(node->nd_ipv4_address));
	}

	if (!adl_lock_pending_exist(adl, lock)) {
		out += scnprintf(buf + out, len - out, "no pending host, ");
	}
	else {
		out += scnprintf(buf + out, len - out, "pending host %u, ",
				pending_host_id(lock));
		node = o2nm_get_node_by_num(pending_host_id(lock));
		if (node)
			out += scnprintf(buf + out, len - out, "IP %pI4, ",
					&(node->nd_ipv4_address));
	}

	/* holder counts/pids and level are read under l_lock for a
	 * consistent snapshot */
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_ro_holders)
		out += scnprintf(buf + out, len - out, "ro holders %u, pid %d, ",
				lockres->l_ro_holders, lockres->l_ro_holder_pid);
	if (lockres->l_ex_holders)
		out += scnprintf(buf + out, len - out, "ex holders %u, pid %d, ",
				lockres->l_ex_holders, lockres->l_ex_holder_pid);

	if (ocfs2_is_inode_lock(lockres)) {
		inode = ocfs2_lock_res_inode(lockres);
		out += scnprintf(buf + out, len - out, "inode %lu, ", inode->i_ino);
	}

	out += scnprintf(buf + out, len - out, "lock blkno %llu, sector offset %u, lock type %s, ",
			adlres->blkno, adlres->sector_offset,
			ocfs2_lock_type_string(lockres->l_type));

	if (lockres->l_level == ADL_LOCK_EX)
		out += scnprintf(buf + out, len - out, "lock level EX");
	else if (lockres->l_level == ADL_LOCK_PR)
		out += scnprintf(buf + out, len - out, "lock level PR");
	else
		out += scnprintf(buf + out, len - out, "lock level NL");
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	/* annotate well-known user-lock sector offsets */
	switch (adlres->sector_offset) {
	case ADL_USER_INIT_LOCKFS_LOCK_OFFSET:
		out += scnprintf(buf + out, len - out, ", user lock: init lockfs");
		break;
	case ADL_USER_RESIZE_LOCK_OFFSET:
		out += scnprintf(buf + out, len - out, ", user lock: resize lock");
		break;
	case ADL_USER_MAP_TABLE_LOCK_OFFSET:
		out += scnprintf(buf + out, len - out, ", user lock: maptable lock");
		break;
	default:
		break;
	}

	mlog(ML_NOTICE, "%s\n", buf);

	free_page((unsigned long)buf);
}
