// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/quotaops.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/crc32.h>

#include "ocfs2.h"
#include "alloc.h"
#include "dcache.h"
#include "lockglue.h"
#include "adl/adlapi.h"
#include "adl/adllock.h"
#include "adlglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"
#include "refcounttree.h"
#include "buffer_head_io.h"
#include "iso.h"

#include "cluster/nodemanager.h"

#define MLOG_MASK_PREFIX ML_ADL_GLUE
#include "cluster/masklog.h"

static int ocfs2_adl_inode_lock_update(struct inode *inode,
				struct buffer_head **bh);
static void ocfs2_adl_inode_unlock(struct inode *inode, int ex);
static int ocfs2_adl_data_convert_worker(struct ocfs2_lock_res *lockres,
		int blocking);

/*
 * Note: to get osb from ocfs2_lock_res, should either set l_ops->get_osb,
 * or let l_priv an ocfs2_super pointer
 */
/* Per-inode read/write lock: osb reached via the inode backpointer. */
static struct ocfs2_lock_res_ops ocfs2_adl_inode_rw_lops = {
	.get_osb		= ocfs2_get_inode_osb,
	.flags		= 0,
};

/*
 * Per-inode metadata lock: requires a refresh after an NL->PR/EX upconvert
 * (see LOCK_TYPE_REQUIRES_REFRESH handling in ocfs2_adl_cluster_lock) and
 * flushes cached data on downconvert via the worker.
 */
static struct ocfs2_lock_res_ops ocfs2_adl_inode_inode_lops = {
	.get_osb		= ocfs2_get_inode_osb,
	.downconvert_worker	= ocfs2_adl_data_convert_worker,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

/* Superblock-buffer lock: re-reads the super block after upconvert. */
static struct ocfs2_lock_res_ops ocfs2_adl_super_block_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

/* Global super lock: refreshes the slot map after upconvert. */
static struct ocfs2_lock_res_ops ocfs2_adl_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

/* Cluster-wide rename serialization lock. */
static struct ocfs2_lock_res_ops ocfs2_adl_rename_lops = {
	.flags		= 0,
};

/* NFS export synchronization lock. */
static struct ocfs2_lock_res_ops ocfs2_adl_nfs_sync_lops = {
	.flags		= 0,
};

/* Orphan-scan lock; sequence number travels in the lksb. */
static struct ocfs2_lock_res_ops ocfs2_adl_orphan_scan_lops = {
	.flags		= 0,
};

/* Per-inode open lock, taken PR on open and EX to probe for last closer. */
static struct ocfs2_lock_res_ops ocfs2_adl_inode_open_lops = {
	.get_osb		= ocfs2_get_inode_osb,
	.flags		= 0,
};

/* flock() lock; osb reached via the file backpointer. */
static struct ocfs2_lock_res_ops ocfs2_adl_flock_lops = {
	.get_osb		= ocfs2_get_file_osb,
	.flags		= 0,
};

/* Quota-info lock. */
static struct ocfs2_lock_res_ops ocfs2_adl_qinfo_lops = {
	.get_osb		= ocfs2_get_qinfo_osb,
	.flags		= 0,
};

/* Refcount-tree lock. */
static struct ocfs2_lock_res_ops ocfs2_adl_refcount_block_lops = {
	.flags		= 0,
};

/*
 * Map an embedded disk-lock resource back to its enclosing
 * ocfs2_lock_res (inverse of taking &lockres->diff.adlres).
 */
static inline struct ocfs2_lock_res *ocfs2_adl_get_lock_res(
				struct ocfs2_disk_lock_res *adlres)
{
	return container_of(adlres, struct ocfs2_lock_res, diff.adlres);
}

/*
 * Flag the on-disk lock block as freed so that later unlock paths
 * (e.g. ocfs2_adl_cluster_unlock) skip the disk downconvert.
 */
static void ocfs2_adl_mark_lockres_freed(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	unsigned long irq_flags;

	spin_lock_irqsave(&lockres->l_lock, irq_flags);
	adlres->l_lockblock_freed = 1;
	spin_unlock_irqrestore(&lockres->l_lock, irq_flags);
}

/*
 * Drop one holder reference at @level under the lockres spinlock and
 * wake anyone waiting on the holder counts.  No disk downconvert here.
 */
static void ocfs2_adl_dec_holders(struct ocfs2_lock_res *lockres,
		int level)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&lockres->l_lock, irq_flags);
	ocfs2_dec_holders(lockres, level);
	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, irq_flags);
}

/* Return 1 when the local journal exists and has been aborted, else 0. */
static int ocfs2_journal_aborted(struct ocfs2_super *osb)
{
	return osb->journal && osb->journal->j_journal &&
			is_journal_aborted(osb->journal->j_journal);
}

/*
 * Release one holder at @level and, when the remaining holders permit,
 * downconvert the on-disk lock: EX with readers still present becomes PR,
 * no holders at all becomes NL.  Returns 0 on success or a negative errno
 * from the adl layer.
 */
static int ocfs2_adl_cluster_unlock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres,
				int level)
{
	int new_level;
	int ret = 0;
	unsigned long flags;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_adl_cluster_connection *conn;

	mlog(0, "%s: blkno %llu, offset %d, level %d\n", osb->uuid_str,
			adlres->blkno, adlres->sector_offset, level);

	spin_lock_irqsave(&lockres->l_lock, flags);

	/* Wait out any in-flight lock/unlock before touching the lock state. */
	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
		mlog(0, "%s: blkno %llu, offset %d has BUSY pending\n",
				osb->uuid_str, adlres->blkno, adlres->sector_offset);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_busy_lock(lockres);
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	ocfs2_dec_holders(lockres, level);

	/* Lock block already freed on disk: nothing left to downconvert. */
	if (adlres->l_lockblock_freed == 1) {
		mlog(0, "%s: blkno %llu, offset %d, level %d, lock block is freed.\n",
				osb->uuid_str, adlres->blkno, adlres->sector_offset, level);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto out;
	}

	/* Choose the downconvert target from the remaining holder counts. */
	if (level == ADL_LOCK_EX &&
			lockres->l_ro_holders && !lockres->l_ex_holders) {
		new_level = ADL_LOCK_PR;
	} else if (!lockres->l_ro_holders && !lockres->l_ex_holders) {
		new_level = ADL_LOCK_NL;
	} else {
		/* Other holders still need the current level; keep it. */
		mlog(0, "%s: blkno %llu, offset %d, level %d, no need to unlock. ro %d, ex %d\n",
				osb->uuid_str, adlres->blkno, adlres->sector_offset,
				level, lockres->l_ro_holders, lockres->l_ex_holders);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto out;
	}

	mlog(0, "%s: blkno %llu, offset %d set BUSY pending, new_level %d\n",
			osb->uuid_str, adlres->blkno, adlres->sector_offset, new_level);
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_or_flags(lockres, OCFS2_LOCK_UNLOCKING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	conn = osb->cconn;
	if (!ocfs2_journal_aborted(osb)) {
		ret = adl_unlock(conn->cc_lockspace, adlres->blkno,
				adlres->sector_offset, &adlres->l_lksb, new_level, 0);
		if (ret == -EROFS) {
			/* The on-disk lock is invalid. */
			mlog(ML_NOTICE, "%s: blkno %llu, offset %d, level %d\n", osb->uuid_str,
					adlres->blkno, adlres->sector_offset, level);
			if (ocfs2_is_inode_lock(lockres)) {
				struct inode *inode = ocfs2_lock_res_inode(lockres);

				mlog(ML_NOTICE, "lockres->l_type: %d\n", lockres->l_type);
				ocfs2_isolation(osb->sb, ocfs2_rip_build_t3(inode, &ret),
						"Invalid adl lock. blkno %llu, sector %d, then isolate [inode %lu]",
						adlres->blkno, adlres->sector_offset, inode->i_ino);
				/*
				 * NOTE(review): this jump bypasses the
				 * o2hb_handle_invalid() call below on the
				 * inode-isolation path — confirm intentional.
				 */
				goto out_mutex;
			} else {
				ocfs2_error(osb->sb, "Invalid adl lock block [%llu:%u]",
						adlres->blkno, adlres->sector_offset);
			}
		}
	} else {
		/* Aborted journal: refuse disk I/O and report EIO. */
		ret = -EIO;
		mlog_errno(ret);
	}

	if (ret && !ocfs2_test_invalid_fs(osb))
		o2hb_handle_invalid(osb->uuid_str);

out_mutex:
	/* Record the new level and clear the in-flight flags. */
	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres->l_level = new_level;
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_clear_flags(lockres, OCFS2_LOCK_UNLOCKING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

out:
	wake_up(&lockres->l_event);

	if (ret)
		mlog_errno(ret);
	return ret;
}

/*
 * Undo the BUSY marking after a failed adl call so that waiters queued
 * on the lockres can make progress again.
 */
static inline void ocfs2_recover_from_adl_error(
				struct ocfs2_lock_res *lockres)
{
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	unsigned long irq_flags;

	spin_lock_irqsave(&lockres->l_lock, irq_flags);
	mlog(0, "blkno %llu, offset %d clear BUSY pending\n",
			adlres->blkno, adlres->sector_offset);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	spin_unlock_irqrestore(&lockres->l_lock, irq_flags);

	wake_up(&lockres->l_event);
}

/*
 * Nonzero once another node's unlock callback has signalled that a retry
 * of adl_lock() may succeed (set in o2adl_unlock_cb, cleared on lock).
 */
static int adl_can_lock(struct ocfs2_disk_lock_res *adlres)
{
	return atomic_read(&adlres->l_can_lock);
}

/*
 * Acquire (or upconvert to) @level on @lockres against the on-disk adl
 * lockspace.  Handles: waiting for in-flight BUSY/UNLOCKING operations,
 * lazy lksb allocation on first attach, -EAGAIN retry with pending-list
 * bookkeeping and hung-lock logging, EROFS isolation of invalid lock
 * blocks, and the NONBLOCK fast-fail path.  Returns 0 with a holder
 * reference taken, or a negative errno.
 */
static int ocfs2_adl_cluster_lock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres,
				int level,
				int arg_flags)
{
	int ret, wait, locked, alloc_lksb;
	int lock_flags = arg_flags;
	int catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	unsigned long flags, flags_pending, start, next;
	struct ocfs2_mask_waiter mw;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_adl_cluster_connection *conn = osb->cconn;
	struct inode *inode;

	mlog(0, "%s: blkno %llu, offset %d, level %d\n",
			osb->uuid_str, adlres->blkno, adlres->sector_offset, level);

	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	ocfs2_init_mask_waiter(&mw);
	/* start: first attempt; next: last hung-check log emission. */
	start = next = jiffies;
again:
	locked = 0;
	alloc_lksb = 0;
	wait = 0;
	ret = 0;
	spin_lock_irqsave(&lockres->l_lock, flags);

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto unlock;
	}

	/* We only compare against the currently granted level
	 * here. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
			level > lockres->l_level) {
		mlog(0, "%s: blkno %llu, offset %d has BUSY pending, level %d, l_level %d\n",
				osb->uuid_str, adlres->blkno,
				adlres->sector_offset, level, lockres->l_level);
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	/* An unlock is in flight; wait for it before trying to relock. */
	if (lockres->l_flags & OCFS2_LOCK_UNLOCKING) {
		mlog(0, "%s: blkno %llu, offset %d has UNLOCKING pending, level %d, l_level %d\n",
				osb->uuid_str, adlres->blkno,
				adlres->sector_offset, level, lockres->l_level);
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_UNLOCKING, 0);
		wait = 1;
		goto unlock;
	}

	/* Already granted at a sufficient level: just take a holder ref. */
	if (level <= lockres->l_level)
		goto update_holders;

	if (level > lockres->l_level) {
		mlog(0, "%s: blkno %llu, offset %d, level %d-->%d\n",
				osb->uuid_str, adlres->blkno, adlres->sector_offset,
				lockres->l_level, level);

		/* First attach: allocate the lksb outside the spinlock. */
		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
			alloc_lksb = 1;

		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		lockres_or_flags(lockres, OCFS2_LOCK_DEBUG);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		if (alloc_lksb) {
			ret = adl_alloc_lock_lksb(conn->cc_lockspace, adlres->blkno,
					adlres->sector_offset, &adlres->l_lksb);
			if (ret) {
				ocfs2_recover_from_adl_error(lockres);
				goto out;
			}

			spin_lock_irqsave(&lockres->l_lock, flags);
			lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
			spin_unlock_irqrestore(&lockres->l_lock, flags);
		}

		/* Ask the adl layer to log once the hung threshold is hit. */
		if ((jiffies - next) >= msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)))
			lock_flags |= ADL_LKF_PRINT_LOG;

		ret = adl_lock(conn->cc_lockspace, adlres->blkno,
				adlres->sector_offset, &adlres->l_lksb, level, lock_flags);
		if (ret < 0) {
			atomic_set(&adlres->l_can_lock, 0);
			ocfs2_recover_from_adl_error(lockres);
			if (ret != -EAGAIN)
				mlog_errno(ret);
			if (ret == -EROFS) {
				/* Invalid on-disk lock block. */
				mlog(ML_NOTICE, "%s: blkno %llu, offset %d, level %d\n",
						osb->uuid_str, adlres->blkno,
						adlres->sector_offset, level);
				if (ocfs2_is_inode_lock(lockres)) {
					inode = ocfs2_lock_res_inode(lockres);
					mlog(ML_NOTICE, "lockres->l_type: %d\n", lockres->l_type);
					ocfs2_isolation(osb->sb, ocfs2_rip_build_t3(inode, &ret),
						"Invalid adl lock. blkno %llu, sector %d, then isolate [inode %lu]",
						adlres->blkno, adlres->sector_offset, inode->i_ino);
					spin_lock_irqsave(&osb->osb_pending_list_lock,
							flags_pending);
					list_del_init(&adlres->l_pending_list);
					spin_unlock_irqrestore(&osb->osb_pending_list_lock,
						flags_pending);
					goto out;
				} else {
					ocfs2_error(osb->sb,
						"Invalid adl lock block. blkno %llu, sector %d",
						adlres->blkno, adlres->sector_offset);
				}
			}

			if ((ret == -EIO || ret == -EROFS) && !ocfs2_test_invalid_fs(osb)) {
				o2hb_handle_invalid(osb->uuid_str);
			}

			/*
			 * Contended and the caller allows blocking: park on
			 * the pending list and retry when the holder's
			 * unlock callback (or the timeout) fires.
			 */
			if (ret == -EAGAIN &&
					!(lock_flags & (ADL_LKF_NOQUEUE | ADL_LKF_NONBLOCK))) {
				spin_lock_irqsave(&osb->osb_pending_list_lock, flags_pending);
				list_move_tail(&adlres->l_pending_list, &osb->osb_pending_list);
				spin_unlock_irqrestore(&osb->osb_pending_list_lock, flags_pending);

				if (lock_flags & ADL_LKF_PRINT_LOG) {
					next = jiffies;
					mlog(ML_ERROR,
							"%s: %s try getting %s lock for %us due to blocking IO of block device!\n",
							osb->uuid_str, lockres->l_name,
							((level == ADL_LOCK_EX) ? "ex" : "pr"),
							jiffies_to_msecs(next - start) / 1000);
					print_disk_lockres(osb, lockres);
					ocfs2_add_lockres_deadlock_tracking(lockres,
						osb->osb_deadlock_debug);
					lock_flags &= ~ADL_LKF_PRINT_LOG;
				}
				wait_event_timeout(adlres->l_wq, adl_can_lock(adlres),
						msecs_to_jiffies(DISK_RETRY_TIME_MS));
				goto again;
			}
			spin_lock_irqsave(&osb->osb_pending_list_lock, flags_pending);
			list_del_init(&adlres->l_pending_list);
			spin_unlock_irqrestore(&osb->osb_pending_list_lock, flags_pending);

			goto out;
		}

		/* Granted: record level, trigger refresh on NL upconvert. */
		spin_lock_irqsave(&lockres->l_lock, flags);
		atomic_set(&adlres->l_can_lock, 0);
		adlres->l_lock_time = jiffies;
		locked = 1;

		if (lockres->l_level == ADL_LOCK_NL &&
				lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
			lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

		lockres->l_level = level;
		mlog(0, "%s: blkno %llu, offset %d clear BUSY pending\n",
				osb->uuid_str, adlres->blkno, adlres->sector_offset);
		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
		wake_up(&lockres->l_event);
	}

update_holders:
	ocfs2_remove_lockres_deadlock_tracking(lockres);
	spin_lock_irqsave(&osb->osb_pending_list_lock, flags_pending);
	list_del_init(&adlres->l_pending_list);
	spin_unlock_irqrestore(&osb->osb_pending_list_lock, flags_pending);

	adlres->l_lru_time = jiffies;
	ocfs2_inc_holders(lockres, level);
	/* Promote to ML_NOTICE when the acquisition took suspiciously long. */
	mlog(((jiffies - start) >= msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)) ?
			ML_NOTICE : 0),
			"%s: inc holders blkno %llu, offset %d, level %d, ro %d, ex %d\n",
			osb->uuid_str, adlres->blkno,
			adlres->sector_offset, level, lockres->l_ro_holders,
			lockres->l_ex_holders);

unlock:
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	/* Newly-granted META locks join the locked list for query callbacks. */
	if (locked && lockres->l_type == OCFS2_LOCK_TYPE_META) {
		inode = ocfs2_lock_res_inode(lockres);
		if (!INODE_JOURNAL(inode)) {
			spin_lock(&osb->osb_locked_list_lock);
			list_move_tail(&adlres->l_locked_list,
					&osb->osb_locked_list);
			spin_unlock(&osb->osb_locked_list_lock);
		}
	}

out:
	/*
	 * NONBLOCK callers must not sleep: pull the waiter back off and
	 * fail with -EAGAIN, unless it already fired (then retry once).
	 */
	if (wait && (lock_flags & ADL_LKF_NONBLOCK) &&
			mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_UNLOCKING)) {
		wait = 0;
		spin_lock_irqsave(&lockres->l_lock, flags);
		if (__lockres_remove_mask_waiter(lockres, &mw)) {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			ret = -EAGAIN;
		} else {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			goto again;
		}
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0) {
			mlog(0, "%s: blkno %llu, offset %d stop waiting, again\n",
					osb->uuid_str, adlres->blkno, adlres->sector_offset);
			goto again;
		}
		mlog_errno(ret);
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_DEBUG);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

/*
 * Refresh the in-memory inode from disk after the inode lock was
 * (re-)granted: purge cached metadata, re-read the dinode, validate
 * generation/dtime, and propagate the refreshed fields.  *bh may be
 * passed in pre-read or is read here; caller owns the bh either way.
 * Returns 0, -ENOENT for a deleted inode, or a read/validation error.
 */
static int ocfs2_adl_inode_lock_update(struct inode *inode,
				struct buffer_head **bh)
{
	int status = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_dinode *fe;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* Local mounts never go stale; nothing to refresh. */
	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		goto bail;

	spin_lock(&oi->ip_lock);
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		mlog(0,
			"Orphaned inode %llu was deleted while we were waiting on a lock. ip_flags = 0x%x\n",
			(unsigned long long)oi->ip_blkno, oi->ip_flags);
		spin_unlock(&oi->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&oi->ip_lock);

	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;

	/* This will discard any caching information we might have had
	 * for the inode metadata. */
	ocfs2_metadata_cache_purge(INODE_CACHE(inode));

	ocfs2_extent_map_trunc(inode, 0);

	if (*bh == NULL) {
		mlog(0, "%s: read from disk, blkno %lu, lockno %llu\n",
				osb->uuid_str, inode->i_ino, adlres->blkno);
		status = ocfs2_read_inode_block(inode, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail_refresh;
		}
	}
	fe = (struct ocfs2_dinode *)(*bh)->b_data;

	/* This is a good chance to make sure we're not
	 * locking an invalid object.  ocfs2_read_inode_block()
	 * already checked that the inode block is sane.
	 *
	 * We bug on a stale inode here because we checked
	 * above whether it was wiped from disk. The wiping
	 * node provides a guarantee that we receive that
	 * message and can mark the inode before dropping any
	 * locks associated with it. */
	if (inode->i_generation != le32_to_cpu(fe->i_generation)) {
		ocfs2_isolation(inode->i_sb, ocfs2_rip_build_t3(inode, &status),
				"Invalid dinode %llu disk generation: %u inode->i_generation: %u",
				(unsigned long long)oi->ip_blkno,
				le32_to_cpu(fe->i_generation), inode->i_generation);
		goto bail_refresh;
	}

	if (le64_to_cpu(fe->i_dtime) ||
			!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
		ocfs2_isolation(inode->i_sb, ocfs2_rip_build_t3(inode, &status),
				"Stale dinode %llu dtime: %llu flags: 0x%x",
				(unsigned long long)oi->ip_blkno,
				(unsigned long long)le64_to_cpu(fe->i_dtime),
				le32_to_cpu(fe->i_flags));
		goto bail_refresh;
	}

	ocfs2_refresh_inode(inode, fe);

	status = ocfs2_inode_get_owner(inode, *bh);
	if (status) {
		mlog_errno(status);
		goto bail_refresh;
	}

	status = 0;
bail_refresh:
	/* Always complete the refresh so waiters are released. */
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	return status;
}

/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
/*
 * Take the inode meta lock at PR (ex=0) or EX (ex!=0), honoring
 * OCFS2_META_LOCK_* / OCFS2_LOCK_NONBLOCK in @arg_flags, then refresh
 * the inode from disk and optionally hand back the dinode bh via
 * @ret_bh (caller must brelse it).  On error, any lock taken here is
 * dropped and *ret_bh is cleared.  @subclass is unused here.
 */
static int ocfs2_adl_inode_lock_full_nested(struct inode *inode,
				struct buffer_head **ret_bh,
				int ex,
				int arg_flags,
				int subclass)
{
	int status = 0, level, acquired = 0;
	u32 adl_flags = 0;
	struct ocfs2_lock_res *lockres = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *local_bh = NULL;

	mlog(0, "inode %llu, take %s META lock\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			ex ? "EXMODE" : "PRMODE");

	if (ocfs2_test_invalid_fs(osb)) {
		status = -EIO;
		goto bail;
	}

	/* We'll allow faking a readonly metadata lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(osb)) {
		status = -EROFS;
		goto bail;
	}

	/* Local mounts skip cluster locking entirely. */
	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		goto local;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) {
		status = ocfs2_wait_for_recovery(osb);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	level = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
		adl_flags |= ADL_LKF_NOQUEUE;

	if (arg_flags & OCFS2_LOCK_NONBLOCK)
		adl_flags |= ADL_LKF_NONBLOCK;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		adl_flags |= ADL_LKF_NEED_RECOVERY;

	status = ocfs2_adl_cluster_lock(osb, lockres, level, adl_flags);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	/* Notify the error cleanup path to drop the cluster lock. */
	acquired = 1;

	/* We wait twice because a node may have died while we were in
	 * the lower dlm layers. The second time though, we've
	 * committed to owning this lock so we don't allow signals to
	 * abort the operation. */
	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) {
		status = ocfs2_wait_for_recovery(osb);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

local:
	/*
	 * We only see this flag if we're being called from
	 * ocfs2_read_locked_inode(). It means we're locking an inode
	 * which hasn't been populated yet, so clear the refresh flag
	 * and let the caller handle it.
	 */
	if (inode->i_state & I_NEW) {
		status = 0;
		if (lockres)
			ocfs2_complete_lock_res_refresh(lockres, 0);
		goto bail;
	}

	status = ocfs2_adl_inode_lock_update(inode, &local_bh);
	if (status < 0) {
		/* -ENOENT (inode deleted underneath us) is expected. */
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}

	if (ret_bh) {
		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (status < 0) {
		if (ret_bh && (*ret_bh)) {
			brelse(*ret_bh);
			*ret_bh = NULL;
		}
		if (acquired)
			ocfs2_adl_inode_unlock(inode, ex);
	}
	brelse(local_bh);
	return status;
}

/*
 * Checkpoint the inode's journaled metadata and then invoke the
 * downconvert worker to flush the inode's mapping to disk.
 */
static void ocfs2_adl_checkpoint_inode(struct inode *inode,
		int level)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	int rc;

	ocfs2_checkpoint_inode(inode);

	/* flush the inode->mapping to the disk */
	rc = lockres->l_ops->downconvert_worker(lockres, level);
	mlog(0, "status = %d\n", rc);
}

/*
 * Drop the inode meta lock at PR/EX.  Journal inodes are checkpointed
 * and fully unlocked on disk immediately; ordinary inodes only drop a
 * holder reference here and the disk downconvert is deferred (driven by
 * the locked-list / unlock-work machinery elsewhere in this file).
 */
static void ocfs2_adl_inode_unlock(struct inode *inode, int ex)
{
	int level = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int status;

	mlog(0, "inode %llu drop %s META lock\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			ex ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return;

	/* journal inode should unlock now */
	if (INODE_JOURNAL(inode)) {
		ocfs2_adl_checkpoint_inode(inode, level);
		status = ocfs2_adl_cluster_unlock(osb, lockres, level);
		if (status)
			mlog_errno(status);
	} else {
		ocfs2_adl_dec_holders(lockres, level);
	}
}

/*
 * ocfs2_open_lock always get PR mode lock.
 */
/*
 * Take the per-inode open lock at PR mode.  Local mounts and
 * readonly-recovery mounts skip cluster locking and return 0.
 */
static int ocfs2_adl_open_lock(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_lock_res *lockres;
	int ret;

	mlog(0, "inode %llu take PRMODE open lock\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_open_lockres;
	ret = ocfs2_adl_cluster_lock(osb, lockres, ADL_LOCK_PR, 0);
	if (ret < 0)
		mlog_errno(ret);

	return ret;
}

/*
 * Try to take the per-inode open lock without queueing: PR for a read
 * probe, EX for a write probe.  Local mounts return 0 immediately.
 */
static int ocfs2_adl_try_open_lock(struct inode *inode, int write)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_lock_res *lockres;
	int mode;
	int ret = 0;

	mlog(0, "inode %llu take %s open lock\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			write ? "EXMODE" : "PRMODE");

	if (!ocfs2_mount_local(osb) && !ocfs2_recover_ro(osb)) {
		lockres = &OCFS2_I(inode)->ip_open_lockres;
		mode = write ? ADL_LOCK_EX : ADL_LOCK_PR;

		/*
		 * The file system may already holding a PRMODE/EXMODE open lock.
		 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
		 * other nodes and the -EAGAIN will indicate to the caller that
		 * this inode is still in use.
		 */
		ret = ocfs2_adl_cluster_lock(osb, lockres, mode,
				ADL_LKF_NOQUEUE);
		if (ret < 0)
			mlog_errno(ret);
	}

	return ret;
}

/*
 * Drop the per-inode open lock: release a PR holder first if one is
 * held, then an EX holder if one is held.
 */
static void ocfs2_adl_open_unlock(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	int rc;

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return;

	if (lockres->l_ro_holders) {
		rc = ocfs2_adl_cluster_unlock(osb, lockres, ADL_LOCK_PR);
		if (rc < 0)
			mlog_errno(rc);
	}

	if (lockres->l_ex_holders) {
		rc = ocfs2_adl_cluster_unlock(osb, lockres, ADL_LOCK_EX);
		if (rc < 0)
			mlog_errno(rc);
	}
}

/* A freshly-created inode only needs its PR open lock taken. */
static int ocfs2_adl_create_new_inode_locks(struct inode *inode)
{
	return ocfs2_adl_open_lock(inode);
}

/* TODO: flock() support is not implemented for the adl backend yet. */
static int ocfs2_adl_file_lock(struct file *file, int ex, int trylock)
{
	return -EOPNOTSUPP;
}

/* TODO: no-op until ocfs2_adl_file_lock() is implemented. */
static void ocfs2_adl_file_unlock(struct file *file)
{
}

/*
 * Drop the global super lock and stop the hung-lock watchdog that
 * ocfs2_adl_super_lock() may have scheduled.
 */
static void ocfs2_adl_super_unlock(struct ocfs2_super *osb, int ex)
{
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
	int mode = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	int rc;

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return;

	rc = ocfs2_adl_cluster_unlock(osb, lockres, mode);
	if (rc)
		mlog_errno(rc);

	if (lockres->l_hung_check) {
		cancel_delayed_work_sync(&lockres->l_hung_check_work);
		ocfs2_remove_lockres_deadlock_tracking(lockres);
	}
}

/*
 * Take the global super lock at PR/EX, arm the hung-lock watchdog, and
 * refresh the slot map if this was an NL upconvert.  A failed refresh
 * drops the lock again before returning the error.
 */
static int ocfs2_adl_super_lock(struct ocfs2_super *osb, int ex)
{
	int status = 0;
	int level = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		goto bail;

	status = ocfs2_adl_cluster_lock(osb, lockres, level, 0);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	if (lockres->l_hung_check)
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));

	/* The super block lock path is really in the best position to
	 * know when resources covered by the lock need to be
	 * refreshed, so we do it here. Of course, making sense of
	 * everything is up to the caller :) */
	status = ocfs2_should_refresh_lock_res(lockres);
	if (status) {
		status = ocfs2_refresh_slot_info(osb);

		ocfs2_complete_lock_res_refresh(lockres, status);

		if (status < 0) {
			ocfs2_adl_super_unlock(osb, ex);
			mlog_errno(status);
		}
	}

bail:
	return status;
}

/*
 * Drop the superblock-buffer lock and stop the hung-lock watchdog that
 * ocfs2_adl_super_block_lock() may have scheduled.
 */
static void ocfs2_adl_super_block_unlock(struct ocfs2_super *osb, int ex)
{
	struct ocfs2_lock_res *lockres = &osb->osb_super_block_lockres;
	int mode = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	int rc;

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return;

	rc = ocfs2_adl_cluster_unlock(osb, lockres, mode);
	if (rc)
		mlog_errno(rc);

	if (lockres->l_hung_check) {
		cancel_delayed_work_sync(&lockres->l_hung_check_work);
		ocfs2_remove_lockres_deadlock_tracking(lockres);
	}
}

/*
 * Take the superblock-buffer lock at PR/EX, arm the hung-lock watchdog,
 * and re-read the super block from disk if this was an NL upconvert.
 * A failed refresh drops the lock again before returning the error.
 */
static int ocfs2_adl_super_block_lock(struct ocfs2_super *osb, int ex)
{
	int status = 0;
	int level = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_block_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		goto bail;

	status = ocfs2_adl_cluster_lock(osb, lockres, level, 0);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	if (lockres->l_hung_check)
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));

	/* The super block lock path is really in the best position to
	 * know when resources covered by the lock need to be
	 * refreshed, so we do it here. Of course, making sense of
	 * everything is up to the caller :) */
	status = ocfs2_should_refresh_lock_res(lockres);
	if (status) {
		status = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				&osb->osb_super_bh);

		ocfs2_complete_lock_res_refresh(lockres, status);

		if (status < 0) {
			ocfs2_adl_super_block_unlock(osb, ex);
			mlog_errno(status);
		}
	}

bail:
	return status;
}

/*
 * Take the cluster-wide rename lock (always EX) and arm the hung-lock
 * watchdog.  Local mounts return 0 without touching the cluster.
 */
static int ocfs2_adl_rename_lock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
	int ret;

	mlog(0, "take RENAME lock\n");

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;
	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return 0;

	ret = ocfs2_adl_cluster_lock(osb, lockres, ADL_LOCK_EX, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	if (lockres->l_hung_check)
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));

	return ret;
}

/* Drop the cluster-wide rename lock and stop its hung-lock watchdog. */
static void ocfs2_adl_rename_unlock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
	int rc;

	mlog(0, "drop RENAME lock\n");

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return;

	rc = ocfs2_adl_cluster_unlock(osb, lockres, ADL_LOCK_EX);
	if (rc < 0)
		mlog_errno(rc);

	if (lockres->l_hung_check) {
		cancel_delayed_work_sync(&lockres->l_hung_check_work);
		ocfs2_remove_lockres_deadlock_tracking(lockres);
	}
}

/*
 * Take the NFS-sync lock at PR/EX and arm the hung-lock watchdog.
 * Local mounts return 0 without touching the cluster.
 */
static int ocfs2_adl_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{
	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
	int mode = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	int ret;

	mlog(0, "take NFS %s lock\n", ex ? "EX" : "PR");

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;
	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return 0;

	ret = ocfs2_adl_cluster_lock(osb, lockres, mode, 0);
	if (ret < 0) {
		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", ret);
		return ret;
	}

	if (lockres->l_hung_check)
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));

	return ret;
}

/* Drop the NFS-sync lock and stop its hung-lock watchdog. */
static void ocfs2_adl_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{
	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
	int mode = ex ? ADL_LOCK_EX : ADL_LOCK_PR;
	int rc;

	mlog(0, "drop NFS %s lock, irq_disabled %d\n", ex ? "EX" : "PR",
			irqs_disabled());

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return;

	rc = ocfs2_adl_cluster_unlock(osb, lockres, mode);
	if (rc < 0)
		mlog_errno(rc);

	if (lockres->l_hung_check) {
		cancel_delayed_work_sync(&lockres->l_hung_check_work);
		ocfs2_remove_lockres_deadlock_tracking(lockres);
	}
}

/*
 * Take the orphan-scan lock at EX and report the cluster-wide scan
 * sequence number carried in the lksb via @seqno.  Arms the hung-lock
 * watchdog on success.
 */
static int ocfs2_adl_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
	int status = 0;
	struct ocfs2_lock_res *lockres = &osb->osb_orphan_scan.os_lockres;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;

	mlog(0, "take ORPHAN SCAN lock\n");

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return 0;

	status = ocfs2_adl_cluster_lock(osb, lockres, ADL_LOCK_EX, 0);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	/* Sequence number travels with the lock value block (lksb). */
	*seqno = adl_lock_seqno_get(&adlres->l_lksb);
	if (lockres->l_hung_check)
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));

	return status;
}

/*
 * Publish the updated scan sequence number into the lksb and drop the
 * orphan-scan lock.  NOTE(review): the seqno is written even on local /
 * hard-readonly mounts where no unlock follows — presumably harmless;
 * confirm against the lksb lifecycle.
 */
static void ocfs2_adl_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{
	int status = 0;
	struct ocfs2_lock_res *lockres = &osb->osb_orphan_scan.os_lockres;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;

	mlog(0, "drop ORPHAN SCAN lock\n");
	adl_lock_seqno_set(&adlres->l_lksb, seqno);

	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb) &&
			!ocfs2_recover_ro(osb)) {
		status = ocfs2_adl_cluster_unlock(osb, lockres, ADL_LOCK_EX);
		if (status < 0)
			mlog_errno(status);
		if (lockres->l_hung_check) {
			cancel_delayed_work_sync(&lockres->l_hung_check_work);
			ocfs2_remove_lockres_deadlock_tracking(lockres);
		}
	}
}

/* TODO: quota-info locking not implemented for the adl backend yet. */
static int ocfs2_adl_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	return 0;
}

/* TODO: no-op until ocfs2_adl_qinfo_lock() is implemented. */
static void ocfs2_adl_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
}

/* TODO: refcount-tree locking not implemented for the adl backend yet. */
static int ocfs2_adl_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
	return 0;
}

/* TODO: no-op until ocfs2_adl_refcount_lock() is implemented. */
static void ocfs2_adl_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
}

/*
 * Look up this node's o2nm node number.  Returns 0 and stores it in
 * *node, -ENOENT when unconfigured, or -EOVERFLOW when out of range.
 */
static int ocfs2_adl_this_node(unsigned int *node)
{
	int num = o2nm_this_node();

	if (num == O2NM_INVALID_NODE_NUM)
		return -ENOENT;
	if (num >= O2NM_MAX_NODES)
		return -EOVERFLOW;

	*node = num;
	return 0;
}

/*
 * Cluster-stack callback: a node was evicted from our group.  Forward
 * to the connection's registered recovery handler.
 */
static void o2adl_eviction_cb(int node_num, void *data)
{
	struct ocfs2_adl_cluster_connection *conn = data;

	mlog(ML_NOTICE, "o2adl has evicted node %d from group %.*s\n",
	     node_num, conn->cc_namelen, conn->cc_name);

	conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
}

/*
 * Callback: another node released the lock at (blkno, sector).  If a
 * local waiter is parked on the pending list for that lock, mark it
 * retryable and wake it (see the -EAGAIN loop in ocfs2_adl_cluster_lock).
 */
static void o2adl_unlock_cb(u64 blkno, int sector, void *data)
{
	struct ocfs2_super *osb = data;
	struct ocfs2_disk_lock_res *cur;
	unsigned long irq_flags;

	mlog(0, "%s: blkno %llu, sector %d other nodes had unlocked\n",
			osb->uuid_str, blkno, sector);

	spin_lock_irqsave(&osb->osb_pending_list_lock, irq_flags);
	list_for_each_entry(cur, &osb->osb_pending_list, l_pending_list) {
		if (cur->blkno == blkno && cur->sector_offset == sector) {
			mlog(0, "blkno %llu, sector %d, adlres %p\n", blkno, sector, cur);
			atomic_set(&cur->l_can_lock, 1);
			wake_up(&cur->l_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&osb->osb_pending_list_lock, irq_flags);
}

/*
 * Callback: another node wants the lock at (blkno, sector) that we hold.
 * Schedule the deferred-unlock work for the matching locked-list entry,
 * delayed until the holder's minimum hold time (osb_expire_min_ms) has
 * elapsed.  Locks already marked FREEING are left alone.
 */
static void o2adl_query_lock_cb(u64 blkno, int sector, void *data)
{
	struct ocfs2_super *osb = data;
	struct ocfs2_lock_res *res;
	struct ocfs2_disk_lock_res *adlres, *tmp;
	unsigned long expire_ms = osb->osb_expire_min_ms;
	unsigned long delay;
	unsigned long flags;

	mlog(0, "%s: blkno %llu, sector %d, other nodes want this lock\n",
			osb->uuid_str, blkno, sector);

	spin_lock(&osb->osb_locked_list_lock);
	list_for_each_entry_safe(adlres, tmp, &osb->osb_locked_list,
			l_locked_list) {
		if (adlres->blkno != blkno || adlres->sector_offset != sector)
			continue;

		res = ocfs2_adl_get_lock_res(adlres);
		spin_lock_irqsave(&res->l_lock, flags);
		if (res->l_flags & OCFS2_LOCK_FREEING) {
			spin_unlock_irqrestore(&res->l_lock, flags);
			break;
		}
		adlres->l_other_pending = 1;
		/* Remaining hold time; wraps negative if already expired. */
		delay = adlres->l_lock_time + msecs_to_jiffies(expire_ms)
				- jiffies;
		spin_unlock_irqrestore(&res->l_lock, flags);

		/* Unsigned wrap-around means the hold time already elapsed. */
		if (delay > msecs_to_jiffies(expire_ms))
			delay = 0;

		list_del_init(&adlres->l_locked_list);
		queue_delayed_work(osb->unlock_wq, &adlres->l_unlock_work, delay);

		break;
	}
	spin_unlock(&osb->osb_locked_list_lock);
}

/*
 * Net callback: a remote node asked us to flush our truncate log.
 * blkno/sector are unused for this message type.
 */
static void o2adl_release_truncate_log_cb(u64 blkno, int sector, void *data)
{
	struct ocfs2_super *super = data;

	mlog(ML_NOTICE,
		"%s: receive msg from other node, ask to release truncate_log\n",
		super->uuid_str);

	/* Serialize truncate log flush requests. */
	mutex_lock(&super->tl_log_lock);
	ocfs2_schedule_truncate_log_flush(super, 0);
	mutex_unlock(&super->tl_log_lock);
}

/* Return non-zero when no cached locks remain on the locked list. */
static int ocfs2_locked_list_empty(struct ocfs2_super *osb)
{
	int is_empty;

	/* Snapshot the list state under the lock that guards it. */
	spin_lock(&osb->osb_locked_list_lock);
	is_empty = list_empty(&osb->osb_locked_list);
	spin_unlock(&osb->osb_locked_list_lock);

	return is_empty;
}

/*
 * Downconvert worker for inode meta locks: make locally cached state
 * safe to hand to another node before the lock level drops.
 *
 * Always returns 0; writeback errors are logged, not propagated.
 */
static int ocfs2_adl_data_convert_worker(struct ocfs2_lock_res *lockres,
		int blocking)
{
	struct inode *inode;
	struct address_space *mapping;
	struct ocfs2_inode_info *oi;

	inode = ocfs2_lock_res_inode(lockres);
	mapping = inode->i_mapping;

	/* Force journaled metadata for this inode out first. */
	ocfs2_checkpoint_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		/* Directories only need their lock generation bumped. */
		oi = OCFS2_I(inode);
		oi->ip_dir_lock_gen++;
		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
		return 0;
	}

	if (!S_ISREG(inode->i_mode))
		return 0;

	/*
	 * We need this before the filemap_fdatawrite() so that it can
	 * transfer the dirty bit from the PTE to the
	 * page. Unfortunately this means that even for EX->PR
	 * downconverts, we'll lose our mappings and have to build
	 * them up again.
	 */
	unmap_mapping_range(mapping, 0, 0, 0);

	if (filemap_fdatawrite(mapping)) {
		mlog(ML_ERROR, "%s: Could not sync inode %llu for downconvert!\n",
			ocfs2_get_lockres_osb(lockres)->uuid_str,
			(unsigned long long)OCFS2_I(inode)->ip_blkno);
	}
	sync_mapping_buffers(mapping);
	/* Drop the page cache so stale data can't be read later. */
	truncate_inode_pages(mapping, 0);

	return 0;
}

/*
 * Release the on-disk cluster lock backing @lockres, dropping it to NL.
 *
 * Waits out any in-flight transition (OCFS2_LOCK_BUSY) and drains all
 * local holders before issuing the disk unlock.  If the lock block was
 * already freed, only the in-memory level is reset.
 *
 * Returns 0 on success or a negative errno from adl_unlock().
 */
static int ocfs2_adl_drop_disk_lock(struct ocfs2_super *osb,
		struct ocfs2_lock_res *lockres)
{
	int ret = 0;
	unsigned long flags;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	struct ocfs2_adl_cluster_connection *conn = osb->cconn;

	mlog(0, "%s: drop lock, blkno %llu, offset %d, freed %d\n",
			osb->uuid_str, adlres->blkno,
			adlres->sector_offset, adlres->l_lockblock_freed);

	spin_lock_irqsave(&lockres->l_lock, flags);
	/* schedule hung check work before wait */
	if (lockres->l_hung_check)
		schedule_delayed_work(&lockres->l_hung_check_work,
				msecs_to_jiffies(ocfs2_lock_hung_check_ms(osb)));

	/* Wait for any pending lock transition to finish. */
	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
		mlog(0, "%s: blkno %llu, offset %d has BUSY pending\n",
			osb->uuid_str, adlres->blkno, adlres->sector_offset);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		ocfs2_wait_on_busy_lock(lockres);
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	if (lockres->l_level == ADL_LOCK_NL) {
		/* Already at NL: nothing to drop on disk. */
		mlog(0, "%s: blkno %llu, offset %d, this lock is already unlocked, new level %d\n",
				osb->uuid_str, adlres->blkno,
				adlres->sector_offset, lockres->l_level);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto out;
	}

	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_or_flags(lockres, OCFS2_LOCK_UNLOCKING);
	/* Drain all local holders before the disk unlock. */
	while (lockres->l_ro_holders || lockres->l_ex_holders) {
		mlog(0, "%s: blkno %llu, offset %d has holders, ro holder %d, ex holder %d.\n",
				osb->uuid_str, adlres->blkno,
				adlres->sector_offset, lockres->l_ro_holders,
				lockres->l_ex_holders);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		ocfs2_wait_on_holders(lockres);
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	if (adlres->l_lockblock_freed == 1) {
		/* Lock block gone on disk: skip the unlock I/O.
		 * NOTE: jumps to set_level with l_lock still held.
		 */
		mlog(0, "%s: blkno %llu, offset %d, lock block is freed.\n",
				osb->uuid_str, adlres->blkno, adlres->sector_offset);
		goto set_level;
	}

	spin_unlock_irqrestore(&lockres->l_lock, flags);

	/* Flush/invalidate cached state before the level drops. */
	if (lockres->l_ops->downconvert_worker)
		lockres->l_ops->downconvert_worker(lockres, ADL_LOCK_EX);

	if (!ocfs2_journal_aborted(osb)) {
		ret = adl_unlock(conn->cc_lockspace, adlres->blkno,
				adlres->sector_offset, &adlres->l_lksb, ADL_LOCK_NL, 0);
	} else {
		ret = -EIO;
		mlog_errno(ret);
	}

	if (ret == -EROFS) {
		struct inode *inode = ocfs2_lock_res_inode(lockres);

		mlog(ML_NOTICE, "%s: drop lock, blkno %llu, offset %d, freed %d\n",
				osb->uuid_str, adlres->blkno,
				adlres->sector_offset, adlres->l_lockblock_freed);
		/* Invalid lock: isolate the inode if this is an inode lock,
		 * otherwise fail the whole filesystem.
		 */
		if (ocfs2_is_inode_lock(lockres)) {
			ocfs2_isolation(osb->sb, ocfs2_rip_build_t3(inode, &ret),
					"Invalid adl lock, blkno %llu, sector %d, then isolate [inode %lu]",
					adlres->blkno, adlres->sector_offset, inode->i_ino);
		} else {
			ocfs2_error(osb->sb, "Invalid adl lock, blkno %llu, sector %d",
					adlres->blkno, adlres->sector_offset);
		}
	} else if (ret && !ocfs2_test_invalid_fs(osb)) {
		o2hb_handle_invalid(osb->uuid_str);
	}

	spin_lock_irqsave(&lockres->l_lock, flags);

set_level:
	/* Reached with l_lock held on both paths. */
	lockres->l_level = ADL_LOCK_NL;
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_clear_flags(lockres, OCFS2_LOCK_UNLOCKING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	wake_up(&lockres->l_event);

out:
	if (lockres->l_hung_check) {
		cancel_delayed_work_sync(&lockres->l_hung_check_work);
		ocfs2_remove_lockres_deadlock_tracking(lockres);
	}

	if (ret)
		mlog_errno(ret);

	return ret;
}

/*
 * Drop a lockres at teardown time.
 *
 * Only OCFS2_LOCK_TYPE_META locks are cached on the locked list and
 * drop a disk lock here; all other types just release their lksb.
 * No-op for uninitialized locks and for readonly/local mounts.
 */
static int ocfs2_adl_drop_lock(struct ocfs2_super *osb,
		struct ocfs2_lock_res *lockres)
{
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;
	int ret = 0;
	unsigned long flags;

	mlog(0, "%s: drop lock, blkno %llu, offset %d, freed %d\n",
			osb->uuid_str, adlres->blkno,
			adlres->sector_offset, adlres->l_lockblock_freed);

	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
		return 0;

	if (ocfs2_is_hard_readonly(osb) ||
			ocfs2_mount_local(osb) || ocfs2_recover_ro(osb))
		return 0;

	if (lockres->l_type != OCFS2_LOCK_TYPE_META)
		goto out;

	/* Unlink first so no new unlock work can be queued for it. */
	spin_lock(&osb->osb_locked_list_lock);
	list_del_init(&adlres->l_locked_list);
	spin_unlock(&osb->osb_locked_list_lock);

	/* wait for work to finish, otherwise the worker
	 * can be running after we release lockres
	 */
	cancel_delayed_work_sync(&adlres->l_unlock_work);

	ret = ocfs2_adl_drop_disk_lock(osb, lockres);

out:
	adl_free_lock_lksb(&adlres->l_lksb);

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

/*
 * Flag a lockres as going away so concurrent workers (unlock work,
 * query callback, unlocking thread) skip it.
 */
static void ocfs2_adl_mark_lockres_freeing(struct ocfs2_super *osb,
		struct ocfs2_lock_res *lockres)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lockres->l_lock, irqflags);
	lockres->l_flags |= OCFS2_LOCK_FREEING;
	spin_unlock_irqrestore(&lockres->l_lock, irqflags);
}

#define LOCKRES_LRU_TIMES 5000	/* ms: window in which a lock counts as recently used */
/*
 * Delayed work: decide whether a cached meta lock should really be
 * dropped.  If no other node is waiting (neither via the query
 * callback nor on disk) and the lock was used recently or is held
 * right now, keep it cached instead.
 */
static void ocfs2_unlock_lock(struct work_struct *work)
{
	struct ocfs2_disk_lock_res *adlres =
			container_of(work, struct ocfs2_disk_lock_res, l_unlock_work.work);
	struct ocfs2_lock_res *lockres =
			container_of(adlres, struct ocfs2_lock_res, diff.adlres);
	struct ocfs2_super *osb;
	struct ocfs2_adl_cluster_connection *conn;
	int ret, check_pending = 0;
	unsigned long flags;
	int has_holders = 0;

	osb = ocfs2_get_inode_osb(lockres);
	conn = osb->cconn;

	if (lockres->l_type != OCFS2_LOCK_TYPE_META) {
		mlog(ML_ERROR, "%s: %s, not meta lockres\n",
				osb->uuid_str, lockres->l_name);
		return;
	}

	/*
	 * Now we should check whether other nodes are pending on disk.
	 * First, the precondition is no other nodes query the lock via network.
	 * Second, check two contitions:
	 * 1. last use time is less than 5s from now
	 * 2. lock is used now.
	 */
	spin_lock_irqsave(&lockres->l_lock, flags);
	has_holders = !!(lockres->l_ex_holders || lockres->l_ro_holders);
	if ((!adlres->l_other_pending) &&
			((jiffies - adlres->l_lru_time < msecs_to_jiffies(LOCKRES_LRU_TIMES)) ||
			has_holders))
		check_pending = 1;
	adlres->l_other_pending = 0;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	if (check_pending &&
			!adl_other_nodes_pending(conn->cc_lockspace,
				adlres->blkno, adlres->sector_offset)) {
		/* Nobody waiting: keep the lock cached.  Lock order is
		 * osb_locked_list_lock then l_lock, matching the other users.
		 */
		spin_lock(&osb->osb_locked_list_lock);
		spin_lock_irqsave(&lockres->l_lock, flags);
		if (lockres->l_flags & OCFS2_LOCK_FREEING) {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			spin_unlock(&osb->osb_locked_list_lock);
			return;
		}
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		list_move_tail(&adlres->l_locked_list, &osb->osb_locked_list);
		spin_unlock(&osb->osb_locked_list_lock);
		return;
	}

	ret = ocfs2_adl_drop_disk_lock(osb, lockres);
	if (ret)
		mlog_errno(ret);
}

/* only for inode lock */
/*
 * Background thread that periodically expires cached inode meta locks.
 *
 * Every osb_expire_ms it wakes up; once unlocking_ms accumulates past
 * osb_expire_max_ms (or the heartbeat connection is abnormal) it
 * queues unlock work for every cached lock older than expire_ms.
 * Keeps looping after kthread_stop() until the locked list drains.
 */
static int ocfs2_adl_unlocking_thread(void *arg)
{
	struct ocfs2_super *osb = arg;
	unsigned long before, after;
	unsigned int elapsed_ms, unlocking_ms = 0;
	struct ocfs2_disk_lock_res *adlres, *tmp;
	struct ocfs2_lock_res *lockres;
	unsigned long expire_ms, expire_max_ms;
	unsigned long flags;

	while (!(kthread_should_stop() &&
			ocfs2_locked_list_empty(osb))) {
		/* Re-read tunables each pass; they can change at runtime. */
		spin_lock(&osb->osb_lock);
		expire_ms = osb->osb_expire_ms;
		expire_max_ms = osb->osb_expire_max_ms;
		spin_unlock(&osb->osb_lock);

		before = jiffies;

		if ((unlocking_ms >= expire_max_ms) || !o2hb_conn_normal(osb->uuid_str)) {
			unlocking_ms = 0;
			spin_lock(&osb->osb_locked_list_lock);
			list_for_each_entry_safe(adlres, tmp, &osb->osb_locked_list,
								l_locked_list) {
				/* Skip locks that are still within their hold window. */
				if (jiffies_to_msecs(before - adlres->l_lock_time) < expire_ms)
					continue;

				lockres = ocfs2_adl_get_lock_res(adlres);
				spin_lock_irqsave(&lockres->l_lock, flags);
				if (lockres->l_flags & OCFS2_LOCK_FREEING) {
					spin_unlock_irqrestore(&lockres->l_lock, flags);
					continue;
				}
				spin_unlock_irqrestore(&lockres->l_lock, flags);

				list_del_init(&adlres->l_locked_list);
				queue_delayed_work(osb->unlock_wq, &adlres->l_unlock_work, 0);
			}
			spin_unlock(&osb->osb_locked_list_lock);
		}
		after = jiffies;
		elapsed_ms = jiffies_to_msecs(after - before);
		/* Sleep away the remainder of the expire_ms period. */
		if (elapsed_ms < expire_ms)
			msleep_interruptible(expire_ms - elapsed_ms);
		unlocking_ms += expire_ms;
	}

	osb->unlock_task = NULL;
	return 0;
}

/*
 * Bring up ADL cluster locking for this mount: start the unlocking
 * thread, create the unlock workqueue, connect to the cluster domain
 * (named by the fs uuid) and record this node's number.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * created here is torn down again.
 */
int ocfs2_adl_init(struct ocfs2_super *osb)
{
	int status;
	char wq_name[O2NM_MAX_NAME_LEN];
	struct ocfs2_adl_cluster_connection *conn = NULL;

	mlog(ML_NOTICE, "ocfs2unlock thread running\n");
	osb->unlock_task = kthread_run(ocfs2_adl_unlocking_thread, osb, "ocfs2unlock-%s",
			osb->uuid_str);
	if (IS_ERR(osb->unlock_task)) {
		status = PTR_ERR(osb->unlock_task);
		osb->unlock_task = NULL;
		mlog_errno(status);
		goto bail;
	}

	snprintf(wq_name, O2NM_MAX_NAME_LEN, "unlock_wq-%s", osb->uuid_str);
	osb->unlock_wq = alloc_workqueue(wq_name, 0, 0);
	if (!osb->unlock_wq) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	/* init lock for o2adl_release_truncate_log_cb */
	mutex_init(&osb->tl_log_lock);

	/* for now, uuid == domain */
	status = ocfs2_adl_cluster_connect(osb->uuid_str, strlen(osb->uuid_str),
			ocfs2_do_node_down, osb, &conn, osb->sb->s_bdev);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_adl_this_node(&osb->node_num);
	if (status < 0) {
		mlog_errno(status);
		mlog(ML_ERROR, "%s: could not find this host's node number\n",
				osb->uuid_str);
		ocfs2_adl_cluster_disconnect(conn);
		goto bail;
	}

	osb->cconn = conn;
bail:
	/* Undo partial setup on any failure. */
	if (status < 0) {
		if (osb->unlock_task)
			kthread_stop(osb->unlock_task);
		if (osb->unlock_wq)
			destroy_workqueue(osb->unlock_wq);
	}

	return status;
}

/*
 * Tear down what ocfs2_adl_init() created.  Order matters: stop the
 * unlocking thread first so nothing new is queued, then disconnect
 * from the cluster, then destroy the workqueue.
 */
void ocfs2_adl_shutdown(struct ocfs2_super *osb)
{
	kthread_stop(osb->unlock_task);
	ocfs2_adl_cluster_disconnect(osb->cconn);

	destroy_workqueue(osb->unlock_wq);
}

/*
 * Connect to the ADL lock domain @domain_name and hand back a new
 * connection object via @conn.
 *
 * @recovery_handler/@recovery_data: invoked when a node is evicted
 * (see o2adl_eviction_cb).  When @recovery_data is NULL (user lockfs),
 * the unlock/query/truncate-log net callbacks are not registered.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * allocated state is freed.  Caller owns *conn on success and releases
 * it with ocfs2_adl_cluster_disconnect().
 */
int ocfs2_adl_cluster_connect(const char *domain_name,
				int domain_len,
				void (*recovery_handler)(int node_num, void *recovery_data),
				void *recovery_data,
				struct ocfs2_adl_cluster_connection **conn,
				struct block_device *bdev)
{
	int status = 0;
	struct adl_ctxt *adl;
	struct o2adl_private *priv;
	struct ocfs2_adl_cluster_connection *new_conn;
	u32 adl_key;

	new_conn = kzalloc(sizeof(struct ocfs2_adl_cluster_connection),
			GFP_KERNEL);
	if (!new_conn) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out;
	}

	memcpy(new_conn->cc_name, domain_name, domain_len);
	new_conn->cc_namelen = domain_len;
	new_conn->cc_recovery_handler = recovery_handler;
	new_conn->cc_recovery_data = recovery_data;

	/* user lockfs should not register unlock/query cb */
	if (recovery_data) {
		adl_setup_net_request_cb(&new_conn->cc_unlock_lock_cb, o2adl_unlock_cb,
				ADL_UNLOCK_LOCK_CB, recovery_data);
		adl_setup_net_request_cb(&new_conn->cc_query_lock_cb, o2adl_query_lock_cb,
				ADL_QUERY_LOCK_CB, recovery_data);
		adl_setup_net_request_cb(&new_conn->cc_release_truncate_log_cb,
				o2adl_release_truncate_log_cb,
				ADL_RELEASE_TRUNCATE_LOG_CB, recovery_data);
	}

	priv = kzalloc(sizeof(struct o2adl_private), GFP_KERNEL);
	if (!priv) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out;
	}

	adl_setup_eviction_cb(&priv->op_eviction_cb, o2adl_eviction_cb, new_conn);
	new_conn->cc_private = priv;

	/* The domain key is the crc32 of the domain name. */
	adl_key = crc32_le(0, domain_name, domain_len);
	adl = adl_register_domain(new_conn->cc_name, adl_key, bdev);
	if (IS_ERR(adl)) {
		status = PTR_ERR(adl);
		mlog_errno(status);
		goto out;
	}

	new_conn->cc_lockspace = adl;
	adl_register_eviction_cb(adl, &priv->op_eviction_cb);

	/* user lockfs should not register unlock/query cb */
	if (recovery_data) {
		adl_register_net_request_cb(adl, &new_conn->cc_unlock_lock_cb);
		adl_register_net_request_cb(adl, &new_conn->cc_query_lock_cb);
		adl_register_net_request_cb(adl, &new_conn->cc_release_truncate_log_cb);
	}

	*conn = new_conn;
out:
	/* kfree(NULL) is a no-op, so cc_private may be unset here. */
	if (status && new_conn) {
		kfree(new_conn->cc_private);
		kfree(new_conn);
	}

	return status;
}
EXPORT_SYMBOL_GPL(ocfs2_adl_cluster_connect);

/*
 * Undo ocfs2_adl_cluster_connect(): unregister all callbacks, drop the
 * lock domain and free the connection.  Callbacks must be unregistered
 * before their backing memory is freed.
 */
void ocfs2_adl_cluster_disconnect(struct ocfs2_adl_cluster_connection *conn)
{
	struct adl_ctxt *adl = conn->cc_lockspace;
	struct o2adl_private *priv = conn->cc_private;

	adl_unregister_eviction_cb(&priv->op_eviction_cb);
	conn->cc_private = NULL;
	kfree(priv);

	/* Net callbacks were only registered when recovery_data was set. */
	if (conn->cc_recovery_data) {
		adl_unregister_net_request_cb(&conn->cc_unlock_lock_cb);
		adl_unregister_net_request_cb(&conn->cc_query_lock_cb);
		adl_unregister_net_request_cb(&conn->cc_release_truncate_log_cb);
	}

	adl_unregister_domain(adl);
	conn->cc_lockspace = NULL;
	kfree(conn);
}
EXPORT_SYMBOL_GPL(ocfs2_adl_cluster_disconnect);

static int ocfs2_adl_lock_seq_show(struct seq_file *m, void *v);
static int ocfs2_adl_deadlock_seq_show(struct seq_file *m, void *v);
/*
 * Shared initialization for every ADL lockres: set generic lockres
 * fields, reset the disk-lock state to "unlocked/unused", and hook the
 * lockres into the debug tracking machinery.  Callers must have set
 * adlres->sector_offset before or after this as appropriate.
 */
static void ocfs2_adl_lock_res_init_common(struct ocfs2_super *osb,
				struct ocfs2_lock_res *res,
				enum ocfs2_lock_type type,
				struct ocfs2_lock_res_ops *ops,
				u64 blkno,
				void *priv)
{
	struct ocfs2_disk_lock_res *adlres = &res->diff.adlres;

	/* Generic lockres fields: start at NL, marked initialized. */
	res->l_class = OCFS2_LOCK_ADL;
	res->l_type = type;
	res->l_ops = ops;
	res->l_priv = priv;
	res->l_level = ADL_LOCK_NL;
	res->l_flags = OCFS2_LOCK_INITIALIZED;

	/* Disk-lock specific state. */
	adlres->blkno = blkno;
	adlres->l_lock_time = 0;
	adlres->l_lru_time = 0;
	adlres->l_lockblock_freed = 0;
	adlres->l_other_pending = 0;
	atomic_set(&adlres->l_can_lock, 0);
	memset(&adlres->l_lksb, 0, sizeof(adlres->l_lksb));

	INIT_LIST_HEAD(&adlres->l_locked_list);
	INIT_LIST_HEAD(&adlres->l_pending_list);
	INIT_DELAYED_WORK(&adlres->l_unlock_work, ocfs2_unlock_lock);
	init_waitqueue_head(&adlres->l_wq);

	/* Hook up the debugfs show routines and register for tracking. */
	res->lock_seq_show = ocfs2_adl_lock_seq_show;
	res->deadlock_seq_show = ocfs2_adl_deadlock_seq_show;
	ocfs2_add_lockres_tracking(res, osb->osb_lock_debug);
}

/*
 * Initialize an inode-scoped ADL lockres of the given @type.
 *
 * Each inode lock type (rw/meta/open) gets its own ops table and a
 * fixed sector offset within the inode's lock block.  @generation is
 * currently unused here.
 */
void ocfs2_adl_inode_lock_res_init(struct ocfs2_lock_res *res,
				enum ocfs2_lock_type type,
				unsigned int generation,
				struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;
	struct ocfs2_disk_lock_res *adlres = &(res->diff.adlres);

	/* Inode locks participate in hung-lock detection. */
	res->l_hung_check = 1;
	switch (type) {
	case OCFS2_LOCK_TYPE_RW:
		ops = &ocfs2_adl_inode_rw_lops;
		adlres->sector_offset = ADL_RW_LOCK_OFFSET;
		break;
	case OCFS2_LOCK_TYPE_META:
		ops = &ocfs2_adl_inode_inode_lops;
		adlres->sector_offset = ADL_INODE_LOCK_OFFSET;
		break;
	case OCFS2_LOCK_TYPE_OPEN:
		ops = &ocfs2_adl_inode_open_lops;
		adlres->sector_offset = ADL_OPEN_LOCK_OFFSET;
		break;
	default:
		mlog_bug_on_msg(1, "type: %d\n", type);
		ops = NULL; /* thanks, gcc */
		break;
	}	/* no stray ';' here: a null statement after a switch trips checkpatch */

	ocfs2_adl_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type,
			ops, OCFS2_I(inode)->ip_inode_lock_blkno, inode);
	mlog(0, "name %s, blkno %llu, type %d\n", res->l_name,
			OCFS2_I(inode)->ip_inode_lock_blkno, type);
}

/*
 * Sanity check before a lockres is freed: it must already be off both
 * the cached-lock ("locked") list and the pending list, otherwise a
 * worker could touch freed memory.  Fix: "pendng" typo in the second
 * BUG message corrected to "pending".
 */
void ocfs2_adl_lock_res_free(struct ocfs2_lock_res *res)
{
	struct ocfs2_disk_lock_res *adlres = &(res->diff.adlres);

	mlog_bug_on_msg(!list_empty(&adlres->l_locked_list),
			"Lockres %s is on the locked list, blkno %llu, sector %d\n",
			res->l_name, adlres->blkno, adlres->sector_offset);
	mlog_bug_on_msg(!list_empty(&adlres->l_pending_list),
			"Lockres %s is on the pending list, blkno %llu, sector %d\n",
			res->l_name, adlres->blkno, adlres->sector_offset);
}

/* Initialize the superblock lockres (fixed sector in osb's lock block). */
void ocfs2_adl_super_block_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_super *osb)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_SUPER_BLOCK_LOCK_OFFSET;
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER_BLOCK,
			&ocfs2_adl_super_block_lops, osb->disk_lock_blkno, osb);
}

/* Initialize the super lockres (fixed sector in osb's lock block). */
void ocfs2_adl_super_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_super *osb)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_SUPER_LOCK_OFFSET;
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
			&ocfs2_adl_super_lops, osb->disk_lock_blkno, osb);
}

/* Initialize the global rename lockres. */
static void ocfs2_adl_rename_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_super *osb)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_RENAME_LOCK_OFFSET;
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
			&ocfs2_adl_rename_lops, osb->disk_lock_blkno, osb);
}

/* Initialize the NFS sync lockres. */
static void ocfs2_adl_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_super *osb)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_NFS_SYNC_LOCK_OFFSET;
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
			&ocfs2_adl_nfs_sync_lops, osb->disk_lock_blkno, osb);
}

/* Initialize the orphan scan lockres. */
static void ocfs2_adl_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_super *osb)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_ORPHAN_SCAN_LOCK_OFFSET;
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
			&ocfs2_adl_orphan_scan_lops, osb->disk_lock_blkno, osb);
}

/*
 * Initialize a per-open-file flock lockres.  The lockres private data
 * is the file private, and OCFS2_LOCK_NOCACHE is set on it.
 */
void ocfs2_adl_file_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_file_private *fp)
{
	struct inode *inode = fp->fp_file->f_mapping->host;
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_FILE_LOCK_OFFSET;
	ocfs2_adl_lock_res_init_common(OCFS2_SB(inode->i_sb), res,
			OCFS2_LOCK_TYPE_FLOCK, &ocfs2_adl_flock_lops,
			OCFS2_I(inode)->ip_inode_lock_blkno, fp);
	res->l_flags |= OCFS2_LOCK_NOCACHE;
}

/* Initialize a quota-info lockres. */
void ocfs2_adl_qinfo_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_mem_dqinfo *info)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;
	struct ocfs2_super *osb = OCFS2_SB(info->dqi_gi.dqi_sb);

	dres->sector_offset = ADL_QUOTA_LOCK_OFFSET;
	/* TODO quota info lock - blkno is still hard-coded to 0 here. */
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_QINFO,
			&ocfs2_adl_qinfo_lops, 0, info);
}

/* Initialize a refcount-tree lockres. */
void ocfs2_adl_refcount_lock_res_init(struct ocfs2_lock_res *res,
				struct ocfs2_super *osb, u64 ref_blkno,
				unsigned int generation)
{
	struct ocfs2_disk_lock_res *dres = &res->diff.adlres;

	dres->sector_offset = ADL_REFCOUNT_LOCK_OFFSET;
	/* TODO refcount lock - ref_blkno/generation are unused so far;
	 * blkno is hard-coded to 0 and l_priv points at osb.
	 */
	ocfs2_adl_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_REFCOUNT,
			&ocfs2_adl_refcount_block_lops, 0, osb);
}

/*
 * debugfs seq_file show routine for a single ADL lockres: name, blkno,
 * sector, level, flags, holder counts and holder pids, one per line.
 *
 * Fix: adlres->blkno is a u64; it was printed with the signed "%lld"
 * conversion while every other site in this file uses "%llu".
 */
static int ocfs2_adl_lock_seq_show(struct seq_file *m, void *v)
{
	struct ocfs2_lock_res *lockres = v;
	struct ocfs2_disk_lock_res *adlres = &lockres->diff.adlres;

	/* Dentry lock names embed the inode number; decode it for display. */
	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
				lockres->l_name,
				(unsigned int)ocfs2_get_dentry_lock_ino(lockres));
	else
		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);

	seq_printf(m, "%llu\t"
		   "%d\t"
		   "%d\t"
		   "0x%lx\t"
		   "%u\t"
		   "%u\t",
		   adlres->blkno,
		   adlres->sector_offset,
		   lockres->l_level,
		   lockres->l_flags,
		   lockres->l_ro_holders,
		   lockres->l_ex_holders);

	ocfs2_print_lock_holder_pids(m, lockres);
	seq_printf(m, "\n");
	return 0;
}

/*
 * debugfs seq_file show routine for the deadlock-tracking snapshot of
 * one lockres: name, holding/pending hosts with IPs, holder counts and
 * pids, location on disk, lock type and level.
 */
static int ocfs2_adl_deadlock_seq_show(struct seq_file *m, void *v)
{
	struct ocfs2_deadlock_seq_priv *priv = m->private;
	struct ocfs2_deadlock_lock_res_info *info = &priv->p_res_info;

	/* in func "copy_iter_to_p_res_info", if adlres->l_lksb.page no
	 * longer exists the entry is left with an empty l_name - emit
	 * nothing for such a record.
	 */
	if (strlen(info->l_name) == 0)
		return 0;

	seq_printf(m, "lockres %s\t", info->l_name);

	if (info->host == O2NM_INVALID_NODE_NUM) {
		seq_puts(m, "no host\t");
	} else {
		seq_printf(m, "lock host %u\t", info->host);
		seq_printf(m, "IP %pI4\t", &info->host_ipv4_address);
	}

	if (info->pending_host == O2NM_INVALID_NODE_NUM) {
		seq_puts(m, "no pending host\t");
	} else {
		seq_printf(m, "pending host %u\t", info->pending_host);
		seq_printf(m, "IP %pI4\t", &info->pending_host_ipv4_address);
	}

	if (info->l_ro_holders)
		seq_printf(m, "ro holders %u\tpid %d\t",
			info->l_ro_holders, info->l_ro_holder_pid);
	if (info->l_ex_holders)
		seq_printf(m, "ex holders %u\tpid %d\t",
			info->l_ex_holders, info->l_ex_holder_pid);

	if (info->inode_no)
		seq_printf(m, "inode %lu\t", info->inode_no);

	seq_printf(m, "lock blkno %llu\tsector offset %u\tlock type %s\t",
			info->l_blkno, info->l_sector_offset,
			ocfs2_lock_type_string(info->l_type));

	if (info->l_level == ADL_LOCK_EX)
		seq_puts(m, "lock level EX\t");
	else if (info->l_level == ADL_LOCK_PR)
		seq_puts(m, "lock level PR\t");
	else
		seq_puts(m, "lock level NL\t");

	seq_putc(m, '\n');
	return 0;
}

/* Clear @node_num from the lockspace's recovery map. */
static void ocfs2_adl_clear_recovery_map(struct ocfs2_super *osb,
		unsigned int node_num)
{
	adl_clear_recovery_map(osb->cconn->cc_lockspace, node_num);
}

/* Mark @node_num in the lockspace's recovery map. */
static void ocfs2_adl_set_recovery_map(struct ocfs2_super *osb,
		unsigned int node_num)
{
	adl_set_recovery_map(osb->cconn->cc_lockspace, node_num);
}

/*
 * Lockglue ops vector wiring the generic ocfs2 locking entry points to
 * their ADL (disk-lock) implementations.
 */
const struct ocfs2_lockglue_ops adl_lock_ops = {
		.inode_lock = ocfs2_adl_inode_lock_full_nested,
		.inode_unlock = ocfs2_adl_inode_unlock,
		.open_lock = ocfs2_adl_open_lock,
		.try_open_lock = ocfs2_adl_try_open_lock,
		.open_unlock = ocfs2_adl_open_unlock,
		.create_inode_locks = ocfs2_adl_create_new_inode_locks,
		.file_lock = ocfs2_adl_file_lock,
		.file_unlock = ocfs2_adl_file_unlock,
		.super_block_lock = ocfs2_adl_super_block_lock,
		.super_block_unlock = ocfs2_adl_super_block_unlock,
		.super_lock = ocfs2_adl_super_lock,
		.super_unlock = ocfs2_adl_super_unlock,
		.rename_lock = ocfs2_adl_rename_lock,
		.rename_unlock = ocfs2_adl_rename_unlock,
		.nfs_sync_lock = ocfs2_adl_nfs_sync_lock,
		.nfs_sync_unlock = ocfs2_adl_nfs_sync_unlock,
		.orphan_scan_lock = ocfs2_adl_orphan_scan_lock,
		.orphan_scan_unlock = ocfs2_adl_orphan_scan_unlock,
		.qinfo_lock = ocfs2_adl_qinfo_lock,
		.qinfo_unlock = ocfs2_adl_qinfo_unlock,
		.refcount_lock = ocfs2_adl_refcount_lock,
		.refcount_unlock = ocfs2_adl_refcount_unlock,
		.lock_init = ocfs2_adl_init,
		.lock_exit = ocfs2_adl_shutdown,
		.lock_res_free = ocfs2_adl_lock_res_free,
		.drop_lock = ocfs2_adl_drop_lock,
		.mark_lockres_freeing = ocfs2_adl_mark_lockres_freeing,
		.super_block_lock_res_init = ocfs2_adl_super_block_lock_res_init,
		.super_lock_res_init = ocfs2_adl_super_lock_res_init,
		.rename_lock_res_init = ocfs2_adl_rename_lock_res_init,
		.nfs_sync_lock_res_init = ocfs2_adl_nfs_sync_lock_res_init,
		.orphan_scan_lock_res_init = ocfs2_adl_orphan_scan_lock_res_init,
		.inode_lock_res_init = ocfs2_adl_inode_lock_res_init,
		.file_lock_res_init = ocfs2_adl_file_lock_res_init,
		.qinfo_lock_res_init = ocfs2_adl_qinfo_lock_res_init,
		.refcount_lock_res_init = ocfs2_adl_refcount_lock_res_init,
		.mark_lockres_freed = ocfs2_adl_mark_lockres_freed,
		.clear_recovery_map = ocfs2_adl_clear_recovery_map,
		.set_recovery_map = ocfs2_adl_set_recovery_map,
};

