// SPDX-License-Identifier: GPL-2.0-or-later
 /*
 * useradl.c
 *
 * Code which implements the kernel side of a minimal userspace
 * interface to our ADL.
 *
 * Many of the functions here are pared down versions of adlglue.c
 * functions.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 */

#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>

#include "ocfs2_lockingver.h"
#include "useradl.h"

#include "adl/adlapi.h"
#include "adl/adllock.h"
#include "adlglue.h"

#define MLOG_MASK_PREFIX ML_LOCKFS
#include "cluster/masklog.h"

/* Log an ADL call failure together with the lock's on-disk identity
 * (block number + sector offset) so the failing lock can be located. */
#define user_log_adl_error(_func, _stat, _block_num, _sector_offset)	\
	mlog(ML_ERROR, "lock error %d while calling %s on blkno %lld, lock_offset %d\n",	\
			_stat, _func, (unsigned long long)_block_num, _sector_offset)

/*
 * Release one hold on @lockres taken at @level (ADL_LOCK_EX or
 * ADL_LOCK_PR).  If this was the last hold at that level, downconvert
 * the underlying ADL lock:
 *   - last EX hold with PR holds remaining -> downconvert to ADL_LOCK_PR
 *   - no holds remaining at all            -> downconvert to ADL_LOCK_NL
 * If other holds remain at the same level, only the counter is dropped.
 *
 * Returns 0 on success or a negative errno.
 */
int user_adl_cluster_unlock(struct user_lock_res *lockres, int level)
{
	int status = 0;
	int new_level;
	struct ocfs2_adl_cluster_connection *conn =
			cluster_connection_from_user_lockres(lockres);

	/* Only EX and PR holds can be released. */
	if (level != ADL_LOCK_EX && level != ADL_LOCK_PR) {
		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
				lockres->l_namelen, lockres->l_name);
		status = -EINVAL;
		goto bail;
	}

	/* Wait out any in-flight ADL call before touching holder counts;
	 * the spinlock is dropped while sleeping and retaken to re-check. */
	spin_lock(&lockres->l_lock);
	while (lockres->l_flags & USER_LOCK_BUSY) {
		spin_unlock(&lockres->l_lock);
		lockfs_wait_on_flag(lockres, USER_LOCK_BUSY);
		spin_lock(&lockres->l_lock);
	}

	lockfs_dec_holders(lockres, level);
	if (lockres->l_ro_holders != 0 && lockres->l_ex_holders == 0 &&
			level == ADL_LOCK_EX) {
		new_level = ADL_LOCK_PR;
	} else if (lockres->l_ro_holders == 0 && lockres->l_ex_holders == 0) {
		new_level = ADL_LOCK_NL;
	} else {
		/* Other holds remain at this level; no downconvert needed. */
		mlog(0, "ro %d, ex %d\n", lockres->l_ro_holders,
				lockres->l_ex_holders);
		spin_unlock(&lockres->l_lock);
		goto bail;
	}

	/* Mark the lockres busy/unlocking so concurrent lockers wait on us. */
	mlog(0, "blkno %llu, lock_offset %d set BUSY pending\n",
			lockres->l_blkno, lockres->l_sector_offset);
	lockres->l_flags |= USER_LOCK_BUSY;
	lockres->l_flags |= USER_LOCK_UNLOCKING;
	spin_unlock(&lockres->l_lock);

	mlog(0, "lock %s\n", lockres->l_name);

	status = adl_unlock(conn->cc_lockspace, lockres->l_blkno,
			lockres->l_sector_offset, &lockres->l_adllksb,
			new_level, ADL_LKF_USER_MODE);
	if (status)
		o2hb_handle_invalid(conn->cc_name);

	spin_lock(&lockres->l_lock);
	/* NOTE(review): l_level is updated to new_level even when
	 * adl_unlock() failed above — confirm this matches the ADL's
	 * recovery semantics (o2hb_handle_invalid was already notified). */
	lockres->l_level = new_level;
	lockres->l_flags &= ~USER_LOCK_BUSY;
	lockres->l_flags &= ~USER_LOCK_UNLOCKING;
	spin_unlock(&lockres->l_lock);
	/* Wake anyone parked in lockfs_wait_on_flag() on BUSY/UNLOCKING. */
	wake_up(&lockres->l_event);

	mlog(0, "blkno %llu, offset %d, new level %d\n",
			lockres->l_blkno, lockres->l_sector_offset, new_level);

bail:
	if (status)
		mlog_errno(status);

	return status;
}

/*
 * Take a cluster lock at @level (ADL_LOCK_EX or ADL_LOCK_PR) on behalf
 * of userspace, upconverting the underlying ADL lock if the currently
 * granted level is insufficient, and bump the holder count on success.
 *
 * @lkm_flags: DLM_LKF_NOQUEUE makes a contended upconvert fail with
 *             -EAGAIN; without it, -EAGAIN from the ADL is retried
 *             after sleeping DISK_RETRY_TIME_MS.
 *
 * Returns 0 on success, -ERESTARTSYS if a signal arrived while
 * retrying, -EINVAL for a bad @level, or a negative errno from the ADL.
 */
int user_adl_cluster_lock(struct user_lock_res *lockres,
		int level, int lkm_flags)
{
	int ret = 0, alloc_lksb, local_flags = ADL_LKF_USER_MODE;
	struct ocfs2_adl_cluster_connection *conn =
			cluster_connection_from_user_lockres(lockres);

	if (level != ADL_LOCK_EX && level != ADL_LOCK_PR) {
		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
				lockres->l_namelen, lockres->l_name);
		ret = -EINVAL;
		goto bail;
	}

	mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
	     lockres->l_namelen, lockres->l_name, level, lkm_flags);

again:
	alloc_lksb = 0;

	/* Every retry passes through here; let signals abort the wait. */
	if (signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto bail;
	}

	spin_lock(&lockres->l_lock);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if ((lockres->l_flags & USER_LOCK_BUSY) &&
			(level > lockres->l_level)) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		spin_unlock(&lockres->l_lock);

		lockfs_wait_on_flag(lockres, USER_LOCK_BUSY);
		goto again;
	}

	if (lockres->l_flags & USER_LOCK_UNLOCKING) {
		/* is someone sitting in dlm_unlock? If so, wait on
		 * them. */
		spin_unlock(&lockres->l_lock);

		lockfs_wait_on_flag(lockres, USER_LOCK_UNLOCKING);
		goto again;
	}

	if (level > lockres->l_level) {
		/* First use of this lockres: allocate the lksb before the
		 * ADL can track the lock. */
		if (!(lockres->l_flags & USER_LOCK_ATTACHED))
			alloc_lksb = 1;

		if (lkm_flags & DLM_LKF_NOQUEUE)
			local_flags |= ADL_LKF_NOQUEUE;
		/* Claim the lockres; concurrent lockers now wait on BUSY. */
		lockres->l_flags |= USER_LOCK_BUSY;
		spin_unlock(&lockres->l_lock);

		if (alloc_lksb) {
			ret = adl_alloc_lock_lksb(conn->cc_lockspace, lockres->l_blkno,
					lockres->l_sector_offset, &lockres->l_adllksb);
			if (ret) {
				/* NOTE(review): presumably clears USER_LOCK_BUSY
				 * and wakes waiters — confirm in adlglue.c. */
				lockfs_recover_from_error(lockres);
				goto bail;
			}
			spin_lock(&lockres->l_lock);
			lockres->l_flags |= USER_LOCK_ATTACHED;
			spin_unlock(&lockres->l_lock);
		}

		/* call adl_lock to upgrade lock now */
		ret = adl_lock(conn->cc_lockspace, lockres->l_blkno,
				lockres->l_sector_offset, &lockres->l_adllksb,
				level, local_flags);
		if (ret < 0) {
			/* -EAGAIN is an expected contention outcome; don't
			 * log it at error level. */
			if (ret != -EAGAIN) {
				user_log_adl_error("user_adl_cluster_lock",
							ret, lockres->l_blkno,
							lockres->l_sector_offset);
			}
			lockfs_recover_from_error(lockres);

			/* An I/O error means the heartbeat region may be bad. */
			if (ret == -EIO)
				o2hb_handle_invalid(conn->cc_name);

			/* Contended but the caller is willing to wait:
			 * back off and retry from the top. */
			if (ret == -EAGAIN && !(local_flags & ADL_LKF_NOQUEUE)) {
				ret = 0;
				msleep(DISK_RETRY_TIME_MS);
				goto again;
			}

			goto bail;
		}

		spin_lock(&lockres->l_lock);
		lockres->l_level = level;
		mlog(0, "blkno %llu, lock_offset %d clear BUSY pending\n",
				lockres->l_blkno, lockres->l_sector_offset);
		lockres->l_flags &= ~USER_LOCK_BUSY;
		wake_up(&lockres->l_event);
	}

	/* Granted (or already held at a sufficient level): count the hold. */
	lockfs_inc_holders(lockres, level);
	spin_unlock(&lockres->l_lock);

bail:
	return ret;
}

/*
 * Parse a dentry name of the form "<blkno>_<sector_offset>" into
 * @lockres->l_blkno and @lockres->l_sector_offset.
 *
 * strsep() consumes @name in place, so the caller must pass a writable,
 * NUL-terminated buffer.  Empty fields (consecutive '_') are skipped;
 * fields absent from the name are left at the caller's initial values.
 *
 * Returns 0 on success, or -EINVAL if the name has too many fields or
 * a field is not a valid number.
 */
static int user_adl_parse_dentry_name(struct user_lock_res *lockres,
		char *name)
{
	unsigned long long blkno;
	int sector_offset;
	char *p;
	int i = 0;

	while ((p = strsep(&name, "_")) != NULL) {
		if (!*p)
			continue;

		switch (i++) {
		case LOCK_BLOCK_NUM:
			mlog(0, "blkno = %s\n", p);
			/* kstrto*() rejects trailing garbage and reports
			 * overflow, unlike the deprecated simple_strtol()
			 * this replaces (which also truncated 64-bit block
			 * numbers through 'long' on 32-bit kernels). */
			if (kstrtoull(p, 0, &blkno))
				return -EINVAL;
			lockres->l_blkno = blkno;
			break;
		case LOCK_SECTOR_OFFSET:
			mlog(0, "sector_offset = %s\n", p);
			if (kstrtoint(p, 0, &sector_offset))
				return -EINVAL;
			lockres->l_sector_offset = sector_offset;
			break;
		default:
			mlog(ML_ERROR, "too many parameters, can't be parsed\n");
			return -EINVAL;
		}
	}

	return 0;
}

int user_adl_lock_res_init(struct user_lock_res *lockres,
		struct dentry *dentry)
{
	memset(lockres, 0, sizeof(*lockres));

	spin_lock_init(&lockres->l_lock);
	init_waitqueue_head(&lockres->l_event);
	lockres->l_level = ADL_LOCK_IV;

	memcpy(lockres->l_name,
	       dentry->d_name.name,
	       dentry->d_name.len);
	lockres->l_namelen = dentry->d_name.len;

	return user_adl_parse_dentry_name(lockres, lockres->l_name);
}

/*
 * Tear down @lockres: detach it from the ADL and free its lksb.
 *
 * Waits for any in-flight ADL call to finish first.  Returns 0 on
 * success, or -EBUSY if the lock still has read or write holders.
 */
int user_adl_destroy_lock(struct user_lock_res *lockres)
{
	int status = -EBUSY;

	mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);

	/* Wait out any in-flight ADL call before inspecting state. */
	spin_lock(&lockres->l_lock);
	while (lockres->l_flags & USER_LOCK_BUSY) {
		spin_unlock(&lockres->l_lock);

		lockfs_wait_on_flag(lockres, USER_LOCK_BUSY);

		spin_lock(&lockres->l_lock);
	}

	/* Still held by someone: refuse to destroy (status stays -EBUSY). */
	if (lockres->l_ro_holders || lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		goto bail;
	}

	lockres->l_flags &= ~USER_LOCK_ATTACHED;

	status = 0;
	spin_unlock(&lockres->l_lock);

	/* NOTE(review): called even if USER_LOCK_ATTACHED was never set;
	 * presumably safe on a zero-initialized lksb — confirm against
	 * adl_free_lock_lksb() in adllock.c. */
	adl_free_lock_lksb(&lockres->l_adllksb);

bail:

	return status;
}

/* Recovery callback handed to ocfs2_adl_cluster_connect() by
 * user_adl_register(); userspace locks need no kernel-side recovery
 * action, so this intentionally does nothing. */
static void user_adl_recovery_handler_noop(int node_num,
		void *recovery_data)
{}

/*
 * Establish the cluster connection for @domain_name.
 *
 * Looks up the domain's heartbeat block device and connects to the ADL
 * lockspace with a no-op recovery handler.
 *
 * Returns the new connection on success, or an ERR_PTR()-encoded
 * negative errno on failure (-EINVAL when no block device is found,
 * otherwise whatever ocfs2_adl_cluster_connect() reported).
 */
struct ocfs2_adl_cluster_connection *user_adl_register(char *domain_name,
				int domain_len)
{
	struct ocfs2_adl_cluster_connection *conn = NULL;
	struct block_device *bdev;
	int rc;

	bdev = o2hb_get_block_device(domain_name);
	if (!bdev) {
		mlog(ML_ERROR, "%s: Not find block device information\n",
				domain_name);
		return ERR_PTR(-EINVAL);
	}

	rc = ocfs2_adl_cluster_connect(domain_name, domain_len,
			user_adl_recovery_handler_noop,
			NULL, &conn, bdev);
	if (rc) {
		mlog_errno(rc);
		return ERR_PTR(rc);
	}

	return conn;
}

/* Tear down a cluster connection created by user_adl_register(). */
void user_adl_unregister(struct ocfs2_adl_cluster_connection *conn)
{
	ocfs2_adl_cluster_disconnect(conn);
}
