// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 *
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle.  All rights reserved.
 */

/* This quorum hack is only here until we transition to some more rational
 * approach that is driven from userspace.  Honest.  No foolin'.
 *
 * Imagine two nodes lose network connectivity to each other but they're still
 * up and operating in every other way.  Presumably a network timeout indicates
 * that a node is broken and should be recovered.  They can't both recover each
 * other and both carry on without serialising their access to the file system.
 * They need to decide who is authoritative.  Now extend that problem to
 * arbitrary groups of nodes losing connectivity between each other.
 *
 * So we declare that a node which has given up on connecting to a majority
 * of nodes who are still heartbeating will fence itself.
 *
 * There are huge opportunities for races here.  After we give up on a node's
 * connection we need to wait long enough to give heartbeat an opportunity
 * to declare the node as truly dead.  We also need to be careful with the
 * race between when we see a node start heartbeating and when we connect
 * to it.
 *
 * So nodes that are in this transition put a hold on the quorum decision
 * with a counter.  As they fall out of this transition they drop the count
 * and if they're the last, they fire off the decision.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/delay.h>

#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"
#include "quorum.h"

/* Gate for the self-rearming heartbeat-check delayed work.  The work
 * reschedules itself only while qs_disable_check reads
 * NEED_CHECK_HEARTBEAT; o2quo_exit() flips it to NO_NEED_CHECK_HEARTBEAT
 * so the work stops re-arming during teardown (see o2quo_check_heartbeat()). */
enum o2quo_check_heartbeat_state {
	NEED_CHECK_HEARTBEAT,
	NO_NEED_CHECK_HEARTBEAT
};

/* All quorum state for this node, protected by qs_lock unless noted. */
static struct o2quo_state {
	spinlock_t		qs_lock;
	struct work_struct	qs_work;	/* runs o2quo_quorum_region() */
	struct delayed_work	qs_check_heartbeat;	/* periodic pre-decision heartbeat check */
	struct delayed_work	qs_check_heartbeat_timeout;	/* upper bound on the check loop */
	int			qs_pending;	/* a quorum decision is pending */
	int			qs_heartbeating;	/* number of bits set in qs_hb_bm */
	unsigned long		qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* nodes seen heartbeating */
	int			qs_connected;	/* number of bits set in qs_conn_bm */
	unsigned long		qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* nodes we have a conn to */
	int			qs_holds;	/* number of bits set in qs_hold_bm */
	unsigned long		qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* nodes delaying the decision */
	int			qs_heartbeat_checked;	/* heartbeat verified, decision may fire */
	int			qs_working;	/* check-heartbeat works are scheduled */
	atomic_t	qs_disable_check;	/* enum o2quo_check_heartbeat_state; read without qs_lock */
} o2quo_state;

/* this is horribly heavy-handed.  It should instead flip the file
 * system RO and call some userspace script. */
/* this is horribly heavy-handed.  It should instead flip the file
 * system RO and call some userspace script.
 *
 * Fences the local node using the cluster's configured method:
 * O2NM_FENCE_PANIC panics, anything else (including out-of-range
 * values, which trip the WARN_ON) falls through to emergency restart. */
static void o2quo_fence_self(void)
{
	printk(KERN_CRIT "*** o2quo: ocfs2 will fence self ***\n");
	/* panic spins with interrupts enabled.  with preempt
	 * threads can still schedule, etc, etc */
	o2hb_stop_all_regions();
	/* give heartbeat shutdown and the message above a moment to land */
	msleep(6000);

	switch (o2nm_single_cluster->cl_fence_method) {
	case O2NM_FENCE_PANIC:
		panic("*** o2quo: ocfs2 is very sorry to be fencing this system by "
		      "panicing ***\n");
		break;
	default:
		WARN_ON(o2nm_single_cluster->cl_fence_method >=
			O2NM_FENCE_METHODS);
		fallthrough;
	case O2NM_FENCE_RESET:
		printk(KERN_ERR "*** o2quo: ocfs2 is very sorry to be fencing this "
		       "system by restarting ***\n");
		emergency_restart();
		break;
	}
}

/* Indicate that a timeout occurred on a heartbeat region write. The
 * other nodes in the cluster may consider us dead at that time so we
 * want to "fence" ourselves so that we don't scribble on the disk
 * after they think they've recovered us. This can't solve all
 * problems related to writeout after recovery but this hack can at
 * least close some of those gaps. When we have real fencing, this can
 * go away as our node would be fenced externally before other nodes
 * begin recovery. */
void o2quo_disk_timeout(void)
{
	/* fencing can be administratively disabled (e.g. while the
	 * system is halting) -- honour that before self-fencing */
	if (o2hb_disable_fence())
		return;

	o2quo_fence_self();
}

/* Decide whether this node must fence itself after a connectivity loss.
 *
 * @hb_bm:              bitmap of currently heartbeating nodes
 * @hb_heartbeating:    count of heartbeating nodes
 * @hr_qs_effect_nodes: per-node count of peers that node can still reach
 * @hr_qs_lowest_node:  per-node lowest reachable node number
 *
 * Returns 1 if this node should fence itself, 0 otherwise. */
int o2quo_make_decision(unsigned long hb_bm[], int hb_heartbeating,
		u16 hr_qs_effect_nodes[], u16 hr_qs_lowest_node[])
{
	int quorum;
	int lowest_hb, lowest_reachable = 0, fence = 0;
	struct o2quo_state *qs = &o2quo_state;
	u16 qs_effect_nodes = 0, lowest_reach_node = O2NM_MAX_NODES;
	u16 qs_effect_nodes_other, qs_lowest_node_other;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&qs->qs_lock, flags);

	lowest_hb = find_first_bit(hb_bm, O2NM_MAX_NODES);
	if (lowest_hb != O2NM_MAX_NODES)
		lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);

	/* one pass over the bitmap: count the nodes that are both
	 * heartbeating and connected, and remember the lowest of them */
	for (i = 0; i < O2NM_MAX_NODES; i++) {
		if (test_bit(i, hb_bm) && test_bit(i, qs->qs_conn_bm)) {
			if (lowest_reach_node == O2NM_MAX_NODES)
				lowest_reach_node = i;
			qs_effect_nodes++;
		}
	}

	mlog(ML_NOTICE,
			"o2quo: heartbeating: %d, connected: %d, lowest: %d (%sreachable)\n",
			hb_heartbeating,
			qs_effect_nodes, lowest_hb, lowest_reachable ? "" : "un");

	/* nothing to decide if we've stopped heartbeating or are alone */
	if (!test_bit(o2nm_this_node(), hb_bm) ||
			hb_heartbeating == 1)
		goto out;

	if (hb_heartbeating & 1) {
		/* the odd numbered cluster case is straight forward --
		 * if we can't talk to the majority we're hosed */
		quorum = (hb_heartbeating + 1)/2;
		if (qs_effect_nodes >= quorum)
			goto out;
	} else {
		/* the even numbered cluster adds the possibility of each half
		 * of the cluster being able to talk amongst themselves.. in
		 * that case we're hosed if we can't talk to the group that has
		 * the lowest numbered node */
		quorum = hb_heartbeating / 2;
		if (qs_effect_nodes > quorum ||
				(qs_effect_nodes == quorum && lowest_reachable))
			goto out;
	}

	/* Handle multiple net partitions: compare our partition against
	 * every heartbeating node we can't reach.  The larger partition
	 * survives; ties go to the partition holding the lower node. */
	i = -1;
	while ((i = find_next_bit(hb_bm, O2NM_MAX_NODES, i + 1)) <
			O2NM_MAX_NODES) {
		if (test_bit(i, qs->qs_conn_bm))
			continue;

		qs_effect_nodes_other = hr_qs_effect_nodes[i];
		qs_lowest_node_other = hr_qs_lowest_node[i];
		mlog(0, "o2quo: Node : %d, effect node: %d, lowest node: %d\n",
				i, qs_effect_nodes_other, qs_lowest_node_other);

		if (qs_effect_nodes_other > qs_effect_nodes ||
				(qs_effect_nodes_other == qs_effect_nodes &&
				 lowest_reach_node > qs_lowest_node_other)) {
			fence = 1;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&qs->qs_lock, flags);
	/* note the leading space in " not" so the message reads correctly */
	mlog(ML_NOTICE, "o2quo: fence = %d, this node should%s fence self\n", fence,
			fence ? "" : " not");

	return fence;
}

/* Mark the heartbeat as verified and, if no holds remain and a decision
 * is pending, kick off the quorum work.  Caller must hold qs_lock. */
static void o2quo_quorum_checked(struct o2quo_state *qs)
{
	assert_spin_locked(&qs->qs_lock);

	mlog(ML_QUORUM, "o2quo: heartbeat checked, going to make decision\n");
	qs->qs_heartbeat_checked = 1;

	if (qs->qs_holds != 0 || !qs->qs_pending)
		return;

	qs->qs_heartbeat_checked = 0;
	qs->qs_pending = 0;
	qs->qs_working = 0;
	schedule_work(&qs->qs_work);
}

/* Delayed-work handler: fires once MAX_QUORUM_CHECK_TIME has elapsed,
 * cancels the periodic heartbeat check, and forces the quorum decision
 * forward if one is still pending. */
static void o2quo_check_heartbeat_timeout(struct work_struct *work)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long flags;

	/* fixed typo: "quroum" -> "quorum" */
	mlog(ML_QUORUM, "o2quo: max quorum check time %d ms reached, going to check holds\n",
			MAX_QUORUM_CHECK_TIME);
	cancel_delayed_work(&qs->qs_check_heartbeat);
	spin_lock_irqsave(&qs->qs_lock, flags);
	if (qs->qs_pending)
		o2quo_quorum_checked(qs);
	spin_unlock_irqrestore(&qs->qs_lock, flags);
}

/* Delayed-work handler: polls o2hb_check_if_quorum() every
 * QUORUM_CHECK_TIME ms while a decision is pending.  On success it
 * cancels the timeout work and moves the decision forward; otherwise it
 * re-arms itself unless o2quo_exit() has set qs_disable_check. */
static void o2quo_check_heartbeat(struct work_struct *work)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long flags;

	mlog(ML_QUORUM, "o2quo: going to check heartbeat before quorum every %d ms\n",
			QUORUM_CHECK_TIME);
	spin_lock_irqsave(&qs->qs_lock, flags);
	if (!qs->qs_pending) {
		spin_unlock_irqrestore(&qs->qs_lock, flags);
		return;
	}
	/* drop the lock across o2hb_check_if_quorum(); qs_pending is
	 * re-checked under the lock below before acting on the result */
	spin_unlock_irqrestore(&qs->qs_lock, flags);

	if (o2hb_check_if_quorum()) {
		cancel_delayed_work(&qs->qs_check_heartbeat_timeout);
		spin_lock_irqsave(&qs->qs_lock, flags);
		if (qs->qs_pending)
			o2quo_quorum_checked(qs);
		spin_unlock_irqrestore(&qs->qs_lock, flags);
	} else if (atomic_read(&qs->qs_disable_check) == NEED_CHECK_HEARTBEAT) {
		/* not yet quorate: try again unless we're tearing down */
		schedule_delayed_work(&qs->qs_check_heartbeat,
				msecs_to_jiffies(QUORUM_CHECK_TIME));
	}
}

/* Work handler for qs_work: performs the actual quorum action on the
 * heartbeat regions once holds have drained and the heartbeat check
 * has completed. */
static void o2quo_quorum_region(struct work_struct *work)
{
	o2hb_quorum_region();
}

/* Return the lowest node number that is both heartbeating (per @hb_bm)
 * and connected to us, or O2NM_MAX_NODES if there is none.  @node is
 * currently unused. */
u16 o2quo_get_lowest_node(unsigned long hb_bm[], u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	u16 lowest = O2NM_MAX_NODES;
	unsigned long flags;
	int bit = -1;

	spin_lock_irqsave(&qs->qs_lock, flags);
	/* fix me: maybe we need to check the effect nodes of the lowest node
	 * right now we do not do that as we already sync the values of both
	 * qs_effect_nodes and qs_lowest_node when the disk heart timeouts under
	 * network partition
	 */
	while ((bit = find_next_bit(hb_bm, O2NM_MAX_NODES, bit + 1)) <
			O2NM_MAX_NODES) {
		if (test_bit(bit, qs->qs_conn_bm)) {
			lowest = bit;
			break;
		}
	}
	spin_unlock_irqrestore(&qs->qs_lock, flags);

	return lowest;
}

/* Count the nodes that are both heartbeating (per @hb_bm) and connected
 * to us.  @node is currently unused. */
u16 o2quo_calculate_effect_nodes(unsigned long hb_bm[], u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	u16 count = 0;
	unsigned long flags;
	int bit = -1;

	spin_lock_irqsave(&qs->qs_lock, flags);
	while ((bit = find_next_bit(hb_bm, O2NM_MAX_NODES, bit + 1)) <
			O2NM_MAX_NODES) {
		if (test_bit(bit, qs->qs_conn_bm))
			count++;
	}
	spin_unlock_irqrestore(&qs->qs_lock, flags);

	return count;
}

/* Take a hold on the quorum decision for @node, delaying any decision
 * until the hold is released.  Idempotent per node; caller must hold
 * qs_lock. */
static void o2quo_set_hold(struct o2quo_state *qs, u16 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (test_and_set_bit(node, qs->qs_hold_bm)) {
		mlog(0, "o2quo: hold bm has been set, node %u, holds %d total\n",
				node, qs->qs_holds);
		return;
	}

	qs->qs_holds++;
	mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
		        "node %u\n", node);
	mlog(0, "o2quo: node %u, holds %d total\n", node, qs->qs_holds);
}

/* Release @node's hold on the quorum decision.  When the last hold
 * drops and a decision is pending, either fire the quorum work (if the
 * heartbeat was already checked) or start the heartbeat-check works.
 * Caller must hold qs_lock. */
static void o2quo_clear_hold(struct o2quo_state *qs, u16 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (!test_and_clear_bit(node, qs->qs_hold_bm)) {
		mlog(0, "o2quo: hold bm has been cleared, node %u\n", node);
		return;
	}

	qs->qs_holds--;
	mlog(0, "o2quo: node %u, holds %d total\n", node, qs->qs_holds);

	if (qs->qs_holds == 0 && qs->qs_pending) {
		if (qs->qs_heartbeat_checked) {
			qs->qs_heartbeat_checked = 0;
			qs->qs_pending = 0;
			qs->qs_working = 0;
			schedule_work(&qs->qs_work);
		} else if (!qs->qs_working) {
			qs->qs_working = 1;
			schedule_delayed_work(&qs->qs_check_heartbeat, 0);
			schedule_delayed_work(&qs->qs_check_heartbeat_timeout,
					msecs_to_jiffies(MAX_QUORUM_CHECK_TIME));
		}
	}

	mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
			node, qs->qs_holds);
}

/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection.  the hold will be dropped in conn_up or hb_down.  it might
 * be perpetuated by conn_err until hb_down.  if we already have a conn, we
 * might be dropping a hold that conn_up got. */
/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection.  the hold will be dropped in conn_up or hb_down.  it might
 * be perpetuated by conn_err until hb_down.  if we already have a conn, we
 * might be dropping a hold that conn_up got. */
void o2quo_hb_up(u16 node, bool lock_type_adl)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long flags;
	int already_connected;

	/* set qs_hb_bm only if the first DLM region up. */
	if (o2hb_dlm_existed(node) || lock_type_adl)
		return;

	spin_lock_irqsave(&qs->qs_lock, flags);

	qs->qs_heartbeating++;
	mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
		        "node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	set_bit(node, qs->qs_hb_bm);

	mlog(0, "o2quo: node %u, heartbeating %d total\n", node, qs->qs_heartbeating);

	already_connected = test_bit(node, qs->qs_conn_bm);
	if (already_connected)
		o2quo_clear_hold(qs, node);
	else
		o2quo_set_hold(qs, node);

	spin_unlock_irqrestore(&qs->qs_lock, flags);
}

/* hb going down releases any holds we might have had due to this node from
 * conn_up, conn_err, or hb_up */
/* hb going down releases any holds we might have had due to this node from
 * conn_up, conn_err, or hb_up */
void o2quo_hb_down(u16 node, bool lock_type_adl)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long irq_flags;

	/* clear qs_hb_bm only if the last DLM region down. */
	if (o2hb_dlm_existed(node) || lock_type_adl)
		return;

	spin_lock_irqsave(&qs->qs_lock, irq_flags);

	qs->qs_heartbeating--;
	mlog_bug_on_msg(qs->qs_heartbeating < 0,
			"node %u, %d heartbeating\n",
			node, qs->qs_heartbeating);
	mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	clear_bit(node, qs->qs_hb_bm);

	mlog(0, "o2quo: node %u, heartbeating %d total\n", node, qs->qs_heartbeating);

	/* drop whatever hold hb_up/conn_up/conn_err left for this node */
	o2quo_clear_hold(qs, node);

	spin_unlock_irqrestore(&qs->qs_lock, irq_flags);
}

/* this tells us that we've decided that the node is still heartbeating
 * even though we've lost its conn.  it must only be called after conn_err
 * and indicates that we must now make a quorum decision in the future,
 * though we might be doing so after waiting for holds to drain.  Here
 * we'll be dropping the hold from conn_err. */
/* The node is still heartbeating although its conn is gone: flag that a
 * quorum decision is now required, then release the hold conn_err took.
 * qs_pending must be set before clearing the hold -- clear_hold reads it
 * when the last hold drains. */
void o2quo_hb_still_up(u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long irq_flags;

	spin_lock_irqsave(&qs->qs_lock, irq_flags);

	mlog(0, "o2quo: node %u\n", node);

	qs->qs_pending = 1;
	o2quo_clear_hold(qs, node);

	spin_unlock_irqrestore(&qs->qs_lock, irq_flags);
}

/* This is analogous to hb_up.  as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating.  the hold will be dropped in
 * hb_up or hb_down.  it might be perpetuated by conn_err until hb_down.  if
 * it's already heartbeating we might be dropping a hold that conn_up got.
 * */
/* This is analogous to hb_up.  as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating.  the hold will be dropped in
 * hb_up or hb_down.  it might be perpetuated by conn_err until hb_down.  if
 * it's already heartbeating we might be dropping a hold that conn_up got.
 * */
void o2quo_conn_up(u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long irq_flags;

	spin_lock_irqsave(&qs->qs_lock, irq_flags);

	qs->qs_connected++;
	mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
		        "node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
	set_bit(node, qs->qs_conn_bm);

	mlog(0, "o2quo: node %u, connected %d total\n", node, qs->qs_connected);

	if (test_bit(node, qs->qs_hb_bm))
		o2quo_clear_hold(qs, node);
	else
		o2quo_set_hold(qs, node);

	spin_unlock_irqrestore(&qs->qs_lock, irq_flags);

	o2hb_conn_state_change(node, NODE_STATE_MOUNTED);
}

/* we've decided that we won't ever be connecting to the node again.  if it's
 * still heartbeating we grab a hold that will delay decisions until either the
 * node stops heartbeating from hb_down or the caller decides that the node is
 * still up and calls still_up */
/* we've decided that we won't ever be connecting to the node again.  if it's
 * still heartbeating we grab a hold that will delay decisions until either the
 * node stops heartbeating from hb_down or the caller decides that the node is
 * still up and calls still_up */
void o2quo_conn_err(u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long irq_flags;

	spin_lock_irqsave(&qs->qs_lock, irq_flags);

	if (test_bit(node, qs->qs_conn_bm)) {
		qs->qs_connected--;
		mlog_bug_on_msg(qs->qs_connected < 0,
				"node %u, connected %d\n",
				node, qs->qs_connected);

		clear_bit(node, qs->qs_conn_bm);

		if (test_bit(node, qs->qs_hb_bm))
			o2quo_set_hold(qs, node);
		else
			o2quo_clear_hold(qs, node);
	}

	mlog(0, "o2quo: node %u, connected %d total\n", node, qs->qs_connected);

	spin_unlock_irqrestore(&qs->qs_lock, irq_flags);

	o2hb_conn_state_change(node, NODE_STATE_CONNERR);
}

/* Returns 1 when losing the connection to @node leaves us with no usable
 * peer at all (a real connectivity error), 0 when some other node is
 * still both heartbeating and connected, or when we are the only
 * heartbeating node to begin with. */
int o2quo_conn_down(u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long flags;
	int conn_err = 1;
	int i;

	spin_lock_irqsave(&qs->qs_lock, flags);

	/* a lone heartbeating node has no peer link to lose */
	if (qs->qs_heartbeating == 1) {
		conn_err = 0;
		goto out;
	}

	for (i = 0; i < O2NM_MAX_NODES; i++) {
		if (i == node)
			continue;
		if (test_bit(i, qs->qs_hb_bm) &&
				test_bit(i, qs->qs_conn_bm)) {
			conn_err = 0;
			break;
		}
	}
out:
	spin_unlock_irqrestore(&qs->qs_lock, flags);

	return conn_err;
}

/* Check @node for a connection error.  For the local node this means any
 * heartbeating peer we cannot reach; for a remote node it simply means
 * we have no connection to it.  Returns 1 on error, 0 otherwise. */
int o2quo_conn_check_err(u16 node)
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long flags;
	int conn_err = 0;
	int i;

	spin_lock_irqsave(&qs->qs_lock, flags);

	if (node == o2nm_this_node()) {
		for (i = 0; i < O2NM_MAX_NODES; i++) {
			if (test_bit(i, qs->qs_hb_bm) &&
			    !test_bit(i, qs->qs_conn_bm)) {
				conn_err = 1;
				break;
			}
		}
	} else {
		conn_err = !test_bit(node, qs->qs_conn_bm);
	}

	spin_unlock_irqrestore(&qs->qs_lock, flags);

	return conn_err;
}

/* Returns true when every heartbeating node (per @hb_bm) is connected to
 * us and reports the same effective-node count as we do -- i.e. the
 * cluster looks fully and symmetrically connected. */
bool o2quo_conn_normal(unsigned long hb_bm[], u16 eff_nodes[])
{
	struct o2quo_state *qs = &o2quo_state;
	unsigned long flags;
	u16 local_eff;
	bool normal = true;
	int bit;

	spin_lock_irqsave(&qs->qs_lock, flags);
	local_eff = eff_nodes[o2nm_this_node()];
	for (bit = find_next_bit(hb_bm, O2NM_MAX_NODES, 0);
	     bit < O2NM_MAX_NODES;
	     bit = find_next_bit(hb_bm, O2NM_MAX_NODES, bit + 1)) {
		if (!test_bit(bit, qs->qs_conn_bm) ||
		    eff_nodes[bit] != local_eff) {
			normal = false;
			break;
		}
	}
	spin_unlock_irqrestore(&qs->qs_lock, flags);

	return normal;
}

/* Initialise the module-wide quorum state: lock, work items, and the
 * flag that lets the heartbeat-check work re-arm itself. */
void o2quo_init(void)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock_init(&qs->qs_lock);
	INIT_WORK(&qs->qs_work, o2quo_quorum_region);
	INIT_DELAYED_WORK(&qs->qs_check_heartbeat, o2quo_check_heartbeat);
	INIT_DELAYED_WORK(&qs->qs_check_heartbeat_timeout,
			o2quo_check_heartbeat_timeout);
	atomic_set(&qs->qs_disable_check, NEED_CHECK_HEARTBEAT);
}

/* Tear down quorum state.  Ordering matters: first stop the heartbeat
 * check from re-arming itself (qs_disable_check), then cancel both
 * delayed works, then wait for any in-flight quorum work to finish. */
void o2quo_exit(void)
{
	struct o2quo_state *qs = &o2quo_state;

	atomic_set(&qs->qs_disable_check, NO_NEED_CHECK_HEARTBEAT);
	cancel_delayed_work(&qs->qs_check_heartbeat);
	cancel_delayed_work(&qs->qs_check_heartbeat_timeout);
	flush_work(&qs->qs_work);
}
