// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * resize.c
 *
 * volume resize.
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sort.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "lockglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"
#include "file.h"

#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"

/*
 * Check whether any new backup superblock locations fall inside the
 * last group.  For each one found, set (or clear, when @set is 0) its
 * bit in the group bitmap.
 *
 * @inode:           the global bitmap inode (used for sb geometry).
 * @gd:              the last group descriptor, already grown to its
 *                   new size by the caller.
 * @cl_cpg:          clusters per group.
 * @old_bg_clusters: the group's cluster count before the extend, so
 *                   backups already inside the old range are skipped.
 * @set:             1 to mark the backups allocated, 0 to undo.
 *
 * Return the number of backups found in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       u16 cl_cpg,
				       u16 old_bg_clusters,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster, lgd_cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		/* only backups living in the last group are of interest */
		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		/* check if already done backup super */
		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
		lgd_cluster += old_bg_clusters;
		if (lgd_cluster >= cluster)
			continue;
		/*
		 * Backup lies beyond the group's (new) end.  bg_bits is a
		 * little-endian on-disk field, so convert before comparing;
		 * the previous code compared the raw __le16 value, which is
		 * wrong on big-endian hosts.
		 */
		if ((cluster % cl_cpg) > (le16_to_cpu(gd->bg_bits) - 1))
			break;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}

/*
 * Grow the last group descriptor by @new_clusters clusters and bring
 * the global bitmap inode in sync, inside the caller's running
 * transaction @handle.
 *
 * @bm_inode/@bm_bh:  global bitmap inode and its dinode buffer.
 * @group_bh:         the last group descriptor's buffer.
 * @first_new_cluster: first cluster number being added (trace only).
 * @expand_clusters:  stored verbatim into fe->i_ex_clusters.
 *
 * When the BACKUP_SB compat feature is on, backup superblocks that now
 * fall inside the enlarged group are marked allocated and subtracted
 * from the free counts.  If the dinode journal access fails after the
 * group was modified, the group changes are rolled back in memory.
 * Returns 0 on success or a negative errno.
 */
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters,
					     int expand_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
	u16 old_bg_clusters;

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* remember the pre-extend size for backup calculation and rollback */
	old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * check whether there are some new backup superblocks exist in
	 * this group and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						     group,
						     cl_cpg, old_bg_clusters, 1);
		/* backup-super bits are in use, not free */
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);
	/* NOTE(review): i_ex_clusters appears to be a vendor field holding
	 * the target expanded size — confirm its semantics where declared. */
	fe->i_ex_clusters = cpu_to_le32(expand_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

out_rollback:
	/* undo the in-memory group-descriptor changes made above */
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    cl_cpg, old_bg_clusters, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}

/*
 * Copy the super block image in @data out to every backup superblock
 * location that lies inside the first @clusters clusters of the
 * volume.  Each copy is stamped with its own block number before being
 * written synchronously.
 *
 * Stops at the first backup beyond @clusters or at the first I/O
 * failure; returns 0 on success or the failing call's negative errno.
 */
int update_backups(struct inode *inode, u32 clusters, char *data)
{
	int idx = 0, status = 0;
	u32 bu_cluster;
	u64 bu_blkno;
	struct buffer_head *bu_bh = NULL;
	struct ocfs2_dinode *bu_di;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* walk every potential backup location inside the volume */
	while (idx < OCFS2_MAX_BACKUP_SUPERBLOCKS) {
		bu_blkno = ocfs2_backup_super_blkno(inode->i_sb, idx);
		bu_cluster = ocfs2_blocks_to_clusters(inode->i_sb, bu_blkno);
		if (bu_cluster >= clusters)
			break;

		status = ocfs2_read_blocks_sync(osb, bu_blkno, 1, &bu_bh);
		if (status < 0) {
			mlog_errno(status);
			break;
		}

		memcpy(bu_bh->b_data, data, inode->i_sb->s_blocksize);

		/* each backup records its own location */
		bu_di = (struct ocfs2_dinode *)bu_bh->b_data;
		bu_di->i_blkno = cpu_to_le64(bu_blkno);

		status = ocfs2_write_super_or_backup(osb, bu_bh);
		brelse(bu_bh);
		bu_bh = NULL;
		if (status < 0) {
			mlog_errno(status);
			break;
		}
		idx++;
	}

	return status;
}

/*
 * Update the on-disk super block — and its backups, when the BACKUP_SB
 * compat feature is enabled — according to @cmd.
 *
 * Serialized by osb->system_file_mutex plus the super block cluster
 * lock.  @para is interpreted per command: an attribute word for
 * OCFS2_IOC_UPDATE_STOR_ATTR, an ocfs2_expand_volume_para for the
 * group-extend/add commands, or a flags pointer for
 * OCFS2_IOC_SET_HIGH_PERF_DIO.
 *
 * Returns 0 on success or a negative errno.  A failed write is logged
 * as non-fatal; fsck.ocfs2 or a re-run of the resize can repair it.
 */
int ocfs2_update_super_and_backups(struct inode *inode,
		unsigned int cmd, void *para)
{
	int ret = 0;
	u32 clusters = 0;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_dinode *bm_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_expand_volume_para *ep;
	struct ocfs2_extend_group_input *input;
	unsigned int *flags;
	u16 storage_attribute = 0, tmp_attr = 0;

	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	mutex_lock(&osb->system_file_mutex);
	ret = ocfs2_super_block_lock(osb, 1);
	if (ret < 0) {
		mutex_unlock(&osb->system_file_mutex);
		mlog_errno(ret);
		return ret;
	}

	super_di = (struct ocfs2_dinode *)osb->osb_super_bh->b_data;

	switch (cmd) {
	case OCFS2_ZERO_UPDATE:
		/* enable WRITE SAME support once; skip if already set */
		if (le16_to_cpu(super_di->id2.i_super.s_storage_attribute) &
				OCFS2_VAAI_WS_SUPPORT) {
			mlog(ML_NOTICE,
				"writesame performance has already existed on UUID %s.\n",
				osb->uuid_str);
			goto out;
		}
		super_di->id2.i_super.s_storage_attribute |= cpu_to_le16(OCFS2_VAAI_WS_SUPPORT);
		spin_lock(&osb->osb_lock);
		osb->osb_ws_support = 1;
		spin_unlock(&osb->osb_lock);
		break;
	case OCFS2_IOC_UPDATE_STOR_ATTR:
		/* NOTE(review): a NULL @para leaves storage_attribute at 0,
		 * which clears every attribute below — confirm intended. */
		if (para) {
			storage_attribute = *(unsigned int *)para;
			storage_attribute &= (OCFS2_VAAI_WS_SUPPORT | OCFS2_VAAI_XCOPY_SUPPORT |
					OCFS2_FENCE_SCSI_SUPPORT);
		}
		tmp_attr = le16_to_cpu(super_di->id2.i_super.s_storage_attribute);
		tmp_attr &= (OCFS2_VAAI_WS_SUPPORT | OCFS2_VAAI_XCOPY_SUPPORT |
				OCFS2_FENCE_SCSI_SUPPORT);

		/* drop each capability absent from the requested attribute */
		if (!(storage_attribute & OCFS2_VAAI_WS_SUPPORT)) {
			super_di->id2.i_super.s_storage_attribute &=
					cpu_to_le16(~(OCFS2_VAAI_WS_SUPPORT |
					OCFS2_VAAI_WS_SWITCH));
			spin_lock(&osb->osb_lock);
			osb->osb_ws_support = 0;
			spin_unlock(&osb->osb_lock);
		}

		if (!(storage_attribute & OCFS2_VAAI_XCOPY_SUPPORT)) {
			super_di->id2.i_super.s_storage_attribute &=
					cpu_to_le16(~(OCFS2_VAAI_XCOPY_SUPPORT |
					OCFS2_VAAI_XCOPY_SWITCH));
			spin_lock(&osb->osb_lock);
			osb->osb_xcopy_support = 0;
			spin_unlock(&osb->osb_lock);
		}

		if (!(storage_attribute & OCFS2_FENCE_SCSI_SUPPORT)) {
			super_di->id2.i_super.s_storage_attribute &=
					cpu_to_le16(~(OCFS2_FENCE_SCSI_SUPPORT));
			spin_lock(&osb->osb_lock);
			osb->osb_fence_scsi_support = 0;
			spin_unlock(&osb->osb_lock);
		}

		/* if 'storage_attribute' is equal to the value on disk,
		 * no need set it to disk again.
		 */
		if (storage_attribute == tmp_attr) {
			mlog(ML_NOTICE, "%s: only need set storage_attribute 0x%x in mem\n",
				osb->uuid_str, storage_attribute);
			goto out;
		}
		mlog(ML_NOTICE, "%s: set storage_attribute 0x%x on disk\n",
				osb->uuid_str, storage_attribute);
		break;
	case OCFS2_IOC_GROUP_EXTEND:
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		/* propagate the bitmap's new size into the super block */
		ep = para;
		input = ep->ei;
		bm_di = (struct ocfs2_dinode *)ep->main_bm_bh->b_data;
		super_di->i_clusters = bm_di->i_clusters;
		if (input)
			super_di->i_ex_clusters = cpu_to_le32(input->expand_clusters);
		break;
	case OCFS2_IOC_FENCE_SCSI:
		spin_lock(&osb->osb_lock);
		osb->osb_fence_scsi_support = 1;
		spin_unlock(&osb->osb_lock);

		/* already persisted: nothing more to write */
		if (le16_to_cpu(super_di->id2.i_super.s_storage_attribute) &
				OCFS2_FENCE_SCSI_SUPPORT)
			goto out;
		super_di->id2.i_super.s_storage_attribute |= cpu_to_le16(OCFS2_FENCE_SCSI_SUPPORT);
		break;
	case OCFS2_IOC_CLEAR_FENCE_SCSI:
		spin_lock(&osb->osb_lock);
		osb->osb_fence_scsi_support = 0;
		spin_unlock(&osb->osb_lock);

		/* already clear on disk: nothing more to write */
		if (!(le16_to_cpu(super_di->id2.i_super.s_storage_attribute) &
				OCFS2_FENCE_SCSI_SUPPORT))
			goto out;
		super_di->id2.i_super.s_storage_attribute &= cpu_to_le16(~OCFS2_FENCE_SCSI_SUPPORT);
		break;
	case OCFS2_IOC_SET_HIGH_PERF_DIO:
		/* toggle the incompat feature bit in memory and on disk */
		flags = para;
		spin_lock(&osb->osb_lock);
		if (*flags && !ocfs2_supports_high_perf_dio(osb)) {
			osb->s_feature_incompat |= OCFS2_FEATURE_INCOMPAT_HIGH_PERF_DIO;
			super_di->id2.i_super.s_feature_incompat |=
					cpu_to_le32(OCFS2_FEATURE_INCOMPAT_HIGH_PERF_DIO);
		} else if (!(*flags) && ocfs2_supports_high_perf_dio(osb)) {
			osb->s_feature_incompat &= ~OCFS2_FEATURE_INCOMPAT_HIGH_PERF_DIO;
			super_di->id2.i_super.s_feature_incompat &=
					cpu_to_le32(~OCFS2_FEATURE_INCOMPAT_HIGH_PERF_DIO);
		}
		spin_unlock(&osb->osb_lock);
		break;
	default:
		mlog(ML_ERROR, "%s: invalid command %d\n", osb->uuid_str, cmd);
		ret = -EINVAL;
		goto out;
	}

	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, osb->osb_super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* mirror the new super block image to the backups */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		ret = update_backups(inode, clusters, osb->osb_super_bh->b_data);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			"(uuid %s) during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it or resize the volume again\n",
			osb->dev_str, osb->uuid_str);
	ocfs2_super_block_unlock(osb, 1);
	mutex_unlock(&osb->system_file_mutex);
	return ret;
}

/*
 * Extend the filesystem to the new number of clusters specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
int ocfs2_group_extend(struct inode *inode,
		unsigned int cmd,
		struct ocfs2_extend_group_input *input)
{
	int ret, update_sb = 0;
	handle_t *handle;
	journal_t *journal;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;
	struct ocfs2_expand_volume_para ep = {0};

	/* no resizing on a read-only mount */
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* extending by zero clusters is a successful no-op */
	if (input->new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	/* VFS lock first, then the cluster-wide inode lock */
	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * but cache may be destroyed when using dd command, so main_bm_bh
	 * may be corrupted then */
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		ocfs2_error(osb->sb, "Invalid dinode #%llu: signature = %.*s",
				(unsigned long long)OCFS2_I(main_bm_inode)->ip_blkno, 7,
				fe->i_signature);
		ret = -EROFS;
		goto out_unlock;
	}

	/* online resize requires full-sized cluster groups */
	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR,
			"%s: The disk is too old and small. Force to do offline resize.\n",
			osb->uuid_str);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* locate the group holding the current last cluster */
	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* the extended last group must not exceed one full group */
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + input->new_clusters >
		le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		mlog(ML_ERROR, "%s: invalid new_clusters.\n", osb->uuid_str);
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), input->new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						input->new_clusters,
						input->expand_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* only touch the super block after the bitmap update succeeded */
	ep.ei = input;
	ep.main_bm_bh = main_bm_bh;
	update_sb = 1;

out_commit:
	ocfs2_commit_trans(osb, handle);
	if (!ret) {
		/* force the transaction to disk before updating the super */
		journal = osb->journal->j_journal;
		ret = jbd2_journal_force_commit(journal);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}
	}
	if (update_sb)
		ocfs2_update_super_and_backups(main_bm_inode, cmd, (void *)&ep);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}

/*
 * Validate the new group descriptor read from disk against the values
 * supplied by the caller in @input (chain index, bit count, free bit
 * count).
 *
 * Returns 0 when consistent, -EINVAL (with an ML_ERROR log) on a
 * mismatch, or the error from the generic descriptor check.
 */
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* generic signature/field validation first */
	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR,
		     "%s: Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     osb->uuid_str,
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR,
		     "%s: Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     osb->uuid_str,
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		/* log the mismatching free bit count (the old code printed
		 * bg_bits here, which made the message misleading) */
		mlog(ML_ERROR,
		     "%s: Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     osb->uuid_str,
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}

/*
 * Sanity-check the caller's add-group request against the global
 * bitmap dinode @di and the descriptor in @group_bh.  Each failed
 * check logs its reason and the whole function returns -EINVAL (or
 * the descriptor-check error); 0 means the request is acceptable.
 */
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret;

	/* the new group must lie past the current end of the volume */
	if (cluster < total_clusters) {
		mlog(ML_ERROR, "%s: add a group which is in the current volume.\n", osb->uuid_str);
		return -EINVAL;
	}
	if (input->chain >= cl_count) {
		mlog(ML_ERROR, "%s: input chain exceeds the limit.\n", osb->uuid_str);
		return -EINVAL;
	}
	/* groups are appended round-robin; the next chain is fixed */
	if (next_free != cl_count && next_free != input->chain) {
		mlog(ML_ERROR,
		     "%s: the add group should be in chain %u\n", osb->uuid_str, next_free);
		return -EINVAL;
	}
	if (total_clusters + input->clusters < total_clusters) {
		mlog(ML_ERROR, "%s: add group's clusters overflow.\n", osb->uuid_str);
		return -EINVAL;
	}
	if (input->clusters > cl_cpg) {
		mlog(ML_ERROR, "%s: the cluster exceeds the maximum of a group\n", osb->uuid_str);
		return -EINVAL;
	}
	if (input->frees > input->clusters) {
		mlog(ML_ERROR, "%s: the free cluster exceeds the total clusters\n", osb->uuid_str);
		return -EINVAL;
	}
	if (total_clusters % cl_cpg != 0) {
		mlog(ML_ERROR,
		     "%s: the last group isn't full. Use group extend first.\n", osb->uuid_str);
		return -EINVAL;
	}
	if (input->group != ocfs2_which_cluster_group(inode, cluster)) {
		mlog(ML_ERROR, "%s: group blkno is invalid\n", osb->uuid_str);
		return -EINVAL;
	}
	ret = ocfs2_check_new_group(inode, di, input, group_bh);
	if (ret) {
		mlog(ML_ERROR, "%s: group descriptor check failed.\n", osb->uuid_str);
		return ret;
	}
	if (input->expand_clusters < total_clusters) {
		mlog(ML_ERROR, "%s: invalid expand_clusters(%u) and total_clusters(%u).\n",
				osb->uuid_str, input->expand_clusters, total_clusters);
		return -EINVAL;
	}

	return 0;
}

/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, unsigned int cmd, struct ocfs2_new_group_input *input)
{
	int ret, update_sb = 0;
	handle_t *handle;
	journal_t *journal;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;
	u64 bg_ptr;
	struct ocfs2_expand_volume_para ep = {0};
	struct ocfs2_extend_group_input support_input;

	/* no resizing on a read-only mount */
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	/* VFS lock first, then the cluster-wide inode lock */
	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* online resize requires full-sized cluster groups */
	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR,
			"%s: The disk is too old and small. Force to do offline resize.\n",
			osb->uuid_str);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* the new descriptor was pre-written by userspace; read it raw */
	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR,
			"%s: Can't read the group descriptor # %llu from the device.\n",
			osb->uuid_str, (unsigned long long)input->group);
		goto out_unlock;
	}

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			       input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* link the new group at the head of its chain; keep the old head
	 * pointer in bg_ptr so it can be restored on failure below */
	group = (struct ocfs2_group_desc *)group_bh->b_data;
	bg_ptr = le64_to_cpu(group->bg_next_group);
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		/* roll back the link made above */
		group->bg_next_group = cpu_to_le64(bg_ptr);
		mlog_errno(ret);
		goto out_commit;
	}

	/* starting a brand-new chain record: initialize it */
	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters *cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);
	fe->i_ex_clusters = cpu_to_le32(input->expand_clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	/* only touch the super block after the bitmap update succeeded */
	support_input.new_clusters = input->clusters;
	support_input.expand_clusters = input->expand_clusters;
	ep.ei = &support_input;
	ep.main_bm_bh = main_bm_bh;
	update_sb = 1;

out_commit:
	ocfs2_commit_trans(osb, handle);
	if (!ret) {
		/* force the transaction to disk before updating the super */
		journal = osb->journal->j_journal;
		ret = jbd2_journal_force_commit(journal);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}
	}
	if (update_sb)
		ocfs2_update_super_and_backups(main_bm_inode, cmd, (void *)&ep);

out_free_group_bh:
	brelse(group_bh);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}

static int cmp_func(const void *a, const void *b)
{
	int a_free_bit, b_free_bit;
	struct ocfs2_group_desc *gd;

	gd = (struct ocfs2_group_desc *)((*((struct buffer_head **)a))->b_data);
	a_free_bit = le16_to_cpu(gd->bg_free_bits_count);

	gd = (struct ocfs2_group_desc *)((*((struct buffer_head **)b))->b_data);
	b_free_bit = le16_to_cpu(gd->bg_free_bits_count);

	return b_free_bit - a_free_bit;
}

/*
 * Re-link chain @num of the global bitmap so that its group
 * descriptors are ordered by descending free bit count.
 *
 * Reads up to the balanced chain length worth of descriptors, sorts
 * their buffer heads by free bits, then rewrites the chain record's
 * head pointer and every bg_next_group link inside one transaction.
 * Chains with fewer than two descriptors are left untouched.
 *
 * Caller must hold the global bitmap inode locks.  Returns 0 on
 * success or a negative errno.
 */
static int ocfs2_resort_chain(struct ocfs2_super *osb,
			      struct inode *bm_inode,
			      struct buffer_head *bm_bh,
			      int num)
{
	int ret = 0, i;
	u32 num_gds, max_chain_len, chain_len = 0;
	u16 cpg;
	u64 blkno;
	journal_t *journal;
	handle_t *handle = NULL;
	struct ocfs2_chain_list *cl = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head **bh = NULL;
	struct ocfs2_group_desc *gd_next = NULL;
	struct ocfs2_group_desc *gd = NULL;

	fe = (struct ocfs2_dinode *)bm_bh->b_data;
	cl = &(fe->id2.i_chain);
	cpg = le16_to_cpu(cl->cl_cpg);
	/* total groups, rounded up, spread evenly over all chains */
	num_gds = (le32_to_cpu(fe->i_clusters) + cpg - 1) / cpg;
	max_chain_len = (num_gds + le16_to_cpu(cl->cl_count) - 1) / le16_to_cpu(cl->cl_count);
	blkno = le64_to_cpu(cl->cl_recs[num].c_blkno);

	/* nothing to do when there's only one gd in each chain */
	if (max_chain_len < 2)
		return 0;

	bh = kcalloc(max_chain_len, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	/* walk the chain, collecting one buffer head per descriptor */
	while (chain_len < max_chain_len && blkno) {
		ret = ocfs2_read_group_descriptor(bm_inode, fe, blkno, &bh[chain_len]);
		if (ret < 0) {
			mlog(ML_ERROR,
				"%s: Can't read the group descriptor # %llu from the device.\n",
				osb->uuid_str, (unsigned long long)blkno);
			goto out_free;
		}
		gd = (struct ocfs2_group_desc *)bh[chain_len]->b_data;
		blkno = le64_to_cpu(gd->bg_next_group);
		chain_len++;
	}

	/* may be still only one gd in some chains, minimize gd write as much as possible */
	if (chain_len < 2)
		goto out_free;

	/* sort buffer heads by free bit count, descending */
	sort(bh, chain_len, sizeof(struct buffer_head *), &cmp_func, NULL);

	/* global_bitmap dinode and chain_len gds update */
	handle = ocfs2_start_trans(osb, chain_len + 1);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode),
			bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* get write access to every descriptor before modifying any */
	for (i = 0; i < chain_len; i++) {
		ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				bh[i], OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	/* new chain head is the descriptor with the most free bits */
	gd = (struct ocfs2_group_desc *)bh[0]->b_data;
	cl->cl_recs[num].c_blkno = gd->bg_blkno;
	ocfs2_journal_dirty(handle, bm_bh);

	/* rewrite the next-group links to match the sorted order */
	for (i = 1; i < chain_len; i++) {
		gd_next = (struct ocfs2_group_desc *)bh[i]->b_data;
		gd->bg_next_group = gd_next->bg_blkno;
		ocfs2_journal_dirty(handle, bh[i - 1]);
		gd = gd_next;
	}

	/* terminate the chain at the last descriptor */
	gd->bg_next_group = 0;
	ocfs2_journal_dirty(handle, bh[i - 1]);

out_commit:
	ocfs2_commit_trans(osb, handle);
	if (!ret) {
		/* push the re-linked chain to disk before returning */
		journal = osb->journal->j_journal;
		ret = jbd2_journal_force_commit(journal);
		if (ret < 0)
			mlog_errno(ret);
	}

out_free:
	for (i = 0; i < chain_len; i++)
		brelse(bh[i]);
	kfree(bh);

	return ret;
}

/*
 * Sort every chain of the global bitmap so that, within each chain,
 * group descriptors appear in order of descending free bit count.
 *
 * The inode and cluster locks are taken and dropped once per chain so
 * other nodes are not starved for the whole resort.  Returns 0 on
 * success or a negative errno from locking.
 */
int ocfs2_resort_global_bitmap(struct inode *inode)
{
	int ret, cur_chain, total_chain;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_chain_list *cl;
	struct inode *global_bitmap_inode = NULL;
	struct buffer_head *bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* no bitmap rewriting on a read-only mount */
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	global_bitmap_inode = ocfs2_get_system_file_inode(osb,
			GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT);
	if (!global_bitmap_inode) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	cur_chain = total_chain = 0;
	do {
		/* re-take the locks each pass; the chain count is re-read
		 * under the lock because other nodes may change it */
		inode_lock(global_bitmap_inode);
		ret = ocfs2_inode_lock(global_bitmap_inode, &bh, 1);
		if (ret < 0) {
			mlog_errno(ret);
			inode_unlock(global_bitmap_inode);
			goto out;
		}

		fe = (struct ocfs2_dinode *)bh->b_data;
		cl = &(fe->id2.i_chain);
		total_chain = le16_to_cpu(cl->cl_next_free_rec);
		if (cur_chain >= total_chain) {
			/* NOTE(review): ret is still 0 here, so this error
			 * path reports success to the caller — confirm that
			 * is intended. */
			ocfs2_inode_unlock(global_bitmap_inode, 1);
			inode_unlock(global_bitmap_inode);
			mlog(ML_ERROR,
				"Chain %d exceeds the max chain length %d, group block (%llu)",
				cur_chain, total_chain, (unsigned long long)bh->b_blocknr);
			goto out;
		}

		ret = ocfs2_resort_chain(osb, global_bitmap_inode, bh, cur_chain);
		if (ret) {
			ocfs2_inode_unlock(global_bitmap_inode, 1);
			inode_unlock(global_bitmap_inode);
			mlog(ML_ERROR, "sort chain %d failed\n", cur_chain);
			goto out;
		}

		cur_chain++;
		ocfs2_inode_unlock(global_bitmap_inode, 1);
		inode_unlock(global_bitmap_inode);
		brelse(bh);
		bh = NULL;
	} while (cur_chain < total_chain);

out:
	if (bh)
		brelse(bh);
	iput(global_bitmap_inode);
	return ret;
}

/*
 * Persist the truncate-log flush state into the super block.
 *
 * @sb_need_flush: value stored in tl_need_flush_flag.
 * @cuptime:       sequence/timestamp stored in tl_flush_flag_seq.
 *
 * Serialized by osb->system_file_mutex and the super block cluster
 * lock.  Returns 0 on success or a negative errno.
 */
int ocfs2_update_super_tl_flag(struct ocfs2_super *osb, int sb_need_flush, u64 cuptime)
{
	int status;
	struct ocfs2_dinode *sb_di;

	mutex_lock(&osb->system_file_mutex);

	status = ocfs2_super_block_lock(osb, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	sb_di = (struct ocfs2_dinode *)osb->osb_super_bh->b_data;
	sb_di->id2.i_super.tl_need_flush_flag = cpu_to_le16(sb_need_flush);
	sb_di->id2.i_super.tl_flush_flag_seq = cpu_to_le64(cuptime);

	status = ocfs2_write_super_or_backup(osb, osb->osb_super_bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_super_block_unlock(osb, 1);
out_mutex:
	mutex_unlock(&osb->system_file_mutex);
	return status;
}
