// SPDX-License-Identifier: GPL-2.0
/*
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Created by:	Qiu Zhao, 2023-04-12
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 */

#include "dealloc.h"
#include "unmap.h"

/*
 * Validate a truncate record list block after it has been read from disk.
 *
 * @sb: super block of the filesystem the buffer belongs to
 * @bh: buffer holding an ocfs2_truncate_rec_list
 *
 * Returns 0 on success, the ecc error code on checksum failure, or
 * -EINVAL (after marking the fs with ocfs2_error) when the block's
 * generation does not match the mounted filesystem.
 */
int ocfs2_validate_truncate_rec_list(struct super_block *sb,
		struct buffer_head *bh)
{
	int rc;
	struct ocfs2_truncate_rec_list *tr_list =
			(struct ocfs2_truncate_rec_list *) bh->b_data;

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &tr_list->tr_check);
	if (rc) {
		mlog(ML_ERROR, "%s: Validate failed for expand truncate log block %llu\n",
				OCFS2_SB(sb)->uuid_str, (unsigned long long) bh->b_blocknr);
		return rc;
	}

	/*
	 * Errors after here are fatal.
	 */

	if (le32_to_cpu(tr_list->tr_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
				"Truncate rec list #%llu has an invalid "
				"tr_fs_generation of #%u and ocfs2_super fs_generation is #%u",
				(unsigned long long) bh->b_blocknr,
				le32_to_cpu(tr_list->tr_fs_generation),
				OCFS2_SB(sb)->fs_generation);
		return -EINVAL;
	}

	return 0;
}

static bool ocfs2_expanded_truncate_log_can_coalesce(struct ocfs2_truncate_rec_list *tr_list,
		unsigned int new_start)
{
	unsigned int tail_index;
	unsigned int current_tail;

	/* No records, nothing to coalesce */
	if (!le16_to_cpu(tr_list->tr_used))
		return 0;

	tail_index = le16_to_cpu(tr_list->tr_used) - 1;
	current_tail = le32_to_cpu(tr_list->tl_recs[tail_index].t_start);
	current_tail += le32_to_cpu(tr_list->tl_recs[tail_index].t_clusters);
	if (current_tail == new_start) {
		mlog(ML_RO,
			 "expanded_truncate_log_can_coalesce: "
			 "rec[%u]'s current_tail[%u] is equal to rec[%u]'s start[%u],it will coalesce\n",
			 tail_index, current_tail, tail_index + 1, new_start);
	}

	return current_tail == new_start;
}

/*
 * Append a truncate record to the block-backed (expanded) portion of the
 * truncate log.  The caller dispatches here only once the inode's inline
 * record slots are full, so tl_used >= ocfs2_truncate_recs_per_inode()
 * is assumed on entry.
 *
 * @osb:           filesystem superblock private data
 * @handle:        running journal handle with credits for one record block
 * @tl:            inline truncate log in the tl inode (tl_used is updated here)
 * @start_cluster: first cluster of the freed range
 * @num_clusters:  length of the freed range in clusters
 *
 * Returns 0 on success or a negative error from block read / journal access.
 */
static int ocfs2_expanded_truncate_log_append(struct ocfs2_super *osb, handle_t *handle,
		struct ocfs2_truncate_log *tl,
		unsigned int start_cluster,
		unsigned int num_clusters)
{
	int status, index;
	int t_clusters = num_clusters;
	u64 tl_blkno, tl_recs_blkno;
	unsigned int tl_inline_count, tl_rec_per_block;
	struct buffer_head *tr_bh = NULL;
	struct ocfs2_truncate_rec_list *tr_list;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;

	/* First block of the cluster that backs the expanded records. */
	tl_blkno = ocfs2_clusters_to_blocks(osb->sb, tl->tl_clusterno);
	tl_inline_count = ocfs2_truncate_recs_per_inode(osb->sb);
	tl_rec_per_block = ocfs2_truncate_recs_per_block(osb->sb);

	index = le16_to_cpu(tl->tl_used);

	/* Translate the global record index into (block, in-block slot). */
	tl_recs_blkno = tl_blkno + (index - tl_inline_count) / tl_rec_per_block;
	index = (index - tl_inline_count) % tl_rec_per_block;
	status = ocfs2_read_block(INODE_CACHE(tl_inode), tl_recs_blkno,
			&tr_bh, ocfs2_validate_truncate_rec_list);
	if (status < 0) {
		mlog_errno(status);
		if (tr_bh != NULL)
			brelse(tr_bh);
		return status;
	}
	tr_list = (struct ocfs2_truncate_rec_list *) tr_bh->b_data;
	/* Journal access must be taken before any modification below. */
	status = ocfs2_journal_access_tr(handle, INODE_CACHE(tl_inode),
			tr_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		brelse(tr_bh);
		return status;
	}
	if (ocfs2_expanded_truncate_log_can_coalesce(tr_list, start_cluster)) {
		/* Extend the tail record rather than using a new slot.
		 * NOTE(review): if index == 0 (slot 0 of this block) while
		 * tr_used is still nonzero from an earlier fill, index--
		 * would reference tl_recs[-1] — confirm tr_used is reset when
		 * a block's records are flushed, or that coalescing cannot
		 * trigger at slot 0. */
		index--;
		t_clusters += le32_to_cpu(tr_list->tl_recs[index].t_clusters);
	} else {
		tr_list->tl_recs[index].t_start = cpu_to_le32(start_cluster);
		tr_list->tr_used = cpu_to_le16(index + 1);
		/* The inline header counts records across all blocks. */
		tl->tl_used = cpu_to_le16(le16_to_cpu(tl->tl_used) + 1);
	}
	tr_list->tl_recs[index].t_clusters = cpu_to_le32(t_clusters);
	mlog(ML_RO, "%s: expanded_truncate_log_append: tl_recs_blkno[%llu],"
				"start_cluster[%u], clusters[%u], tl_used[%u], tl_count[%u] "
				"tr_used[%u], tr_count[%u]\n",
			osb->uuid_str, tl_recs_blkno,
			start_cluster, t_clusters,
			le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count),
			le16_to_cpu(tr_list->tr_used), le16_to_cpu(tr_list->tr_count));
	/* Dirty both the record block and the tl inode buffer; the caller
	 * owns the handle and commits the transaction. */
	ocfs2_journal_dirty(handle, tr_bh);
	ocfs2_journal_dirty(handle, tl_bh);

	brelse(tr_bh);
	return status;
}

/*
 * Return nonzero when a freed range beginning at @new_start is contiguous
 * with the last record of the inline truncate log @tl, i.e. the two
 * ranges can be merged into a single record.
 */
static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
		unsigned int new_start)
{
	unsigned int used = le16_to_cpu(tl->tl_used);
	unsigned int tail_end;

	/* An empty log has no tail record to merge with. */
	if (used == 0)
		return 0;

	/* End (exclusive) of the last used record. */
	tail_end = le32_to_cpu(tl->tl_recs[used - 1].t_start) +
		   le32_to_cpu(tl->tl_recs[used - 1].t_clusters);

	return tail_end == new_start;
}

/*
 * Total number of truncate records the expanded truncate log can hold:
 * the inline records in the tl inode plus the records stored in the
 * blocks of one cluster.
 */
static int ocfs2_max_truncate_recs(struct super_block *sb)
{
	int inline_recs = ocfs2_truncate_recs_per_inode(sb);
	int recs_per_blk = ocfs2_truncate_recs_per_block(sb);
	int blk_cnt = ocfs2_clusters_to_blocks(sb, 1);

	return inline_recs + blk_cnt * recs_per_blk;
}

/*
 * Append a record for the freed range [@start_cluster, @start_cluster +
 * @num_clusters) to the truncate log, spilling into the expanded
 * block-backed area once the inode's inline record slots are full.
 *
 * The caller must hold the truncate log inode's lock and have started
 * @handle with credits for the dinode and one record block.
 *
 * Returns 0 on success, -ENOSPC when the log is full (caller should
 * flush first), -EROFS on a corrupt record count, or a negative journal
 * error.
 */
int ocfs2_truncate_log_append_with_expand(struct ocfs2_super *osb, handle_t *handle,
		struct ocfs2_truncate_log *tl,
		unsigned int start_cluster,
		unsigned int num_clusters)
{
	int status, index;
	int t_clusters = num_clusters;
	unsigned int tl_count, tl_inline_count;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;

	/* Sanity check the on-disk record count before trusting it. */
	tl_count = le16_to_cpu(tl->tl_count);
	if (tl_count > ocfs2_max_truncate_recs(osb->sb) || tl_count == 0) {
		ocfs2_error(osb->sb, "Truncate record count on #%llu invalid wanted %u, actual %u",
				(unsigned long long) OCFS2_I(tl_inode)->ip_blkno,
				ocfs2_max_truncate_recs(osb->sb),
				tl_count);
		return -EROFS;
	}

	tl_inline_count = ocfs2_truncate_recs_per_inode(osb->sb);
	index = le16_to_cpu(tl->tl_used);
	if (index >= tl_count) {
		/* Caller should have known to flush before calling us. */
		status = -ENOSPC;
		mlog_errno(status);
		return status;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode),
			tl_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}
	trace_ocfs2_truncate_log_append_with_expand(
			(unsigned long long) OCFS2_I(tl_inode)->ip_blkno, index,
			start_cluster, t_clusters);

	/* If tl inode can't save any more records,
	 * we need to find the blk which used to save records now */
	if (index >= tl_inline_count) {
		status = ocfs2_expanded_truncate_log_append(osb, handle,
				tl, start_cluster, t_clusters);
		if (status < 0) {
			mlog_errno(status);
			return status;
		}
	} else {
		if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
			/* Merge with the tail record; tl_used is nonzero here. */
			index--;
			t_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
		} else {
			tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
			tl->tl_used = cpu_to_le16(index + 1);
		}
		tl->tl_recs[index].t_clusters = cpu_to_le32(t_clusters);
		ocfs2_journal_dirty(handle, tl_bh);
		/* Log the (possibly coalesced) cluster count, matching the
		 * no-expand append path. */
		mlog(ML_RO, "%s: truncate_log_append: start_cluster[%u], "
					"clusters[%u], tl_used[%u], tl_count[%u]\n",
				osb->uuid_str, start_cluster, t_clusters,
				le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
	}

	return status;
}


/*
 * Append a record for the freed range [@start_cluster, @start_cluster +
 * @num_clusters) to the inline (inode-resident) truncate log only.
 *
 * The caller must hold the truncate log inode's lock and have started
 * @handle with credits for the dinode.
 *
 * Returns 0 on success, -ENOSPC when the inline log is full (caller
 * should flush first), -EROFS on a corrupt record count, or a negative
 * journal error.
 */
int ocfs2_truncate_log_append_no_expand(struct ocfs2_super *osb, handle_t *handle,
		struct ocfs2_truncate_log *tl,
		unsigned int start_cluster,
		unsigned int num_clusters)
{
	int status, index;
	unsigned int tl_count;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	unsigned int total_clusters = num_clusters;

	/* Sanity check the on-disk record count before trusting it. */
	tl_count = le16_to_cpu(tl->tl_count);
	if (tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || tl_count == 0) {
		ocfs2_error(osb->sb, "Truncate record count on #%llu invalid wanted %u, actual %u",
				(unsigned long long) OCFS2_I(tl_inode)->ip_blkno,
				ocfs2_truncate_recs_per_inode(osb->sb),
				le16_to_cpu(tl->tl_count));
		return -EROFS;
	}

	/* Caller should have known to flush before calling us. */
	index = le16_to_cpu(tl->tl_used);
	if (index >= tl_count) {
		status = -ENOSPC;
		mlog_errno(status);
		return status;
	}

	/* Journal access must be taken before any modification below. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode),
			tl_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	trace_ocfs2_truncate_log_append_no_expand(
			(unsigned long long) OCFS2_I(tl_inode)->ip_blkno, index,
			start_cluster, num_clusters);

	if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
		/*
		 * Move index back to the record we are coalescing with.
		 * ocfs2_truncate_log_can_coalesce() guarantees nonzero
		 */
		index--;

		total_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
		trace_ocfs2_truncate_log_append_no_expand(
				(unsigned long long) OCFS2_I(tl_inode)->ip_blkno, index,
				le32_to_cpu(tl->tl_recs[index].t_start), total_clusters);
	} else {
		/* Start a fresh record in the next free slot. */
		tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
		tl->tl_used = cpu_to_le16(index + 1);
	}
	tl->tl_recs[index].t_clusters = cpu_to_le32(total_clusters);
	mlog(ML_RO, "%s: truncate_log_append: start_cluster[%u], "
				"clusters[%u], tl_used[%u], tl_count[%u]\n",
			osb->uuid_str, start_cluster,
			total_clusters, le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));

	ocfs2_journal_dirty(handle, tl_bh);

	return status;
}

#define UNMAP_GET_QOS_TIMEOUT  (2 * 60 * 1000)

/*
 * Issue discard (unmap) requests for a cluster range, in units of
 * OCFS2_DISCARD_UNIT_SIZE, throttled by the QoS token mechanism.
 *
 * @sb:    the super block of the filesystem
 * @start: first cluster of the range to discard
 * @len:   length of the range in clusters
 *
 * Returns 0 on success, -EOPNOTSUPP when the device does not support
 * discard, or a negative error from the QoS token / discard submission.
 */
int ocfs2_dealloc_fs_range(struct super_block *sb, u64 start, u64 len)
{
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int discard_unit_blocks, discard_size_bytes, discard_size_blocks;
	u64 remain_blocks, start_blocks;
	int status = 0;
	ktime_t before_unmap, after_unmap;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Work in filesystem blocks from here on. */
	remain_blocks = ocfs2_clusters_to_blocks(sb, len);
	start_blocks = ocfs2_clusters_to_blocks(sb, start);
	discard_unit_blocks = OCFS2_DISCARD_UNIT_SIZE >> sb->s_blocksize_bits;
	discard_size_bytes = OCFS2_DISCARD_UNIT_SIZE;
	discard_size_blocks = discard_unit_blocks;
	while (remain_blocks > 0) {
		/* Final partial chunk: shrink the discard to what remains. */
		if (remain_blocks < discard_unit_blocks) {
			discard_size_blocks = remain_blocks;
			discard_size_bytes = discard_size_blocks << sb->s_blocksize_bits;
		}
		/* Throttle: block until QoS grants bandwidth or timeout. */
		status = ocfs2_get_qos_token(osb->sb, discard_size_bytes, UNMAP_GET_QOS_TIMEOUT);
		if (status < 0)
			return status;
		mlog(ML_RO, "%s: do discard from block [%llu] length[%d] cluster %u len %u.\n",
				osb->uuid_str, start_blocks, discard_size_blocks,
				ocfs2_blocks_to_clusters(sb, start_blocks),
				ocfs2_blocks_to_clusters(sb, discard_size_blocks));

		before_unmap = ktime_get_real();
		status = sb_issue_discard(sb, start_blocks, discard_size_blocks, GFP_NOFS, 0);
		if (status < 0)
			return status;
		after_unmap = ktime_get_real();

		/* NOTE(review): the full cluster count @len is reported every
		 * iteration, not the per-chunk size — confirm the stats helper
		 * expects the total, otherwise this double-counts. */
		ocfs2_update_unmap_stats(sb, len, before_unmap, after_unmap);
		remain_blocks -= discard_size_blocks;
		start_blocks += discard_size_blocks;
	}
	return status;
}
