// SPDX-License-Identifier: GPL-2.0
/*
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Created by:	Qiu Zhao, 2023-04-12
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 */
#include <linux/math64.h>

#include <asm/div64.h>

#include "dealloc.h"
#include "unmap.h"
#include "aops.h"
#include "blockcheck.h"
#include "lockglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "sysfile.h"
#include "file.h"
#include "super.h"
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"
#include "resize.h"
/* Reset every unmap statistics counter back to zero. */
static void ocfs2_init_unmap_stats(struct ocfs2_unmap_stats *s)
{
	*s = (struct ocfs2_unmap_stats){0};
}

/*
 * Account one unmap command of @len clusters and its wall-clock
 * duration (@before .. @after) into the per-superblock statistics.
 * Best effort: if the stats spinlock is contended the sample is
 * silently dropped rather than waited for.
 */
void ocfs2_update_unmap_stats(struct super_block *sb, int len,
		ktime_t before, ktime_t after)
{
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct ocfs2_unmap_stats *stats = osb->osb_unmap_stats;
	unsigned int delta_ms;
	unsigned long flags;

	if (!spin_trylock_irqsave(&osb->osb_unmap_stats_lock, flags))
		return;

	/* Start over before either aggregate counter can overflow. */
	if (stats->unmap_command_count == ULLONG_MAX ||
	    len >= ULLONG_MAX - stats->unmap_data_size) {
		mlog(ML_NOTICE, "Unmap state reset due to cmd count %llu or data size %llu.\n",
				stats->unmap_command_count, stats->unmap_data_size);
		ocfs2_init_unmap_stats(stats);
	}

	stats->unmap_command_count++;
	stats->unmap_data_size += len;

	delta_ms = (unsigned int) ktime_ms_delta(after, before);
	if (stats->unmap_delay_count >= MAX_DELAY_COUNT) {
		/* Window full: fold it into the average, then restart
		 * the window with this sample as its first entry. */
		stats->unmap_average_delay_ms = div_u64(stats->unmap_delay_timems,
							stats->unmap_delay_count);
		stats->unmap_delay_count = 1;
		stats->unmap_delay_timems = delta_ms;
	} else {
		stats->unmap_delay_count++;
		stats->unmap_delay_timems += delta_ms;
	}
	spin_unlock_irqrestore(&osb->osb_unmap_stats_lock, flags);
}

/*
 * Read the number of pending records from this slot's unmap log dinode.
 *
 * Returns the record count (0 when the log is empty) and, when the
 * count is non-zero, points *unmap_log at the on-disk log structure.
 * Returns -EROFS when the cached dinode fails validation; the return
 * type is int (not unsigned) so callers can detect the negative errno
 * instead of misreading it as a huge record count.
 */
static int ocfs2_get_unmap_records(struct ocfs2_super *osb,
		struct ocfs2_unmap_log **unmap_log)
{
	unsigned int num_to_flush;
	struct inode *ul_inode = osb->osb_ul_inode;
	struct buffer_head *ul_bh = osb->osb_ul_bh;
	struct ocfs2_dinode *di;
	struct ocfs2_unmap_log *ul;

	di = (struct ocfs2_dinode *) ul_bh->b_data;

	/* ul_bh is loaded at unmap log init time.  It's validated by the
	 * underlying call to ocfs2_read_inode_block(), but the cache may
	 * be destroyed when using the dd command, so ul_bh may be
	 * corrupted by now. */
	if (!OCFS2_IS_VALID_DINODE(di)) {
		/* Precision 7 = length of the on-disk signature string,
		 * matching the equivalent check in the truncate log code. */
		ocfs2_error(osb->sb, "Invalid dinode #%llu: signature = %.*s\n",
				(unsigned long long) OCFS2_I(ul_inode)->ip_blkno,
				7, di->i_signature);
		return -EROFS;
	}

	ul = &di->id2.i_unmap;
	num_to_flush = le16_to_cpu(ul->ul_used);
	mlog(ML_RO, "ocfs2 get unmap records: num of unmap_log_rec = [%u]\n", num_to_flush);
	if (!num_to_flush)
		return 0;
	*unmap_log = ul;
	return num_to_flush;
}

/* Expects you to already be holding ul_inode->i_mutex */
int ocfs2_flush_unmap_log(struct ocfs2_super *osb)
{
	int status = 0;
	int i;
	tid_t target;
	long long record_size = 0;
	unsigned int num_records = 0;
	u64 start_blk;
	unsigned int start_cluster, num_clusters;
	struct inode *ul_inode = osb->osb_ul_inode;
	struct ocfs2_unmap_log *ul;
	struct ocfs2_unmap_rec rec;

	BUG_ON(inode_trylock(ul_inode));
	num_records = ocfs2_get_unmap_records(osb, &ul);
	if (num_records == 0)
		return status;

	i = le16_to_cpu(ul->ul_used) - 1;
	while (i >= 0) {
		rec = ul->ul_recs[i];

		start_blk = ocfs2_clusters_to_blocks(osb->sb, le32_to_cpu(rec.u_start));
		start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
		num_clusters = le32_to_cpu(rec.u_clusters);

		/* calculate the clusters we gonna unmap,
		 * if unmap failed, then we just accept and continue to free the bitmap*/
		status = ocfs2_dealloc_fs_range(osb->sb, le32_to_cpu(rec.u_start), num_clusters);
		if (status && status != -EOPNOTSUPP)
			mlog_errno(status);

		status = ocfs2_free_dealloc_clusters(osb, start_blk, num_clusters, ul);
		if (status < 0) {
			mlog_errno(status);
			return status;
		}

		if (jbd2_journal_start_commit(osb->journal->j_journal, &target))
			jbd2_log_wait_commit(osb->journal->j_journal, target);

		record_size = ocfs2_get_record_size(osb->sb, num_clusters);
		mlog(ML_RO, "%s: unmap_log_flush: The unmap_rec[%u]'s size = [%lld]Bytes,"
					"start_cluster[%u], num_clusters[%u],"
					"ul_used[%u], ul_count[%u]\n",
				osb->uuid_str, i, record_size, start_cluster, num_clusters,
				le16_to_cpu(ul->ul_used), le16_to_cpu(ul->ul_count));
		i--;
	}

	return status;
}

/*
 * Delayed-work handler: drain the unmap log under the log inode's
 * mutex, then re-enable slot stealing on success.
 */
static void ocfs2_clear_unmap_log_worker(struct work_struct *work)
{
	struct ocfs2_super *osb =
		container_of(work, struct ocfs2_super, osb_clear_unmap_log_wq.work);
	struct inode *ul_inode = osb->osb_ul_inode;
	int ret;

	inode_lock(ul_inode);
	ret = ocfs2_flush_unmap_log(osb);
	inode_unlock(ul_inode);

	if (ret >= 0)
		ocfs2_init_steal_slots(osb);
	else
		mlog_errno(ret);
}

/*
 * Look up the number of pending truncate log records for this slot.
 *
 * Takes tl_inode->i_mutex.  On a positive return (records pending)
 * the mutex is STILL HELD and *tr_log points at the on-disk truncate
 * log; the caller must release the mutex when done.  On a zero or
 * negative return the mutex has already been dropped.
 */
static int ocfs2_truncate_records_search(struct ocfs2_super *osb,
		struct ocfs2_truncate_log **tr_log)
{
	int status;
	unsigned int num_to_flush;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	inode_lock(tl_inode);

	di = (struct ocfs2_dinode *) tl_bh->b_data;

	/* tl_bh is loaded from ocfs2_truncate_log_init().  It's validated
	 * by the underlying call to ocfs2_read_inode_block(), but cache
	 * may be destroyed when using dd command, so the tl_bh may
	 * be corrupted then */
	if (!OCFS2_IS_VALID_DINODE(di)) {
		/* Precision 7 = length of the on-disk signature string. */
		ocfs2_error(osb->sb, "Invalid dinode #%llu: signature = %.*s\n",
				(unsigned long long) OCFS2_I(tl_inode)->ip_blkno,
				7, di->i_signature);
		status = -EROFS;
		goto out_unlock;
	}

	tl = &di->id2.i_dealloc;
	num_to_flush = le16_to_cpu(tl->tl_used);
	mlog(ML_RO, "truncate_records_search: num of truncate_log_rec = [%u]\n", num_to_flush);
	if (!num_to_flush) {
		status = 0;
		goto out_unlock;
	}

	/* Hope there is nothing wrong with tr_used */
	status = ocfs2_validate_tl_used_and_tr_used(osb, tl);
	if (status) {
		mlog_errno(status);
		goto out_unlock;
	}

	/* Success: hand the log back with tl_inode still locked. */
	*tr_log = tl;
	return num_to_flush;

out_unlock:
	inode_unlock(tl_inode);
	return status;
}

/*
 * Migrate every pending truncate log record into the unmap log, then
 * flush the unmap log, repeating until the truncate log is empty.
 *
 * Holds ul_inode->i_mutex across the whole operation.  Each loop
 * iteration acquires tl_inode->i_mutex inside
 * ocfs2_truncate_records_search() (held on its positive return) and
 * releases it as soon as the transfer transaction has committed.
 *
 * Returns 0 on success or a negative error code.
 */
int ocfs2_transfer_truncate_log_to_unmap(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_truncate_log *tr_log;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *ul_inode = osb->osb_ul_inode;
	tid_t target;
	handle_t *handle;

	inode_lock(ul_inode);

	/* We will hold tl_inode->i_mutex in ocfs2_truncate_records_search */
	while ((status = ocfs2_truncate_records_search(osb, &tr_log)) > 0) {
		BUG_ON(inode_trylock(tl_inode));

		handle = ocfs2_start_trans(osb, OCFS2_TRANSFER_TRUNCATE_LOG);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto out_unlock_tl;
		}

		status = ocfs2_transfer_truncate_log(osb, handle, tr_log);
		ocfs2_commit_trans(osb, handle);
		if (status < 0) {
			mlog_errno(status);
			goto out_unlock_tl;
		}

		/* Make sure the transfer has hit the journal before the
		 * unmap log flush starts freeing the clusters. */
		if (jbd2_journal_start_commit(osb->journal->j_journal, &target))
			jbd2_log_wait_commit(osb->journal->j_journal, target);

		/* After transfer truncate log, we must release tl_inode->i_mutex right away */
		inode_unlock(tl_inode);

		status = ocfs2_flush_unmap_log(osb);
		if (status < 0) {
			mlog_errno(status);
			goto out_unlock_ul;
		}
	}
	goto out_unlock_ul;

out_unlock_tl:
	inode_unlock(tl_inode);
out_unlock_ul:
	inode_unlock(ul_inode);
	return status;
}

/*
 * Delayed-work handler: move truncate log records into the unmap log
 * and flush them; re-enable slot stealing on success.
 */
static void ocfs2_unmap_log_worker(struct work_struct *work)
{
	struct ocfs2_super *osb =
		container_of(work, struct ocfs2_super, osb_unmap_log_wq.work);
	int ret = ocfs2_transfer_truncate_log_to_unmap(osb);

	if (ret >= 0)
		ocfs2_init_steal_slots(osb);
	else
		mlog_errno(ret);
}

#define OCFS2_UNMAP_LOG_FLUSH_INTERVAL (2 * HZ)
/*
 * (Re)arm the delayed unmap-log transfer work, optionally cancelling
 * a previously queued instance first so the timer starts over.
 */
void ocfs2_schedule_unmap_log_flush(struct ocfs2_super *osb, int cancel)
{
	/* Nothing to do without a log inode or while unmap is disabled. */
	if (!osb->osb_ul_inode || atomic_read(&osb->osb_ul_disable) != 0)
		return;

	/* We want to push off log flushes while unmap are still running. */
	if (cancel)
		cancel_delayed_work(&osb->osb_unmap_log_wq);

	queue_delayed_work(osb->unmap_wq, &osb->osb_unmap_log_wq,
			OCFS2_UNMAP_LOG_FLUSH_INTERVAL);
}

/*
 * (Re)arm the delayed unmap-log clear work, optionally cancelling a
 * previously queued instance first so the timer starts over.
 */
void ocfs2_schedule_clear_unmap_log(struct ocfs2_super *osb, int cancel)
{
	/* Nothing to do without a log inode or while unmap is disabled. */
	if (!osb->osb_ul_inode || atomic_read(&osb->osb_ul_disable) != 0)
		return;

	/* We want to push off log flushes while unmap are still running. */
	if (cancel)
		cancel_delayed_work(&osb->osb_clear_unmap_log_wq);

	queue_delayed_work(osb->unmap_wq, &osb->osb_clear_unmap_log_wq,
			OCFS2_UNMAP_LOG_FLUSH_INTERVAL);
}

/*
 * Load the unmap log system inode for @slot_num and read its dinode
 * block.
 *
 * On success returns 0 and hands back a referenced inode in *ul_inode
 * and a buffer_head in *ul_bh — the caller owns both references.  On
 * failure returns a negative error code and leaves the outputs
 * untouched.
 */
int ocfs2_get_unmap_log_info(struct ocfs2_super *osb,
		int slot_num,
		struct inode **ul_inode,
		struct buffer_head **ul_bh)
{
	int status;
	struct inode *inode = NULL;
	struct buffer_head *bh = NULL;

	inode = ocfs2_get_system_file_inode(osb, UNMAP_LOG_SYSTEM_INODE, slot_num);
	if (!inode) {
		status = -EINVAL;
		/* Fixed message typo: was "Could not get load ..." */
		mlog(ML_ERROR, "%s: Could not load unmap log inode!\n", osb->uuid_str);
		goto bail;
	}

	/* Bypass the cache so the on-disk dinode gets (re)validated. */
	status = ocfs2_read_inode_block_full(inode, &bh, OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		iput(inode);
		mlog_errno(status);
		goto bail;
	}

	*ul_inode = inode;
	*ul_bh = bh;
bail:
	return status;
}

/*
 * Initialize this slot's unmap log state on the superblock: load the
 * log inode and its dinode buffer, and set up the two delayed work
 * items used to transfer and clear the log.
 *
 * NOTE(review): on a load failure this still initializes the work
 * items and stores the NULL inode/bh before returning the negative
 * status.  That looks deliberate — the schedule helpers check
 * osb_ul_inode for NULL — but confirm the caller treats a negative
 * return as fatal.
 */
int ocfs2_unmap_log_init(struct ocfs2_super *osb)
{
	int status;
	struct inode *ul_inode = NULL;
	struct buffer_head *ul_bh = NULL;

	status = ocfs2_get_unmap_log_info(osb, osb->slot_num, &ul_inode, &ul_bh);
	if (status < 0)
		mlog_errno(status);

	/* Delayed work items for periodic log transfer and clearing. */
	INIT_DELAYED_WORK(&osb->osb_unmap_log_wq,
			ocfs2_unmap_log_worker);
	INIT_DELAYED_WORK(&osb->osb_clear_unmap_log_wq,
			ocfs2_clear_unmap_log_worker);

	/* Unmap enabled by default; publish the (possibly NULL) refs. */
	atomic_set(&osb->osb_ul_disable, 0);
	osb->osb_ul_bh = ul_bh;
	osb->osb_ul_inode = ul_inode;

	return status;
}
