/**
 * (C) Copyright 2019-2024 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-2-Clause-Patent
 */
/**
 * Implementation for aggregation and discard
 */
#define D_LOGFAC	DD_FAC(vos)

#include <daos_srv/vos.h>
#include <daos/checksum.h>
#include <daos_srv/srv_csum.h>
#include "vos_internal.h"
#include "evt_priv.h"

unsigned int vos_agg_nvme_thresh = VOS_MW_NVME_THRESH;

/*
 * The EV tree sorted iterator returns logical entries in extent start order,
 * along with information such as: the physical entry each belongs to, its
 * visibility, whether it is the last entry, etc. One thing worth noting is
 * that the sorted iterator works on an out-of-band auxiliary index, so it
 * won't be affected by any in-tree modifications.
 *
 * EV tree aggregation is driven by the sorted iterator: As the iterator moves
 * on, the visible logical entries are queued to form a merge window, and all
 * the physical entries (whether fully covered or not) within the window are
 * queued too. When the window size reaches a certain threshold, a procedure
 * called merge window flush is triggered to replace the old physical entries
 * with new coalesced physical entries.
 *
 * On merge window flush, the queued visible logical entries are used to
 * facilitate the data transfer from old physical entries to new coalesced
 * physical entries. If any old physical entry (not fully covered in the
 * current window) straddles the window end, it has to be head-truncated on
 * window flush, and the remaining part is processed in the next merge window.
 */

/* EV tree physical entry (an in-tree extent queued in the merge window) */
struct agg_phy_ent {
	/* Link into agg_merge_window::mw_phy_ents */
	d_list_t		pe_link;
	/* Original in-tree rectangle */
	struct evt_rect		pe_rect;
	/* Entry payload address */
	bio_addr_t		pe_addr;
	/* Original csums for entry */
	struct dcs_csum_info	pe_csum_info;
	/*
	 * Extent start offset for the truncated physical entry. (An entry
	 * straddling the window end could be truncated on merge window flush)
	 */
	daos_off_t		pe_off;
	/* Pool map version of entry */
	uint32_t		pe_ver;
	/* Referenced by visible entries */
	uint32_t		pe_ref;
	/* Need to truncate on window flush */
	bool			pe_trunc_head;
	/* Mark the entry as removed */
	bool			pe_remove;
	/* Need to free the csum buffer when the physical entry is freed */
	bool			pe_csum_free;
};

/* Removal record (tracks an evtree removal extent seen during the scan) */
struct agg_rmv_ent {
	/* Link into agg_merge_window::mw_rmv_ents (or a parent re_contained) */
	d_list_t		re_link;
	/** list link for physical records */
	d_list_t		re_phy_link;
	/* In tree rectangle */
	struct evt_rect		re_rect;
	/** Real entries, if any, contained in a logical rectangle */
	d_list_t		re_contained;
	uint32_t                re_aggregate : 1, /* Aggregate of one or more records */
	    re_child                         : 1; /* Contained in aggregate record */
	/** Refcount of physical records that reference this removal */
	unsigned int		re_phy_count;
};

/* EV tree logical entry (a visible sub-extent of one physical entry) */
struct agg_lgc_ent {
	/* Visible extent range */
	struct evt_extent	 le_ext;
	/* The physical entry this logical extent belongs to */
	struct agg_phy_ent	*le_phy_ent;
};

/*
 * EV tree logical segment, it'll be used to form new physical
 * rectangle and being inserted in evtree on merge window flush.
 */
struct agg_lgc_seg {
	/* Start index in mw_lgc_ents */
	unsigned int		 ls_idx_start;
	/* End index in mw_lgc_ents */
	unsigned int		 ls_idx_end;
	/* New segment generated by truncate on window flush */
	struct agg_phy_ent	*ls_phy_ent;
	/* Description of the new physical entry to be inserted */
	struct evt_entry_in	 ls_ent_in;
};

/* I/O context used on EV tree merge window flush */
struct agg_io_context {
	/* Buffer to hold output csums for entire aggregation */
	void			*ic_csum_buf;
	/* Allocated length of ic_csum_buf, in bytes */
	unsigned int		 ic_csum_buf_len;
	/* Array of structs used for recalculation of checksums */
	struct csum_recalc	*ic_csum_recalcs;
	/* Capacity of the ic_csum_recalcs array */
	unsigned int		 ic_csum_recalc_cnt;
	/* Segments being involved on merge window flush */
	struct agg_lgc_seg	*ic_segs;
	/* Capacity / current count of ic_segs */
	unsigned int		 ic_seg_max;
	unsigned int		 ic_seg_cnt;
	/* Reserved SCM extents for new physical entries */
	struct umem_rsrvd_act	*ic_rsrvd_scm;
	/* Reserved NVMe extents for new physical entries */
	d_list_t		 ic_nvme_exts;
};

/* Merge window for evtree aggregation */
struct agg_merge_window {
	/* Record size */
	daos_size_t			 mw_rsize;
	/* Threshold for merge window flush */
	daos_size_t			 mw_flush_thresh;
	/* Merge window extent (logical range currently covered) */
	struct evt_extent		 mw_ext;
	/** Real merge window upper bound (may exceed mw_ext.ex_hi) */
	uint64_t			 mw_alloc_hi;
	/* Physical entries in merge window */
	d_list_t			 mw_phy_ents;
	unsigned int			 mw_phy_cnt;
	/** Possibly deleted physical entries */
	d_list_t			 mw_rmv_ents;
	/** In order list of physical removal records */
	d_list_t			 mw_phy_rmv_ents;
	unsigned int			 mw_rmv_cnt;
	/* Visible logical entries in merge window */
	struct agg_lgc_ent		*mw_lgc_ents;
	/* Capacity / current count of mw_lgc_ents */
	unsigned int			 mw_lgc_max;
	unsigned int			 mw_lgc_cnt;
	/* I/O context for transferring data on flush */
	struct agg_io_context		 mw_io_ctxt;
	/* Checksum type in use (0 when checksums are disabled) */
	uint16_t			 mw_csum_type;
};

/* Per-pass work budgets; when any of them hits zero, aggregation yields */
struct vos_agg_credits {
	uint32_t	vac_creds_scan;		/* # of tight loops */
	uint32_t	vac_creds_del;		/* # of obj/key/rec deletions */
	uint32_t	vac_creds_merge;	/* # of merging operations */
};

#define EV_TRACE_MAX 1024
/* Aggregation/discard iteration state, passed through the VOS iterator callbacks */
struct vos_agg_param {
	/* Ring buffer of recently visited EV entries (for debugging/trace) */
	vos_iter_entry_t        ap_evt_trace[EV_TRACE_MAX];
	int                     ap_trace_start;
	int                     ap_trace_count;
	struct vos_agg_credits	ap_credits;
	daos_handle_t		ap_coh;		/* container handle */
	daos_unit_oid_t		ap_oid;		/* current object ID */
	/* Boundary for aggregatable write filter */
	daos_epoch_t		ap_filter_epoch;
	uint32_t		ap_flags;
	/* ap_discard: discard mode; ap_csum_err/ap_nospc_err: sticky error flags;
	 * ap_in_progress: operation underway; ap_discard_obj: discard single object
	 */
	unsigned int ap_discard : 1, ap_csum_err : 1, ap_nospc_err : 1, ap_in_progress : 1,
	    ap_discard_obj : 1;
	struct umem_instance	*ap_umm;
	/* Optional external yield hook: <0 abort, 0 tight mode, 1 slack mode */
	int			(*ap_yield_func)(void *arg);
	void			*ap_yield_arg;
	/* SV tree: Max epoch in specified iterate epoch range */
	daos_epoch_t		 ap_max_epoch;
	/* EV tree: Merge window for evtree aggregation */
	struct agg_merge_window	 ap_window;
	/* Re-probe hints after deletions at each level */
	bool			 ap_skip_akey;
	bool			 ap_skip_dkey;
	bool			 ap_skip_obj;
};

/* Refill the work budgets; tight mode grants the smaller credit amounts. */
static inline void
credits_set(struct vos_agg_credits *vac, bool tight)
{
	if (tight) {
		vac->vac_creds_scan  = AGG_CREDS_SCAN_TIGHT;
		vac->vac_creds_del   = AGG_CREDS_DEL_TIGHT;
		vac->vac_creds_merge = AGG_CREDS_MERGE_TIGHT;
	} else {
		vac->vac_creds_scan  = AGG_CREDS_SCAN_SLACK;
		vac->vac_creds_del   = AGG_CREDS_DEL_SLACK;
		vac->vac_creds_merge = AGG_CREDS_MERGE_SLACK;
	}
}

static inline void
credits_consume(struct vos_agg_credits *vac, unsigned int agg_op)
{
	switch (agg_op) {
	case AGG_OP_SCAN:
	case AGG_OP_SKIP:
		if (vac->vac_creds_scan)
			vac->vac_creds_scan--;
		break;
	case AGG_OP_DEL:
		if (vac->vac_creds_del)
			vac->vac_creds_del--;
		break;
	case AGG_OP_MERGE:
		if (vac->vac_creds_merge)
			vac->vac_creds_merge--;
		break;
	default:
		D_ASSERTF(0, "Invalid agg opcode %u\n", agg_op);
		break;
	}
}

/* True once any credit category has been fully consumed. */
static inline bool
credits_exhausted(struct vos_agg_credits *vac)
{
	return vac->vac_creds_scan == 0 || vac->vac_creds_del == 0 ||
	       vac->vac_creds_merge == 0;
}

/* Fetch aggregation metrics for a container; NULL when pool metrics are absent. */
static inline struct vos_agg_metrics *
agg_cont2metrics(struct vos_container *cont)
{
	struct vos_pool_metrics *pool_metrics = cont->vc_pool->vp_metrics;

	return (pool_metrics != NULL) ? &pool_metrics->vp_agg_metrics : NULL;
}

/*
 * Delete the SV record at the iterator's current position inside its own
 * umem transaction, then account the deletion (telemetry + credits).
 *
 * \return 0 on success, negative DER error otherwise.
 */
static int
agg_del_sv(daos_handle_t ih, struct vos_agg_param *agg_param,
	   vos_iter_entry_t *entry, unsigned int *acts)
{
	struct umem_instance	*umm = agg_param->ap_umm;
	struct vos_container	*cont = vos_hdl2cont(agg_param->ap_coh);
	struct vos_agg_metrics	*vam = agg_cont2metrics(cont);
	int			 rc;

	D_ASSERT(umm != NULL);
	D_ASSERT(acts != NULL);

	rc = umem_tx_begin(umm, NULL);
	if (rc)
		return rc;

	rc = vos_iter_process(ih, VOS_ITER_PROC_OP_DELETE, NULL);
	/* Abort keeps the original error code; commit may fail on its own */
	if (rc != 0)
		rc = umem_tx_abort(umm, rc);
	else
		rc = umem_tx_commit(umm);

	if (rc) {
		D_ERROR("Failed to delete entry: "DF_RC"\n", DP_RC(rc));
		return rc;
	}

	/* Tell the iterator a deletion happened so it can re-probe */
	*acts |= VOS_ITER_CB_DELETE;
	/* Only aggregation (not discard) is counted in the delete metric */
	if (vam && vam->vam_del_sv && !agg_param->ap_discard)
		d_tm_inc_counter(vam->vam_del_sv, 1);
	credits_consume(&agg_param->ap_credits, AGG_OP_DEL);

	return rc;
}

/*
 * Consume one credit for the given op and bump the matching per-level
 * telemetry counter (obj/dkey/akey), when metrics are enabled.
 */
static void
inc_agg_counter(struct vos_agg_param *agg_param, vos_iter_type_t type, unsigned int agg_op)
{
	struct vos_container	*cont = vos_hdl2cont(agg_param->ap_coh);
	struct vos_agg_metrics	*metrics = agg_cont2metrics(cont);
	struct d_tm_node_t	*tm_node = NULL;

	credits_consume(&agg_param->ap_credits, agg_op);

	if (metrics == NULL)
		return;

	/* Only scan/skip/del ops have per-level counters */
	D_ASSERT(agg_op < AGG_OP_MERGE);
	if (type == VOS_ITER_OBJ)
		tm_node = metrics->vam_obj[agg_op];
	else if (type == VOS_ITER_DKEY)
		tm_node = metrics->vam_dkey[agg_op];
	else if (type == VOS_ITER_AKEY)
		tm_node = metrics->vam_akey[agg_op];
	else
		D_ASSERTF(0, "Invalid iter type: %u\n", type);

	if (tm_node != NULL)
		d_tm_inc_counter(tm_node, 1);
}

/*
 * Decide whether the entry described by @desc needs aggregation; an entry
 * untouched since the last filter epoch can be skipped.
 */
static inline bool
need_aggregate(daos_handle_t ih, struct vos_agg_param *agg_param, vos_iter_desc_t *desc)
{
	struct vos_container	*cont = vos_hdl2cont(agg_param->ap_coh);
	bool			 untouched;

	/** Skip this check for discard */
	if (agg_param->ap_discard || agg_param->ap_discard_obj)
		return true;

	untouched = desc->id_agg_write <= agg_param->ap_filter_epoch &&
		    desc->id_parent_punch <= agg_param->ap_filter_epoch;

	D_DEBUG(DB_EPC, "flags:%u, hae:"DF_U64" agg_needed=%s\n", agg_param->ap_flags,
		cont->vc_cont_df->cd_hae, untouched ? "no" : "yes");

	return !untouched;
}

/*
 * Yield the execution stream, either through the external hook or through
 * bio_yield(), and refill credits. Returns true when the caller must abort.
 */
static inline bool
vos_aggregate_yield(struct vos_agg_param *agg_param)
{
	struct vos_container	*cont = vos_hdl2cont(agg_param->ap_coh);
	int			 ret;

	/* Current DTX handle must be NULL, since aggregation runs under non-DTX mode. */
	D_ASSERT(vos_dth_get(cont->vc_pool->vp_sysdb) == NULL);

	if (agg_param->ap_yield_func != NULL) {
		ret = agg_param->ap_yield_func(agg_param->ap_yield_arg);
		/* Abort */
		if (ret < 0)
			return true;

		/* ret == 0: tight mode; ret == 1: slack mode */
		credits_set(&agg_param->ap_credits, ret == 0);
		return false;
	}

	/* No hook provided: yield directly and stay in tight mode */
	bio_yield(agg_param->ap_umm);
	credits_set(&agg_param->ap_credits, true);
	return false;
}

/*
 * Pre-descent filter callback: skip untouched subtrees, delete fully punched
 * ones, and yield when credits run out.
 */
static int
vos_agg_filter(daos_handle_t ih, vos_iter_desc_t *desc, void *cb_arg, unsigned int *acts)
{
	struct vos_agg_param	*agg_param = cb_arg;
	int			 rc = 0;

	rc = need_aggregate(ih, agg_param, desc);
	if (rc == 0) {
		if (desc->id_type == VOS_ITER_OBJ) {
			D_DEBUG(DB_EPC, "Skip untouched oid:"DF_UOID"\n",
				DP_UOID(desc->id_oid));
		} else {
			D_DEBUG(DB_EPC, "Skip untouched %s:"DF_KEY"\n",
				desc->id_type == VOS_ITER_DKEY ? "dkey" : "akey",
				DP_KEY(&desc->id_key));
		}
		*acts |= VOS_ITER_CB_SKIP;
		inc_agg_counter(agg_param, desc->id_type, AGG_OP_SKIP);

		D_GOTO(out, rc = 0);
	}

	/* NOTE(review): need_aggregate() returns bool, so this branch is
	 * currently unreachable; kept for when the helper reports errors.
	 */
	if (rc < 0) /** Ignore the filter error, let iterator handle it on actual probe */
		D_GOTO(out, rc = 0);

	/* Check whether the whole subtree is covered by a punch */
	if (desc->id_type == VOS_ITER_OBJ)
		rc = oi_iter_check_punch(ih);
	else
		rc = vos_obj_iter_check_punch(ih);
	if (rc < 0)
		goto out;
	if (rc == 1) {
		/* check_punch deleted the entry; tell the iterator */
		*acts |= VOS_ITER_CB_DELETE;
		inc_agg_counter(agg_param, desc->id_type, AGG_OP_DEL);
		D_GOTO(out, rc = 0);
	}
out:

	if (credits_exhausted(&agg_param->ap_credits) ||
	    (DAOS_FAIL_CHECK(DAOS_VOS_AGG_RANDOM_YIELD) && (rand() % 2))) {
		D_DEBUG(DB_EPC, "Credits exhausted, type:%u, acts:%u\n", desc->id_type, *acts);

		if (vos_aggregate_yield(agg_param)) {
			D_DEBUG(DB_EPC, "VOS discard/aggregation aborted\n");
			*acts |= VOS_ITER_CB_EXIT;
		}
	}

	return rc;
}

/* Object-level iteration callback: record the current oid and count the scan. */
static int
vos_agg_obj(daos_handle_t ih, vos_iter_entry_t *entry,
	    struct vos_agg_param *agg_param, unsigned int *acts)
{
	inc_agg_counter(agg_param, VOS_ITER_OBJ, AGG_OP_SCAN);
	agg_param->ap_oid = entry->ie_oid;

	return 0;
}

/* Dkey-level iteration callback: nothing to do beyond accounting the scan. */
static int
vos_agg_dkey(daos_handle_t ih, vos_iter_entry_t *entry,
	     struct vos_agg_param *agg_param, unsigned int *acts)
{
	inc_agg_counter(agg_param, VOS_ITER_DKEY, AGG_OP_SCAN);
	return 0;
}

/* True when ext2 lies entirely within ext1. Both extents must be well-formed. */
static inline bool
ext1_covers_ext2(struct evt_extent *ext1, struct evt_extent *ext2)
{
	D_ASSERT(ext1->ex_lo <= ext1->ex_hi);
	D_ASSERT(ext2->ex_lo <= ext2->ex_hi);

	return ext2->ex_lo >= ext1->ex_lo && ext2->ex_hi <= ext1->ex_hi;
}

/* Life-cycle states of the EV tree merge window */
enum {
	MW_CLOSED	= 0,	/* Window closed, resource released */
	MW_FLUSHED,		/* Window flushed, no logical entries */
	MW_OPENED,		/* Window opened, has logical entries */
};

/*
 * Derive the merge window state (MW_OPENED/MW_FLUSHED/MW_CLOSED) from its
 * bookkeeping fields, asserting the invariants expected in each state.
 */
static int
merge_window_status(struct agg_merge_window *mw)
{
	struct agg_io_context	*io = &mw->mw_io_ctxt;

	/* A flush must have fully drained the I/O context */
	D_ASSERT(io->ic_seg_cnt == 0);
	D_ASSERT(umem_rsrvd_act_cnt(io->ic_rsrvd_scm) == 0);
	D_ASSERT(d_list_empty(&io->ic_nvme_exts));

	D_ASSERT(mw->mw_ext.ex_lo <= mw->mw_ext.ex_hi);

	if (mw->mw_lgc_cnt != 0) {
		D_ASSERT(mw->mw_rsize != 0);
		D_ASSERT(mw->mw_phy_cnt != 0);
		D_ASSERT(!d_list_empty(&mw->mw_phy_ents));

		return MW_OPENED;
	}

	D_ASSERT(mw->mw_ext.ex_lo == 0 && mw->mw_ext.ex_hi == 0);

	if (mw->mw_lgc_ents != NULL) {
		/*
		 * Even if there aren't any logical entries,
		 * there could be some truncated physical entries.
		 */
		D_ASSERT(mw->mw_rsize != 0);
		return MW_FLUSHED;
	}

	if (mw->mw_phy_cnt != 0 && mw->mw_rmv_cnt != 0) {
		/* There are physical records that are partially covered by removal records */
		return MW_FLUSHED;
	}

	/* Window closed, all resource should have been released */
	D_ASSERT(mw->mw_phy_cnt == 0);
	D_ASSERT(d_list_empty(&mw->mw_phy_ents));
	D_ASSERT(mw->mw_lgc_max == 0);

	D_ASSERT(io->ic_seg_max == 0);
	D_ASSERT(io->ic_segs == NULL);
	D_ASSERT(io->ic_rsrvd_scm == NULL);

	return MW_CLOSED;
}

/*
 * Akey-level iteration callback: account the scan, and on the aggregation
 * path reset SV state and verify the EV merge window was closed.
 */
static int
vos_agg_akey(daos_handle_t ih, vos_iter_entry_t *entry,
	     struct vos_agg_param *agg_param, unsigned int *acts)
{
	inc_agg_counter(agg_param, VOS_ITER_AKEY, AGG_OP_SCAN);

	/* No merge window for discard path so bypass checks below. */
	if (!agg_param->ap_discard) {
		/* Reset the max epoch for low-level SV tree iteration */
		agg_param->ap_max_epoch = 0;
		/* The merge window for EV tree aggregation should have been closed */
		D_ASSERTF(merge_window_status(&agg_param->ap_window) == MW_CLOSED,
			  "Merge window isn't closed.\n");
	}

	return 0;
}

/*
 * SV tree iteration callback: keep only the highest committed epoch of a
 * single value; delete covered/aborted/discarded records.
 *
 * \return 0 on success, -DER_TX_BUSY when the highest epoch is uncommitted,
 *         or another negative DER error on deletion failure.
 */
static int
vos_agg_sv(daos_handle_t ih, vos_iter_entry_t *entry,
	   struct vos_agg_param *agg_param, unsigned int *acts)
{
	int	rc;

	D_ASSERT(agg_param != NULL);
	D_ASSERT(entry->ie_epoch != 0);

	credits_consume(&agg_param->ap_credits, AGG_OP_SCAN);

	/* Discard */
	if (agg_param->ap_discard)
		goto delete;

	/* If entry is covered, the key or object is punched */
	if (entry->ie_vis_flags & VOS_VIS_FLAG_COVERED)
		goto delete;
	/*
	 * Aggregate: preserve the first recx which has highest epoch, because
	 * of re-probe, the highest epoch could be iterated multiple times.
	 */
	if (agg_param->ap_max_epoch == 0 ||
	    agg_param->ap_max_epoch == entry->ie_epoch) {

		switch (entry->ie_dtx_state) {
		case DTX_ST_COMMITTED:
			/* Highest epoch is committed, keep it. */
			agg_param->ap_max_epoch = entry->ie_epoch;
			break;
		case DTX_ST_PREPARED:
			/*
			 * Highest epoch is uncommitted.  Since it may be
			 * punched by a key or object and that entity may not
			 * know about the update, we need to abort processing
			 * of the current single value for now.
			 */
			D_DEBUG(DB_EPC, "Hit uncommitted single value at epoch:"
				DF_X64"\n", entry->ie_epoch);
			return -DER_TX_BUSY;
		case DTX_ST_ABORTED:
			/*
			 * Highest epoch is aborted, delete it and continue
			 * checking on next lower epoch.
			 */
			D_DEBUG(DB_EPC, "Delete aborted at epoch:"DF_X64"\n",
				entry->ie_epoch);
			goto delete;
		default:
			D_ASSERTF(0, "Unexpected DTX state: %d\n",
				  entry->ie_dtx_state);
			break;
		}

		return 0;
	}

	/* Any record below the preserved max epoch is obsolete */
	D_ASSERTF(entry->ie_epoch < agg_param->ap_max_epoch,
		  "max:"DF_U64", cur:"DF_U64"\n",
		  agg_param->ap_max_epoch, entry->ie_epoch);

delete:
	rc = agg_del_sv(ih, agg_param, entry, acts);
	if (rc) {
		D_ERROR("Failed to delete SV entry: "DF_RC"\n", DP_RC(rc));
	} else if (vos_iter_empty(ih) == 1 && agg_param->ap_discard) {
		/* Trigger re-probe in akey iteration */
		*acts |= VOS_ITER_CB_YIELD;
	}

	return rc;
}

/* Allocates sub-ranges of the checksum buffer to each output segment. */
static unsigned int
csum_prepare_ent(struct evt_entry_in *ent_in, unsigned int cs_type,
		 unsigned int cs_len, unsigned int chunksize)
{
	unsigned int cur_cnt = csum_chunk_count(chunksize,
						ent_in->ei_rect.rc_ex.ex_lo,
						ent_in->ei_rect.rc_ex.ex_hi,
						ent_in->ei_inob);

	ent_in->ei_csum.cs_nr = cur_cnt;
	ent_in->ei_csum.cs_type = cs_type;
	ent_in->ei_csum.cs_len = cs_len;
	ent_in->ei_csum.cs_buf_len = cur_cnt * ent_in->ei_csum.cs_len;
	ent_in->ei_csum.cs_chunksize = chunksize;

	return cur_cnt * ent_in->ei_csum.cs_len;
}

/* Each new segment requires an allocated buffer range to hold the checksums
 * calculated for the new segment. This buffer range is also used to hold
 * the verification checksum for the component (input) segments.
 * The full buffer is extended to hold checksums for entire merge window.
 * Currently, allocations for prior windows are retained until aggregation
 * for an evtree is complete (in vos_agg_akey, and at end of aggregation).
 *
 * \param[in]     segs      array of output segments to wire csum ranges into
 * \param[in]     seg_cnt   number of valid entries in \a segs
 * \param[in,out] csum_bufp shared csum buffer, grown (never shrunk) as needed
 * \param[in]     cur_len   currently allocated length of *csum_bufp
 * \param[in]     new_len   total bytes required for this window
 *
 * \return 0 on success, -DER_NOMEM on allocation failure (buffer unchanged).
 */
static int
csum_prepare_buf(struct agg_lgc_seg *segs, unsigned int seg_cnt,
		 void **csum_bufp, unsigned int cur_len, unsigned int new_len)
{
	unsigned char	*buffer = NULL;
	unsigned int	 cur_buf = 0;
	unsigned int	 i;	/* unsigned: compared against unsigned seg_cnt */

	if (new_len > cur_len) {
		D_REALLOC_NZ(buffer, *csum_bufp, new_len);
		if (buffer == NULL)
			return -DER_NOMEM;
	} else
		buffer = *csum_bufp;

	/* Zero the whole working area, then carve per-segment sub-ranges */
	memset(buffer, 0, new_len);
	for (i = 0; i < seg_cnt; i++) {
		struct dcs_csum_info *csum_info = &segs[i].ls_ent_in.ei_csum;

		csum_info->cs_csum = &buffer[cur_buf];
		cur_buf += csum_info->cs_len * csum_info->cs_nr;
		D_ASSERT(cur_buf <= new_len);
	}
	*csum_bufp = buffer;

	return 0;
}

/* Allocate and initialize a removal record; NULL on allocation failure. */
static inline struct agg_rmv_ent *
allocate_rmv_ent(const struct evt_extent *ext, daos_epoch_t epoch, uint16_t minor_epc)
{
	struct agg_rmv_ent *ent;

	D_ALLOC_PTR(ent);
	if (ent != NULL) {
		D_INIT_LIST_HEAD(&ent->re_contained);
		ent->re_rect.rc_ex.ex_lo = ext->ex_lo;
		ent->re_rect.rc_ex.ex_hi = ext->ex_hi;
		ent->re_rect.rc_epc = epoch;
		ent->re_rect.rc_minor_epc = minor_epc;
	}

	return ent;
}

/* Convert a record extent [idx, idx + nr) into an inclusive evtree extent. */
static inline void
recx2ext(const daos_recx_t *recx, struct evt_extent *ext)
{
	D_ASSERT(recx->rx_nr > 0);
	ext->ex_lo = recx->rx_idx;
	ext->ex_hi = ext->ex_lo + recx->rx_nr - 1;
}

/*
 * Delete one EV tree entry (identified by its original in-tree rectangle)
 * and account the deletion. \a desc is used only for error logging.
 */
static inline int
delete_evt_entry(struct vos_agg_param *agg_param, struct vos_obj_iter *oiter,
		 const vos_iter_entry_t *entry, const char *desc)
{
	struct vos_container	*cont = oiter->it_obj->obj_cont;
	struct vos_agg_metrics	*vam = agg_cont2metrics(cont);
	struct evt_rect		 rect;
	int			 rc;

	/* Rebuild the original in-tree rectangle for the delete */
	recx2ext(&entry->ie_orig_recx, &rect.rc_ex);
	rect.rc_epc = entry->ie_epoch;
	rect.rc_minor_epc = entry->ie_minor_epc;

	rc = evt_delete(oiter->it_hdl, &rect, NULL);
	if (rc) {
		D_ERROR("Delete %s EV entry "DF_RECT" error: "DF_RC"\n",
			desc, DP_RECT(&rect), DP_RC(rc));
		return rc;
	}

	/* Only aggregation (not discard) is counted in the delete metric */
	if (vam && vam->vam_del_ev && !agg_param->ap_discard)
		d_tm_inc_counter(vam->vam_del_ev, 1);
	credits_consume(&agg_param->ap_credits, AGG_OP_DEL);

	return rc;
}

/*
 * Delete a physical removal record from the evtree, unlink it from the
 * window's lists and free it. Must not be called on an aggregate record
 * that still contains children.
 *
 * \return 0 on success, negative DER error on evtree delete failure
 *         (record left intact in that case).
 */
static int
delete_removal(struct agg_merge_window *mw, struct vos_obj_iter *oiter, struct agg_rmv_ent *rm_ent)
{
	int			 rc = 0;

	D_ASSERT(d_list_empty(&rm_ent->re_contained));
	D_DEBUG(DB_EPC, "Removing physical removal record: "DF_RECT"\n", DP_RECT(&rm_ent->re_rect));
	rc = evt_delete(oiter->it_hdl, &rm_ent->re_rect, NULL);
	if (rc) {
		D_ERROR("Remove "DF_RECT" error: "DF_RC"\n", DP_RECT(&rm_ent->re_rect), DP_RC(rc));
		return rc;
	}

	d_list_del(&rm_ent->re_phy_link);
	d_list_del(&rm_ent->re_link);
	/* Only top-level (non-child) records are counted in mw_rmv_cnt */
	if (!rm_ent->re_child) {
		D_ASSERT(mw->mw_rmv_cnt > 0);
		mw->mw_rmv_cnt--;
	}
	D_FREE(rm_ent);

	return 0;
}

/*
 * Queue a new removal record for extent \a ext. Unreferenced stale removal
 * records that end before \a ext are deleted first; a new record contiguous
 * with an existing one at the same epoch is folded into an aggregate record.
 *
 * \return 0 on success, negative DER error otherwise.
 */
static int
enqueue_rmv_ent(struct agg_merge_window *mw, const struct evt_extent *ext,
		const vos_iter_entry_t *entry, struct vos_obj_iter *oiter)
{
	struct agg_rmv_ent	*rm_ent, *rm_ent2, *rm_ent3;
	d_list_t		*list = &mw->mw_rmv_ents;
	int			 rc;

	/** Remove any past removal records that have no remaining physical references */
	d_list_for_each_entry_safe(rm_ent, rm_ent2, &mw->mw_phy_rmv_ents, re_phy_link) {
		if (rm_ent->re_phy_count != 0)
			continue;

		/* Still overlapping or ahead of the new extent; keep it */
		if (rm_ent->re_rect.rc_ex.ex_hi >= ext->ex_lo)
			continue;

		rc = delete_removal(mw, oiter, rm_ent);

		if (rc != 0)
			return rc;
	}

	rm_ent = allocate_rmv_ent(ext, entry->ie_epoch, entry->ie_minor_epc);
	if (rm_ent == NULL)
		return -DER_NOMEM;

	/* Look (newest first) for a same-epoch record that ends right before ext */
	d_list_for_each_entry_reverse(rm_ent2, &mw->mw_rmv_ents, re_link) {
		if (rm_ent->re_rect.rc_epc != rm_ent2->re_rect.rc_epc)
			continue;

		if ((ext->ex_lo) != rm_ent2->re_rect.rc_ex.ex_hi + 1)
			continue;

		D_DEBUG(DB_EPC, "Removal record "DF_RECT" is contiguous with "DF_RECT"\n",
			DP_RECT(&rm_ent->re_rect), DP_RECT(&rm_ent2->re_rect));

		if (!rm_ent2->re_aggregate) {
			D_ASSERT(d_list_empty(&rm_ent2->re_contained));
			/* Duplicate the entry */
			rm_ent3 = allocate_rmv_ent(&rm_ent2->re_rect.rc_ex, rm_ent2->re_rect.rc_epc,
						   rm_ent2->re_rect.rc_minor_epc);
			if (rm_ent3 == NULL) {
				D_FREE(rm_ent);
				return -DER_NOMEM;
			}
			D_DEBUG(DB_EPC, "Removal record "DF_RECT" duplicated\n",
				DP_RECT(&rm_ent2->re_rect));
			/** Replace the existing entry in the list with the duplicate */
			d_list_add(&rm_ent3->re_link, &rm_ent2->re_link);
			d_list_del(&rm_ent2->re_link);
			/** Add existing entry to duplicate contained list */
			d_list_add_tail(&rm_ent2->re_link, &rm_ent3->re_contained);
			/** Subsequent modification should be on the duplicate */
			rm_ent3->re_aggregate = 1;
			rm_ent2->re_child = 1;
			rm_ent2 = rm_ent3;
		}

		/* Extend the aggregate and park the new record inside it */
		rm_ent2->re_rect.rc_ex.ex_hi = ext->ex_hi;
		list = &rm_ent2->re_contained;
		rm_ent->re_child = 1;
		goto enqueue;
	}
	/* Not contiguous with anything: it is a new top-level record */
	mw->mw_rmv_cnt++;
enqueue:
	d_list_add_tail(&rm_ent->re_link, list);
	/** Keep a list of just the physical records */
	d_list_add_tail(&rm_ent->re_phy_link, &mw->mw_phy_rmv_ents);

	return 0;
}

/*
 * True when a removal record at the same epoch covers the tail (ex_hi)
 * of the physical extent \a phy_ext.
 */
static inline bool
phy_ent_is_removed(struct agg_merge_window *mw, const struct evt_extent *phy_ext,
		   daos_epoch_t epoch)
{
	struct agg_rmv_ent	*rm;

	d_list_for_each_entry(rm, &mw->mw_rmv_ents, re_link) {
		if (rm->re_rect.rc_epc != epoch)
			continue;

		if (rm->re_rect.rc_ex.ex_lo <= phy_ext->ex_hi &&
		    rm->re_rect.rc_ex.ex_hi >= phy_ext->ex_hi)
			return true;
	}

	return false;
}

/* Free a physical entry, including its duplicated csum buffer when owned. */
static inline void
free_phy_ent(struct agg_phy_ent *phy_ent)
{
	if (phy_ent->pe_csum_free)
		D_FREE(phy_ent->pe_csum_info.cs_csum);

	D_FREE(phy_ent);
}

static int
prepare_segments(struct agg_merge_window *mw)
{
	struct agg_io_context	*io = &mw->mw_io_ctxt;
	struct agg_phy_ent	*phy_ent = NULL;
	struct agg_phy_ent	*first = NULL;
	struct agg_phy_ent	*temp = NULL;
	struct agg_lgc_ent	*lgc_ent;
	struct agg_lgc_seg	*lgc_seg;
	struct evt_entry_in	*ent_in;
	struct evt_extent	 ext;
	unsigned int		 i, seg_max;
	unsigned int		 cs_type = 0;
	unsigned int		 cs_len = 0;
	unsigned int		 chunksize = 0;
	unsigned int		 cs_total = 0;
	bool			 hole = false, coalesce;
	int			 rc = 0;

	/*
	 * Allocate large enough segments array to hold all the coalesced
	 * segments (at most mw_lgc_cnt) and truncated segments (at most
	 * mw_phy_cnt).
	 */
	D_ASSERT(mw->mw_rmv_cnt > 0 || (mw->mw_phy_cnt > 0 && mw->mw_lgc_cnt > 0));
	io->ic_seg_cnt = 0;
	if (mw->mw_lgc_cnt == 0)
		goto process_physical;

	seg_max = MAX((mw->mw_lgc_cnt + mw->mw_phy_cnt), 200);
	if (io->ic_seg_max < seg_max) {
		D_REALLOC_ARRAY_NZ(lgc_seg, io->ic_segs, seg_max);
		if (lgc_seg == NULL)
			return -DER_NOMEM;

		io->ic_segs = lgc_seg;
		io->ic_seg_max = seg_max;
	}
	memset(io->ic_segs, 0, io->ic_seg_max * sizeof(*lgc_seg));

	/* Generate coalesced segments according to visible logical entries */
	for (i = 0; i < mw->mw_lgc_cnt; i++) {
		lgc_ent = &mw->mw_lgc_ents[i];
		phy_ent = lgc_ent->le_phy_ent;

		ext = lgc_ent->le_ext;
		D_ASSERT(ext1_covers_ext2(&mw->mw_ext, &ext));

		if (i == 0) {
			coalesce = false;
		} else if (hole != bio_addr_is_hole(&phy_ent->pe_addr)) {
			coalesce = false;
			io->ic_seg_cnt++;
			D_ASSERT(io->ic_seg_cnt < io->ic_seg_max);
		} else {
			coalesce = true;
		}

		hole = bio_addr_is_hole(&phy_ent->pe_addr);
		lgc_seg = &io->ic_segs[io->ic_seg_cnt];
		ent_in = &lgc_seg->ls_ent_in;

		if (!coalesce) {
			lgc_seg->ls_phy_ent = NULL;
			lgc_seg->ls_idx_start = i;
			ent_in->ei_inob = mw->mw_rsize;
			ent_in->ei_rect.rc_ex.ex_lo = ext.ex_lo;
			bio_addr_set_hole(&ent_in->ei_addr, hole);
			if (hole) {
				bio_addr_set(&ent_in->ei_addr, DAOS_MEDIA_SCM,
					     0);
				ent_in->ei_inob = 0;
			}
		} else {
			D_ASSERT(ext.ex_lo == ent_in->ei_rect.rc_ex.ex_hi + 1);
		}

		lgc_seg->ls_idx_end = i;
		ent_in->ei_rect.rc_ex.ex_hi = ext.ex_hi;
		/* Merge to highest epoch */
		if (ent_in->ei_rect.rc_epc < phy_ent->pe_rect.rc_epc)
			ent_in->ei_rect.rc_epc = phy_ent->pe_rect.rc_epc;
		/* Merge to highest pool map version */
		if (ent_in->ei_ver < phy_ent->pe_ver)
			ent_in->ei_ver = phy_ent->pe_ver;
		ent_in->ei_rect.rc_minor_epc = VOS_SUB_OP_MAX;
	}

	io->ic_seg_cnt++;
	D_ASSERT(io->ic_seg_cnt < io->ic_seg_max);

process_physical:
	/* Generate truncated segments according to physical entries */
	d_list_for_each_entry_safe(phy_ent, temp, &mw->mw_phy_ents, pe_link) {
		if (first == NULL)
			first = phy_ent; /* Save the first one */

		lgc_seg = &io->ic_segs[io->ic_seg_cnt];
		ent_in = &lgc_seg->ls_ent_in;

		ext = phy_ent->pe_rect.rc_ex;

		/* The physical entry was truncated on prev window flush */
		if (phy_ent->pe_off != 0)
			ext.ex_lo += phy_ent->pe_off;

		D_ASSERT(ext.ex_lo <= ext.ex_hi);
		phy_ent->pe_remove = false;
		if (ext.ex_hi > mw->mw_ext.ex_hi) {
			if (phy_ent_is_removed(mw, &ext, phy_ent->pe_rect.rc_epc)) {
				/** If a record is covered by a removal record and is
				 *  contained in the current merge window, it will be
				 *  removed by aggregation algorithm.  If it extends
				 *  into next window, and the tail is fully covered
				 *  by a removal entry, we need to mark the record for
				 *  removal.
				 */
				phy_ent->pe_remove = true;
			}
		}

		/*
		 * Physical entry is in window, or it's fully covered (not
		 * visible) in current window, or the tail (outside of current
		 * window) is fully covered by a removal record.
		 */
		if (ext.ex_hi <= mw->mw_ext.ex_hi || phy_ent->pe_ref == 0 ||
		    phy_ent->pe_remove)
			continue;

		D_ASSERT(ext.ex_lo <= mw->mw_ext.ex_hi);
		D_ASSERT(ext.ex_hi >= mw->mw_ext.ex_lo);

		lgc_seg->ls_phy_ent = phy_ent;
		lgc_seg->ls_idx_start = 0;
		lgc_seg->ls_idx_end = 0;

		ent_in->ei_inob = mw->mw_rsize;
		ent_in->ei_rect.rc_ex.ex_lo = mw->mw_ext.ex_hi + 1;
		ent_in->ei_rect.rc_ex.ex_hi = ext.ex_hi;
		ent_in->ei_rect.rc_epc = phy_ent->pe_rect.rc_epc;
		ent_in->ei_rect.rc_minor_epc = phy_ent->pe_rect.rc_minor_epc;
		ent_in->ei_ver = phy_ent->pe_ver;

		hole = bio_addr_is_hole(&phy_ent->pe_addr);
		bio_addr_set_hole(&ent_in->ei_addr, hole);
		if (hole) {
			bio_addr_set(&ent_in->ei_addr, DAOS_MEDIA_SCM, 0);
			ent_in->ei_inob = 0;
		}

		io->ic_seg_cnt++;
		D_ASSERT(io->ic_seg_cnt <= io->ic_seg_max);
	}

	if (mw->mw_csum_type && io->ic_seg_cnt > 0) {
		D_ASSERT(first != NULL);
		cs_len = first->pe_csum_info.cs_len;
		cs_type = first->pe_csum_info.cs_type;
		chunksize = first->pe_csum_info.cs_chunksize;

		for (i = 0; i < io->ic_seg_cnt; i++) {
			lgc_seg = &io->ic_segs[i];
			ent_in = &lgc_seg->ls_ent_in;
			if (ent_in->ei_inob != 0)
				/* Allocates csum buffer range. */
				cs_total += csum_prepare_ent(ent_in, cs_type,
							     cs_len, chunksize);
		}
		/* Reallocates csum buffer. */
		if (cs_total) {
			rc = csum_prepare_buf(io->ic_segs, io->ic_seg_cnt,
					      &io->ic_csum_buf,
					      io->ic_csum_buf_len, cs_total);
			if (cs_total > io->ic_csum_buf_len)
				io->ic_csum_buf_len = cs_total;
		}
	}
	return rc;
}

/*
 * Reserve space for a new physical entry, choosing SCM or NVMe based on
 * pool policy and size; fill \a addr with the reserved location.
 * No-space errors are rate-limited in the log via vc_agg_nospc_ts.
 *
 * \return 0 on success, -DER_NOSPACE or other negative DER error otherwise.
 */
static int
reserve_segment(struct vos_object *obj, struct agg_io_context *io,
		daos_size_t size, bio_addr_t *addr)
{
	uint64_t	off, now;
	int		rc;

	memset(addr, 0, sizeof(*addr));

	if (vos_io_scm(vos_obj2pool(obj), DAOS_IOD_ARRAY, size, VOS_IOS_AGGREGATION)) {
		/** Store on SCM */
		off = vos_reserve_scm(obj->obj_cont, io->ic_rsrvd_scm, size);
		if (UMOFF_IS_NULL(off)) {
			/* Throttle repeated no-space error logging */
			now = daos_gettime_coarse();
			if (now - obj->obj_cont->vc_agg_nospc_ts > VOS_NOSPC_ERROR_INTVL) {
				D_ERROR("Reserve "DF_U64" from SCM failed.\n", size);
				obj->obj_cont->vc_agg_nospc_ts = now;
			}
			return -DER_NOSPACE;
		}
		bio_addr_set(addr, DAOS_MEDIA_SCM, off);
		return 0;
	}

	/** Store on NVMe */
	rc = vos_reserve_blocks(obj->obj_cont, &io->ic_nvme_exts, size,
				VOS_IOS_AGGREGATION, &off);
	if (rc == -DER_NOSPACE) {
		/* Throttle repeated no-space error logging */
		now = daos_gettime_coarse();
		if (now - obj->obj_cont->vc_agg_nospc_ts > VOS_NOSPC_ERROR_INTVL) {
			D_ERROR("Reserve "DF_U64" from NVMe failed. "DF_RC"\n",
				size, DP_RC(rc));
			obj->obj_cont->vc_agg_nospc_ts = now;
		}
	} else if (rc) {
		D_ERROR("Reserve "DF_U64" from NVMe failed. "DF_RC"\n",
			size, DP_RC(rc));
	} else {
		bio_addr_set(addr, DAOS_MEDIA_NVME, off);
	}

	return rc;
}

static inline daos_size_t
merge_window_size(struct agg_merge_window *mw)
{
	struct evt_extent ext;
	D_ASSERT(mw->mw_ext.ex_hi >= mw->mw_ext.ex_lo);
	D_ASSERT(mw->mw_alloc_hi >= mw->mw_ext.ex_lo);
	D_ASSERT(mw->mw_rsize != 0);
	ext.ex_hi = mw->mw_alloc_hi;
	ext.ex_lo = mw->mw_ext.ex_lo;
	return evt_extent_width(&ext) * mw->mw_rsize;
}

/* Widen biov entry for read extents to range required to verify checksums. */
static void
csum_widen_biov(struct bio_iov *biov, struct agg_phy_ent *phy_ent,
		struct evt_extent *ext, uint32_t rsize, daos_off_t phy_lo)
{
	struct evt_entry	ent;
	struct evt_extent	aligned_extent = { 0 };

	/* Build a synthetic evt_entry so the csum-chunk alignment helper
	 * can compute the aligned extent for the selected range.
	 */
	ent.en_ext = phy_ent->pe_rect.rc_ex;
	if (phy_lo)
		ent.en_ext.ex_lo = phy_lo;
	ent.en_sel_ext = *ext;
	ent.en_csum = phy_ent->pe_csum_info;
	aligned_extent = evt_entry_align_to_csum_chunk(&ent, rsize);
	/* Record the extra leading/trailing bytes (in record units * rsize)
	 * that must be read beyond the selected extent for verification.
	 */
	bio_iov_set_extra(biov,
			  (ent.en_sel_ext.ex_lo - aligned_extent.ex_lo) *
			  rsize,
			  (aligned_extent.ex_hi - ent.en_sel_ext.ex_hi) *
			  rsize);
}

/* An array of csum_recalc structures is constructed for each output entry.
 * This data is used for checksum verification of the input data, and for
 * calculating the checksum(s) for the output extent.
 */
static inline void
csum_add_recalcs(struct csum_recalc **recalcs_p, struct agg_phy_ent *phy_ent,
		 struct evt_extent *ext, unsigned int idx)
{
	struct csum_recalc *rec = &(*recalcs_p)[idx];

	rec->cr_log_ext  = *ext;
	rec->cr_phy_ext  = &phy_ent->pe_rect.rc_ex;
	rec->cr_phy_csum = &phy_ent->pe_csum_info;
	rec->cr_phy_off  = phy_ent->pe_off;
}

static int
verify_and_recalc(struct bio_sglist *bsgl, struct evt_entry_in *ent_in,
		  struct csum_recalc *recalcs, unsigned int recalc_seg_cnt)
{
	struct csum_recalc_args	args;

	args.cra_bsgl		= bsgl;
	args.cra_ent_in		= ent_in;
	args.cra_recalcs	= recalcs;
	args.cra_seg_cnt	= recalc_seg_cnt;

	vos_offload_exec(vos_csum_recalc_fn, &args);
	if (args.cra_rc == -DER_CSUM)
		bio_log_data_csum_err(vos_xsctxt_get());
	return args.cra_rc;
}

/*
 * Materialize one coalesced output segment: gather the source ranges from
 * the old physical entries, reserve a new destination extent, then copy
 * the payload (verifying/recalculating checksums when enabled).
 *
 * \param ih		[IN]	EV tree iterator handle
 * \param mw		[IN]	Merge window being flushed
 * \param lgc_seg	[IN]	Output segment descriptor (source index
 *				range and the new entry to be inserted)
 * \param acts		[IN]	Iterator action flags (unused here)
 *
 * \return		0 on success, negative DER error otherwise
 */
static int
fill_one_segment(daos_handle_t ih, struct agg_merge_window *mw,
		 struct agg_lgc_seg *lgc_seg, unsigned int *acts)
{
	struct vos_obj_iter	*oiter = vos_hdl2oiter(ih);
	struct vos_object	*obj = oiter->it_obj;
	struct agg_io_context	*io = &mw->mw_io_ctxt;
	struct evt_entry_in	*ent_in = &lgc_seg->ls_ent_in;
	struct agg_phy_ent	*phy_ent;
	struct bio_io_context	*bio_ctxt;
	struct bio_sglist	 bsgl = { 0 }, bsgl_dst = { 0 };
	bio_addr_t		 addr_src;
	daos_size_t		 seg_size, copy_size, read_size = 0;
	struct evt_extent	 ext = { 0 };
	daos_off_t		 phy_lo = 0;
	unsigned int		 i, seg_count, biov_idx = 0;
	struct bio_copy_desc	*copy_desc;
	struct umem_instance	*umem;
	int			 rc;

	D_ASSERT(obj != NULL);
	D_ASSERT(mw->mw_rsize > 0);

	/* A hole (punched) segment carries no payload, nothing to copy */
	if (bio_addr_is_hole(&ent_in->ei_addr))
		return 0;

	seg_size = evt_rect_width(&ent_in->ei_rect) * mw->mw_rsize;
	D_ASSERTF(seg_size > 0, "seg_size:"DF_U64"\n", seg_size);

	/* Copy data from old logical entries into new segment */
	D_ASSERT(lgc_seg->ls_idx_start <= lgc_seg->ls_idx_end);
	D_ASSERT(lgc_seg->ls_idx_end < mw->mw_lgc_cnt);

	bio_ctxt = vos_data_ioctxt(obj->obj_cont->vc_pool);
	umem = &obj->obj_cont->vc_pool->vp_umm;

	/* One source biov per queued logical entry in [idx_start, idx_end] */
	seg_count = lgc_seg->ls_idx_end - lgc_seg->ls_idx_start + 1;
	rc = bio_sgl_init(&bsgl, seg_count);
	if (rc) {
		D_ERROR("Init bsgl error: "DF_RC"\n", DP_RC(rc));
		return rc;
	}

	rc = bio_sgl_init(&bsgl_dst, 1);
	if (rc) {
		D_ERROR("Init bsgl_dst error: "DF_RC"\n", DP_RC(rc));
		bio_sgl_fini(&bsgl);
		return rc;
	}

	/* Grow the cached recalc array if this segment has more sources
	 * than any previous one; the array is reused across segments. */
	if (mw->mw_csum_type && seg_count > io->ic_csum_recalc_cnt) {
		void *buffer;

		/* An array of recalc structs (one per output segment). */
		D_REALLOC_ARRAY(buffer, io->ic_csum_recalcs,
				io->ic_csum_recalc_cnt, seg_count);
		if (buffer == NULL)
			D_GOTO(out, rc = -DER_NOMEM);

		io->ic_csum_recalcs = buffer;
		io->ic_csum_recalc_cnt = seg_count;
	}

	/* Build the source SGL from the queued logical entries */
	i = lgc_seg->ls_idx_start;
	while (i <= lgc_seg->ls_idx_end) {
		if (lgc_seg->ls_phy_ent != NULL) {
			/* Segment is backed by a single (truncated)
			 * physical entry; copy the whole output range. */
			phy_ent = lgc_seg->ls_phy_ent;
			ext = ent_in->ei_rect.rc_ex;
		} else {
			struct agg_lgc_ent *lgc_ent = &mw->mw_lgc_ents[i];

			phy_ent = lgc_ent->le_phy_ent;
			ext = lgc_ent->le_ext;
		}
		i++;

		D_ASSERT(ext1_covers_ext2(&ent_in->ei_rect.rc_ex, &ext));
		D_ASSERT(ext1_covers_ext2(&phy_ent->pe_rect.rc_ex, &ext));

		/* pe_off accounts for head-truncation done on a previous
		 * window flush; the effective start is shifted by it. */
		phy_lo = phy_ent->pe_rect.rc_ex.ex_lo;
		if (phy_ent->pe_off != 0)
			phy_lo += phy_ent->pe_off;

		D_ASSERT(phy_lo <= phy_ent->pe_rect.rc_ex.ex_hi);
		D_ASSERT(ext.ex_lo >= phy_lo);

		copy_size = evt_extent_width(&ext) * ent_in->ei_inob;

		/* Offset into the old payload for this logical range */
		addr_src = phy_ent->pe_addr;
		addr_src.ba_off += (ext.ex_lo - phy_lo) * ent_in->ei_inob;

		D_ASSERT(!bio_addr_is_hole(&addr_src));

		D_ASSERT(biov_idx < bsgl.bs_nr);
		bio_iov_set(&bsgl.bs_iovs[biov_idx], addr_src, copy_size);

		if (mw->mw_csum_type) {
			/* Widen the read to csum chunk boundaries and record
			 * the info needed for verify/recalculation. */
			csum_widen_biov(&bsgl.bs_iovs[biov_idx], phy_ent, &ext,
					ent_in->ei_inob, phy_lo);

			csum_add_recalcs(&io->ic_csum_recalcs, phy_ent, &ext, biov_idx);
		}
		biov_idx++;
		read_size += copy_size;
	}
	D_ASSERT(seg_size == read_size);

	/* Reserve destination space for the new coalesced segment */
	rc = reserve_segment(obj, io, seg_size, &ent_in->ei_addr);
	if (rc) {
		DL_CDEBUG(rc == -DER_NOSPACE, DB_EPC, DLOG_ERR, rc,
			  "Reserve " DF_U64 " segment error", seg_size);
		goto out;
	}
	D_ASSERT(!bio_addr_is_hole(&ent_in->ei_addr));
	bio_iov_set(&bsgl_dst.bs_iovs[0], ent_in->ei_addr, seg_size);

	rc = bio_copy_prep(bio_ctxt, umem, &bsgl, &bsgl_dst, &copy_desc);
	if (rc) {
		D_ERROR("Failed to Prepare source & target SGLs for copy. "DF_RC"\n", DP_RC(rc));
		goto out;
	}

	if (mw->mw_csum_type) {
		/* Verify prior data, calculate csums for output range. */
		rc = verify_and_recalc(bio_copy_get_sgl(copy_desc, true), ent_in,
				       io->ic_csum_recalcs, seg_count);
		if (rc) {
			D_ERROR("CSUM verify error: "DF_RC"\n", DP_RC(rc));
			goto post;
		}
	}

	rc = bio_copy_run(copy_desc, seg_size, NULL);
	if (rc)
		D_ERROR("Copy to "DF_RECT" error "DF_RC"\n",
			DP_RECT(&ent_in->ei_rect), DP_RC(rc));
post:
	/* bio_copy_post() must be called once prep succeeded, pass rc on */
	rc = bio_copy_post(copy_desc, rc);
	if (rc) {
		D_ERROR("Write to "DF_RECT" error "DF_RC"\n",
			DP_RECT(&ent_in->ei_rect), DP_RC(rc));
	} else {
		struct vos_agg_metrics	*vam = agg_cont2metrics(obj->obj_cont);

		/* Account merged records/bytes in aggregation metrics */
		if (vam) {
			if (vam->vam_merge_recs)
				d_tm_inc_counter(vam->vam_merge_recs, seg_count);
			if (vam->vam_merge_size)
				d_tm_inc_counter(vam->vam_merge_size, seg_size);
		}
	}
out:
	bio_sgl_fini(&bsgl);
	bio_sgl_fini(&bsgl_dst);
	return rc;
}

/*
 * Fill all prepared output segments of the merge window, reserving the
 * SCM extents up-front. Returns 0 on success or the first fill error.
 */
static int
fill_segments(daos_handle_t ih, struct vos_agg_param *agg_param, unsigned int *acts)
{
	struct agg_merge_window	*mw = &agg_param->ap_window;
	struct agg_io_context	*io = &mw->mw_io_ctxt;
	struct umem_instance	*umm = agg_param->ap_umm;
	unsigned int		 seg_idx, scm_max;
	int			 rc;

	/** No logical extent or truncated physical extent (only removals) */
	if (io->ic_seg_cnt == 0)
		return 0;

	/* Make sure enough reserved-action slots are available */
	scm_max = MAX(io->ic_seg_cnt, 200);
	rc = umem_rsrvd_act_realloc(umm, &io->ic_rsrvd_scm, scm_max);
	if (rc != 0)
		return rc;
	D_ASSERT(umem_rsrvd_act_cnt(io->ic_rsrvd_scm) == 0);

	for (seg_idx = 0; seg_idx < io->ic_seg_cnt; seg_idx++) {
		struct agg_lgc_seg *lgc_seg = &io->ic_segs[seg_idx];

		D_DEBUG(DB_EPC, "Fill segment: %u-%u "DF_RECT"\n",
			lgc_seg->ls_idx_start, lgc_seg->ls_idx_end,
			DP_RECT(&lgc_seg->ls_ent_in.ei_rect));

		rc = fill_one_segment(ih, mw, lgc_seg, acts);
		if (rc != 0) {
			DL_CDEBUG(rc == -DER_NOSPACE, DB_EPC, DLOG_ERR, rc,
				  "Fill seg %u-%u %p " DF_RECT " error", lgc_seg->ls_idx_start,
				  lgc_seg->ls_idx_end, lgc_seg->ls_phy_ent,
				  DP_RECT(&lgc_seg->ls_ent_in.ei_rect));
			return rc;
		}
	}

	return 0;
}

/*
 * Delete processed removal records from the EV tree and free their
 * tracking structures. A record is kept when it is still referenced by
 * pending physical entries or stretches beyond the window end (unless
 * this is the final flush).
 *
 * \param mw	[IN]	Merge window being flushed
 * \param oiter	[IN]	Object iterator for the EV tree
 * \param head	[IN]	Removal record list to process
 * \param last	[IN]	Final window flush, drop everything processed
 * \param top	[IN]	True for the top-level list, false when recursing
 *			into a record's re_contained list
 *
 * \return		0 on success, negative DER error otherwise
 */
static int
process_removals(struct agg_merge_window *mw, struct vos_obj_iter *oiter, d_list_t *head, bool last,
		 bool top)
{
	struct agg_rmv_ent	*rm_ent, *rm_tmp;
	struct evt_rect		 rect;
	int			 rc;

	d_list_for_each_entry_safe(rm_ent, rm_tmp, head, re_link) {
		/* Reset per iteration so a prior value can't leak through */
		rc = 0;
		rect = rm_ent->re_rect;

		if (!last && (rm_ent->re_phy_count != 0 || rect.rc_ex.ex_hi > mw->mw_ext.ex_hi))
			continue;

		if (!rm_ent->re_aggregate) {
			D_ASSERT(d_list_empty(&rm_ent->re_contained));
			D_DEBUG(DB_EPC, "Removing physical removal record: "DF_RECT"\n",
				DP_RECT(&rm_ent->re_rect));
			rc = evt_delete(oiter->it_hdl, &rect, NULL);
			d_list_del(&rm_ent->re_phy_link);
		} else if (!d_list_empty(&rm_ent->re_contained)) {
			D_ASSERT(top);
			D_DEBUG(DB_EPC, "Removing logical removal record: "DF_RECT"\n",
				DP_RECT(&rm_ent->re_rect));
			rc = process_removals(mw, oiter, &rm_ent->re_contained, last, false);
		}

		/*
		 * Check rc before deciding to keep the record, so an error
		 * from the recursive call isn't silently dropped when the
		 * contained list is still non-empty.
		 */
		if (rc) {
			D_ERROR("Remove "DF_RECT" error: "DF_RC"\n",
				DP_RECT(&rect), DP_RC(rc));
			return rc;
		}

		/* Aggregate record with contained records left, keep it */
		if (rm_ent->re_aggregate && !d_list_empty(&rm_ent->re_contained))
			continue;

		d_list_del(&rm_ent->re_link);
		if (top) {
			D_ASSERT(mw->mw_rmv_cnt > 0);
			mw->mw_rmv_cnt--;
		}
		D_FREE(rm_ent);
	}

	return 0;
}

static void
unmark_removals(struct agg_merge_window *mw, const struct agg_phy_ent *phy_ent)
{
	struct agg_rmv_ent	*rmv_ent;

	d_list_for_each_entry_reverse(rmv_ent, &mw->mw_phy_rmv_ents, re_phy_link) {
		if (rmv_ent->re_rect.rc_epc != phy_ent->pe_rect.rc_epc)
			continue;

		if (rmv_ent->re_rect.rc_ex.ex_hi < phy_ent->pe_rect.rc_ex.ex_lo)
			break;

		if (rmv_ent->re_rect.rc_ex.ex_lo > phy_ent->pe_rect.rc_ex.ex_hi)
			continue;

		/*
		 * Aggregation could abort before processing the invisible record
		 * which being covered by a removal record, in such case, the removal
		 * record & physical record are both enqueued but the removal record
		 * isn't referenced yet.
		 */
		if (rmv_ent->re_phy_count > 0)
			rmv_ent->re_phy_count--;
	}
}

/*
 * Dump the ring buffer of recently iterated EV tree entries to the log;
 * called just before an aggregation assertion fires (see D_AGG_ASSERT*).
 */
static void
dump_trace(struct agg_merge_window *mw)
{
	struct vos_agg_param *agg_param = container_of(mw, struct vos_agg_param, ap_window);
	vos_iter_entry_t     *entry;
	int                   i;
	int                   last;

	/* Nothing recorded yet */
	if (agg_param->ap_trace_count == 0)
		return;

	if (agg_param->ap_trace_count < EV_TRACE_MAX) {
		/* Buffer hasn't wrapped: dump every recorded entry */
		D_ERROR("Assertion will trigger, dumping all %d evt_trace entries\n",
			agg_param->ap_trace_count);
		last = agg_param->ap_trace_count;
	} else {
		/* Buffer wrapped: dump one full revolution of the ring */
		D_ERROR("Assertion will trigger, dumping the last %d of %d total evt_trace"
			" entries\n",
			EV_TRACE_MAX, agg_param->ap_trace_count);
		last = agg_param->ap_trace_start;
	}

	/* Walk the ring from the oldest entry until we hit 'last' again */
	i = agg_param->ap_trace_start;
	do {
		entry = &agg_param->ap_evt_trace[i];
		D_ERROR("  " DF_U64 " recs@" DF_U64 " (" DF_U64 " recs@ " DF_U64 ")@" DF_X64
			".%d tx=%d hole=%d flg=%x rsz=" DF_U64 " gsz=" DF_U64 "\n",
			entry->ie_recx.rx_nr, entry->ie_recx.rx_idx, entry->ie_orig_recx.rx_nr,
			entry->ie_orig_recx.rx_idx, entry->ie_epoch, entry->ie_minor_epc,
			entry->ie_dtx_state, bio_addr_is_hole(&entry->ie_biov.bi_addr),
			entry->ie_vis_flags, entry->ie_rsize, entry->ie_gsize);
		i = (i + 1) % EV_TRACE_MAX;
	} while (i != last);
}

/*
 * Assertion wrappers for the aggregation code: on failure they dump the
 * recent EV iterator trace (dump_trace()) before the assertion aborts,
 * to aid postmortem analysis of merge window inconsistencies.
 */
#define D_AGG_ASSERTF(mw, cond, ...)                                                               \
	do {                                                                                       \
		if (!(cond))                                                                       \
			dump_trace(mw);                                                            \
		D_ASSERTF((cond), __VA_ARGS__);                                                    \
	} while (0)

/* Same as D_AGG_ASSERTF() but without a formatted message */
#define D_AGG_ASSERT(mw, cond)                                                                     \
	do {                                                                                       \
		if (!(cond))                                                                       \
			dump_trace(mw);                                                            \
		D_ASSERT(cond);                                                                    \
	} while (0)

/*
 * Flush the merge window within a single local transaction:
 * - Publish the SCM reservations made while filling segments;
 * - Drop logical entry references and re-point truncated physical
 *   entries at their new payload & checksum;
 * - Delete old physical entries and processed removal records from the
 *   EV tree, then insert the new coalesced segments;
 * - Publish the NVMe reservations.
 *
 * Any failure aborts the transaction (all tree updates roll back).
 *
 * \return	0 on success, negative DER error otherwise
 */
static int
insert_segments(daos_handle_t ih, struct agg_merge_window *mw, bool last, unsigned int *acts)
{
	struct vos_obj_iter	*oiter = vos_hdl2oiter(ih);
	struct vos_object	*obj = oiter->it_obj;
	struct agg_io_context	*io = &mw->mw_io_ctxt;
	struct agg_phy_ent	*phy_ent, *tmp;
	struct agg_lgc_ent	*lgc_ent;
	struct agg_lgc_seg	*lgc_seg;
	struct evt_entry_in	*ent_in;
	struct evt_rect		 rect;
	unsigned int		 i, leftovers = 0;
	int			 rc;

	D_AGG_ASSERT(mw, obj != NULL);
	rc = umem_tx_begin(vos_obj2umm(obj), NULL);
	if (rc)
		return rc;

	/* Publish SCM reservations */
	rc = vos_publish_scm(vos_obj2umm(obj), io->ic_rsrvd_scm, true);
	if (rc) {
		D_ERROR("Publish SCM extents error: "DF_RC"\n", DP_RC(rc));
		goto abort;
	}

	/* Adjust logical entry queue */
	for (i = 0; i < mw->mw_lgc_cnt; i++) {
		lgc_ent = &mw->mw_lgc_ents[i];
		phy_ent = lgc_ent->le_phy_ent;

		D_AGG_ASSERTF(mw, ext1_covers_ext2(&mw->mw_ext, &lgc_ent->le_ext),
			      "mw->mw_ext=" DF_EXT " lgc_ent->le_ext=" DF_EXT "\n",
			      DP_EXT(&mw->mw_ext), DP_EXT(&lgc_ent->le_ext));
		D_AGG_ASSERT(mw, phy_ent->pe_ref > 0);
		phy_ent->pe_ref--;
		phy_ent->pe_trunc_head = true;
	}
	mw->mw_lgc_cnt = 0;

	/* Adjust payload address of truncated physical entries */
	for (i = 0; i < io->ic_seg_cnt; i++) {
		lgc_seg = &io->ic_segs[i];
		ent_in = &io->ic_segs[i].ls_ent_in;
		phy_ent = lgc_seg->ls_phy_ent;

		if (phy_ent != NULL && !bio_addr_is_hole(&ent_in->ei_addr)) {
			/*
			 * the csum free flag indicates that memory has been allocated and should
			 * only be allocated once for the phy_ent.
			 */
			if (phy_ent->pe_csum_free)
				D_FREE(phy_ent->pe_csum_info.cs_csum);
			phy_ent->pe_addr = ent_in->ei_addr;
			/* Checksum from ent_in is assigned to truncated
			 * physical entry, in addition to re-assigning address.
			 */
			phy_ent->pe_csum_info = ent_in->ei_csum;
			D_ALLOC(phy_ent->pe_csum_info.cs_csum, phy_ent->pe_csum_info.cs_buf_len);
			if (phy_ent->pe_csum_info.cs_csum == NULL)
				/*
				 * Must go through abort: returning directly
				 * would leave the transaction open and the
				 * reservations published.
				 */
				D_GOTO(abort, rc = -DER_NOMEM);
			phy_ent->pe_csum_free = true;
			memcpy(phy_ent->pe_csum_info.cs_csum, ent_in->ei_csum.cs_csum,
			       phy_ent->pe_csum_info.cs_buf_len);
		}
	}

	/* Remove old physical entries from EV tree */
	d_list_for_each_entry_safe(phy_ent, tmp, &mw->mw_phy_ents, pe_link) {
		rect = phy_ent->pe_rect;

		D_AGG_ASSERTF(mw, phy_ent->pe_ref == 0, "phy_ent->pe_ref=%d\n", phy_ent->pe_ref);
		/* The physical entry was truncated on prev window flush */
		if (phy_ent->pe_off != 0)
			rect.rc_ex.ex_lo += phy_ent->pe_off;

		D_AGG_ASSERTF(mw, rect.rc_ex.ex_lo <= rect.rc_ex.ex_hi,
			      "phy_ent " DF_RECT " off=" DF_X64 "\n", DP_RECT(&phy_ent->pe_rect),
			      phy_ent->pe_off);
		/*
		 * The physical entry spans window end, but is fully covered
		 * in current window, keep it intact.
		 */
		if (!phy_ent->pe_remove && rect.rc_ex.ex_hi > mw->mw_ext.ex_hi &&
		    !phy_ent->pe_trunc_head) {
			leftovers++;
			continue;
		}

		rc = evt_delete(oiter->it_hdl, &rect, NULL);
		if (rc) {
			D_ERROR("Delete "DF_RECT" pe_off:"
				DF_U64" error: "DF_RC"\n",
				DP_RECT(&rect), phy_ent->pe_off,
				DP_RC(rc));
			goto abort;
		}

		/* Physical entry is in window or fully removed */
		if (rect.rc_ex.ex_hi <= mw->mw_ext.ex_hi ||
		    phy_ent->pe_remove) {
			d_list_del(&phy_ent->pe_link);
			unmark_removals(mw, phy_ent);
			free_phy_ent(phy_ent);
			D_AGG_ASSERT(mw, mw->mw_phy_cnt > 0);
			mw->mw_phy_cnt--;
			continue;
		}

		/* Update extent start of truncated physical entry */
		rect.rc_ex.ex_lo = mw->mw_ext.ex_hi + 1;
		phy_ent->pe_off = rect.rc_ex.ex_lo -
				phy_ent->pe_rect.rc_ex.ex_lo;
		phy_ent->pe_trunc_head = false;

		leftovers++;
	}
	D_AGG_ASSERTF(mw, leftovers == mw->mw_phy_cnt, "leftovers=%d, mw->mw_phy_cnt=%d\n",
		      leftovers, mw->mw_phy_cnt);

	/** Remove processed removal records */
	rc = process_removals(mw, oiter, &mw->mw_rmv_ents, last, true);
	if (rc)
		/* Propagate the error instead of overwriting rc below */
		goto abort;

	/* Insert new segments into EV tree */
	for (i = 0; i < io->ic_seg_cnt; i++) {
		ent_in = &io->ic_segs[i].ls_ent_in;

		/** For insertion, no tx will be inserting anything at this
		 *  epoch so just use the max value for the minor epoch.
		 */
		rc = evt_insert(oiter->it_hdl, ent_in,
				&ent_in->ei_csum.cs_csum);
		if (rc == 1)
			rc = 0;
		if (rc) {
			D_ERROR("Insert segment "DF_RECT" error: "DF_RC"\n",
				DP_RECT(&ent_in->ei_rect), DP_RC(rc));
			goto abort;
		}
	}

	/* Clear window size */
	mw->mw_ext.ex_lo = mw->mw_ext.ex_hi = mw->mw_alloc_hi = 0;

	/* Publish NVMe reservations */
	rc = vos_publish_blocks(obj->obj_cont, &io->ic_nvme_exts, true,
				VOS_IOS_AGGREGATION);
	if (rc) {
		D_ERROR("Publish NVMe extents error: "DF_RC"\n", DP_RC(rc));
		goto abort;
	}
abort:
	if (rc)
		rc = umem_tx_abort(vos_obj2umm(obj), rc);
	else
		rc = umem_tx_commit(vos_obj2umm(obj));

	return rc;
}

/*
 * Post-flush cleanup: on failure, cancel any unpublished SCM and NVMe
 * reservations; in all cases reset the io context for the next window.
 */
static void
cleanup_segments(daos_handle_t ih, struct agg_merge_window *mw, int rc)
{
	struct vos_obj_iter	*oiter = vos_hdl2oiter(ih);
	struct vos_object	*obj = oiter->it_obj;
	struct agg_io_context	*io = &mw->mw_io_ctxt;

	D_AGG_ASSERT(mw, obj != NULL);
	if (rc) {
		/* publish=false cancels the pending reservations */
		vos_publish_scm(vos_obj2umm(obj), io->ic_rsrvd_scm, false);

		if (!d_list_empty(&io->ic_nvme_exts))
			vos_publish_blocks(obj->obj_cont, &io->ic_nvme_exts,
					   false, VOS_IOS_AGGREGATION);
	}

	/* Reset io context */
	D_AGG_ASSERT(mw, d_list_empty(&io->ic_nvme_exts));
	D_AGG_ASSERT(mw, umem_rsrvd_act_cnt(io->ic_rsrvd_scm) == 0);
	io->ic_seg_cnt = 0;
}

/* Reset the merge window extent and drop all queued physical entries. */
static void
clear_merge_window(struct agg_merge_window *mw)
{
	struct agg_phy_ent *ent, *next;

	mw->mw_ext.ex_lo = 0;
	mw->mw_ext.ex_hi = 0;
	mw->mw_alloc_hi = 0;
	mw->mw_lgc_cnt = 0;

	d_list_for_each_entry_safe(ent, next, &mw->mw_phy_ents, pe_link) {
		d_list_del(&ent->pe_link);
		unmark_removals(mw, ent);
		free_phy_ent(ent);
	}
	mw->mw_phy_cnt = 0;
}

/*
 * Free all removal records on @head (recursing one level into each
 * record's re_contained list) and, at top level, reset the window's
 * removal bookkeeping.
 */
static void
free_removal_records(struct agg_merge_window *mw, d_list_t *head, bool top)
{
	struct agg_rmv_ent *ent, *next;

	d_list_for_each_entry_safe(ent, next, head, re_link) {
		d_list_del(&ent->re_link);
		if (!d_list_empty(&ent->re_contained)) {
			/* Contained lists only exist at the top level */
			D_AGG_ASSERT(mw, top);
			free_removal_records(mw, &ent->re_contained, false);
		}
		D_FREE(ent);
	}

	if (!top)
		return;

	mw->mw_rmv_cnt = 0;
	D_INIT_LIST_HEAD(&mw->mw_phy_rmv_ents);
}

/*
 * Decide whether @lgc_cnt adjacent records totaling @seg_size bytes are
 * worth coalescing into a single record.
 */
static inline bool
need_merge(daos_handle_t ih, uint16_t src_media, int lgc_cnt, daos_size_t seg_size)
{
	struct vos_obj_iter	*oiter = vos_hdl2oiter(ih);
	struct vos_object	*obj = oiter->it_obj;
	unsigned int		 seg_blks;
	uint16_t		 tgt_media;

	D_ASSERTF(lgc_cnt > 0 && seg_size > 0, "lgc_cnt=%d seg_size=" DF_U64 "\n", lgc_cnt,
		  seg_size);

	/* A single record has nothing to coalesce */
	if (lgc_cnt == 1)
		return false;

	tgt_media = vos_io_scm(vos_obj2pool(obj), DAOS_IOD_ARRAY, seg_size,
			       VOS_IOS_AGGREGATION) ?
		    DAOS_MEDIA_SCM : DAOS_MEDIA_NVME;

	/* Some data can be migrated from SCM to NVMe to alleviate SCM pressure */
	if (src_media != tgt_media)
		return true;

	/*
	 * Only trigger SCM to SCM data migration when there are enough amount of
	 * SCM records accumulated.
	 */
	if (tgt_media == DAOS_MEDIA_SCM)
		return lgc_cnt >= VOS_EVT_ORDER;

	/*
	 * Only trigger NVMe to NVMe data migration when:
	 * - Coalesced record is larger than threshold; And
	 * - Enough small NVMe records accumulated, or coalesced size is threshold
	 *   size aligned;
	 */
	seg_blks = (seg_size + VOS_BLK_SZ - 1) >> VOS_BLK_SHIFT;
	if (seg_blks < vos_agg_nvme_thresh)
		return false;

	return (lgc_cnt >= VOS_EVT_ORDER) || (seg_blks % vos_agg_nvme_thresh == 0);
}

/*
 * General rules for deciding if a merge window needs be flushed or skipped:
 *
 * 1. If any invisible data to be removed, flush merge window to free space.
 * 2. If any data could be migrated from SCM to NVMe, flush merge window to alleviate
 *    SCM space pressure.
 * 3. If any removal records, punch records could be removed or merged, flush merge
 *    window to condense VOS tree.
 * 4. If only records coalescing within same media (eg. merging small SCM records to a
 *    larger SCM record, or merging small NVMe records to a larger NVMe record), make
 *    a trade-off between VOS tree condensing and data relocating (which consumes CPU
 *    & storage bandwidth, yet likely to generate more fragmentations).
 */
static bool
need_flush(daos_handle_t ih, struct vos_agg_param *agg_param, bool last)
{
	struct agg_merge_window	*mw = &agg_param->ap_window;
	struct agg_phy_ent	*phy_ent;
	struct agg_lgc_ent	*lgc_ent;
	struct evt_extent	 lgc_ext, phy_ext;
	int			 i, lgc_cnt = 0;
	bool			 hole = false;
	daos_size_t		 seg_width = 0;
	uint16_t		 src_media = DAOS_MEDIA_SCM;

	/* Any invisible physical entries ? */
	if (mw->mw_lgc_cnt != mw->mw_phy_cnt)
		return true;

	/* Need to cleanup remaining removal records */
	if (last && mw->mw_rmv_cnt != 0)
		return true;

	/*
	 * To reduce fragmentation, we don't flush (migrate) segment individually,
	 * that means the whole merge window data will be migrated to a new location
	 * when any segment in the window needs be flushed.
	 */
	for (i = 0; i < mw->mw_lgc_cnt; i++) {
		lgc_ent = &mw->mw_lgc_ents[i];
		phy_ent = lgc_ent->le_phy_ent;
		phy_ext = phy_ent->pe_rect.rc_ex;
		lgc_ext = lgc_ent->le_ext;

		/* Any physical entry is partially covered, or appeared in other window */
		if (lgc_ext.ex_lo != phy_ext.ex_lo ||
		    lgc_ext.ex_hi != phy_ext.ex_hi)
			return true;

		/* A hole/data transition (or the first entry) starts a new
		 * candidate segment; first check the one just finished. */
		if (i == 0 || (hole != bio_addr_is_hole(&phy_ent->pe_addr))) {
			if (i && need_merge(ih, src_media, lgc_cnt, seg_width * mw->mw_rsize))
				return true;

			/* Start accumulating a new segment */
			src_media = phy_ent->pe_addr.ba_type;
			seg_width = evt_extent_width(&lgc_ext);
			lgc_cnt = 1;
		} else {
			/*
			 * Any consecutive punch records need be merged, Or;
			 * Caller wants to merge any kinds of consecutive records.
			 */
			if (hole || (agg_param->ap_flags & VOS_AGG_FL_FORCE_MERGE))
				return true;

			/* Regard source media as SCM when any source record is on SCM */
			if (phy_ent->pe_addr.ba_type == DAOS_MEDIA_SCM)
				src_media = DAOS_MEDIA_SCM;
			seg_width += evt_extent_width(&lgc_ext);
			lgc_cnt++;
		}

		hole = bio_addr_is_hole(&phy_ent->pe_addr);
	}

	/* Check the trailing candidate segment left by the loop */
	if (lgc_cnt && need_merge(ih, src_media, lgc_cnt, seg_width * mw->mw_rsize))
		return true;

	/* Nothing worth doing: drop the queued entries and skip the flush */
	clear_merge_window(mw);
	D_DEBUG(DB_EPC, "Skip window flush "DF_EXT"\n", DP_EXT(&mw->mw_ext));

	return false;
}

/*
 * Flush the merge window if needed: prepare the coalesced output
 * segments, copy the data into reserved space, then atomically replace
 * the old EV tree records with the new segments.
 *
 * \return 0 on success (or when no flush is needed), negative DER error
 *	   otherwise.
 */
static int
flush_merge_window(daos_handle_t ih, struct vos_agg_param *agg_param,
		   bool last, unsigned int *acts)
{
	struct agg_merge_window	*mw = &agg_param->ap_window;
	int			 rc;

	if (!need_flush(ih, agg_param, last))
		return 0;

	D_DEBUG(DB_TRACE, "Flush to merge to window "DF_EXT"\n", DP_EXT(&mw->mw_ext));

	/* Prepare the new segments to be inserted */
	rc = prepare_segments(mw);
	if (rc) {
		D_ERROR("Prepare segments "DF_EXT" error: "DF_RC"\n",
			DP_EXT(&mw->mw_ext), DP_RC(rc));
		goto out;
	}

	/* Transfer data from old logical records to reserved new segments */
	rc = fill_segments(ih, agg_param, acts);
	if (rc) {
		DL_CDEBUG(rc == -DER_NOSPACE, DB_EPC, DLOG_ERR, rc,
			  "Fill segments " DF_EXT " error", DP_EXT(&mw->mw_ext));
		goto out;
	}

	/* Replace the old logical records with new segments in EV tree */
	rc = insert_segments(ih, mw, last, acts);
	if (rc) {
		D_ERROR("Insert segments "DF_EXT" error: "DF_RC"\n",
			DP_EXT(&mw->mw_ext), DP_RC(rc));
		goto out;
	}
	credits_consume(&agg_param->ap_credits, AGG_OP_MERGE);
out:
	/* Always cancel leftover reservations & reset io context */
	cleanup_segments(ih, mw, rc);

	return rc;
}

/*
 * Decide whether the merge window must be flushed before the incoming
 * visible logical extent @lgc_ext can be queued.
 */
static bool
trigger_flush(struct agg_merge_window *mw, struct evt_extent *lgc_ext)
{
	struct evt_extent *w_ext = &mw->mw_ext;

	D_AGG_ASSERTF(mw, w_ext->ex_lo <= lgc_ext->ex_lo,
		      "w_ext->ex_lo(" DF_X64 ") > lgc_ext->ex_lo(" DF_X64 ")\n", w_ext->ex_lo,
		      lgc_ext->ex_lo);

	/* Nothing to flush from an empty (closed/flushed) window */
	if (merge_window_status(mw) == MW_CLOSED ||
	    merge_window_status(mw) == MW_FLUSHED)
		return false;

	/*
	 * Window is formed by visible logical entries, must have no
	 * overlapping.
	 */
	D_AGG_ASSERTF(mw, w_ext->ex_hi < lgc_ext->ex_lo, "win:" DF_EXT ", lgc_ent:" DF_EXT "\n",
		      DP_EXT(w_ext), DP_EXT(lgc_ext));

	/* Flush once the window has grown past the threshold */
	if (merge_window_size(mw) >= mw->mw_flush_thresh)
		return true;

	/* Flush when the incoming entry isn't adjacent to the window */
	return w_ext->ex_hi + 1 != lgc_ext->ex_lo;
}

/*
 * Allocate a new physical entry for @phy_ext/@entry and append it to the
 * window's physical entry list (which stays sorted by extent start).
 *
 * \return the new entry, or NULL on allocation failure.
 */
static struct agg_phy_ent *
enqueue_phy_ent(struct agg_merge_window *mw, struct evt_extent *phy_ext,
		const vos_iter_entry_t *entry, bio_addr_t *addr,
		struct dcs_csum_info *csum_info, uint32_t ver)
{
	struct agg_phy_ent *phy_ent;

	D_ALLOC_PTR(phy_ent);
	if (phy_ent == NULL)
		return NULL;

	phy_ent->pe_rect.rc_ex = *phy_ext;
	phy_ent->pe_rect.rc_epc = entry->ie_epoch;
	phy_ent->pe_rect.rc_minor_epc = entry->ie_minor_epc;
	phy_ent->pe_addr = *addr;
	phy_ent->pe_csum_info = *csum_info;
	phy_ent->pe_csum_free = false;
	phy_ent->pe_off = 0;
	phy_ent->pe_ver = ver;
	phy_ent->pe_ref = 0;

	/* Sanity check: count matches list state, list stays sorted */
	if (!d_list_empty(&mw->mw_phy_ents)) {
		struct agg_phy_ent *prev;

		D_AGG_ASSERTF(mw, mw->mw_phy_cnt != 0, "mw->mw_phy_cnt is 0");
		prev = d_list_entry(mw->mw_phy_ents.prev, struct agg_phy_ent,
				    pe_link);
		D_AGG_ASSERTF(mw, prev->pe_rect.rc_ex.ex_lo <= phy_ext->ex_lo,
			      "prev phy_ext: " DF_EXT ", phy_ext: " DF_EXT "\n",
			      DP_EXT(&prev->pe_rect.rc_ex), DP_EXT(phy_ext));
	} else {
		D_AGG_ASSERTF(mw, mw->mw_phy_cnt == 0, "mw->mw_phy_cnt = %d\n", mw->mw_phy_cnt);
	}

	d_list_add_tail(&phy_ent->pe_link, &mw->mw_phy_ents);
	mw->mw_phy_cnt++;

	return phy_ent;
}

/*
 * Append a visible logical entry to the window's logical entry array
 * (growing the array on demand), take a reference on its backing
 * physical entry, and extend the window extent to cover it.
 *
 * \return 0 on success, -DER_NOMEM on array growth failure.
 */
static int
enqueue_lgc_ent(struct agg_merge_window *mw, struct evt_extent *lgc_ext,
		struct agg_phy_ent *phy_ent)
{
	struct agg_lgc_ent	*lgc_ent;
	unsigned int		 max, cnt;

	max = mw->mw_lgc_max;
	cnt = mw->mw_lgc_cnt;
	/* Sanity check: new entry must be adjacent to the window end */
	if (cnt > 0) {
		lgc_ent = &mw->mw_lgc_ents[cnt - 1];
		D_AGG_ASSERTF(mw,
			      lgc_ext->ex_lo == lgc_ent->le_ext.ex_hi + 1 &&
				  lgc_ent->le_ext.ex_hi == mw->mw_ext.ex_hi,
			      "prev lgc_ext: " DF_EXT ", lgc_ext: " DF_EXT "\n",
			      DP_EXT(&lgc_ent->le_ext), DP_EXT(lgc_ext));
	}

	/* Array full, double its capacity (starting at 10) */
	if (cnt == max) {
		unsigned int new_max = max ? max * 2 : 10;

		D_REALLOC_ARRAY(lgc_ent, mw->mw_lgc_ents, max, new_max);
		if (lgc_ent == NULL)
			return -DER_NOMEM;

		mw->mw_lgc_max = new_max;
		mw->mw_lgc_ents = lgc_ent;
	}

	D_AGG_ASSERTF(mw, mw->mw_lgc_max > mw->mw_lgc_cnt,
		      "mw->mw_lgc_max(%d) <= mw->mw_lgc_cnt(%d)\n", mw->mw_lgc_max, mw->mw_lgc_cnt);
	lgc_ent = &mw->mw_lgc_ents[cnt];
	lgc_ent->le_ext = *lgc_ext;
	phy_ent->pe_ref++;
	lgc_ent->le_phy_ent = phy_ent;
	mw->mw_lgc_cnt++;

	/*
	 * Extend window size. If the visible entry is a punched record, the
	 * window size could be very huge, but this is ok, because there won't
	 * be huge contiguous allocation on window flush, only lots of covered
	 * physical entries being deleted.
	 */
	if (mw->mw_lgc_cnt == 1)
		mw->mw_ext.ex_lo = lgc_ext->ex_lo;
	mw->mw_ext.ex_hi = mw->mw_alloc_hi = lgc_ext->ex_hi;

	D_DEBUG(DB_EPC, "lgc_ext:"DF_EXT", phy_ext:"DF_RECT", mw:"DF_EXT", "
		"index:%u\n", DP_EXT(lgc_ext), DP_RECT(&phy_ent->pe_rect),
		DP_EXT(&mw->mw_ext), cnt);

	return 0;
}

/*
 * Tear down the merge window and release all resources held by it and
 * its io context. On error, any queued physical entries and removal
 * records are dropped first.
 */
static void
close_merge_window(struct agg_merge_window *mw, int rc)
{
	struct agg_io_context *io = &mw->mw_io_ctxt;

	if (rc) {
		clear_merge_window(mw);
		free_removal_records(mw, &mw->mw_rmv_ents, true);
	}

	/* On success the window must already be fully drained */
	D_AGG_ASSERTF(mw, mw->mw_rmv_cnt == 0, "mw->mw_rmv_cnt = %d\n", mw->mw_rmv_cnt);
	D_AGG_ASSERT(mw, merge_window_status(mw) != MW_OPENED);

	mw->mw_rsize = 0;
	if (mw->mw_lgc_ents != NULL) {
		D_FREE(mw->mw_lgc_ents);
		mw->mw_lgc_ents = NULL;
		mw->mw_lgc_max = 0;
	}

	if (io->ic_segs != NULL) {
		D_FREE(io->ic_segs);
		io->ic_segs = NULL;
		io->ic_seg_max = 0;
	}

	umem_rsrvd_act_free(&io->ic_rsrvd_scm);

	if (io->ic_csum_recalcs != NULL) {
		D_FREE(io->ic_csum_recalcs);
		io->ic_csum_recalcs = NULL;
		io->ic_csum_recalc_cnt = 0;
	}
	if (io->ic_csum_buf != NULL) {
		D_FREE(io->ic_csum_buf);
		io->ic_csum_buf = NULL;
		io->ic_csum_buf_len = 0;
	}
}

/*
 * Find the queued physical entry matching @phy_ext/@entry; returns NULL
 * when no such entry has been enqueued yet.
 */
static struct agg_phy_ent *
lookup_phy_ent(struct agg_merge_window *mw, const struct evt_extent *phy_ext,
	       const vos_iter_entry_t *entry)
{
	struct agg_phy_ent *ent;

	/* Walk backwards; the list is sorted by extent start offset */
	d_list_for_each_entry_reverse(ent, &mw->mw_phy_ents, pe_link) {
		if (ent->pe_rect.rc_ex.ex_lo < phy_ext->ex_lo)
			break;

		if (ent->pe_rect.rc_ex.ex_hi == phy_ext->ex_hi &&
		    ent->pe_rect.rc_epc == entry->ie_epoch &&
		    ent->pe_rect.rc_minor_epc == entry->ie_minor_epc)
			return ent;
	}

	return NULL;
}

/*
 * Take a reference on every removal record at the same epoch whose
 * extent overlaps the invisible logical extent @lgc_ext, so the records
 * are retained until their covered physical entries have been processed
 * (released via unmark_removals()).
 */
static void
mark_removals(struct agg_merge_window *mw, struct agg_phy_ent *phy_ent,
	      const struct evt_extent *lgc_ext)
{
	struct agg_rmv_ent	*rmv_ent;

	if (d_list_empty(&mw->mw_phy_rmv_ents))
		return;

	/* This is not a real entry but it doesn't matter.   It will be used to calculate
	 * where to continue if the physical record has been processed before to ensure
	 * we never refcount the same record more than once.
	 */
	rmv_ent = d_list_entry(&mw->mw_phy_rmv_ents, struct agg_rmv_ent, re_phy_link);

	d_list_for_each_entry_continue(rmv_ent, &mw->mw_phy_rmv_ents, re_phy_link) {
		/* Removal only covers records at the same epoch */
		if (rmv_ent->re_rect.rc_epc != phy_ent->pe_rect.rc_epc)
			continue;

		/* Record ends before the logical extent, no overlap */
		if (rmv_ent->re_rect.rc_ex.ex_hi < lgc_ext->ex_lo)
			continue;

		/** We should be processing extents in order so this should mean there is overlap */
		D_AGG_ASSERTF(mw, rmv_ent->re_rect.rc_ex.ex_lo <= lgc_ext->ex_lo,
			      "rmv_ent->re_rect.rc_ex.ex_lo=" DF_X64 " lgc_ext->ex_lo=" DF_X64 "\n",
			      rmv_ent->re_rect.rc_ex.ex_lo, lgc_ext->ex_lo);
		rmv_ent->re_phy_count++;
	}
}

/*
 * Process one sorted-iterator entry against the merge window: handle
 * DTX state, delete fully-covered entries, enqueue removal records,
 * trigger a window flush when the threshold/disjointness condition is
 * reached, and queue the entry's physical & visible logical parts.
 * Flushes and closes the window when the last entry is seen.
 *
 * \return 0 on success; -DER_TX_RESTART / -DER_TX_BUSY to signal the
 *	   iterator on aborted / uncommitted entries; other negative DER
 *	   error otherwise.
 */
static int
join_merge_window(daos_handle_t ih, struct vos_agg_param *agg_param,
		  vos_iter_entry_t *entry, unsigned int *acts)
{
	struct vos_obj_iter	*oiter = vos_hdl2oiter(ih);
	struct agg_merge_window	*mw = &agg_param->ap_window;
	struct evt_extent	 phy_ext, lgc_ext;
	struct agg_phy_ent	*phy_ent;
	bool			 remove, visible, partial, last;
	int			 rc = 0;

	recx2ext(&entry->ie_recx, &lgc_ext);
	recx2ext(&entry->ie_orig_recx, &phy_ext);
	D_AGG_ASSERTF(mw, ext1_covers_ext2(&phy_ext, &lgc_ext),
		      "phy_ext=" DF_EXT ", lgc_ext=" DF_EXT "\n", DP_EXT(&phy_ext),
		      DP_EXT(&lgc_ext));

	switch (entry->ie_dtx_state) {
	case DTX_ST_COMMITTED:
		break;
	case DTX_ST_ABORTED:
		/*
		 * Delete the aborted entry, and inform iterator to abort
		 * current evtree aggregation.
		 *
		 * NB. We can't continue current evtree aggregation since
		 * other entry's visibility could be invalid after deleting.
		 */
		D_DEBUG(DB_EPC, "Delete aborted EV entry "DF_EXT"@"DF_X64"\n",
			DP_EXT(&phy_ext), entry->ie_epoch);

		rc = delete_evt_entry(agg_param, oiter, entry, "aborted");
		if (rc)
			return rc;
		/** We just need an alternative error code.  Use -DER_TX_RESTART
		 *  here to indicate that we hit an aborted entry and need to
		 *  restart the aggregation of the evtree.  Using -DER_TX_BUSY
		 *  would mean aborting the current level and everything above
		 *  it.   We only want to do that if we hit an in-progress
		 *  entry.
		 */
		return -DER_TX_RESTART;
	case DTX_ST_PREPARED:
		/*
		 * Keep uncommitted entry, and inform iterator to abort
		 * current evtree aggregation.
		 */
		D_DEBUG(DB_EPC, "Hit uncommitted EV entry "DF_EXT"@"DF_X64"\n",
			DP_EXT(&phy_ext), entry->ie_epoch);
		return -DER_TX_BUSY;
	default:
		D_AGG_ASSERTF(mw, 0, "Unexpected DTX state: %d\n", entry->ie_dtx_state);
		break;
	}

	/* Decode the iterator visibility flags for this entry */
	visible = (entry->ie_vis_flags & VOS_VIS_FLAG_VISIBLE);
	remove = (entry->ie_vis_flags & VOS_VIS_FLAG_REMOVE);
	partial = (entry->ie_vis_flags & VOS_VIS_FLAG_PARTIAL);
	last = (entry->ie_vis_flags & VOS_VIS_FLAG_LAST);

	/* Just delete the fully covered intact physical entry */
	if (!visible && !partial && !remove) {
		D_AGG_ASSERTF(mw, lgc_ext.ex_lo == phy_ext.ex_lo && lgc_ext.ex_hi == phy_ext.ex_hi,
			      "" DF_EXT " != " DF_EXT "\n", DP_EXT(&lgc_ext), DP_EXT(&phy_ext));
		D_AGG_ASSERTF(mw, entry->ie_vis_flags & VOS_VIS_FLAG_COVERED,
			      "entry->ie_vis_flags=%x\n", entry->ie_vis_flags);

		rc = delete_evt_entry(agg_param, oiter, entry, "covered");
		if (rc)
			return rc;
		goto out;
	}

	if (remove) {
		/* Enqueue removal record */
		rc = enqueue_rmv_ent(mw, &phy_ext, entry, oiter);
		if (rc != 0) {
			D_ERROR("Enqueue rm_ent win:"DF_EXT", ent:"DF_EXT" "
				"error: "DF_RC"\n", DP_EXT(&mw->mw_ext),
				DP_EXT(&phy_ext), DP_RC(rc));
			return rc;
		}

		goto out;
	}

	/* Trigger current window flush when reaching threshold */
	if (visible && trigger_flush(mw, &lgc_ext)) {
		/* The window flush doesn't expect holes caused by removal records */
		mw->mw_ext.ex_hi = lgc_ext.ex_lo - 1;
		rc = flush_merge_window(ih, agg_param, false, acts);
		if (rc) {
			DL_CDEBUG(rc == -DER_NOSPACE, DB_EPC, DLOG_ERR, rc,
				  "Flush window " DF_EXT " error", DP_EXT(&mw->mw_ext));
			return rc;
		}
		D_AGG_ASSERT(mw, merge_window_status(mw) == MW_FLUSHED);
	}

	/* Lookup physical entry, enqueue if it doesn't exist */
	phy_ent = lookup_phy_ent(mw, &phy_ext, entry);
	if (phy_ent == NULL) {
		/* Only the first logical entry of a physical entry (same
		 * start offset) may enqueue it */
		if (phy_ext.ex_lo != lgc_ext.ex_lo) {
			D_AGG_ASSERTF(mw,
				      !visible && phy_ent_is_removed(mw, &phy_ext, entry->ie_epoch),
				      "visible=%d phy_ext=" DF_EXT " entry->ie_epoch=" DF_X64 "\n",
				      visible, DP_EXT(&phy_ext), entry->ie_epoch);
			goto out;
		}
		phy_ent = enqueue_phy_ent(mw, &phy_ext, entry,
					  &entry->ie_biov.bi_addr,
					  &entry->ie_csum, entry->ie_ver);
		if (phy_ent == NULL) {
			rc = -DER_NOMEM;
			D_ERROR("Enqueue phy_ent win:"DF_EXT", ent:"DF_EXT" "
				"error: "DF_RC"\n", DP_EXT(&mw->mw_ext),
				DP_EXT(&phy_ext), DP_RC(rc));
			return rc;
		}
	} else {
		/* Can't be the first logical entry */
		D_AGG_ASSERTF(mw, phy_ext.ex_lo != lgc_ext.ex_lo,
			      "phy_ext=" DF_EXT ", lgc_ext=" DF_EXT "\n", DP_EXT(&phy_ext),
			      DP_EXT(&lgc_ext));
	}

	/* Enqueue the visible logical entry */
	if (visible) {
		rc = enqueue_lgc_ent(mw, &lgc_ext, phy_ent);
		if (rc) {
			D_ERROR("Enqueue lgc_ent win: "DF_EXT", ent:"DF_EXT" "
				"error: "DF_RC"\n", DP_EXT(&mw->mw_ext),
				DP_EXT(&lgc_ext), DP_RC(rc));
			return rc;
		}
	} else {
		/* Fully covered physical entry must have been deleted */
		D_AGG_ASSERT(mw, partial);
		/* refcount any removal records covering this extent */
		mark_removals(mw, phy_ent, &lgc_ext);
	}
out:
	/* Flush & close window on last entry */
	if (last) {
		rc = flush_merge_window(ih, agg_param, true, acts);
		if (rc)
			DL_CDEBUG(rc == -DER_NOSPACE, DB_EPC, DLOG_ERR, rc,
				  "Flush window " DF_EXT " error", DP_EXT(&mw->mw_ext));

		close_merge_window(mw, rc);
	}

	return rc;
}

/**
 * Record the I/O record size and checksum type of the merge window on the
 * first processed entry, derive the window flush threshold from the record
 * size, and verify that every subsequent entry agrees with the recorded
 * values.
 *
 * \param[in,out] mw	Merge window being configured/validated
 * \param[in] entry	Iterator entry about to join the window
 *
 * \return 0 on success, -DER_INVAL on record size or csum type mismatch
 */
static int
set_window_size(struct agg_merge_window *mw, vos_iter_entry_t *entry)
{
	struct dcs_csum_info	*ci = &entry->ie_csum;
	daos_size_t		 rec_size = entry->ie_rsize;

	if (rec_size == 0) {
		D_DEBUG(DB_TRACE, "EV tree 0 iod_size could be caused by "
			"inserting punch records in an empty tree.  This can "
			"happen during rebuild.\n");
		/** Just set it to 1.  If the tree is all holes anyway, it
		 *  should be fine to assume a record size.
		 */
		rec_size = 1;
	}

	if (mw->mw_rsize != 0) {
		/* Subsequent entries must match what the first established */
		if (mw->mw_rsize != rec_size) {
			D_CRIT("Mismatched iod_size "DF_U64" != "DF_U64"\n",
			       mw->mw_rsize, rec_size);
			return -DER_INVAL;
		}
		if (ci->cs_type != mw->mw_csum_type) {
			D_CRIT("Mismatched csum type %u != %u\n",
			       mw->mw_csum_type, ci->cs_type);
			return -DER_INVAL;
		}
		return 0;
	}

	/* First entry: establish record size and flush threshold */
	mw->mw_rsize = rec_size;

	if (DAOS_FAIL_CHECK(DAOS_VOS_AGG_MW_THRESH)) {
		/* Fault-injection override of the threshold, for testing */
		mw->mw_flush_thresh = daos_fail_value_get();
		D_INFO("Set flush threshold to: "DF_U64"\n",
		       mw->mw_flush_thresh);
	} else if (rec_size < (VOS_MW_FLUSH_THRESH / 2)) {
		mw->mw_flush_thresh = VOS_MW_FLUSH_THRESH;
	} else {
		/* Large records: ensure the window can hold at least one */
		mw->mw_flush_thresh = (rec_size < VOS_MW_FLUSH_THRESH) ?
					rec_size * 2 : rec_size;
		D_INFO("Bump flush threshold to: "DF_U64", rsize: "
		       ""DF_U64"\n", mw->mw_flush_thresh, rec_size);
	}

	/* Set csum support flag on processing first entry */
	mw->mw_csum_type = ci->cs_type;

	return 0;
}

/**
 * Aggregation/discard callback for an EV tree (array value) entry.
 *
 * For discard, the physical extent is deleted once the iterator reaches its
 * first logical entry. For aggregation, the entry is joined into the current
 * merge window after validating record size and csum type consistency.
 *
 * \param[in] ih		Iterator handle
 * \param[in] entry		Current iterator entry
 * \param[in,out] agg_param	Aggregation parameters/state
 * \param[out] acts		VOS_ITER_CB_* actions returned to vos_iterate()
 *
 * \return 0 on success, negative DER error otherwise
 */
static int
vos_agg_ev(daos_handle_t ih, vos_iter_entry_t *entry,
	   struct vos_agg_param *agg_param, unsigned int *acts)
{
	struct agg_merge_window	*mw;
	struct evt_extent	 phy_ext, lgc_ext;
	int			 rc = 0;
	int                      next_idx;
	struct vos_container	*cont;

	/* Validate inputs before dereferencing them */
	D_ASSERT(agg_param != NULL);
	D_ASSERT(acts != NULL);

	mw = &agg_param->ap_window;
	cont = vos_hdl2cont(agg_param->ap_coh);
	recx2ext(&entry->ie_recx, &lgc_ext);
	recx2ext(&entry->ie_orig_recx, &phy_ext);

	/* Record the entry in the per-akey EV trace ring buffer */
	if (agg_param->ap_trace_count >= EV_TRACE_MAX) {
		/* Ring is full: overwrite the oldest slot */
		next_idx                  = agg_param->ap_trace_start;
		agg_param->ap_trace_start = (agg_param->ap_trace_start + 1) % EV_TRACE_MAX;
	} else {
		next_idx = agg_param->ap_trace_start + agg_param->ap_trace_count;
		/*
		 * Clamp the count at EV_TRACE_MAX; only "ring full" vs "still
		 * filling" matters for indexing, and an unbounded increment
		 * could eventually overflow on very long scans.
		 */
		agg_param->ap_trace_count++;
	}
	memcpy(&agg_param->ap_evt_trace[next_idx], entry, sizeof(*entry));

	credits_consume(&agg_param->ap_credits, AGG_OP_SCAN);

	/* Discard */
	if (agg_param->ap_discard) {
		struct vos_obj_iter	*oiter = vos_hdl2oiter(ih);

		/*
		 * Delete the physical entry when iterating to the first
		 * logical entry
		 */
		if (phy_ext.ex_lo == lgc_ext.ex_lo)
			rc = delete_evt_entry(agg_param, oiter, entry, "discarded");

		/*
		 * Sorted iteration doesn't support tree empty check, so we
		 * always inform vos_iterate() to check if subtree is empty.
		 */
		if (entry->ie_vis_flags & VOS_VIS_FLAG_LAST) {
			/* Trigger re-probe in akey iteration */
			*acts |= VOS_ITER_CB_YIELD;
		}
		return rc;
	}

	/* Current DTX handle must be NULL, since aggregation runs under non-DTX mode. */
	D_ASSERT(vos_dth_get(cont->vc_pool->vp_sysdb) == NULL);

	/* Aggregation Yield for testing purpose */
	while (DAOS_FAIL_CHECK(DAOS_VOS_AGG_BLOCKED))
		ABT_thread_yield();

	/* Aggregation */
	D_DEBUG(DB_EPC, "oid:"DF_UOID", lgc_ext:"DF_EXT", "
		"phy_ext:"DF_EXT", epoch:"DF_X64".%d, flags: %x(%c)\n",
		DP_UOID(agg_param->ap_oid), DP_EXT(&lgc_ext),
		DP_EXT(&phy_ext), entry->ie_epoch, entry->ie_minor_epc,
		entry->ie_vis_flags, evt_vis2dbg(entry->ie_vis_flags));

	rc = set_window_size(mw, entry);
	if (rc)
		goto out;

	rc = join_merge_window(ih, agg_param, entry, acts);
	if (rc)
		DL_CDEBUG(rc == -DER_TX_RESTART || rc == -DER_TX_BUSY || rc == -DER_NOSPACE,
			  DB_TRACE, DLOG_ERR, rc, "Join window " DF_EXT "/" DF_EXT " error",
			  DP_EXT(&mw->mw_ext), DP_EXT(&phy_ext));
out:
	/* Any error closes (and releases) the merge window */
	if (rc)
		close_merge_window(mw, rc);

	return rc;
}

/**
 * Pre-order vos_iterate() callback: dispatch each iterator entry to the
 * matching aggregation/discard handler, convert recoverable value-level
 * errors (csum, uncommitted, no-space) into iterator abort actions, and
 * yield when the scan credits are exhausted.
 *
 * \param[in] ih		Iterator handle
 * \param[in] entry		Current iterator entry
 * \param[in] type		Iterator entry type (obj/dkey/akey/recx/sv)
 * \param[in] param		Iterator parameters
 * \param[in] cb_arg		struct vos_agg_param
 * \param[out] acts		VOS_ITER_CB_* actions returned to vos_iterate()
 *
 * \return 0 on success (including suppressed recoverable errors), negative
 *	   DER error otherwise
 */
static int
vos_aggregate_pre_cb(daos_handle_t ih, vos_iter_entry_t *entry,
		     vos_iter_type_t type, vos_iter_param_t *param,
		     void *cb_arg, unsigned int *acts)
{
	struct vos_agg_param	*agg_param = cb_arg;
	struct vos_container	*cont;
	int			 rc = 0;

	cont = vos_hdl2cont(param->ip_hdl);
	D_DEBUG(DB_EPC, DF_CONT": Aggregate pre, type:%d, is_discard:%d\n",
		DP_CONT(cont->vc_pool->vp_id, cont->vc_id), type,
		agg_param->ap_discard);

	switch (type) {
	case VOS_ITER_OBJ:
		rc = vos_agg_obj(ih, entry, agg_param, acts);
		break;
	case VOS_ITER_DKEY:
		rc = vos_agg_dkey(ih, entry, agg_param, acts);
		break;
	case VOS_ITER_AKEY:
		rc = vos_agg_akey(ih, entry, agg_param, acts);
		/* EV trace ring buffer is scoped to a single akey */
		agg_param->ap_trace_start = 0;
		agg_param->ap_trace_count = 0;
		break;
	case VOS_ITER_RECX:
		rc = vos_agg_ev(ih, entry, agg_param, acts);
		if (rc == -DER_TX_RESTART) {
			D_DEBUG(DB_EPC, "Restarting evtree aggregation\n");
			*acts |= VOS_ITER_CB_RESTART;
			rc = 0;
			break;
		}
		/* fall through to check for abort */
	case VOS_ITER_SINGLE:
		/* Guard needed: this case is also reached via RECX fall-through */
		if (type == VOS_ITER_SINGLE)
			rc = vos_agg_sv(ih, entry, agg_param, acts);
		if (rc == -DER_CSUM || rc == -DER_TX_BUSY || rc == -DER_NOSPACE) {
			struct vos_agg_metrics	*vam = agg_cont2metrics(cont);

			D_DEBUG(DB_EPC, "Abort value aggregation "DF_RC"\n",
				DP_RC(rc));

			*acts |= VOS_ITER_CB_ABORT;
			if (rc == -DER_CSUM) {
				agg_param->ap_csum_err = 1;
				if (vam && vam->vam_csum_errs)
					d_tm_inc_counter(vam->vam_csum_errs, 1);
			} else if (rc == -DER_NOSPACE) {
				agg_param->ap_nospc_err = true;
			} else if (rc == -DER_TX_BUSY) {
				/** Must not aggregate anything above
				 *  this entry to avoid orphaned tree
				 *  assertion
				 */
				agg_param->ap_in_progress = 1;
				agg_param->ap_skip_akey = true;
				agg_param->ap_skip_dkey = true;
				agg_param->ap_skip_obj = true;

				if (vam && vam->vam_uncommitted)
					d_tm_inc_counter(vam->vam_uncommitted, 1);
			}
			/* Recoverable: suppress the error, keep scanning */
			rc = 0;
		}
		break;
	default:
		D_ASSERTF(false, "Invalid iter type\n");
		rc = -DER_INVAL;
		break;
	}

	if (rc < 0) {
		struct vos_agg_metrics *vam = agg_cont2metrics(cont);

		D_ERROR("VOS aggregation failed: "DF_RC"\n", DP_RC(rc));
		if (vam && vam->vam_fail_count)
			d_tm_inc_counter(vam->vam_fail_count, 1);

		return rc;
	}

	/* Yield periodically so aggregation doesn't starve other ULTs */
	if (credits_exhausted(&agg_param->ap_credits) ||
	    (DAOS_FAIL_CHECK(DAOS_VOS_AGG_RANDOM_YIELD) && (rand() % 2))) {
		D_DEBUG(DB_EPC, "Credits exhausted, type:%u, acts:%u\n", type, *acts);

		if (vos_aggregate_yield(agg_param)) {
			D_DEBUG(DB_EPC, "VOS discard/aggregation aborted\n");
			*acts |= VOS_ITER_CB_EXIT;
		}
	}

	return 0;
}

/**
 * Post-order vos_iterate() callback: after a subtree has been visited,
 * aggregate the incarnation log of the object/key and delete the entry if it
 * became empty. A positive return from the underlying aggregate routine is
 * mapped to iterator delete/reprobe actions; -DER_TX_BUSY is suppressed while
 * marking the parent path as non-aggregatable.
 *
 * \return 0 on success (including suppressed -DER_TX_BUSY), negative DER
 *	   error otherwise
 */
static int
vos_aggregate_post_cb(daos_handle_t ih, vos_iter_entry_t *entry,
		      vos_iter_type_t type, vos_iter_param_t *param,
		      void *cb_arg, unsigned int *acts)
{
	struct vos_agg_param	*agg_param = cb_arg;
	struct vos_container	*cont;
	int			 rc = 0;

	cont = vos_hdl2cont(param->ip_hdl);
	D_DEBUG(DB_EPC, DF_CONT": Aggregate post, type:%d, is_discard:%d\n",
		DP_CONT(cont->vc_pool->vp_id, cont->vc_id), type,
		agg_param->ap_discard);

	switch (type) {
	case VOS_ITER_OBJ:
		if (agg_param->ap_skip_obj) {
			/* One-shot skip flag set on uncommitted child entry */
			agg_param->ap_skip_obj = false;
			break;
		}
		rc = oi_iter_aggregate(ih, agg_param->ap_discard_obj);
		break;
	case VOS_ITER_DKEY:
		if (agg_param->ap_skip_dkey) {
			agg_param->ap_skip_dkey = false;
			break;
		}
		/* fall through: dkey and akey share the key aggregation path */
	case VOS_ITER_AKEY:
		if (agg_param->ap_skip_akey) {
			agg_param->ap_skip_akey = false;
			break;
		}
		/* EV trace ring buffer is scoped to a single akey */
		agg_param->ap_trace_start = 0;
		agg_param->ap_trace_count = 0;
		rc = vos_obj_iter_aggregate(ih, agg_param->ap_discard_obj);
		break;
	case VOS_ITER_SINGLE:
		return 0;
	case VOS_ITER_RECX:
		return 0;
	default:
		D_ASSERTF(false, "Invalid iter type\n");
		return -DER_INVAL;
	}

	if (rc > 0) {
		/* Reprobe flag is set */
		if (rc == 1)
			*acts |= VOS_ITER_CB_DELETE;
		/** If it's 2, we don't need a reprobe. The key still exists at other epoch.
		 *  Treat it as a delete regardless for accounting purposes the key is no longer
		 *  visible at this epoch.
		 */
		inc_agg_counter(agg_param, type, AGG_OP_DEL);
		rc = 0;
	} else if (rc != 0) {
		struct vos_agg_metrics *vam = agg_cont2metrics(cont);

		D_ERROR("VOS aggregation failed: %d\n", rc);
		if (vam && vam->vam_fail_count)
			d_tm_inc_counter(vam->vam_fail_count, 1);

		/*
		 * -DER_TX_BUSY error indicates current ilog aggregation
		 * aborted on hitting uncommitted entry, this should be a very
		 * rare case, we'd suppress the error here to keep aggregation
		 * moving forward.   We do, however, need to ensure we do not
		 * aggregate anything in the parent path.  Otherwise, we could
		 * orphan the current entry due to incarnation log semantics.
		 */
		if (rc == -DER_TX_BUSY) {
			agg_param->ap_in_progress = 1;
			rc = 0;
			/* Mark every ancestor level as must-skip */
			switch (type) {
			default:
				D_ASSERTF(type == VOS_ITER_OBJ,
					  "Invalid iter type\n");
				break;
			case VOS_ITER_AKEY:
				agg_param->ap_skip_dkey = true;
				/* fall through */
			case VOS_ITER_DKEY:
				agg_param->ap_skip_obj = true;
				/* fall through */
			}

			if (vam && vam->vam_uncommitted)
				d_tm_inc_counter(vam->vam_uncommitted, 1);
		}
	}

	return rc;
}

/* Mutual-exclusion modes tracked by aggregate_enter()/aggregate_exit() */
enum {
	/* Epoch range aggregation over the whole container */
	AGG_MODE_AGGREGATE,
	/* Epoch range discard over the whole container */
	AGG_MODE_DISCARD,
	/* Discard restricted to a single object (vos_discard() with @oidp) */
	AGG_MODE_OBJ_DISCARD,
};

/**
 * Mark the container as entering an aggregation/discard phase, enforcing
 * mutual exclusion between conflicting in-flight aggregation, discard and
 * per-object discard operations, then flush the WAL header.
 *
 * \param[in,out] cont	Container being aggregated/discarded
 * \param[in] agg_mode	One of the AGG_MODE_* values
 * \param[in] epr	Epoch range of the new operation
 *
 * \return 0 on success, -DER_BUSY when a conflicting operation is running,
 *	   negative DER error from the WAL flush otherwise
 */
static int
aggregate_enter(struct vos_container *cont, int agg_mode, daos_epoch_range_t *epr)
{
	struct vos_agg_metrics	*vam = agg_cont2metrics(cont);
	int			 rc;

	/** TODO: Now that we have per object mutual exclusion, perhaps we can
	 * remove the top level mutual exclusion.  Keep it for now to avoid too
	 * much change at once.
	 */
	if (agg_mode == AGG_MODE_DISCARD) {
		/* Only one container-level discard at a time */
		if (cont->vc_in_discard) {
			D_ERROR(DF_CONT": Already in discard epr["DF_U64", "DF_U64"]\n",
				DP_CONT(cont->vc_pool->vp_id, cont->vc_id),
				cont->vc_epr_discard.epr_lo, cont->vc_epr_discard.epr_hi);
			return -DER_BUSY;
		}

		/* No overlap with any in-flight per-object discard */
		if (cont->vc_obj_discard_count != 0) {
			D_ERROR(DF_CONT ": In object discard epr[" DF_U64 ", " DF_U64 "]\n",
				DP_CONT(cont->vc_pool->vp_id, cont->vc_id),
				cont->vc_epr_discard.epr_lo, cont->vc_epr_discard.epr_hi);
			return -DER_BUSY;
		}

		/* Reject discard whose range overlaps a running aggregation */
		if (cont->vc_in_aggregation && cont->vc_epr_aggregation.epr_hi >= epr->epr_lo) {
			D_ERROR(DF_CONT": Aggregate epr["DF_U64", "DF_U64"], "
				"discard epr["DF_U64", "DF_U64"]\n",
				DP_CONT(cont->vc_pool->vp_id, cont->vc_id),
				cont->vc_epr_aggregation.epr_lo,
				cont->vc_epr_aggregation.epr_hi,
				epr->epr_lo, epr->epr_hi);
			return -DER_BUSY;
		}

		cont->vc_in_discard = 1;
		cont->vc_epr_discard = *epr;
	} else if (agg_mode == AGG_MODE_AGGREGATE) {
		/* Only one container-level aggregation at a time */
		if (cont->vc_in_aggregation) {
			D_DEBUG(DB_EPC, DF_CONT": Already in aggregation epr["DF_U64", "DF_U64"]\n",
				DP_CONT(cont->vc_pool->vp_id, cont->vc_id),
				cont->vc_epr_aggregation.epr_lo, cont->vc_epr_aggregation.epr_hi);
			return -DER_BUSY;
		}

		/* Reject aggregation whose range overlaps a running discard */
		if (cont->vc_in_discard &&
		    cont->vc_epr_discard.epr_lo <= epr->epr_hi) {
			D_ERROR(DF_CONT": Discard epr["DF_U64", "DF_U64"], "
				"aggregation epr["DF_U64", "DF_U64"]\n",
				DP_CONT(cont->vc_pool->vp_id, cont->vc_id),
				cont->vc_epr_discard.epr_lo,
				cont->vc_epr_discard.epr_hi,
				epr->epr_lo, epr->epr_hi);
			return -DER_BUSY;
		}

		cont->vc_in_aggregation = 1;
		cont->vc_epr_aggregation = *epr;

		if (vam && vam->vam_epr_dur)
			d_tm_mark_duration_start(vam->vam_epr_dur, D_TM_CLOCK_THREAD_CPUTIME);
	} else if (agg_mode == AGG_MODE_OBJ_DISCARD) {
		/** Theoretically, this could overlap with vos_discard as well
		 * as aggregation but it makes the logic in vos_obj_hold more
		 * complicated so defer for now and just disallow it. We can
		 * conflict with aggregation, however without issues.
		 */
		if (cont->vc_in_discard) {
			D_ERROR(DF_CONT ": Already in discard epr[" DF_U64 ", " DF_U64 "]\n",
				DP_CONT(cont->vc_pool->vp_id, cont->vc_id),
				cont->vc_epr_discard.epr_lo, cont->vc_epr_discard.epr_hi);
			return -DER_BUSY;
		}

		cont->vc_obj_discard_count++;
	} else {
		D_ASSERT(0);
	}

	rc = vos_flush_wal_header(cont->vc_pool);
	if (rc)
		D_ERROR(DF_CONT": Failed to flush WAL header. "DF_RC"\n",
			DP_CONT(cont->vc_pool->vp_id, cont->vc_id), DP_RC(rc));

	return rc;
}

/**
 * Leave an aggregation/discard phase previously entered via
 * aggregate_enter(), clearing the container's in-progress state.
 *
 * \param[in,out] cont	Container leaving the phase
 * \param[in] agg_mode	Same AGG_MODE_* value passed to aggregate_enter()
 */
static void
aggregate_exit(struct vos_container *cont, int agg_mode)
{
	struct vos_agg_metrics	*vam = agg_cont2metrics(cont);

	if (agg_mode == AGG_MODE_DISCARD) {
		D_ASSERT(cont->vc_in_discard);
		cont->vc_in_discard = 0;
		cont->vc_epr_discard.epr_lo = 0;
		cont->vc_epr_discard.epr_hi = 0;
	} else if (agg_mode == AGG_MODE_AGGREGATE) {
		D_ASSERT(cont->vc_in_aggregation);
		cont->vc_in_aggregation = 0;
		cont->vc_epr_aggregation.epr_lo = 0;
		cont->vc_epr_aggregation.epr_hi = 0;

		if (vam && vam->vam_epr_dur)
			d_tm_mark_duration_end(vam->vam_epr_dur);
	} else if (agg_mode == AGG_MODE_OBJ_DISCARD) {
		D_ASSERT(cont->vc_obj_discard_count > 0);
		cont->vc_obj_discard_count--;
	} else {
		D_ASSERT(0);
	}
}

/**
 * Reset a merge window to pristine state and initialize its linked lists.
 *
 * \param[out] mw	Merge window to initialize
 */
static void
merge_window_init(struct agg_merge_window *mw)
{
	struct agg_io_context	*ioc;

	memset(mw, 0, sizeof(*mw));
	ioc = &mw->mw_io_ctxt;
	D_INIT_LIST_HEAD(&mw->mw_phy_ents);
	D_INIT_LIST_HEAD(&mw->mw_phy_rmv_ents);
	D_INIT_LIST_HEAD(&mw->mw_rmv_ents);
	D_INIT_LIST_HEAD(&ioc->ic_nvme_exts);
}

/*
 * Scratch state for one aggregation/discard pass; allocated with
 * D_ALLOC_PTR by vos_aggregate()/vos_discard() rather than on the stack.
 */
struct agg_data {
	/* Parameters passed to vos_iterate() */
	vos_iter_param_t	ad_iter_param;
	/* Aggregation callback parameters/state */
	struct vos_agg_param	ad_agg_param;
	/* Iterator anchors preserved across yields/retries */
	struct vos_iter_anchors	ad_anchors;
};

/**
 * Public wrapper: enter container-level aggregation for the epoch range.
 *
 * \return 0 on success, -DER_BUSY on conflict, negative DER error otherwise
 */
int
vos_aggregate_enter(daos_handle_t coh, daos_epoch_range_t *epr)
{
	struct vos_container	*cont = vos_hdl2cont(coh);

	return aggregate_enter(cont, AGG_MODE_AGGREGATE, epr);
}

/**
 * Public wrapper: leave container-level aggregation entered via
 * vos_aggregate_enter().
 */
void
vos_aggregate_exit(daos_handle_t coh)
{
	struct vos_container	*cont = vos_hdl2cont(coh);

	aggregate_exit(cont, AGG_MODE_AGGREGATE);
}

/**
 * Aggregate all updates in the epoch range [epr_lo, epr_hi] of a container,
 * coalescing EV tree extents and deleting superseded records, then advance
 * the container HAE (highest aggregated epoch).
 *
 * \param[in] coh		Container handle
 * \param[in] epr		Epoch range to aggregate; epr_hi must not be
 *				DAOS_EPOCH_MAX
 * \param[in] yield_func	Optional yield callback; non-zero return aborts
 * \param[in] yield_arg		Argument passed to @yield_func
 * \param[in] flags		VOS_AGG_FL_* flags
 *
 * \return 0 on success, -DER_CSUM on checksum error (HAE still updated),
 *	   negative DER error otherwise
 */
int
vos_aggregate(daos_handle_t coh, daos_epoch_range_t *epr,
	      int (*yield_func)(void *arg), void *yield_arg, uint32_t flags)
{
	struct vos_container	*cont = vos_hdl2cont(coh);
	struct vos_agg_metrics  *vam  = agg_cont2metrics(cont);
	struct agg_data		*ad;
	uint64_t		 feats;
	daos_epoch_t		 agg_write;
	bool			 has_agg_write;
	int			 rc;
	bool			 run_agg = false;
	int                      blocks  = 0;

	/* Validate @epr before dereferencing it */
	D_ASSERT(epr != NULL);
	D_ASSERTF(epr->epr_lo < epr->epr_hi && epr->epr_hi != DAOS_EPOCH_MAX,
		  "epr_lo:"DF_U64", epr_hi:"DF_U64"\n",
		  epr->epr_lo, epr->epr_hi);
	/* DF_U64 instead of %lu: daos_epoch_t is 64-bit on all platforms */
	D_DEBUG(DB_TRACE, "epr: "DF_U64" -> "DF_U64"\n",
		epr->epr_lo, epr->epr_hi);

	D_ALLOC_PTR(ad);
	if (ad == NULL)
		return -DER_NOMEM;

	rc = aggregate_enter(cont, AGG_MODE_AGGREGATE, epr);
	if (rc)
		goto free_agg_data;

	/** Use the lower end of the epoch range as the barrier when we are aggregating a
	 *  deleted snapshot.  If there is no write above that range for a given key,
	 *  the scan would be a noop anyway.
	 */
	if (flags & VOS_AGG_FL_FORCE_SCAN)
		ad->ad_agg_param.ap_filter_epoch = epr->epr_lo;
	else
		ad->ad_agg_param.ap_filter_epoch = cont->vc_cont_df->cd_hae;

	/* Skip the whole scan if nothing was written above the barrier */
	feats = dbtree_feats_get(&cont->vc_cont_df->cd_obj_root);
	has_agg_write = vos_feats_agg_time_get(feats, &agg_write);
	if (has_agg_write && agg_write <= ad->ad_agg_param.ap_filter_epoch)
		goto update_hae;

	/* Set iteration parameters */
	ad->ad_iter_param.ip_hdl = coh;
	ad->ad_iter_param.ip_epr = *epr;
	/*
	 * Iterate in epoch reserve order for SV tree, so that we can know for
	 * sure the first returned recx in SV tree has highest epoch and can't
	 * be aggregated.
	 */
	ad->ad_iter_param.ip_epc_expr = VOS_IT_EPC_RR;
	/* EV tree iterator returns all sorted logical rectangles */
	ad->ad_iter_param.ip_flags = VOS_IT_PUNCHED | VOS_IT_RECX_COVERED;
	ad->ad_iter_param.ip_filter_cb = vos_agg_filter;
	ad->ad_iter_param.ip_filter_arg = &ad->ad_agg_param;

	/* Set aggregation parameters */
	ad->ad_agg_param.ap_umm = &cont->vc_pool->vp_umm;
	ad->ad_agg_param.ap_coh = coh;
	credits_set(&ad->ad_agg_param.ap_credits, true);
	ad->ad_agg_param.ap_discard = 0;
	ad->ad_agg_param.ap_yield_func = yield_func;
	ad->ad_agg_param.ap_yield_arg = yield_arg;
	run_agg = true;
	merge_window_init(&ad->ad_agg_param.ap_window);
	ad->ad_agg_param.ap_flags = flags;

	ad->ad_iter_param.ip_flags |= VOS_IT_FOR_PURGE | VOS_IT_FOR_AGG;
retry:
	rc = vos_iterate(&ad->ad_iter_param, VOS_ITER_OBJ, true, &ad->ad_anchors,
			 vos_aggregate_pre_cb, vos_aggregate_post_cb,
			 &ad->ad_agg_param, NULL);
	if (rc == -DER_BUSY) {
		/** Hit a conflict with obj_discard.   Rather than exiting, let's
		 * yield and try again.
		 */
		if (vam && vam->vam_agg_blocked)
			d_tm_inc_counter(vam->vam_agg_blocked, 1);
		blocks++;
		/** Warn once if it goes over 20 times */
		D_CDEBUG(blocks == 20, DLOG_WARN, DB_EPC,
			 "VOS aggregation hit conflict (nr=%d), retrying...\n", blocks);
		close_merge_window(&ad->ad_agg_param.ap_window, rc);
		vos_aggregate_yield(&ad->ad_agg_param);
		goto retry;
	} else if (rc != 0 || ad->ad_agg_param.ap_nospc_err) {
		close_merge_window(&ad->ad_agg_param.ap_window, rc);
		goto exit;
	} else if (ad->ad_agg_param.ap_csum_err) {
		rc = -DER_CSUM;	/* Inform caller the csum error */
		close_merge_window(&ad->ad_agg_param.ap_window, rc);
		/* HAE needs be updated for csum error case */
	} else if (ad->ad_agg_param.ap_in_progress) {
		/* Don't update HAE when there were in-progress entries. Otherwise,
		 * we will never aggregate anything in those subtrees until there is
		 * a new write.
		 *
		 * NB: We may be able to improve this by tracking the lowest epoch
		 * of such  entries and updating the HAE to that value - 1.
		 */
		goto exit;
	}

update_hae:
	/*
	 * Update HAE, when aggregating for snapshot deletion, the
	 * @epr->epr_hi could be smaller than the HAE
	 */
	if (cont->vc_cont_df->cd_hae < epr->epr_hi)
		cont->vc_cont_df->cd_hae = epr->epr_hi;
exit:
	aggregate_exit(cont, AGG_MODE_AGGREGATE);

	if (run_agg && merge_window_status(&ad->ad_agg_param.ap_window) != MW_CLOSED)
		D_ASSERTF(false, "Merge window resource leaked.\n");

free_agg_data:
	D_FREE(ad);

	if (rc < 0) {
		if (vam && vam->vam_fail_count)
			d_tm_inc_counter(vam->vam_fail_count, 1);
	}

	return rc;
}

/**
 * Discard all updates in the given epoch range. When @oidp is non-NULL, only
 * the specified object is discarded (object discard mode); otherwise the
 * whole container is scanned.
 *
 * \param[in] coh		Container handle
 * \param[in] oidp		Optional object ID to restrict the discard to
 * \param[in] epr		Epoch range to discard
 * \param[in] yield_func	Optional yield callback; non-zero return aborts
 * \param[in] yield_arg		Argument passed to @yield_func
 *
 * \return 0 on success, negative DER error otherwise
 */
int
vos_discard(daos_handle_t coh, daos_unit_oid_t *oidp, daos_epoch_range_t *epr,
	    int (*yield_func)(void *arg), void *yield_arg)
{
	struct vos_container	*cont = vos_hdl2cont(coh);
	struct vos_agg_metrics	*vam = agg_cont2metrics(cont);
	struct agg_data		*data;
	int			 iter_type = VOS_ITER_OBJ;
	int			 agg_mode;
	int			 conflicts = 0;
	int			 rc;

	D_ASSERT(epr != NULL);
	D_ASSERTF(epr->epr_lo <= epr->epr_hi,
		  "epr_lo:"DF_U64", epr_hi:"DF_U64"\n",
		  epr->epr_lo, epr->epr_hi);

	agg_mode = (oidp == NULL) ? AGG_MODE_DISCARD : AGG_MODE_OBJ_DISCARD;

	D_ALLOC_PTR(data);
	if (data == NULL)
		return -DER_NOMEM;

	rc = aggregate_enter(cont, agg_mode, epr);
	if (rc != 0)
		goto exit;

	if (oidp == NULL) {
		data->ad_agg_param.ap_discard_obj = 0;
		D_DEBUG(DB_EPC, "Discard epr "DF_X64"-"DF_X64"\n",
			epr->epr_lo, epr->epr_hi);
	} else {
		D_DEBUG(DB_EPC, "Discard "DF_UOID" epr "DF_X64"-"DF_X64"\n", DP_UOID(*oidp),
			epr->epr_lo, epr->epr_hi);
		/* Object discard starts iteration at the dkey level */
		iter_type = VOS_ITER_DKEY;
		data->ad_iter_param.ip_oid = *oidp;
		data->ad_agg_param.ap_discard_obj = 1;
	}

	/* Set iteration parameters */
	data->ad_iter_param.ip_hdl = coh;
	data->ad_iter_param.ip_epr = *epr;
	if (epr->epr_lo == epr->epr_hi)
		data->ad_iter_param.ip_epc_expr = VOS_IT_EPC_EQ;
	else if (epr->epr_hi != DAOS_EPOCH_MAX)
		data->ad_iter_param.ip_epc_expr = VOS_IT_EPC_RR;
	else
		data->ad_iter_param.ip_epc_expr = VOS_IT_EPC_GE;
	/* EV tree iterator returns all sorted logical rectangles */
	data->ad_iter_param.ip_flags = VOS_IT_PUNCHED | VOS_IT_RECX_COVERED |
				       VOS_IT_FOR_DISCARD;

	/* Set aggregation parameters */
	data->ad_agg_param.ap_umm = &cont->vc_pool->vp_umm;
	data->ad_agg_param.ap_coh = coh;
	data->ad_agg_param.ap_discard = 1;
	data->ad_agg_param.ap_yield_func = yield_func;
	data->ad_agg_param.ap_yield_arg = yield_arg;
	credits_set(&data->ad_agg_param.ap_credits, true);

retry:
	rc = vos_iterate(&data->ad_iter_param, iter_type, true, &data->ad_anchors,
			 vos_aggregate_pre_cb, vos_aggregate_post_cb,
			 &data->ad_agg_param, NULL);
	if (rc == -DER_BUSY) {
		/** Hit an object conflict with EC aggregation.   Rather than exiting, let's
		 * yield and try again.
		 */
		conflicts++;
		/** Warn once if it goes over 20 times */
		D_CDEBUG(conflicts == 20, DLOG_WARN, DB_EPC,
			 "VOS discard hit conflict (nr=%d), retrying...\n", conflicts);
		if (vam && vam->vam_discard_blocked)
			d_tm_inc_counter(vam->vam_discard_blocked, 1);
		vos_aggregate_yield(&data->ad_agg_param);
		goto retry;
	}

	aggregate_exit(cont, agg_mode);

exit:
	D_FREE(data);

	return rc;
}
