/*
 * Copyright (c) 2024 Huawei Technologies Co.,Ltd.
 *
 * openGauss is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *
 * http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 * -------------------------------------------------------------------------
 *
 * htap_flush.cpp
 *
 * IDENTIFICATION
 * src/gausskernel/storage/htap/htap_flush.cpp
 *
 * -------------------------------------------------------------------------
 */

#include <ostream>
#include <istream>
#include <iomanip>
#include <cstring>
#include "c.h"
#include "utils/palloc.h"
#include "htap_logger.h"
#include "htap_debug.h"
#include "htap_flush.h"
#include "htap_infra.h"
#include "htap_internal.h"
#include "access/imcs/imcu.h"
#include "postgres.h"
#include "storage/procarray.h"
#include "postmaster/postmaster.h"
#include "htap_delta_table.h"
#include "access/imcs/imcs_am.h"
#include "access/imcs/imcu_cache_mgr.h"
#include "access/imcs/imcs_vector.h"
#include "session_manager.h"
#include "mot_engine.h"
#include "postmaster/postmaster.h"
#include "executor/executor.h"
#include "utils/postinit.h"
#include "utils/ps_status.h"
#include "catalog/pg_database.h"
#include "catalog/pg_partition_fn.h"
#include "htap_helpers.h"
#include "htap_statistics.h"
#include "htap_flush_worker.h"
#include "../../../include/storage/cu.h"
#include <vector>

using namespace std;
extern int64 ConvertToInt64Data(_in_ const char *inBuf, _in_ const short each_val_size);
using namespace MOT;

namespace htap {
HTAP_DECLARE_LOGGER(HTAPFlush, HTAP)

// Process-wide singleton, created by CreateInstance() and destroyed by
// DestroyInstance().
HTAPFlush *HTAPFlush::m_flush = nullptr;

static void FlushExitHandler(SIGNAL_ARGS);
static void FlushSigHupHandler(SIGNAL_ARGS);

// Log level used for flush statistics output; raised to LL_INFO in Start()
// when the "print flush stats" configuration flag is enabled.
LogLevel statistic_level = htap::LogLevel::LL_DEBUG;

// TableManager::ForEach callback: records the visited table on the HTAPFlush
// instance passed through the opaque `data` pointer. Always returns true so
// the enumeration visits every table.
static bool getTable(MOT::Table *table, void *data)
{
    HTAPFlush *self = static_cast<HTAPFlush *>(data);
    self->addTable(table);
    return true;
}

// Return the process-wide HTAPFlush singleton; nullptr before CreateInstance()
// has succeeded (or after DestroyInstance()).
HTAPFlush *HTAPFlush::GetInstance()
{
    return m_flush;
}

// Destroy the singleton. Idempotent: calling it when no instance exists is a
// no-op.
void HTAPFlush::DestroyInstance()
{
    if (m_flush == nullptr) {
        return;
    }
    delete m_flush;
    m_flush = nullptr;
}

/*
 * Launch the flush kernel thread and its worker threads.
 * Returns HTAP_RC_INVALID_STATE when already running, HTAP_RC_SYSTEM_FAILURE
 * when the kernel thread could not be created, HTAP_RC_OK otherwise. On
 * success the new thread id is stored into threadId.
 */
RC HTAPFlush::Start(ThreadId &threadId)
{
    std::lock_guard<std::mutex> guard(m_lock);
    if (m_isRunning) {
        HTAP_REPORT_ERROR(HTAP_RC_INVALID_STATE, "Internal error",
            "Cannot start flush thread: flush thread is already running");
        return HTAP_RC_INVALID_STATE;
    }
    // launch flush thread as kernel thread
    RC result = HTAP_RC_OK;
    PG_TRY();
    {
        m_flushThread = initialize_util_thread(IMCS_FLUSH);
        if (m_flushThread == 0) {
            HTAP_REPORT_ERROR(HTAP_RC_SYSTEM_FAILURE, "Internal error", "Failed to create kernel thread for flush");
            result = HTAP_RC_SYSTEM_FAILURE;
        } else {
            m_isRunning = true;
            threadId = m_flushThread;
            // Fix: spawn the worker threads only when the main flush thread
            // was created; previously the workers were started unconditionally,
            // even after a thread-creation failure.
            for (int i = 0; i < m_num_workers; i++) {
                m_workers[i]->Start(i);
                m_workers_thread_id[i] = m_workers[i]->GetThreadId();
            }
        }
    }
    PG_CATCH();
    {
        HTAP_LOG_ERROR_STACK("Failed to create flush thread: %s (%d)", Geterrmsg(), geterrcode());
        FlushErrorState();
    }
    PG_END_TRY();

    // Cache flush-related configuration for this run.
    m_confEnableFlush = HTAPInfra::GetInstance()->GetCfg().m_enableFlush;
    HTAP_LOG_DEBUG("enable flush conf is %d", m_confEnableFlush);
    m_conf_enable_print_stats = HTAPInfra::GetInstance()->GetCfg().m_enableFlushPrintStats;
    if (m_conf_enable_print_stats) {
        statistic_level = htap::LogLevel::LL_INFO;
    }

    return result;
}

/*
 * Stop the flush thread and its workers: join all worker threads first, then
 * send SIGQUIT to the flush kernel thread and join it. Returns
 * HTAP_RC_INVALID_STATE when not running, HTAP_RC_SYSTEM_FAILURE when
 * signaling or joining fails, HTAP_RC_OK on success.
 */
RC HTAPFlush::Stop()
{
    std::lock_guard<std::mutex> guard(m_lock);
    if (!m_isRunning) {
        HTAP_REPORT_ERROR(HTAP_RC_INVALID_STATE, "Internal error",
            "Cannot stop flush thread: flush thread is not running");
        return HTAP_RC_INVALID_STATE;
    }

    // signal thread to stop
    RC result = HTAP_RC_OK;
    PG_TRY();
    {
        // Workers are stopped and joined before the main thread (true == wait).
        StopWorkers(true);
        gs_thread_t thr = { m_flushThread };
        int res = gs_signal_send(m_flushThread, SIGQUIT);
        if (res != 0) {
            HTAP_REPORT_SYSTEM_ERROR_CODE(res, HTAP_RC_SYSTEM_FAILURE, "Internal error", "gs_signal_send failed");
            result = HTAP_RC_SYSTEM_FAILURE;
        } else {
            // join thread
            res = gs_thread_join(thr, nullptr);
            if (res != 0) {
                HTAP_REPORT_SYSTEM_ERROR_CODE(res, HTAP_RC_SYSTEM_FAILURE, "Internal error", "gs_thread_join failed");
                result = HTAP_RC_SYSTEM_FAILURE;
            } else {
                m_isRunning = false;
                m_flushThread = 0;
            }
        }
    }
    PG_CATCH();
    {
        HTAP_LOG_ERROR_STACK("Failed to stop flush thread: %s (%d)", Geterrmsg(), geterrcode());
        FlushErrorState();
    }
    PG_END_TRY();

    return result;
}

/*
 * Ask every worker thread to stop. When shouldWait is true (the DB shutdown
 * path) the workers are joined via pthread_join; otherwise we busy-poll
 * IsRunning() until every worker reports itself stopped.
 */
void HTAPFlush::StopWorkers(bool shouldWait /* = false */)
{
    for (int i = 0; i < m_num_workers; i++) {
        HTAP_LOG_DEBUG("Sending stop to thread %d  (%lu)", i, m_workers_thread_id[i]);
        m_workers[i]->Stop();
    }

    if (shouldWait) {
        // this section is called during DB stop procedure
        // we have to wait for worker threads cause memory management will be destroyed
        // any HTAP threads that do not exit may crash on memory de-allocation
        for (int i = 0; i < m_num_workers; i++) {
            (void)pthread_join(m_workers_thread_id[i], nullptr);
            m_workers_running[i] = false;
            HTAP_LOG_INFO("Joined to thread %d  (%lu) ", i, m_workers_thread_id[i]);
            m_workers_thread_id[i] = 0;
        }
        return;
    }
    // Poll until all workers are down.
    // NOTE(review): `count` is reset each pass, so already-stopped workers are
    // re-counted (and their thread id re-zeroed) every iteration; the loop only
    // exits once all m_num_workers are stopped in the same pass — confirm this
    // is intended rather than tracking each worker once.
    int count = 0;
    while (count != m_num_workers) {
        count = 0;
        for (int i = 0; i < m_num_workers; i++) {
            if (!m_workers[i]->IsRunning()) {
                m_workers_running[i] = false;
                HTAP_LOG_DEBUG("Joined to thread %d  (%lu) ", i, m_workers_thread_id[i]);
                m_workers_thread_id[i] = 0;
                count++;
            }
        }
        pthread_yield();
    }
}

// Thread-safe query of the flush thread's running state.
bool HTAPFlush::IsRunning()
{
    std::lock_guard<std::mutex> guard(m_lock);
    return m_isRunning;
}

// Thread-safe accessor for the flush kernel thread id (0 when not running).
ThreadId HTAPFlush::GetThreadId()
{
    std::lock_guard<std::mutex> guard(m_lock);
    return m_flushThread;
}

void HTAPFlush::RunWorker()
{
    int i;

    ThreadId pid = gs_thread_self();
    for (i = 0; i < m_num_workers; i++) {
        if (m_workers[i] && m_workers[i]->GetThreadId() == pid) {
            m_workers_running[i] = true;
            m_workers[i]->RunFlushWorker();
            break;
        }
    }
}

/*
 * Main loop of the flush kernel thread. Initializes this thread as a
 * postmaster sub-process, then repeatedly waits on the proc latch and, when
 * flushing is enabled, runs a flush cycle inside its own transaction and a
 * dedicated memory context. The loop exits when signaled to stop or when the
 * first database known to the table manager changes (in which case the
 * workers are stopped as well).
 */
void HTAPFlush::ExecFlush()
{
    uint64_t runCount = 0;
    uint64_t prevGCelems = 0;
    uint64_t prevCsn = 0;
    uint64_t currCsn = 0;
    bool changeDB = false;
    bool stopDueToDbCnange = false;
    HTAP_LOG_INFO("Flush thread started");

    // run initialization stuff required as kernel thread
    InitFlushThread();
    if (t_thrd.htap_cxt.flush_need_exit) {
        HTAP_LOG_DEBUG("HTAP flush thread ordered to stop after init");
        return;
    }

    if (!HTAPInfra::HTAPTxnEnsure(false)) {
        return;
    }

    MOT::TxnManager *txn = HTAPGetSafeTxn();

    // flush main loop
    uint64_t flushTimeoutMillis = HTAPInfra::GetInstance()->GetCfg().m_flushTimeoutMillis;
    while (!SignaledToStop(flushTimeoutMillis)) {
        // handle configuration change: on SIGHUP reload the config file and
        // refresh the wait timeout
        if (t_thrd.htap_cxt.flush_got_sighup) {
            t_thrd.htap_cxt.flush_got_sighup = false;
            ProcessConfigFile(PGC_SIGHUP);
            flushTimeoutMillis = HTAPInfra::GetInstance()->GetCfg().m_flushTimeoutMillis;
        }

        // An empty table manager arms the "database may have changed" check;
        // once tables reappear we compare the first DB name against ours and
        // exit the loop when it differs.
        if (MOT::GetTableManager()->IsEmpty()) {
            changeDB = true;
            continue;
        } else if (changeDB) {
            const char *dbname = MOT::GetTableManager()->GetFirstDbName();
            if (strcasecmp(dbname, m_dbname.c_str())) {
                HTAP_LOG_INFO("Flush thread changing DB from [%s] to [%s]", m_dbname.c_str(), dbname);
                m_dbname.assign(dbname);
                stopDueToDbCnange = true;
                break;
            }
            changeDB = false;
        }

        // Every cycle runs in its own transaction with a fresh snapshot.
        StartTransactionCommand();
        (void)GetTransactionSnapshot();

        // Init memory and delta extractor
        m_flush_ctx = HTAPInfra::GetInstance()->CreateMemoryContext(
            HTAPInfra::GetInstance()->GetThreadMemoryContext(), "flushCtx");
        MemoryContext saved_cycle_context = MemoryContextSwitchTo(m_flush_ctx);

        // execute flush cycle
        if (m_confEnableFlush) {
            runCount++;
            m_prevFlushdCSN = m_lastFlushdCSN;
            uint64_t currFlushCSN = calculate_local_csn_min();

            // Only flush when the CSN horizon actually advanced.
            if (m_prevFlushdCSN != currFlushCSN) {
                (void)Flush(currFlushCSN);
                m_lastFlushdCSN = currFlushCSN;
            }
            // Every 100 cycles: if GC appears idle (same limbo element count,
            // same GC epoch, epoch equals the GC minimum), bump the local CSN
            // — presumably to unstick GC when the system is at rest; confirm
            // against the CSN manager's contract.
            if ((runCount % 100) == 0) {
                GcEpochType gcMinEpoch;
                uint64_t gcElems = txn->GetGcSession()->GetAllGcLimboInuseElements(gcMinEpoch);
                currCsn = MOT::GetCSNManager().GetGcEpoch();
                if (prevGCelems != 0 && prevGCelems == gcElems && prevCsn == currCsn && currCsn == gcMinEpoch) {
                    uint64_t nextCsn = getLocalNextCSN();
                    setCommitCsn(nextCsn);
                    tableList.clear();
                    MOT::GetTableManager()->ForEach(getTable, this);
                    // NOTE(review): tSize is accumulated per table but never
                    // used — this loop may exist only to touch each table's
                    // HTAP object counters under its lock; confirm intent.
                    for (uint64_t tableId : tableList) {
                        uint64_t tSize = 0;
                        MOT::Table *table = MOT::GetTableManager()->GetTableSafeByExId(tableId);
                        if (table != nullptr) {
                            tSize += table->GetTableHtapObjCount();
                            tSize += table->GetPrimaryIndex()->GetIndexHtapObjCount();
                            table->Unlock();
                        }
                    }
                    tableList.clear();
                    HTAP_LOG_INFO(
                        "Flush: The system is in rest gcEpoch [%lu], incremented transaction csn [%lu], to clean.",
                        currCsn, nextCsn + 1);
                } else {
                    prevGCelems = gcElems;
                    prevCsn = currCsn;
                }
            }
        }

        // restore memory_context
        MemoryContextSwitchTo(saved_cycle_context);
        HTAPInfra::GetInstance()->DestroyMemoryContext(m_flush_ctx);
        CommitTransactionCommand();
    }

    // On DB change the workers are stopped (non-waiting) before the thread
    // state is reset.
    if (stopDueToDbCnange) {
        StopWorkers();
    }
    m_isRunning = false;
    m_flushThread = 0;
    m_prevFlushdCSN = 0;
    m_lastFlushdCSN = 0;

    // cleanup
    HTAP_LOG_INFO("Flush thread exiting");
}

/*
 * Allocate and initialize the process-wide HTAPFlush singleton.
 * Returns true on success; false when an instance already exists, when the
 * allocation fails, or when initialization fails (in which case the
 * partially-built instance is destroyed).
 */
bool HTAPFlush::CreateInstance()
{
    if (m_flush != nullptr) {
        HTAP_LOG_ERROR("HTAPFlush already created.");
        return false;
    }
    m_flush = new (std::nothrow) HTAPFlush();

    if (m_flush == nullptr) {
        HTAP_LOG_ERROR("HTAPFlush allocation failed.");
        return false;
    }

    if (!m_flush->InitFlush()) {
        delete m_flush;
        m_flush = nullptr;
        HTAP_LOG_ERROR("HTAPFlush failed to init.");
        // Fix: report the init failure to the caller — previously this path
        // fell through and returned true despite m_flush being null.
        return false;
    }
    return true;
}

// Construct an idle flush manager: no thread, no workers, default database,
// zeroed CSN watermarks and statistics.
HTAPFlush::HTAPFlush() : m_isRunning(false), m_num_workers(0)
{
    m_prevFlushdCSN = 0;
    m_lastFlushdCSN = 0;
    m_dbname.assign(DEFAULT_DATABASE);
    // Fix: zero the whole statistics array via sizeof(m_tbl_stats) instead of
    // the hard-coded "* 1000" element count, so the memset always matches the
    // array's real size (MAX_TABLES_IN_STATS is used everywhere else).
    memset(m_tbl_stats, 0, sizeof(m_tbl_stats));
}

// Destroy every allocated worker object and reset the worker count.
HTAPFlush::~HTAPFlush()
{
    for (int idx = 0; idx < m_num_workers; idx++) {
        HTAPFlushWorker *worker = m_workers[idx];
        if (worker == nullptr) {
            continue;
        }
        delete worker;
        m_workers[idx] = nullptr;
    }
    m_num_workers = 0;
}

// Allocate the pool of flush worker objects. The configured worker count is
// clamped to MAX_WORKERS_THREAD. Returns false when any allocation fails;
// workers allocated before the failure are released by the destructor.
bool HTAPFlush::InitFlush()
{
    HTAP_LOG_INFO("InitFlush: Num conf workers %d", g_instance.attr.attr_storage.max_flush_workers);
    int configured = g_instance.attr.attr_storage.max_flush_workers;
    m_num_workers = (configured < MAX_WORKERS_THREAD) ? configured : MAX_WORKERS_THREAD;

    memset(m_workers_thread_id, 0, sizeof(m_workers_thread_id));
    memset(m_workers, 0, sizeof(m_workers));
    memset(m_workers_running, 0, sizeof(m_workers_running));

    for (int idx = 0; idx < m_num_workers; idx++) {
        m_workers[idx] = new (std::nothrow) HTAPFlushWorker();
        if (m_workers[idx] == nullptr) {
            HTAP_LOG_ERROR("HTAPFlush worker allocation failed.");
            return false;
        }
    }
    return true;
}

/*
 * One-time initialization of the flush kernel thread as a postmaster
 * sub-process: set thread role/pid/start time, install signal handlers
 * (SIGQUIT -> exit request, SIGHUP -> config reload; everything else
 * ignored), run BaseInit, unblock signals and connect to the target database.
 * The order of these steps follows the standard background-worker startup
 * sequence and should not be rearranged.
 */
void HTAPFlush::InitFlushThread()
{
    // we are a postmaster sub-process now
    IsUnderPostmaster = true;
    t_thrd.role = IMCS_FLUSH;

    /* reset t_thrd.proc_cxt.MyProcPid */
    t_thrd.proc_cxt.MyProcPid = gs_thread_self();

    /* record Start Time for logging */
    t_thrd.proc_cxt.MyStartTime = time(NULL);
    t_thrd.proc_cxt.MyProgName = "HTAPFlush";

    /* Identify myself via ps */
    init_ps_display("flush delta tables in back-end process", "", "", "");

    SetProcessingMode(InitProcessing);

    /*
     * Only the signal of reloading the configuration file and exiting is processed here
     */
    (void)gspqsignal(SIGINT, SIG_IGN);
    (void)gspqsignal(SIGTERM, SIG_IGN);
    (void)gspqsignal(SIGQUIT, FlushExitHandler);
    (void)gspqsignal(SIGALRM, SIG_IGN);

    (void)gspqsignal(SIGPIPE, SIG_IGN);
    (void)gspqsignal(SIGUSR1, SIG_IGN);
    (void)gspqsignal(SIGUSR2, SIG_IGN);
    (void)gspqsignal(SIGFPE, SIG_IGN);
    (void)gspqsignal(SIGHUP, FlushSigHupHandler);
    (void)gspqsignal(SIGURG, SIG_IGN);
    /* Early initialization */
    BaseInit();

    /* Unblock signals (they were blocked when the postmaster forked us) */
    gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL);
    (void)gs_signal_unblock_sigusr2();

    // Connect to the current target database as the flush user.
    t_thrd.proc_cxt.PostInit->SetDatabaseAndUser(m_dbname.c_str(), InvalidOid, m_user_name);
    t_thrd.proc_cxt.PostInit->InitIMCSFlush();
}

// Sleep for up to timeoutMillis on the proc latch. Returns true when the
// thread should terminate: either the postmaster died while we waited, or a
// signal handler flagged an exit request.
bool HTAPFlush::SignaledToStop(uint64_t timeoutMillis)
{
    // wait for the given timeout for one of 2 events: latch was set, or shutdown was signaled
    int waitRes = WaitLatch(&t_thrd.proc->procLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_TIMEOUT, timeoutMillis);
    if (waitRes & WL_POSTMASTER_DEATH) {
        HTAP_LOG_DEBUG("Flush detected postmaster death");
        return true;
    }
    if (t_thrd.htap_cxt.flush_need_exit) {
        HTAP_LOG_DEBUG("Flush exit signal caught");
        return true;
    }
    return false;
}

// SIGQUIT handler: request flush-thread exit and wake the thread via its
// latch. errno is saved/restored per signal-handler convention.
void FlushExitHandler(SIGNAL_ARGS)
{
    int save_errno = errno;

    t_thrd.htap_cxt.flush_need_exit = true;

    if (t_thrd.proc) {
        SetLatch(&t_thrd.proc->procLatch);
    }

    errno = save_errno;
}

// SIGHUP handler: request a configuration reload (processed in the main loop)
// and wake the thread via its latch. errno is saved/restored.
void FlushSigHupHandler(SIGNAL_ARGS)
{
    int save_errno = errno;

    t_thrd.htap_cxt.flush_got_sighup = true;

    if (t_thrd.proc) {
        SetLatch(&t_thrd.proc->procLatch);
    }

    errno = save_errno;
}

// Record a table's external id in the per-cycle table list (used by the
// getTable ForEach callback).
void HTAPFlush::addTable(MOT::Table *table)
{
    tableList.push_back(table->GetTableExId());
}

// Emit the accumulated per-table flush statistics at the configured
// statistics log level, skipping slots that were never populated
// (tbl_id == 0).
void HTAPFlush::flushPrintTablesStats()
{
    for (int slot = 0; slot < MAX_TABLES_IN_STATS; slot++) {
        const auto &st = m_tbl_stats[slot];
        if (st.tbl_id == 0) {
            continue;
        }
        HTAP_LOG(statistic_level,
            "Table cycle:\nRelId %d num rows %d new rgs %d  insert time (mili) %d  rg in deletes %d  delete time "
            "(mili) %d  total time %d num deleted %d",
            st.tbl_id, st.num_rows, st.num_new_rg, st.insert_time,
            st.num_delete_rgs, st.delete_time, st.total_time,
            st.num_deleted);
    }
}

/*
 * Execute one flush cycle up to the given CSN: lock every known table, build
 * a flush context per table, enqueue delete/insert tasks for the workers,
 * wait until all tasks finish, then record statistics, update each table's
 * last-flushed CSN and release the locks.
 * Returns false (after releasing any locks already taken) when some table
 * could not be locked; true once the cycle completed.
 */
bool HTAPFlush::Flush(CommitSeqNo csn)
{
    // clean tables list
    tableList.clear();
    MOT::GetTableManager()->ForEach(getTable, this);
    int successfully_locked_num = 0;
    // Fix: table ids are 64-bit external ids; the previous `int` loop variable
    // silently narrowed them. Iterate with uint64_t (matching the other loops
    // over tableList) and log with %lu instead of %d.
    for (uint64_t tableId : tableList) {
        if (ConditionalLockRelationOid(tableId, AccessShareLock)) {
            successfully_locked_num++;
            HTAP_LOG_TRACE("HTAPFlush: successfully locked table %lu.", tableId);
        } else {
            HTAP_LOG_DEBUG("HTAPFlush: couldn't lock table %lu. cancelling flush cycle", tableId);
            // Roll back: unlock exactly the tables locked so far (they are the
            // first successfully_locked_num entries of tableList).
            for (uint64_t unlockTableId : tableList) {
                if (successfully_locked_num > 0) {
                    UnlockRelationOid(unlockTableId, AccessShareLock);
                    successfully_locked_num--;
                } else
                    break;
            }
            return false;
        }
    }
    list<HTAPFlushCtx *> ctxList;
    for (uint64_t tableId : tableList) {
        if (!OidIsValid(tableId)) {
            HTAP_LOG_ERROR("HTAPFlush: FlushDeltaTableToIMCU request for non valid table %lu. Skipping", tableId);
            continue;
        }

        HTAPFlushCtx *ctx = New(m_flush_ctx)HTAPFlushCtx(tableId, csn);
        PG_TRY();
        {
            ctx->GetDeltaTable(NoLock);
            HTAP_LOG_DEBUG("HTAPFlush: Locking table %d", ctx->m_rel_oid);
            if (ctx->m_table != NULL) {
                // setup() resolves the IMCS descriptor and decides whether
                // this table needs flushing for the given CSN.
                if (ctx->setup()) {
                    HTAP_LOG_DEBUG("HTAPFlush: Merging table %d, csn %d", ctx->m_rel_oid, ctx->csn);
                    ctx->m_start_time = GetSysClock();
                    m_flush->FlushDeltaTableToIMCU(ctx);
                }
            } else {
                HTAP_LOG_ERROR("HTAPFlush: table %d not found in table manager. Skipping", ctx->m_rel_oid);
            }

            HTAP_LOG_DEBUG("HTAPFlush: Adding table ctx for table %d to list", ctx->m_rel_oid);
            ctxList.push_back(ctx);
        }
        PG_CATCH();
        {
            // On error release everything acquired for this table and move on.
            HTAP_LOG_ERROR("HTAPFlush: FlushDeltaTableToIMCU request failed to open table %d. Skipping", ctx->m_rel_oid);
            FlushErrorState();
            ctx->ReleaseDeltaTable(NoLock);
            UnlockRelationOid(tableId, AccessShareLock);
            DELETE_EX(ctx);
        }
        PG_END_TRY();
    }

    // wait while all tasks are finished
    while (!m_task_queue.IsFinished()) {
        pg_usleep(1000L);
    }

    // Reap contexts whose tasks have all completed (ref count dropped to 0);
    // keep polling until the list drains.
    list<HTAPFlushCtx *>::iterator it = ctxList.begin();
    while (it != ctxList.end()) {
        HTAPFlushCtx *ctx = *it;
        HTAP_LOG_TRACE("Checking table %d, ref_count %d", ctx->m_rel_oid, ctx->getRefCount());
        if (ctx->getRefCount() == 0) {
            // Statistics slot is the oid modulo 1000 (see MAX_TABLES_IN_STATS).
            int stat_tbl_ind = ctx->m_rel_oid - ((ctx->m_rel_oid / 1000) * 1000);
            int delete_relevant_rgs =
                ctx->m_num_rgs - ctx->m_num_filtered_rgs_1 - ctx->m_num_filtered_rgs_2;
            ctx->m_end_time = GetSysClock();
            uint64_t total_time =
                MOT::CpuCyclesLevelTime::CyclesToMicroseconds(ctx->m_end_time - ctx->m_start_time);
            HTAP_LOG(statistic_level,
                "Table task: rel_id %d \n Delete: num rgs %d  filtered_1 %d  filtered_2 %d Relevant groups %d  delete "
                "time (mili) %d \n Insert: newRG %d time (mili) %d Total %ld",
                ctx->m_rel_oid, ctx->m_num_rgs, ctx->m_num_filtered_rgs_1,
                ctx->m_num_filtered_rgs_2, delete_relevant_rgs, ctx->m_delete_time / 1000,
                ctx->m_new_rgs, ctx->m_insert_time / 1000, total_time / 1000);

            if (stat_tbl_ind >= 0 && (stat_tbl_ind < MAX_TABLES_IN_STATS)) {
                m_tbl_stats[stat_tbl_ind].tbl_id = stat_tbl_ind;
                m_tbl_stats[stat_tbl_ind].num_new_rg += ctx->m_new_rgs;
                m_tbl_stats[stat_tbl_ind].num_rows += ctx->m_num_rows;
                m_tbl_stats[stat_tbl_ind].insert_time += ctx->m_insert_time / 1000;
                m_tbl_stats[stat_tbl_ind].delete_time += ctx->m_delete_time / 1000;
                m_tbl_stats[stat_tbl_ind].total_time += total_time / 1000;
                m_tbl_stats[stat_tbl_ind].num_delete_rgs += delete_relevant_rgs;
                m_tbl_stats[stat_tbl_ind].num_deleted += ctx->m_deleted;
            } else {
                HTAP_LOG_ERROR("Coudnt add stat for %d", stat_tbl_ind);
            }
            it = ctxList.erase(it);
            // Advance the table's flush watermark so the next cycle skips it
            // unless new data arrived.
            if (ctx->m_imcs_desc) {
                uint64_t epoch = MOT::GetCSNManager().GetGcEpoch();
                HTAP_LOG_DEBUG("FlushDeltaTableToIMCU %d visible: %u last: %u epoch: %u", ctx->m_rel_oid, csn,
                    ctx->m_imcs_desc->last_flushed_csn, epoch);
                ctx->m_imcs_desc->last_flushed_csn = csn;
            }
            HTAP_LOG_TRACE("HTAPFlush: Unlocking table %d", ctx->m_rel_oid);
            ctx->ReleaseDeltaTable(NoLock);
            UnlockRelationOid(ctx->m_rel_oid, AccessShareLock);
            DELETE_EX(ctx);
        } else {
            it++;
        }
        pthread_yield();
    }
    flushPrintTablesStats();
    m_task_queue.CleanFinished();
    pthread_yield();
    return true;
}

/*
 * Build a TupleDesc for the IMCS view of a relation: one attribute per IMCS
 * column (copied from the row-store tuple descriptor) plus a trailing virtual
 * CTID column. The caller owns the returned descriptor (freed via
 * FreeTupleDesc in HTAPFlushCtx::Destroy).
 */
TupleDesc HTAPFlush::initImcsTupleDesc(Relation rel, MOT::Table *table)
{
    TupleDesc rstore_tuple_desc = RelationGetDescr(rel);
    uint32_t num_imcs_cols = table->GetNumIMCSColumns();
    // Init tuple desc for imcs, while the last column is CTID column
    TupleDesc imcs_tuple_desc = CreateTemplateTupleDesc(num_imcs_cols + 1, false);
    /* copy tupledesc base info */
    imcs_tuple_desc->tdtypeid = rel->rd_att->tdtypeid;
    imcs_tuple_desc->tdtypmod = rel->rd_att->tdtypmod;
    imcs_tuple_desc->tdisredistable = rel->rd_att->tdisredistable;

    int ctid_col_idx = num_imcs_cols;

    // Init CTID column desc
    TupleDescInitEntry(imcs_tuple_desc, ctid_col_idx + 1, "ctid", TIDOID, -1, 0);
    imcs_tuple_desc->attrs[ctid_col_idx].attnum = VirtualCtidColID;
    imcs_tuple_desc->attrs[ctid_col_idx].attlen = sizeof(CTID);
    imcs_tuple_desc->attrs[ctid_col_idx].attnotnull = true;
    imcs_tuple_desc->attrs[ctid_col_idx].atthasdef = false;

    // Copy appropriate desc from rstore tuple_desc (only for imcs columns)
    for (uint32_t col_idx = 0; col_idx < num_imcs_cols; ++col_idx) {
        int32 col_idx_in_rstore_tuple_desc =
            table->GetIMCSColumnKey(col_idx) - 1; // While rstore has reserved first column (col_idx_in_rstore == 0)
        errno_t rc = memcpy_s(&imcs_tuple_desc->attrs[col_idx], ATTRIBUTE_FIXED_PART_SIZE,
            &rstore_tuple_desc->attrs[col_idx_in_rstore_tuple_desc], ATTRIBUTE_FIXED_PART_SIZE);
        securec_check(rc, "\0", "\0");
        // IMCS columns are nullable and carry no defaults regardless of the
        // row-store attribute flags.
        imcs_tuple_desc->attrs[col_idx].attnotnull = false;
        imcs_tuple_desc->attrs[col_idx].atthasdef = false;
    }
    return imcs_tuple_desc;
}

/*
 * Return true when the CU's [min,max] CTID range overlaps the delta table's
 * [minCtid, maxCtid] range, i.e. the CU may contain rows affected by the
 * delta; false when the ranges are disjoint and the CU can be skipped.
 */
bool HTAPFlush::checkDeltaInCURange(IMCUDesc *cu_desc, HTAPFlushCtx *ctx)
{
    HTAP_LOG_DEBUG("HTAPFlush: CTID cuid: %d", cu_desc->cu_id);
    CTID *cu_max_val = (CTID *)&(cu_desc->cu_max);
    uint64_t max_cu_ctid = HTAPAdaptor::GetCtid(&cu_max_val->item);
    CTID *cu_min_val = (CTID *)&(cu_desc->cu_min);
    uint64_t min_cu_ctid = HTAPAdaptor::GetCtid(&cu_min_val->item);
    if (min_cu_ctid > ctx->m_htap_delta->maxCtid() ||
        max_cu_ctid < ctx->m_htap_delta->minCtid()) {
        // NOTE(review): %lld with uint64_t arguments is a signed/unsigned
        // format mismatch (same width on LP64, but %lu would be correct).
        HTAP_LOG_DEBUG(
            "min_cu_ctid %lld > max delta ctid %lld, or max_cu_ctid %lld < min delta ctid %lld. Skipping the CU",
            min_cu_ctid, ctx->m_htap_delta->maxCtid(), max_cu_ctid, ctx->m_htap_delta->minCtid());
        return false;
    }
    return true;
}

/*
 * Walk every rowgroup of the table's IMCS and enqueue a DELTA_DELETE worker
 * task for each rowgroup whose CTID range may intersect the delta data, so
 * workers can mark deleted/updated rows in the IMCU delete bitmaps. Rowgroups
 * that are fully deleted or filtered out by the min/max check are skipped.
 */
void HTAPFlush::MarkDeleteToIMCU(HTAPFlushCtx *ctx)
{
    IMCUDesc *cu_desc = NULL;
    ctx->m_tuple_desc = initImcsTupleDesc(ctx->m_rel, ctx->m_table);

    ctx->m_num_rgs = ctx->m_imcs_desc->rowgroup_num;

    for (uint rg_idx = 0; rg_idx < ctx->m_imcs_desc->rowgroup_num; ++rg_idx) {
        // Search for the CU with this tag, and check its min/max against Delta table's min/max
        cu_desc = IMCU_CACHE->GetImcuDescByHash(ctx->m_imcs_desc, rg_idx, IMCS_CTID_IDX);
        if (cu_desc == NULL) {
            HTAP_LOG_ERROR("Can't find CTID CU for rg %d in table %d. Exitting", rg_idx, ctx->m_rel_oid);
            DELETE_EX(ctx->m_htap_delta);
            return;
        }

        HTAPDeleteMap *delMap =
            (ctx->m_imcs_desc->deletedList) ? ctx->m_imcs_desc->deletedList[rg_idx] : NULL;

        // Nothing to do when every row in this rowgroup is already deleted.
        if (cu_desc->row_count == (int)(delMap ? delMap->bitsSigned() : 0)) {
            HTAP_LOG_DEBUG("All rows are deleted in table %d rg %d. Skipping handleDeletes", ctx->m_rel_oid,
                           rg_idx);
            continue;
        }

        // We want to go over each ctid in this RG, if it is deleted or changed - we will update the deleted bitmap.
        // In addition, the updated and inserted row we add to a new RG.
        if (ctx->m_htap_delta->checkIMCU(cu_desc)) {
            ctx->updateRefCount(1);
            FlushTask *task = New(m_flush_ctx)FlushTask(DELTA_DELETE, ctx, rg_idx, -1, 0);
            m_task_queue.Add(task);
            HTAP_LOG_DEBUG("Adding DELTA_DELETE to Queue, table %d, table_rel %p, ref count %d, ctx %p, delta "
                           "extractor %p, rg %d, queue size %d",
                           ctx->m_rel_oid, ctx->m_table, ctx->getRefCount(), ctx,
                           ctx->m_htap_delta, rg_idx, m_task_queue.m_queue.size());
        } else {
            // Fix: log the relation oid — the old code passed the
            // ctx->m_imcs_desc pointer to the %d specifier.
            HTAP_LOG_DEBUG("Table %d rg %d skipped by min_max check", ctx->m_rel_oid, rg_idx);
            ctx->m_num_filtered_rgs_1++;
        }
    }
}

/*
 * Enqueue insert tasks for delta rows: prepare the IMCS column-mapping array
 * and the result-relation info once, then scan the delta by CTID and emit a
 * worker task per batch of up to IMCS_MAX_ROWS_SIZE rows.
 */
void HTAPFlush::AddDeltaDataToIMCU(HTAPFlushCtx *ctx)
{
    // NOTE(review): `rowgroup` is declared but never filled, so the
    // DELTA_COMPLETE branch below is currently dead — confirm whether partial
    // rowgroup completion was meant to be wired in here.
    vector<pair<int, int>> rowgroup;

    // Prepare table objects once for all inserts
    ctx->m_imcs_to_values = (int2 *)palloc0(sizeof(int2) * ctx->m_table->GetNumIMCSColumns());
    for (int col_idx = 0; col_idx < ctx->m_tuple_desc->natts - 1; ++col_idx) {
        ctx->m_imcs_to_values[col_idx] = col_idx + 1;
    }

    // Prepare IMCSInsert for inserting to a new RG
    ctx->m_result_rel_info = makeNode(ResultRelInfo);
    InitResultRelInfo(ctx->m_result_rel_info, ctx->m_rel, 1, 0);


    // ctx->m_new_rgs = (newRecords - 1) / IMCS_MAX_ROWS_SIZE + 1;
    uint64_t currentKey = 0;
    uint32_t num_rows_to_advance = 0; // first scan ctx shouldn't advance
    HTAPDelta::ScanContext scan_ctx = HTAPDelta::ScanContext();
    while (true) {
        uint32_t rowgroup_rows = IMCS_MAX_ROWS_SIZE;
        // rg_idx wraps to UINT32_MAX here — presumably a "no existing
        // rowgroup, append to a new one" sentinel; confirm against FlushTask.
        uint32_t rg_idx = -1;
        TaskType task_type = DELTA_INSERT;
        if (!rowgroup.empty()) {
            rg_idx = rowgroup.back().first;
            rowgroup_rows = rowgroup.back().second;
            rowgroup.pop_back();
            task_type = DELTA_COMPLETE;
        }
        // A key of 0 marks the end of the delta scan.
        currentKey = ctx->m_htap_delta->getNextCtid(scan_ctx, num_rows_to_advance);
        if (currentKey == 0)
            break;

        ctx->updateRefCount(1);
        FlushTask *task =
            New(m_flush_ctx)FlushTask(task_type, ctx, rg_idx, rowgroup_rows, currentKey);
        m_task_queue.Add(task); // addInsertsToNewRGs(m_imcs_desc->rowgroup_num, scanCtx);
        HTAP_LOG_DEBUG("Adding DELTA_INSERT to Queue, table %d, table_rel %p, ref count %d, ctx %p, delta "
                       "extractor %p, rg %d, queue size %d",
                       ctx->m_rel_oid, ctx->m_table, ctx->getRefCount(), ctx, ctx->m_htap_delta,
                       ctx->m_imcs_desc->rowgroup_num, m_task_queue.m_queue.size());
        num_rows_to_advance = rowgroup_rows;
    }
}

/*
 * Flush one table's delta into its IMCUs: build a CTID index over the delta
 * rows visible up to the context's CSN, then schedule delete marking and
 * insert tasks. Bails out (releasing the delta extractor) when the index
 * build fails or the delta is empty.
 */
void HTAPFlush::FlushDeltaTableToIMCU(HTAPFlushCtx *ctx)
{
    ctx->m_htap_delta = New(m_flush_ctx)HTAPDelta(true, false);

    // NOTE(review): New() on a memory context typically reports failure via
    // ereport/exception rather than returning nullptr — confirm this null
    // check is reachable.
    if (ctx->m_htap_delta == nullptr) {
        HTAP_LOG_ERROR("HTAPFlush: failed to allocate HTAPDelta");
        return;
    }

    if (!ctx->m_htap_delta->BuildIndexOnDeltaTable(ctx->m_rel_oid, ctx->csn)) {
        HTAP_LOG_ERROR("HTAPDelta::build failed");
        DELETE_EX(ctx->m_htap_delta);
        return;
    }
    HTAP_LOG_DEBUG("HTAPFlush: table %d, csn %d", ctx->m_rel_oid, ctx->csn);
    if (!ctx->m_htap_delta->hasData()) {
        HTAP_LOG_TRACE("HTAPFlush: delta contains no data. Skipping");
        DELETE_EX(ctx->m_htap_delta);
        return;
    }
    
    MarkDeleteToIMCU(ctx);
    AddDeltaDataToIMCU(ctx);
}

// Release everything this flush context owns: the delta extractor, the IMCS
// tuple descriptor, the result-relation info (closing its open indices
// first) and the column-mapping array.
void HTAPFlushCtx::Destroy()
{
    if (m_htap_delta != NULL) {
        DELETE_EX(m_htap_delta);
    }
    if (m_tuple_desc != NULL) {
        FreeTupleDesc(m_tuple_desc);
    }
    if (m_result_rel_info != NULL) {
        ExecCloseIndices(m_result_rel_info);
        pfree(m_result_rel_info);
    }
    if (m_imcs_to_values != NULL) {
        pfree(m_imcs_to_values);
    }
}

/*
 * Open the relation for m_rel_oid (partition-aware: for a partition or
 * sub-partition the top-level parent heap is opened and a dummy relation is
 * derived for the partition) and pin the corresponding MOT table. The
 * matching teardown is ReleaseDeltaTable().
 */
void HTAPFlushCtx::GetDeltaTable(LOCKMODE lockmode)
{
    Oid parent = partid_get_parentid(m_rel_oid);
    if (parent != 0) { // partition
        Oid grandparent = partid_get_parentid(parent);
        if (grandparent != 0) { // sub-partition
            parent = grandparent;
        }
        parent_rel = heap_open(parent, lockmode);
        part = partitionOpen(parent_rel, m_rel_oid, lockmode); // Should be EXCLUSIVE_LOCK?
        m_rel = partitionGetRelation(parent_rel, part);
    } else {
        m_rel = heap_open(m_rel_oid, lockmode);
    }
    // Pins the MOT table (caller must Unlock via ReleaseDeltaTable).
    m_table = MOT::GetTableManager()->GetTableSafeByExId(m_rel_oid);
}

// Resolve the IMCS descriptor for this relation and decide whether a flush is
// needed: true only when the descriptor exists and its last flushed CSN is
// behind this cycle's CSN.
bool HTAPFlushCtx::setup()
{
    m_imcs_desc = IMCU_CACHE->GetImcsDesc(m_rel_oid);
    if (m_imcs_desc == NULL) {
        HTAP_LOG_ERROR("HTAPFlush: MoveDeltaDataToCU: can't find imcs desc table %d. Creating new IMCS Desc", m_rel_oid);
        return false;
    }

    if (m_imcs_desc->last_flushed_csn >= csn) {
        // Already flushed up to (or beyond) this CSN — nothing to do.
        HTAP_LOG_DEBUG("HTAPFlush: MoveDeltaDataToCU for table %d: last flushed CSN %lld greater or equal to the Flush CSN "
                       "%lld. Skipping",
                       m_rel_oid, m_imcs_desc->last_flushed_csn, csn);
        return false;
    }
    return true;
}

// Close the relation handles acquired by GetDeltaTable() and drop the MOT
// table pin. Partitioned and plain relations are released through different
// paths; the teardown order mirrors the open order (dummy relation first,
// then partition, then parent heap).
void HTAPFlushCtx::ReleaseDeltaTable(LOCKMODE lockmode)
{
    if (parent_rel != NULL) {
        if (m_rel != NULL) {
            releaseDummyRelation(&m_rel);
        }
        partitionClose(parent_rel, part, lockmode);
        heap_close(parent_rel, lockmode);
        parent_rel = NULL;
        part = NULL;
        m_rel = NULL;
    } else if (m_rel != NULL) {
        heap_close(m_rel, lockmode);
        m_rel = NULL;
    }

    if (m_table != NULL) {
        m_table->Unlock();
        m_table = NULL;
    }
}

// Append a task to the pending queue and bump the cycle's total task count.
// NOTE(review): m_queue_size is written under m_lock here but read under
// m_finished_lock in IsFinished() — confirm the producer/consumer ordering
// makes that safe.
void FlushTaskQueue::Add(FlushTask *task)
{
    std::lock_guard<std::mutex> guard(m_lock);
    m_queue.push_back(task);
    m_queue_size++;
}

// Pop and return the oldest pending task, or NULL when the queue is empty.
FlushTask *FlushTaskQueue::Get()
{
    std::lock_guard<std::mutex> guard(m_lock);
    if (m_queue.empty()) {
        return NULL;
    }
    FlushTask *head = m_queue.front();
    m_queue.pop_front();
    return head;
}

// True when no pending tasks remain (finished tasks are tracked separately).
bool FlushTaskQueue::Empty()
{
    std::lock_guard<std::mutex> guard(m_lock);
    return m_queue.empty();
}

// True when every task added this cycle has been reported finished.
// NOTE(review): m_queue_size is updated in Add() under m_lock but read here
// under m_finished_lock — verify no task can be added concurrently with this
// check.
bool FlushTaskQueue::IsFinished()
{
    std::lock_guard<std::mutex> guard(m_finished_lock);
    return (m_queue_size == m_finished_queue.size());
}

// Called by a worker when it has completed a task; the task is kept until
// CleanFinished() destroys it.
void FlushTaskQueue::SetFinished(FlushTask *task)
{
    std::lock_guard<std::mutex> guard(m_finished_lock);
    m_finished_queue.push_back(task);
}

void FlushTaskQueue::CleanFinished()
{
    std::lock_guard<std::mutex> guard(m_finished_lock);
    for (FlushTask *task : m_finished_queue) {
        DELETE_EX(task);
    }
    m_finished_queue.clear();
    m_queue_size = 0;
}
} // namespace htap
