/*
 * Copyright (c) 2024 Huawei Technologies Co.,Ltd.
 *
 * openGauss is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *
 * http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 * -------------------------------------------------------------------------
 *
 * imcs_cache_mgr.cpp
 * 		routines to support common cache
 *
 * IDENTIFICATION
 * src/gausskernel/storage/imcstore/imcs_cache_mgr.cpp
 *
 * -------------------------------------------------------------------------
 */
#include "postgres.h"
#include "knl/knl_variable.h"
#include "access/imcs/imcs_cache_mgr.h"
#include "access/imcs/imcu_cache_mgr.h"
#include "access/imcs/imcu.h"
#include "utils/aiomem.h"
#include "utils/resowner.h"
#include "storage/ipc.h"
#include "miscadmin.h"
#include "access/imcs/imcu_storage.h"
#include "catalog/gs_imcs.h"
#include "access/imcs/imcs_ctlg.h"
#include "utils/resowner.h"

/* Max full clock-sweep passes EvictCacheBlock makes before giving up (see IM_CHECK_CACHE_SLOT_STATUS) */
const int IM_MAX_LOOPS = 16;

/* Once EvictCacheBlock's retry_num exceeds this, an out-of-buffer ERROR is raised
 * instead of returning CACHE_BLOCK_INVALID_IDX for another retry */
const int IM_MAX_RETRY_NUM = 3;

/* Entry of the slot-lookup hash table (m_hash): maps a CacheTag to the slot caching it */
typedef struct CacheLookupEnt {
    CacheTag cache_tag;
    CacheSlotId_t slot_id;
} CacheLookupEnt;

/* 21474836480 = 20 *1024*1024*1024 == 20G */
#define IM_MAX_METADATA_CACHE_SIZE 21474836480

/* the Max allocated cache size cannot exceed MaxAllocSize; so the count is MaxAllocSize/sizeof(IMCU) */
#define IM_MAX_CACHE_SLOT_COUNT (8947848)

/*
 * Loop bail-out check for IMCSCacheMgr::EvictCacheBlock ONLY: it expands in terms
 * of that function's locals (start, looped, retry_num, scanned, pinned, unpinned,
 * invalid, reserved, freepinned, max, size) and the members m_csweep,
 * m_cstoreCurrentSize, m_cstoreMaxSize — do not use it anywhere else.
 * Each time the sweep pointer returns to its starting slot one full loop is done;
 * after IM_MAX_LOOPS loops without a victim the macro either releases the sweep
 * lock and returns CACHE_BLOCK_INVALID_IDX so the caller can retry, or — once
 * retry_num exceeds IM_MAX_RETRY_NUM — raises an out-of-buffer ERROR with the
 * sweep statistics. NOTE(review): the ERROR path does not call UnlockSweep();
 * presumably the LWLock is released by transaction-abort cleanup — confirm.
 */
#define IM_CHECK_CACHE_SLOT_STATUS()                                                                        \
    {                                                                                                       \
        if (m_csweep == start) {                                                                            \
            looped++;                                                                                       \
            if (looped > IM_MAX_LOOPS) {                                                                    \
                if (retry_num > IM_MAX_RETRY_NUM) {                                                         \
                    ereport(ERROR, (errcode(ERRCODE_OUT_OF_BUFFER), errmodule(MOD_CACHE),                   \
                        errmsg("No free Cache Blocks! cstore_buffers maybe too small, scanned=%d,"          \
                        " pinned=%d, unpinned=%d, invalid=%d, looped=%d, reserved=%d, freepinned = %d, "    \
                        "start=%d, max=%d. request_size = %d, current_size = %ld, buffer_max_size = %ld.",  \
                        scanned, pinned, unpinned, invalid, looped, reserved, freepinned, start, max, size, \
                        m_cstoreCurrentSize, m_cstoreMaxSize)));                                            \
                } else {                                                                                    \
                    UnlockSweep();                                                                          \
                    return CACHE_BLOCK_INVALID_IDX;                                                         \
                }                                                                                           \
            }                                                                                               \
        }                                                                                                   \
    }

/*
 * @Description: cache_mgr_num_locks
 * Returns the number of LW locks required by the IMCSCacheMgr instance.
 * This function is called by lwlock_num() to calculate the required memory
 * for all the LW Locks.  Since this must be done prior to initializing the
 * instance of the IMCSCacheMgr class, the function cannot be defined
 * as a method of the class.  The lock number is derived from the total
 * number of cache-manager slots, according to the same capping rule Init uses.
 * @IN cache_size: total cache size in bytes
 * @IN each_block_size: size of one cache block
 * @Return: lock number
 * @See also: IMCSCacheMgr::Init
 */
int IMCacheMgrNumLocks(int64 cache_size, uint32 each_block_size)
{
    /* slot count capped exactly as in Init() */
    int64 slot_count = cache_size / each_block_size;
    if (slot_count > IM_MAX_CACHE_SLOT_COUNT) {
        slot_count = IM_MAX_CACHE_SLOT_COUNT;
    }
    /* two LW Locks per cache block: one IO-busy lock and one compress lock */
    return slot_count * 2;
}

/*
 * @Description: calculate the cache size for the given cache-manager type.
 * NOTE(review): the type argument is currently ignored — every type gets the
 * full size derived from the imcs_max_cache GUC (presumably stated in KB; the
 * * 1024L converts it to bytes).  The previous comment describing a metadata
 * cache of 1/4 of cstore buffers capped at 2G no longer matched this code.
 * @IN type: cache type (unused at present)
 * @Return: cache size in bytes
 * @See also:
 */
int64 IMCacheMgrCalcSizeByType(MgrCacheType type)
{
    int64 cache_size = (int64)(g_instance.attr.attr_memory.imcs_max_cache * 1024L);
    return cache_size;
}

/*
 * @Description: init all resource of cache instance
 * @IN cache_size: total cache size in bytes
 * @IN each_block_size: each cache block size, used to divide the cache into slots
 * @IN type: cache type (data or index)
 * @IN each_slot_length: slot struct size, e.g. sizeof(IMCU)
 * @See also:
 */
void IMCSCacheMgr::Init(int64 cache_size, uint32 each_block_size, MgrCacheType type, uint32 each_slot_length)
{
    int i = 0;
    int tranche_id = LWTRANCHE_UNKNOWN;
    int32 total_slots = 0;

    m_cache_type = type;
    /* Must be greater than 0 */
    /* NOTE(review): starts at 1 here; presumably advanced as slots are handed out
     * (EvictCacheBlock sweeps only up to m_CaccheSlotMax) — confirm elsewhere */
    m_CaccheSlotMax = 1;

    m_cstoreCurrentSize = 0;
    m_cstoreMaxSize = cache_size;

    /* slot count is capped so the slot array never exceeds MaxAllocSize.
     * NOTE(review): if cache_size < each_block_size this yields 0 slots and the
     * m_CacheDesc[total_slots - 1] write below underflows — confirm callers
     * always provide at least one block's worth of cache */
    total_slots = Min(cache_size / each_block_size, IM_MAX_CACHE_SLOT_COUNT);
    m_CacheSlots = (char *)palloc0(total_slots * each_slot_length);
    m_CacheDesc = (CacheDesc *)palloc0(total_slots * sizeof(CacheDesc));
    m_CacheSlotsNum = total_slots;
    m_slot_length = each_slot_length;
    for (i = 0; i < total_slots; ++i) {
        /* every descriptor starts unreferenced, untagged, and chained on the free list */
        m_CacheDesc[i].m_usage_count = 0;
        m_CacheDesc[i].m_refcount = 0;
        m_CacheDesc[i].m_ring_count = 0;
        m_CacheDesc[i].m_slot_id = i;
        m_CacheDesc[i].m_freeNext = i + 1;
        m_CacheDesc[i].m_cache_tag.type = CACHE_TYPE_NONE;
        m_CacheDesc[i].m_flag = CACHE_BLOCK_FREE;
        /* tranche id is loop-invariant (depends only on 'type'); data and index
         * caches use distinct LWLock tranches */
        if (type == MGR_CACHE_TYPE_DATA) {
            tranche_id = (int)LWTRANCHE_IMCUDATA_CACHE;
        } else if (type == MGR_CACHE_TYPE_INDEX) {
            tranche_id = (int)LWTRANCHE_IMCUMETA_CACHE;
        }
        /* two LWLocks per slot, matching the count returned by IMCacheMgrNumLocks() */
        m_CacheDesc[i].m_iobusy_lock = LWLockAssign(tranche_id);
        m_CacheDesc[i].m_compress_lock = LWLockAssign(tranche_id);
        m_CacheDesc[i].m_refreshing = false;
        m_CacheDesc[i].m_datablock_size = 0;

        SpinLockInit(&m_CacheDesc[i].m_slot_hdr_lock);
    }

    /* Cache Slot Free List: slots 0..total_slots-1 chained via m_freeNext */
    m_CacheDesc[total_slots - 1].m_freeNext = CACHE_BLOCK_INVALID_IDX;
    m_freeListHead = 0;
    m_freeListTail = total_slots - 1;
    SpinLockInit(&m_freeList_lock);
    SpinLockInit(&m_memsize_lock);

    /* Clock Sweep Starting point.  Data and index caches use separate sweep
     * locks and separate halves of the slot-mapping partition lock range */
    m_csweep = 0;
    m_csweep_lock = CStoreCUCacheSweepLock;
    m_partition_lock = FirstCacheSlotMappingLock;
    if (type == MGR_CACHE_TYPE_INDEX) {
        m_csweep_lock = MetaCacheSweepLock;
        m_partition_lock = FirstCacheSlotMappingLock + NUM_CACHE_BUFFER_PARTITIONS / 2;
    }

    HASHCTL info;
    errno_t rc = memset_s(&info, sizeof(info), 0, sizeof(info));
    securec_check(rc, "\0", "\0");

    char hash_name[130] = {0};
    rc = snprintf_s(hash_name, sizeof(hash_name), 129, "ImCache Buffer Lookup Table(%d)", type);
    securec_check_ss(rc, "\0", "\0");

    /* partitioned lookup table: CacheTag maps to a slot id (CacheLookupEnt) */
    info.keysize = sizeof(CacheTag);
    info.entrysize = sizeof(CacheLookupEnt);
    info.hash = tag_hash;
    info.num_partitions = NUM_CACHE_BUFFER_PARTITIONS / 2;

    m_hash = HeapMemInitHash(hash_name, total_slots, total_slots, &info, HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
}

/*
 * @Description: tear down every resource owned by this cache instance;
 *   the instance object itself is left for the caller to release.
 * @See also:
 */
void IMCSCacheMgr::Destroy(void)
{
    /* drop all lookup entries first so no slot can be found any more */
    HeapMemResetHash(m_hash, "Cache Buffer Lookup Table");

    for (int slot = 0; slot < m_CacheSlotsNum; ++slot) {
        /* release the memory held by each slot, then its header spinlock */
        FreeCacheBlockMem(slot);
        SpinLockFree(&m_CacheDesc[slot].m_slot_hdr_lock);
    }

    /* instance-wide spinlocks */
    SpinLockFree(&m_memsize_lock);
    SpinLockFree(&m_freeList_lock);

    /* finally drop the slot storage and the descriptor array */
    pfree_ext(m_CacheSlots);
    pfree_ext(m_CacheDesc);
}

/*
 * @Description: fetch the descriptor for the given cache slot.
 * @IN slot_id: cache block index (not range-checked here; callers must pass a valid id)
 * @Return: pointer to the slot's CacheDesc
 */
CacheDesc *IMCSCacheMgr::GetCacheDesc(CacheSlotId_t slot_id)
{
    return m_CacheDesc + slot_id;
}

/*
 * @Description: pin cache block, increase the reference.
 * @IN slot_id: cache block index
 * @Return: return false if the slot is not marked valid, this means the block may soon be removed or changed.
 * @See also:
 */
bool IMCSCacheMgr::PinCacheBlock(CacheSlotId_t slot_id)
{
    Assert(slot_id >= 0 && slot_id <= m_CaccheSlotMax && slot_id < m_CacheSlotsNum);
    /* the slot's tag type must agree with the kind of cache this manager serves */
    Assert(((m_cache_type == MGR_CACHE_TYPE_DATA) && (m_CacheDesc[slot_id].m_cache_tag.type == CACHE_COlUMN_DATA ||
        m_CacheDesc[slot_id].m_cache_tag.type == CACHE_ORC_DATA ||
        m_CacheDesc[slot_id].m_cache_tag.type == CACHE_OBS_DATA)) ||
        ((m_cache_type == MGR_CACHE_TYPE_INDEX) && (m_CacheDesc[slot_id].m_cache_tag.type == CACHE_ORC_INDEX ||
        m_CacheDesc[slot_id].m_cache_tag.type == CACHE_CARBONDATA_METADATA)));

    LockCacheDescHeader(slot_id);

    /* Pinning an invalid slot returns false and does not pin the slot */
    if (!(m_CacheDesc[slot_id].m_flag & CACHE_BLOCK_VALID)) {
        UnLockCacheDescHeader(slot_id);
        return false;
    }

    /* increase refcount to pin cache block */
    m_CacheDesc[slot_id].m_refcount++;
    Assert(m_CacheDesc[slot_id].m_refcount > 0);

    /* Header update done.  NOTE(review): m_usage_count is NOT incremented here
     * even though EvictCacheBlock ages entries by decrementing it — confirm
     * whether pins are meant to bump the usage count (the old comment claimed so) */
    UnLockCacheDescHeader(slot_id);
    /* debug snapshot is taken after unlocking, so the values may be slightly stale */
    ereport(DEBUG2, (errmodule(MOD_CACHE), errmsg("pin cache block, slot(%d), type(%d) ,flag(%hhu), refcount(%u)",
        slot_id, m_CacheDesc[slot_id].m_cache_tag.type, m_CacheDesc[slot_id].m_flag, m_CacheDesc[slot_id].m_refcount)));

    /* register the pin with the current resource owner, presumably so it is
     * released automatically during error cleanup — verify in resowner code */
    if (m_cache_type == MGR_CACHE_TYPE_INDEX) {
        ResourceOwnerEnlargeMetaCacheSlot(t_thrd.utils_cxt.CurrentResourceOwner);
        ResourceOwnerRememberMetaCacheSlot(t_thrd.utils_cxt.CurrentResourceOwner, slot_id);
    } else {
        ResourceOwnerEnlargeImcucacheSlot(t_thrd.utils_cxt.CurrentResourceOwner);
        ResourceOwnerRememberImcucacheSlot(t_thrd.utils_cxt.CurrentResourceOwner, slot_id);
    }
    return true;
}

/*
 * @Description: pin cache block, increase the reference, while already holding the header lock.
 * @IN slot_id: cache block index
 * @See also: the header lock must be held on entry; this function RELEASES it
 * before returning.  This is an internal pin: the block's validity is not
 * checked and m_usage_count is not touched — the caller knows the slot state.
 */
void IMCSCacheMgr::PinCacheBlock_Locked(CacheSlotId_t slot_id)
{
    Assert(slot_id >= 0 && slot_id <= m_CaccheSlotMax && slot_id < m_CacheSlotsNum);

    m_CacheDesc[slot_id].m_refcount++;
    /* hand back the header lock acquired by the caller */
    UnLockCacheDescHeader(slot_id);

    /* register the pin with the current resource owner, same bookkeeping as PinCacheBlock */
    if (m_cache_type == MGR_CACHE_TYPE_INDEX) {
        ResourceOwnerEnlargeMetaCacheSlot(t_thrd.utils_cxt.CurrentResourceOwner);
        ResourceOwnerRememberMetaCacheSlot(t_thrd.utils_cxt.CurrentResourceOwner, slot_id);
    } else {
        ResourceOwnerEnlargeImcucacheSlot(t_thrd.utils_cxt.CurrentResourceOwner);
        ResourceOwnerRememberImcucacheSlot(t_thrd.utils_cxt.CurrentResourceOwner, slot_id);
    }
    /* debug snapshot after unlock; refcount may already have changed */
    ereport(DEBUG2, (errmodule(MOD_CACHE),
        errmsg("pin locked cache block, slot(%d), refcount(%u)", slot_id, m_CacheDesc[slot_id].m_refcount)));
}

/*
 * @Description: unpin cache block, decrease the reference.
 * @IN slot_id: cache block index
 * @See also: PinCacheBlock
 */
void IMCSCacheMgr::UnPinCacheBlock(CacheSlotId_t slot_id)
{
    Assert(slot_id >= 0 && slot_id <= m_CaccheSlotMax && slot_id < m_CacheSlotsNum);
    /* the slot's tag type must agree with the kind of cache this manager serves */
    Assert(((m_cache_type == MGR_CACHE_TYPE_DATA) && (m_CacheDesc[slot_id].m_cache_tag.type == CACHE_COlUMN_DATA ||
        m_CacheDesc[slot_id].m_cache_tag.type == CACHE_ORC_DATA ||
        m_CacheDesc[slot_id].m_cache_tag.type == CACHE_OBS_DATA)) ||
        ((m_cache_type == MGR_CACHE_TYPE_INDEX) && (m_CacheDesc[slot_id].m_cache_tag.type == CACHE_ORC_INDEX ||
        m_CacheDesc[slot_id].m_cache_tag.type == CACHE_CARBONDATA_METADATA)));

    /* drop the resource-owner bookkeeping first, mirroring the Pin* functions */
    if (m_cache_type == MGR_CACHE_TYPE_INDEX) {
        ResourceOwnerForgetMetaCacheSlot(t_thrd.utils_cxt.CurrentResourceOwner, slot_id);
    } else {
        ResourceOwnerForgetImcucacheSlot(t_thrd.utils_cxt.CurrentResourceOwner, slot_id);
    }

    /* decrement the refcount under the slot header lock */
    LockCacheDescHeader(slot_id);
    Assert(m_CacheDesc[slot_id].m_refcount > 0);
    m_CacheDesc[slot_id].m_refcount--;

    UnLockCacheDescHeader(slot_id);
    /* debug snapshot after unlock; values may be slightly stale */
    ereport(DEBUG2, (errmodule(MOD_CACHE), errmsg("unpin cache block, slot(%d), type(%d) ,refcount(%u)", slot_id,
        m_CacheDesc[slot_id].m_cache_tag.type, m_CacheDesc[slot_id].m_refcount)));
}

/*
 * @Description: use the clock-sweep algorithm to evict a block.
 * @IN size: requested size, reported in the out-of-buffer error message
 * @IN retry_num: how many times the caller has already retried; once it exceeds
 *   IM_MAX_RETRY_NUM the sweep raises an ERROR instead of returning
 *   CACHE_BLOCK_INVALID_IDX (see IM_CHECK_CACHE_SLOT_STATUS)
 * @Return: slot id of the victim — returned PINNED and marked CACHE_BLOCK_INFREE
 *   (i.e. not valid) — or CACHE_BLOCK_INVALID_IDX if no victim was found
 * @See also: IM_CHECK_CACHE_SLOT_STATUS, FreeCacheBlockMem
 */
CacheSlotId_t IMCSCacheMgr::EvictCacheBlock(int size, int retry_num)
{
    CacheSlotId_t slot_id = CACHE_BLOCK_INVALID_IDX;

    /* If there is insufficient memory or no slots available
     * we must evict one of the cache blocks using the clock sweep.
     * Get the m_csweep_lock, only one sweeper searches the list. */
    LockSweep();

    /* Initialize statistics for each new sweep; these feed the
     * IM_CHECK_CACHE_SLOT_STATUS macro and the error message it emits */
    int found = 0;
    int start = m_csweep;
    int max = m_CaccheSlotMax;
    int scanned = 0;
    int pinned = 0;
    int unpinned = 0;
    int invalid = 0;
    int looped = 0;
    int reserved = 0;
    int freepinned = 0;

    while (1) {
        /* Set the start slot to the current sweep position(m_csweep),
        Then advance the current sweep position to the next, wrapping at m_CaccheSlotMax */
        slot_id = m_csweep;
        if (m_csweep++ >= m_CaccheSlotMax) {
            m_csweep = 0;
        }
        ereport(DEBUG2, (errmodule(MOD_CACHE),
            errmsg("try evict cache block, solt(%d), flag(%hhu), refcount(%u), usage_count(%hu), ring_count(%hu)",
            slot_id, m_CacheDesc[slot_id].m_flag, m_CacheDesc[slot_id].m_refcount, m_CacheDesc[slot_id].m_usage_count,
            m_CacheDesc[slot_id].m_ring_count)));

        /* slots with IO in flight are skipped (counted as reserved) after a short sleep */
        if (IsIOBusy(slot_id)) {
            IM_CHECK_CACHE_SLOT_STATUS();
            reserved++;
            pg_usleep(2);
            continue;
        }

        LockCacheDescHeader(slot_id);
        scanned++;
        /* skip invalid and error cache blocks */
        if ((m_CacheDesc[slot_id].m_flag & CACHE_BLOCK_VALID) || (m_CacheDesc[slot_id].m_flag & CACHE_BLOCK_ERROR)) {
            /* skip pinned cache blocks */
            if (m_CacheDesc[slot_id].m_refcount == 0) {
                unpinned++;
                /* skip cache blocks with usage count > 0 */
                if (m_CacheDesc[slot_id].m_usage_count == 0) {
                    /* skip cache blocks that are in another ring , 1 in my ring,  0 no ring */
                    if (m_CacheDesc[slot_id].m_ring_count == 0) {
                        /* victim found: invalidate it and pin it before anyone else can */
                        CacheFlags old_flag = m_CacheDesc[slot_id].m_flag;
                        m_CacheDesc[slot_id].m_flag = CACHE_BLOCK_INFREE; /* !Valid */

                        PinCacheBlock_Locked(slot_id); /* Released header lock */
                        found++;

                        ereport(DEBUG2, (errmodule(MOD_CACHE), errmsg("evict cache block, solt(%d), flag(%d - %d)",
                            slot_id, old_flag, CACHE_BLOCK_INFREE)));

                        /* Found a slot to be reused with some buffer space,
                         * the space must be freed and reused.
                         * The slot is Invalid and Pinned!!! */
                        break;
                    }
                    reserved++;
                } else {
                    /* decrement the usage count to age the entry */
                    m_CacheDesc[slot_id].m_usage_count--;
                }
            } else {
                pinned++;
            }
        } else {
            invalid++;
            if ((m_CacheDesc[slot_id].m_flag & CACHE_BLOCK_FREE) && (m_CacheDesc[slot_id].m_refcount > 0)) {
                freepinned++;
            }
        }
        UnLockCacheDescHeader(slot_id);

        /* A full sweep should rarely happen
         * Bail-out if we cannot find any unpinned blocks to
         * avoid waiting forever.
         *
         * If the next sweep position is the same as
         * the starting point, then we have gone
         * an entire loop
         *
         * If there is not proper slot to replace, it will return CACHE_BLOCK_INVALID_IDX
         * and unlock sweep lock in Macro IM_CHECK_CACHE_SLOT_STATUS(). If this function
         * returns CACHE_BLOCK_INVALID_IDX, it will retry to find freespace in get_free_cache_block
         * function.
         */
        IM_CHECK_CACHE_SLOT_STATUS();
    }

    /* swap and free the cache block memory; done while still holding the sweep lock */
    FreeCacheBlockMem(slot_id);

    UnlockSweep();

    /* purposely returning pinned Invalid slot !!! */
    return slot_id;
}

/*
 * @Description: spill a cache slot's IMCU to CU storage and move its accounting
 * from "in memory" to "on disk".  Despite the name, what is visible here is the
 * spill: the IMCU is compressed (if not already cached compressed) and written
 * via IMCUStorage::SaveIMCU, then the owning IMCSDesc counters are updated under
 * its latch.  NOTE(review): the actual release of the slot's buffer memory is
 * not visible in this function — confirm where it happens.
 * Called per-slot by EvictCacheBlock and for every slot by Destroy (reinit /
 * switchover); NOTE(review): the old header comment claimed "slot may equal
 * m_CacheSlotsNum", which contradicts the Assert below — confirm intent.
 * @IN slot: cache block index
 * @See also: EvictCacheBlock, Destroy
 */
void IMCSCacheMgr::FreeCacheBlockMem(CacheSlotId_t slot)
{
    Assert(slot >= 0 && slot < m_CacheSlotsNum);
    CacheDesc *cache_desc = m_CacheDesc + slot;
    Assert(cache_desc->m_slot_id == slot);
    /* rebuild the data-slot tag from the slot's cache tag to locate the CU on disk */
    DataSlotTag data_slot_tag;
    // data_slot_tag.slot_type = (int32)cache_desc->m_cache_tag.type;
    errno_t rc =
        memcpy_s(&data_slot_tag.slotTag, MAX_CACHE_TAG_LEN, cache_desc->m_cache_tag.key, sizeof(DataSlotTagKey));
    securec_check(rc, "\0", "\0");
    CFileNode cFileNode(data_slot_tag.slotTag.cuSlotTag.m_rnode);
    cFileNode.m_attid = data_slot_tag.slotTag.cuSlotTag.m_colId;
    /* NOTE(review): cu_storage is allocated with New() and never explicitly
     * deleted here — presumably reclaimed with CurrentMemoryContext; confirm */
    IMCUStorage *cu_storage = New(CurrentMemoryContext)IMCUStorage(cFileNode);
    IMCU *cu = (IMCU *)(&m_CacheSlots[slot * m_slot_length]); // locate the IMCU by slot-id offset into the slot array

    IMCUDesc *cu_desc = IMCU_CACHE->GetImcuDesc(cFileNode.m_rnode.relNode, data_slot_tag.slotTag.cuSlotTag.m_CUId,
        data_slot_tag.slotTag.cuSlotTag.m_colId);

    /* remember whether the IMCU was already compressed BEFORE we may compress it,
     * because the accounting below subtracts a different size in each case */
    bool compressed = cu->m_cache_compressed;
    if (cu->m_cache_compressed) {
        cu_storage->SaveIMCU(cu->m_compressedBuf, cu_desc->cu_size, data_slot_tag.slotTag.cuSlotTag.m_CUId, true);
    } else {
        /* compress first, then persist the freshly compressed buffer */
        cu->Compress(cu_desc->row_count, 0, ALIGNOF_CUSIZE);
        int size = cu->GetCUSize();
        cu_storage->SaveIMCU(cu->m_compressedBuf, size, data_slot_tag.slotTag.cuSlotTag.m_CUId, true);
    }
    /* move the IMCU's accounting from memory to disk under the imcs_desc latch */
    IMCSDesc *imcs_desc = IMCU_CACHE->GetImcsDesc(data_slot_tag.slotTag.cuSlotTag.m_rnode.relNode);
    LWLockAcquire(imcs_desc->imcu_desc_latch, LW_EXCLUSIVE);
    imcs_desc->cu_num_in_mem--;
    if (compressed) {
        imcs_desc->cu_mem_size -= cu->m_cuSize;
    } else {
        imcs_desc->cu_mem_size -= cu->m_srcDataSize;
    }
    imcs_desc->cu_num_in_disk++;
    imcs_desc->cu_disk_size += cu_desc->cu_size;
    cu_desc->flag = IMCUDescInDisk;
    UpdateImcHashTblStatus(data_slot_tag.slotTag.cuSlotTag.m_rnode.relNode, IMCS_STATUS_STORED);
    LWLockRelease(imcs_desc->imcu_desc_latch);
}
