
#include "postgres.h"

#include "storage/he3db_logindex.h"
#include "storage/shmem.h"
#include "storage/spin.h"

static LogIndexMemList *log_index_mem_list;
static uint64 logindex_mem_tbl_size;

/*
 * Compute the shared-memory size needed for the logindex mem table list and
 * set logindex_mem_tbl_size (the table count) as a side effect.
 *
 * he3db_logindex_mem_size is the configured budget in MB.  FATALs when the
 * budget yields fewer than 3 tables.
 */
static Size
LogIndexMemListSize(uint64 he3db_logindex_mem_size)
{
    Size size;

    /* How many whole mem tables fit into the configured budget. */
    logindex_mem_tbl_size = (he3db_logindex_mem_size * 1024L * 1024L) / sizeof(LogIndexMemTBL);
    /* Header size excluding the flexible array member. */
    size = offsetof(LogIndexMemList, mem_table);
    size = add_size(size, mul_size(sizeof(LogIndexMemTBL), logindex_mem_tbl_size));

    /* Round up so the struct size is a multiple of MAXIMUM_ALIGNOF. */
    size = MAXALIGN(size);

    /* The number of logindex memory tables is at least 3 */
    if (logindex_mem_tbl_size < 3)
        elog(FATAL, "The number=" UINT64_FORMAT " of logindex memory table is less than 3",
             logindex_mem_tbl_size);
    else
        ereport(LOG, (errmsg("The total log index memory table size is %zu, number logindex mem-table size is " UINT64_FORMAT,
                             size, logindex_mem_tbl_size)));

    return size;
}

/*
 * Initialize the page item at the table's current free page slot for the
 * given buffer tag.  Slot indices are 1-based; 0 means invalid.
 */
static void SetNewPageItem(LogIndexMemTBL *mem_tbl, const BufferTag *page)
{
    LogIndexMemItemHead *head = &mem_tbl->page_head[mem_tbl->meta.page_free_head - 1];

    memcpy(&head->tag, page, sizeof(BufferTag));
    head->next_item = LOG_INDEX_TBL_INVALID_SEG;
    /* the page's lsn chain starts and ends at the next free lsn segment */
    head->next_seg = mem_tbl->meta.lsn_free_head;
    head->tail_seg = mem_tbl->meta.lsn_free_head;
}

// When active table is full, get next free mem table and will change to active mem.
static LogIndexMemTBL *GetNextFreeMemTbl(void)
{
    // TODO change to Lightweight Lock
    uint64 active_tbl_index = (log_index_mem_list->active_table_index + 1)%(log_index_mem_list->table_cap);
    // if all mem table is full, waiting for recycle
    if(active_tbl_index == log_index_mem_list->table_start_index)
    {
        elog(LOG, "Mem table is full, waiting for cleanup. Total size: %ld", logindex_mem_tbl_size);
    }
    while(active_tbl_index == log_index_mem_list->table_start_index)
    {
        pg_usleep(10);	/* 10 us */
    }
    elog(DEBUG5, "Find next free mem table and set active_table_index + 1: %ld", active_tbl_index);
    LWLockAcquire(LogIndexMemListLock,LW_EXCLUSIVE);
    // Circular List
    log_index_mem_list->active_table_index = active_tbl_index;
    LWLockRelease(LogIndexMemListLock);
    // if it finds free mem table will return directly.
    return &(log_index_mem_list->mem_table[log_index_mem_list->active_table_index]);
}

/*
 * Append one lsn to the segment at its current fill position and advance the
 * fill counter.  Caller must ensure the segment is not full.
 */
static void SetLsnSeg(LogIndexMemItemSeg *lsn_seg, XLogRecPtr lsn){
    LOG_INDEX_INSERT_LSN_INFO(lsn_seg, lsn_seg->number, lsn);
    lsn_seg->number++;
}

/*
 * Initialize a fresh lsn segment at the table's current free segment slot
 * (1-based; seg_item[] itself is 0-based) and store the first lsn into it.
 */
static void SetNewLsnSeg(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
    LogIndexMemItemSeg *seg = &mem_tbl->seg_item[mem_tbl->meta.lsn_free_head - 1];

    /* a brand-new segment has no neighbours yet */
    seg->next_seg = LOG_INDEX_TBL_INVALID_SEG;
    seg->prev_seg = LOG_INDEX_TBL_INVALID_SEG;
    SetLsnSeg(seg, lsn);
}

/*
 * Chain a new lsn segment after lsn_seg_old, make it the page's tail
 * segment, and store the lsn into it.
 */
static void SetNextLsnSeg(LogIndexMemItemHead *page_head, LogIndexMemItemSeg *lsn_seg_old, LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
    LogIndexMemItemSeg *fresh = &mem_tbl->seg_item[mem_tbl->meta.lsn_free_head - 1];

    /* link backwards to the previous tail before the tail pointer moves */
    fresh->prev_seg = page_head->tail_seg;
    fresh->next_seg = LOG_INDEX_TBL_INVALID_SEG;
    lsn_seg_old->next_seg = mem_tbl->meta.lsn_free_head;
    page_head->tail_seg = mem_tbl->meta.lsn_free_head;
    SetLsnSeg(fresh, lsn);
}

/*
 * Under the meta spinlock: record the table's prefix lsn, widen the
 * [min_lsn, max_lsn] range to include lsn, and consume one page slot plus
 * one lsn segment slot.
 */
static void UpdateMemTableMetaWithNewPage(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
    SpinLockAcquire(&mem_tbl->meta.meta_lock);
    LOG_INDEX_MEM_TBL_SET_PREFIX_LSN(mem_tbl, lsn);
    if (lsn > mem_tbl->meta.max_lsn)
        mem_tbl->meta.max_lsn = lsn;
    if (lsn < mem_tbl->meta.min_lsn)
        mem_tbl->meta.min_lsn = lsn;
    /* advance both free-list heads past the slots just used */
    mem_tbl->meta.page_free_head++;
    mem_tbl->meta.lsn_free_head++;
    SpinLockRelease(&mem_tbl->meta.meta_lock);
}

/*
 * Under the meta spinlock: widen the table's [min_lsn, max_lsn] range and
 * consume one page slot plus one lsn segment slot.  Unlike
 * UpdateMemTableMetaWithNewPage, the prefix lsn is left untouched.
 */
static void UpdateMemTableMetaWithNextPage(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
    SpinLockAcquire(&mem_tbl->meta.meta_lock);
    if (lsn > mem_tbl->meta.max_lsn)
        mem_tbl->meta.max_lsn = lsn;
    if (lsn < mem_tbl->meta.min_lsn)
        mem_tbl->meta.min_lsn = lsn;
    /* advance both free-list heads past the slots just used */
    mem_tbl->meta.page_free_head++;
    mem_tbl->meta.lsn_free_head++;
    SpinLockRelease(&mem_tbl->meta.meta_lock);
}

/*
 * Under the meta spinlock: widen the table's [min_lsn, max_lsn] range and
 * consume one lsn segment slot (no new page slot is needed).
 */
static void UpdateMemTableMetaWithNextSeg(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
    SpinLockAcquire(&mem_tbl->meta.meta_lock);
    if (lsn > mem_tbl->meta.max_lsn)
        mem_tbl->meta.max_lsn = lsn;
    if (lsn < mem_tbl->meta.min_lsn)
        mem_tbl->meta.min_lsn = lsn;
    mem_tbl->meta.lsn_free_head++;
    SpinLockRelease(&mem_tbl->meta.meta_lock);
}

/*
 * Under the meta spinlock: widen the table's [min_lsn, max_lsn] range only;
 * no new slot of any kind is consumed.
 */
static void UpdateMemTableMetaWithCurrentSeg(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
    SpinLockAcquire(&mem_tbl->meta.meta_lock);
    if (lsn > mem_tbl->meta.max_lsn)
        mem_tbl->meta.max_lsn = lsn;
    if (lsn < mem_tbl->meta.min_lsn)
        mem_tbl->meta.min_lsn = lsn;
    SpinLockRelease(&mem_tbl->meta.meta_lock);
}

/*
 * Turn mem_tbl into the active table and insert its first (page, lsn) pair:
 * marks the state ACTIVE, resets the free-list heads, hashes the page into
 * the table, and initializes the first page item and lsn segment.
 */
static void SetActiveTblWithFirstPage(LogIndexMemTBL *mem_tbl, const BufferTag *page, XLogRecPtr lsn)
{
    uint32 hash_key;

    // set mem table state to active
    pg_atomic_write_u32(&(mem_tbl->meta.state), LOG_INDEX_MEM_TBL_STATE_ACTIVE);

    // index start with 1, 0 means INVALID. hash[] all values will be 0 after init, so set to 1 when first use.
    // NOTE(review): id is set to the table's ring position here, while
    // He3dbLogIndexTblListInit assigns i + 1 — confirm which scheme is intended.
    mem_tbl->meta.id = log_index_mem_list->active_table_index;
    mem_tbl->meta.lsn_free_head = 1;
    mem_tbl->meta.page_free_head = 1;
    // calculate hashcode by buffer tag
    hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
    mem_tbl->hash[hash_key] = mem_tbl->meta.page_free_head;

    // set page item
    SetNewPageItem(mem_tbl, page);

    // set lsn seg
    SetNewLsnSeg(mem_tbl, lsn);

    // set metadata (prefix/min/max lsn, free heads) for the active mem table
    UpdateMemTableMetaWithNewPage(mem_tbl, lsn);
}

/*
 * Retire the exhausted table and insert the (page, lsn) pair into a freshly
 * activated mem table.
 */
static void InsertLsnWhenOldTblIsFull(LogIndexMemTBL *mem_tbl_old, const BufferTag *page, XLogRecPtr lsn)
{
    /* mark the old table inactive so the recycler may reclaim it */
    pg_atomic_write_u32(&mem_tbl_old->meta.state, LOG_INDEX_MEM_TBL_STATE_INACTIVE);
    SetActiveTblWithFirstPage(GetNextFreeMemTbl(), page, lsn);
}

/*
 * Add a new page (with its first lsn) to the active table, or roll over to a
 * new active table when this one has no free page head or lsn segment left.
 */
static void SetNextPageItem(LogIndexMemTBL *mem_tbl, const BufferTag *page, XLogRecPtr lsn)
{
    /* out of page heads or lsn segments: the active table is full */
    if (mem_tbl->meta.page_free_head > LOG_INDEX_MEM_TBL_PAGE_NUM ||
        mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM)
    {
        InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
        return;
    }

    /* free resources remain: claim a page item and an lsn segment here */
    SetNewPageItem(mem_tbl, page);
    SetNewLsnSeg(mem_tbl, lsn);
    UpdateMemTableMetaWithNewPage(mem_tbl, lsn);
}

/*
 * Reset a recycled mem table to its pristine FREE state so the slot can be
 * reused: clears the metadata, the hash buckets, every page head, and every
 * lsn segment.
 */
static void RestMemTable(LogIndexMemTBL *mem_tbl)
{
    int i;

    /* metadata back to defaults */
    mem_tbl->meta.id = LOG_INDEX_TABLE_INVALID_ID;
    pg_atomic_write_u32(&mem_tbl->meta.state, LOG_INDEX_MEM_TBL_STATE_FREE);
    mem_tbl->meta.page_free_head = LOG_INDEX_TBL_INVALID_SEG;
    mem_tbl->meta.lsn_free_head = LOG_INDEX_TBL_INVALID_SEG;
    mem_tbl->meta.min_lsn = UINT64_MAX;
    mem_tbl->meta.max_lsn = InvalidXLogRecPtr;
    mem_tbl->meta.prefix_lsn = 0;

    /* hash buckets and page heads */
    for (i = 0; i < LOG_INDEX_MEM_TBL_PAGE_NUM; i++)
    {
        mem_tbl->hash[i] = LOG_INDEX_TBL_INVALID_SEG;
        CLEAR_BUFFERTAG(mem_tbl->page_head[i].tag);
        mem_tbl->page_head[i].next_item = LOG_INDEX_TBL_INVALID_SEG;
        mem_tbl->page_head[i].next_seg = LOG_INDEX_TBL_INVALID_SEG;
        mem_tbl->page_head[i].tail_seg = LOG_INDEX_TBL_INVALID_SEG;
    }

    /* all lsn segments, in a single pass */
    for (i = 0; i < LOG_INDEX_MEM_TBL_SEG_NUM; i++)
    {
        mem_tbl->seg_item[i].prev_seg = LOG_INDEX_TBL_INVALID_SEG;
        mem_tbl->seg_item[i].next_seg = LOG_INDEX_TBL_INVALID_SEG;
        mem_tbl->seg_item[i].number = 0;
    }
}

static LsnNode *InitLsnNode()
{
    LsnNode *head;

    head = (LsnNode *)malloc(sizeof(LsnNode));
    head->next = NULL;
    return head;
}

// insert nodelist from head, eg: before: head-->node1-->NULL, after: head-->newNode-->node1-->NULL
static void InsertLsnNodeByHead(LsnNode *head, XLogRecPtr lsn)
{
    LsnNode *new_node;

    new_node = (LsnNode *)malloc(sizeof(LsnNode));
    new_node->lsn = lsn;
    new_node->next = head->next;
    head->next = new_node;
}

// eg: before: head-->node1-->NULL, after: head-->node1-->newNode-->NULL
static LsnNode *InsertLsnNodeByTail(LsnNode *head, XLogRecPtr lsn)
{
    LsnNode *new_node;
    new_node = (LsnNode *)malloc(sizeof(LsnNode));
    head->next = new_node;
    new_node->lsn = lsn;
    new_node->next = NULL;
    return new_node;
}

// print nodelist
static void PrintLsnNode(LsnNode *head)
{
    LsnNode *p;
    p = head->next;
    while (p) {
        printf(" %d\t ", p->lsn);
        p = p->next;
    }
}

/* Reverse, in place, the list hanging off the sentinel head. */
static void ReverseLsnNode(LsnNode *head)
{
    LsnNode *reversed;
    LsnNode *cur;

    if (head == NULL)
        return;

    /* classic pointer-flip: move nodes one by one onto the reversed prefix */
    reversed = NULL;
    cur = head->next;
    while (cur != NULL)
    {
        LsnNode *rest = cur->next;

        cur->next = reversed;
        reversed = cur;
        cur = rest;
    }
    head->next = reversed;
}

/*
 * Look up a page in one mem table and return the 1-based index of the first
 * lsn segment of its chain, or LOG_INDEX_TBL_INVALID_SEG when the table's
 * lsn range cannot intersect [start_lsn, end_lsn) or the page is absent.
 */
static uint16 FindFirstLsnSegInMemTblByPageTag(LogIndexMemTBL *mem_tbl, const BufferTag *page, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
    LogIndexMemItemHead *head;
    uint32 bucket;

    /* end_lsn <= min_lsn or max_lsn < start_lsn: range not in this table */
    if (mem_tbl->meta.min_lsn >= end_lsn || mem_tbl->meta.max_lsn < start_lsn)
        return LOG_INDEX_TBL_INVALID_SEG;

    bucket = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
    if (mem_tbl->hash[bucket] == LOG_INDEX_TBL_INVALID_SEG)
        return LOG_INDEX_TBL_INVALID_SEG;

    /* walk the collision chain until the buffer tags match */
    head = &mem_tbl->page_head[mem_tbl->hash[bucket] - 1];
    while (!BUFFERTAGS_EQUAL(head->tag, *page))
    {
        if (head->next_item == LOG_INDEX_TBL_INVALID_SEG)
            return LOG_INDEX_TBL_INVALID_SEG;
        head = &mem_tbl->page_head[head->next_item - 1];
    }

    /* found the requested page: hand back its first lsn segment */
    return head->next_seg;
}

static TagNode *InitTagNode()
{
    TagNode *head;

    head = (TagNode *)malloc(sizeof(TagNode));
    head->next = NULL;
    return head;
}

// insert nodelist from head, eg: before: head-->node1-->NULL, after: head-->newNode-->node1-->NULL
static void InsertTagNodeByHead(TagNode *head, BufferTag tag)
{
    TagNode *new_node;

    new_node = (TagNode *)malloc(sizeof(TagNode));
    new_node->tag.tag = tag;
    new_node->next = head->next;
    head->next = new_node;
}

/*
 * Create and initialize the shared-memory logindex mem table ring: zero the
 * ring indices and put every table into the FREE state with an empty lsn
 * range.
 */
void He3dbLogIndexTblListInit(void)
{
    bool found_logindex;

    log_index_mem_list = (LogIndexMemList *)
            ShmemInitStruct("log index", LogIndexMemListSize(he3db_logindex_mem_size), &found_logindex);
    Assert(log_index_mem_list != NULL);
    log_index_mem_list->table_start_index = 0;
    log_index_mem_list->active_table_index = 0;
    log_index_mem_list->table_cap = logindex_mem_tbl_size;
    for (uint64 i = 0; i < log_index_mem_list->table_cap; i++) {
        /* set mem table initial values (spinlock must be initialized once only) */
        SpinLockInit(&(log_index_mem_list->mem_table[i].meta.meta_lock));
        log_index_mem_list->mem_table[i].meta.id = i + 1;
        /* empty range: min above every lsn, max below every lsn */
        log_index_mem_list->mem_table[i].meta.min_lsn = UINT64_MAX;
        log_index_mem_list->mem_table[i].meta.max_lsn = InvalidXLogRecPtr;
        pg_atomic_write_u32(&(log_index_mem_list->mem_table[i].meta.state), LOG_INDEX_MEM_TBL_STATE_FREE);
    }
}

/* Return the capacity (number of mem tables) of the shared logindex ring. */
uint64 GetMemTblSize(void)
{
    return log_index_mem_list->table_cap;
}

/*
 * Insert one (page, lsn) entry into the active mem table.
 *
 * Paths, in order:
 *   - active table still FREE: activate it with this entry as its first page;
 *   - lsn prefix differs from the table's: retire the table and start a new one;
 *   - page not hashed yet: claim a new page item (or roll over when full);
 *   - page found via the hash chain: append to its tail lsn segment, or chain
 *     a new segment (or roll over when no segment is free).
 *
 * NOTE(review): the hash/page structures are read without the meta spinlock
 * here — presumably only one backend inserts at a time; confirm with callers.
 */
void InsertLogIndexByPage(const BufferTag *page, XLogRecPtr lsn)
{
    LogIndexMemItemSeg *lsn_seg;
    uint32 hash_key;
    LogIndexMemTBL *mem_tbl;
    LogIndexMemItemHead *page_head;

    // calculate hashcode by buffer tag
    hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
    // get active mem table
    mem_tbl = &(log_index_mem_list->mem_table[log_index_mem_list->active_table_index]);
    // first time to use active mem table
    if(pg_atomic_read_u32(&mem_tbl->meta.state) == LOG_INDEX_MEM_TBL_STATE_FREE)
    {
        SetActiveTblWithFirstPage(mem_tbl, page, lsn);
    }
    else
    {
        // if lsn shares the active table's prefix (a table stores only suffixes)
        if(LOG_INDEX_SAME_TABLE_LSN_PREFIX(mem_tbl, lsn))
        {
            // 0 means INVALID, also means page don't exist in active mem table
            if(mem_tbl->hash[hash_key] == 0)
            {
                // point the bucket at the next free page slot, but only if the
                // table is not full (otherwise SetNextPageItem rolls over to a
                // new table and this bucket must stay empty)
                if (!(mem_tbl->meta.page_free_head > LOG_INDEX_MEM_TBL_PAGE_NUM || mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM))
                    mem_tbl->hash[hash_key] = mem_tbl->meta.page_free_head;
                SetNextPageItem(mem_tbl, page, lsn);
            }
            else
            {
                // page already exist or hash conflict
                // get exist page item
                page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
                /* if item page tag equal to current tag, true insert lsn to lsn_seg,
                 * false loop for next_item until equal or not found one. Then apply new page_item and lsn_seg.
                */
                while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
                    if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
                    {
                        // apply new page item
                        // there's no free page_head or lsn_seg, means current active is full, will apply for new mem table as active table
                        if (mem_tbl->meta.page_free_head > LOG_INDEX_MEM_TBL_PAGE_NUM || mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM)
                        {
                            // no free page head in active mem table, will apply for new mem table
                            InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
                        }
                        else
                        {
                            // set new page and lsn seg when active mem table have free resource
                            // set old page item's next_item to new one.
                            page_head->next_item = mem_tbl->meta.page_free_head;
                            // set page item
                            SetNewPageItem(mem_tbl, page);
                            SetNewLsnSeg(mem_tbl, lsn);
                            UpdateMemTableMetaWithNextPage(mem_tbl, lsn);
                        }
                        return;
                    }
                    page_head = &(mem_tbl->page_head[page_head->next_item-1]);
                }

                // found the page_head with a matching tag; look at its tail segment
                lsn_seg = &(mem_tbl->seg_item[page_head->tail_seg-1]);
                // does the tail segment still have room?
                if(lsn_seg->number < LOG_INDEX_MEM_ITEM_SEG_LSN_NUM)
                {
                    // insert lsn to seg
                    SetLsnSeg(lsn_seg, lsn);
                    UpdateMemTableMetaWithCurrentSeg(mem_tbl, lsn);
                }
                else
                {
                    if(mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM)
                    {
                        // no free lsn segment in active mem table, will apply for new mem table
                        InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
                    }
                    else
                    {
                        // apply new seg and insert lsn
                        SetNextLsnSeg(page_head, lsn_seg, mem_tbl, lsn);
                        UpdateMemTableMetaWithNextSeg(mem_tbl, lsn);
                    }
                }
            }
        }
        else
        {
            // prefix of lsn is different, so cannot use current active table, will apply new mem table
            InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
        }
    }
}

/*
 * Collect, in ascending table order, every lsn in [start_lsn, end_lsn) that
 * was recorded for the given page.
 *
 * Returns a malloc-based list with a sentinel head (lsns start at
 * head->next); the caller must release it with FreeLsnNode.  Scans the ring
 * from table_start_index up to and including the active table under a shared
 * LogIndexMemListLock, so the list is returned in insertion (lsn) order.
 */
LsnNode *GetLogIndexByPage(const BufferTag *page, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
    LsnNode *head_node;
    LsnNode *tail;
    uint64 tbl_index;

    // Prevent metadata changes during discovery.
    // TODO change to Lightweight Lock
    head_node = InitLsnNode();
    tail = head_node;
	LWLockAcquire(LogIndexMemListLock,LW_SHARED);
    tbl_index = log_index_mem_list->table_start_index;
    while(tbl_index != log_index_mem_list->active_table_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
        // current mem table's whole range is below the request: skip it
        if(mem_tbl->meta.max_lsn < start_lsn)
        {
            continue;
        }else if(mem_tbl->meta.min_lsn >= end_lsn)
        {
            // tables are ordered by lsn, so no later table can match either
            break;
        } else
        {
            // get index of this page's first lsn segment in the current table
            uint16 seg_index = FindFirstLsnSegInMemTblByPageTag(mem_tbl, page, start_lsn, end_lsn);
            while (seg_index != LOG_INDEX_TBL_INVALID_SEG)
            {
                LogIndexMemItemSeg *item_seg = &(mem_tbl->seg_item[seg_index - 1]);
                // loop over the lsn suffixes stored in this segment
                for(int i=0; i < item_seg->number; i++){
                    XLogRecPtr lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, item_seg->suffix_lsn[i]);
                    if(lsn >= start_lsn)
                    {
                        if(lsn < end_lsn)
                        {
                            tail = InsertLsnNodeByTail(tail, lsn);
                        }else{
                            // lsns in a segment are ascending: past end_lsn, we are done
                            LWLockRelease(LogIndexMemListLock);
                            return head_node;
                        }
                    }else
                    {
                        continue;
                    }
                }
                seg_index = item_seg->next_seg;
            }
        }
    }
    // same scan for the active table (excluded from the ring loop above)
    if(tbl_index == log_index_mem_list->active_table_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[log_index_mem_list->active_table_index]);
        // get index of this page's first lsn segment in the active table
        uint16 seg_index = FindFirstLsnSegInMemTblByPageTag(mem_tbl, page, start_lsn, end_lsn);
        while (seg_index != LOG_INDEX_TBL_INVALID_SEG)
        {
            LogIndexMemItemSeg *item_seg = &(mem_tbl->seg_item[seg_index - 1]);
            // loop over the lsn suffixes stored in this segment
            for(int i=0; i < item_seg->number; i++){
                XLogRecPtr lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, item_seg->suffix_lsn[i]);
                if(lsn >= start_lsn)
                {
                    if(lsn < end_lsn)
                    {
                        tail = InsertLsnNodeByTail(tail, lsn);
                    }else{
                        LWLockRelease(LogIndexMemListLock);
                        return head_node;
                    }
                }else
                {
                    continue;
                }
            }
            seg_index = item_seg->next_seg;
        }
        LWLockRelease(LogIndexMemListLock);
        return head_node;
    }
    LWLockRelease(LogIndexMemListLock);
    return head_node;
}

/* cleanup useless mem table which max_lsn less than consist_lsn,
 * and reset mem table to reuse.
 */
/*
 * Recycle mem tables whose max_lsn is below consist_lsn: reset each such
 * INACTIVE table and advance table_start_index past it.  Stops at the first
 * table that is still needed; the active table is never recycled.
 */
void CleanLogIndexByPage(XLogRecPtr consist_lsn)
{
    LWLockAcquire(LogIndexMemListLock, LW_EXCLUSIVE);
    /* loop mem tables from table_start_index toward the active table */
    while (log_index_mem_list->table_start_index != log_index_mem_list->active_table_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[log_index_mem_list->table_start_index]);

        /*
         * Stop at the first table that may still be needed: its lsn range
         * reaches consist_lsn, or it has not been retired (INACTIVE) yet.
         */
        if (mem_tbl->meta.max_lsn >= consist_lsn ||
            pg_atomic_read_u32(&mem_tbl->meta.state) != LOG_INDEX_MEM_TBL_STATE_INACTIVE)
        {
            break;
        }
        /* id and consist_lsn are uint64; %ld would be a signed/width mismatch */
        elog(DEBUG5, "Reset Mem table id=" UINT64_FORMAT " by consist_lsn=" UINT64_FORMAT " ",
             mem_tbl->meta.id, consist_lsn);
        RestMemTable(mem_tbl);
        log_index_mem_list->table_start_index =
            (log_index_mem_list->table_start_index + 1) % (log_index_mem_list->table_cap);
    }
    LWLockRelease(LogIndexMemListLock);
}

/*
 * Report the shared-memory size needed for the logindex, cache-line aligned,
 * or 0 when the feature is disabled (he3db_logindex_mem_size <= 0).
 */
Size He3dbLogIndexShmemSize(void)
{
    Size size = 0;

    if (he3db_logindex_mem_size <= 0)
        return size;
    size = LogIndexMemListSize(he3db_logindex_mem_size);
    size = CACHELINEALIGN(size);
    /* Size is size_t; %zu is the matching format specifier (not %ld) */
    elog(DEBUG5, "Mem table size=%zu in share memory", size);
    return size;
}

/* Release every node of an lsn list, including the sentinel head. */
void FreeLsnNode(LsnNode *head)
{
    while (head != NULL)
    {
        LsnNode *next = head->next;

        free(head);
        head = next;
    }
}

/*
 * Collect the buffer tags of all pages whose recorded lsn range overlaps
 * [start_lsn, end_lsn].
 *
 * Returns a malloc-based list with a sentinel head (tags start at
 * head->next); the caller must release it with FreeTagNode.  The sentinel's
 * tag.lsn is set to the (possibly clamped) end_lsn actually covered.
 *
 * NOTE(review): the ring loop returns after scanning the FIRST table whose
 * range overlaps the request (the return at the end of the else branch), so
 * overlapping pages in later inactive tables are not reported — confirm
 * whether callers rely on the clamped end_lsn to re-request the remainder.
 */
TagNode *GetBufTagByLsnRange(XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
    TagNode *head_node;
    uint64 tbl_index;
    LogIndexMemItemHead *item_page;
    LogIndexMemItemSeg *first_seg;
    LogIndexMemItemSeg *last_seg;
    XLogRecPtr page_min_lsn;
    XLogRecPtr page_max_lsn;

    // Prevent metadata changes during discovery.
    // change to Lightweight Lock
    head_node = InitTagNode();
    if (end_lsn < start_lsn)
    {
        // empty request range: return just the sentinel
        return head_node;
    }
    LWLockAcquire(LogIndexMemListLock,LW_SHARED);
    tbl_index = log_index_mem_list->table_start_index;
    while(tbl_index != log_index_mem_list->active_table_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
        // this table's whole range is below the request: skip it
        if(mem_tbl->meta.max_lsn < start_lsn)
        {
            continue;
        }else if(mem_tbl->meta.min_lsn > end_lsn)
        {
            // tables are ordered by lsn: no later table can overlap either
            LWLockRelease(LogIndexMemListLock);
            return head_node;
        }
        else
        {
            // clamp end_lsn to what this table covers and record it on the head
            end_lsn = Min(end_lsn, mem_tbl->meta.max_lsn);
            head_node->tag.lsn = end_lsn;
            // loop over the used page items (page_free_head is one past the last)
            for(int i = 0; i < (mem_tbl->meta.page_free_head - 1); i++)
            {
                item_page = &(mem_tbl->page_head[i]);
                if(item_page->next_seg == LOG_INDEX_TBL_INVALID_SEG || item_page->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
                {
                    // page item has no lsn segments attached
                    continue;
                }
                else
                {
                    // the page's lsn range spans its first and last segment entries
                    first_seg = &(mem_tbl->seg_item[item_page->next_seg - 1]);
                    last_seg = &(mem_tbl->seg_item[item_page->tail_seg - 1]);
                    page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
                    uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
                    page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
                    if(page_min_lsn > end_lsn || page_max_lsn < start_lsn)
                    {
                        // page's range does not overlap the request
                        continue;
                    }
                    else
                    {
                        InsertTagNodeByHead(head_node, item_page->tag);
                    }
                }
            }
            LWLockRelease(LogIndexMemListLock);
            return head_node;
        }
    }
    // the ring loop never scanned the active table; do it here
    if (tbl_index == log_index_mem_list->active_table_index){

        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        // only scan when the active table's range overlaps the request
        if(!(mem_tbl->meta.max_lsn < start_lsn || mem_tbl->meta.min_lsn > end_lsn))
        {
            end_lsn = Min(end_lsn, mem_tbl->meta.max_lsn);
            head_node->tag.lsn = end_lsn;
            // loop over the used page items (page_free_head is one past the last)
            for(int i = 0; i < (mem_tbl->meta.page_free_head - 1); i++)
            {
                item_page = &(mem_tbl->page_head[i]);
                if(item_page->next_seg == LOG_INDEX_TBL_INVALID_SEG || item_page->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
                {
                    continue;
                }
                else
                {
                    first_seg = &(mem_tbl->seg_item[item_page->next_seg - 1]);
                    last_seg = &(mem_tbl->seg_item[item_page->tail_seg - 1]);
                    page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
                    uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
                    page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
                    if(page_min_lsn > end_lsn || page_max_lsn < start_lsn)
                    {
                        continue;
                    }
                    else
                    {
                        InsertTagNodeByHead(head_node, item_page->tag);
                    }
                }
            }
        }
    }
    LWLockRelease(LogIndexMemListLock);

    return head_node;
}

/*
 * Return true when any mem table records an lsn in [start_lsn, end_lsn)
 * for the given page.
 *
 * Scans the ring from table_start_index through the active table under a
 * shared LogIndexMemListLock.  The decision per table uses only the page's
 * first and last recorded lsn (its segment endpoints), not every entry.
 */
bool CheckBufTagExistByLsnRange(const BufferTag *page, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
    uint64 tbl_index;
    LogIndexMemItemSeg *first_seg;
    LogIndexMemItemSeg *last_seg;
    XLogRecPtr page_min_lsn;
    XLogRecPtr page_max_lsn;
    uint32 hash_key;
    LogIndexMemItemHead *page_head;

    // Prevent metadata changes during discovery.
    LWLockAcquire(LogIndexMemListLock,LW_SHARED);
    tbl_index = log_index_mem_list->table_start_index;
loop:
    // NOTE(review): "goto loop" from inside the while body behaves like
    // "continue" because tbl_index is advanced at the top of the body.
    while(tbl_index != log_index_mem_list->active_table_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
        // this table's whole range is below the request: skip it
        if(mem_tbl->meta.max_lsn < start_lsn)
        {
            continue;
        }else if(mem_tbl->meta.min_lsn >= end_lsn)
        {
            // tables are ordered by lsn: no later table can match either
            goto outerloop;
        }
        else
        {
            // find page from current mem table
            hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
            if(mem_tbl->hash[hash_key] != LOG_INDEX_TBL_INVALID_SEG)
            {
                // walk the collision chain until the buffer tags match
                page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
                while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
                    if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
                    {
                        // cannot find page from current mem table
                        goto loop;
                    }
                    page_head = &(mem_tbl->page_head[page_head->next_item-1]);
                }
                // found the page, but it may have no lsn segments attached
                if(page_head->next_seg == LOG_INDEX_TBL_INVALID_SEG || page_head->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
                {
                    continue;
                }
                else
                {
                    // the page's range spans its first and last segment entries
                    first_seg = &(mem_tbl->seg_item[page_head->next_seg - 1]);
                    last_seg = &(mem_tbl->seg_item[page_head->tail_seg - 1]);
                    page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
                    uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
                    page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
                    // page's range does not overlap the request
                    if(page_min_lsn >= end_lsn || page_max_lsn < start_lsn)
                    {
                        continue;
                    }
                    else
                    {
                        // found a matching entry
                        LWLockRelease(LogIndexMemListLock);
                        return true;
                    }
                }
            }else
            {
                continue;
            }
        }
    }
    
    // the ring loop never scanned the active table; do it here
    if (tbl_index == log_index_mem_list->active_table_index){
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
        // active table's whole range is below the request: no match anywhere
        if(mem_tbl->meta.max_lsn < start_lsn)
        {
            goto outerloop;
        }else if(mem_tbl->meta.min_lsn >= end_lsn)
        {
            // active table starts at or above end_lsn: no match
            goto outerloop;
        }
        else
        {
            // find page from the active mem table
            hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
            if(mem_tbl->hash[hash_key] != LOG_INDEX_TBL_INVALID_SEG)
            {
                // walk the collision chain until the buffer tags match
                page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
                while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
                    if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
                    {
                        // cannot find page from the active mem table
                        goto outerloop;
                    }
                    page_head = &(mem_tbl->page_head[page_head->next_item-1]);
                }
                // found the page, but it may have no lsn segments attached
                if(page_head->next_seg == LOG_INDEX_TBL_INVALID_SEG || page_head->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
                {
                    goto outerloop;
                }
                else
                {
                    first_seg = &(mem_tbl->seg_item[page_head->next_seg - 1]);
                    last_seg = &(mem_tbl->seg_item[page_head->tail_seg - 1]);
                    page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
                    uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
                    page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
                    if(page_min_lsn >= end_lsn || page_max_lsn < start_lsn)
                    {
                        goto outerloop;
                    }
                    else
                    {
                        // found a matching entry
                        LWLockRelease(LogIndexMemListLock);
                        return true;
                    }
                }
            }else
            {
                goto outerloop;
            }
        }
    }
outerloop:
    LWLockRelease(LogIndexMemListLock);
    return false;
}

/* Release every node of a tag list, including the sentinel head. */
void FreeTagNode(TagNode *head)
{
    while (head != NULL)
    {
        TagNode *next = head->next;

        free(head);
        head = next;
    }
}

/*
 * Report logindex statistics: ring capacity, number of tables in use, the
 * start/active ring indices, and the total page-item count across tables.
 *
 * NOTE(review): only the three index/capacity reads are protected by the
 * LWLock; the per-table page counting below re-reads shared state unlocked,
 * so the totals are best-effort snapshots.
 */
void He3DBGetLogindexStats(uint64 *memtable_total, uint64 *memtable_used, uint64 *memtable_active_index,
                           uint64 *memtable_start_index, uint64 *page_total)
{
    LWLockAcquire(LogIndexMemListLock,LW_SHARED);
    *memtable_start_index = log_index_mem_list->table_start_index;
    *memtable_active_index = log_index_mem_list->active_table_index;
    *memtable_total = log_index_mem_list->table_cap;
    LWLockRelease(LogIndexMemListLock);
    // tables in use = ring distance from start to active, inclusive
    *memtable_used = ((*memtable_active_index - *memtable_start_index) + *memtable_total)%*memtable_total + 1;
    uint64 tbl_index = *memtable_start_index;
    uint64 page_num = 0;
    while(tbl_index != *memtable_active_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        tbl_index = (tbl_index + 1)%(*memtable_total);
        // NOTE(review): page_free_head is one past the last used 1-based slot,
        // which suggests used pages = page_free_head - 1; confirm why 2 is
        // subtracted here.
        page_num = page_num + mem_tbl->meta.page_free_head - 2;
    }
    if (tbl_index == *memtable_active_index)
    {
        LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
        // count the active table only once it has been used at least once
        if (pg_atomic_read_u32(&mem_tbl->meta.state) != LOG_INDEX_MEM_TBL_STATE_FREE){
            page_num = page_num + mem_tbl->meta.page_free_head - 2;
        }
    }
    *page_total = page_num;
}
