#include "access/pagehashqueue.h"
#include <stdlib.h>
#include "utils/palloc.h"
#include "utils/hsearch.h"
#include "storage/pmsignal.h"
#include "miscadmin.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "postmaster/interrupt.h"
#include "libpq/pqsignal.h"
#include "storage/ipc.h"
#include "utils/wait_event.h"
#include "c.h"
#include "utils/ps_status.h"
#include "storage/procsignal.h"
#include "utils/memutils.h"
#include "postmaster/fork_process.h"
#include "postmaster/postmaster.h"
#include "storage/proc.h"
#include "access/pushpage.h"
#include "storage/buf_internals.h"
#include "utils/guc.h"
#include "storage/he3db_logindex.h"
#include "utils/hfs.h"

/* forward declaration: wakes every parallel page-flush worker (defined below) */
static void WakeupFlushWork(void);
/* shared-memory copy of the redo start LSN; allocated in InitBufferPoolHashMap */
XLogRecPtr *g_redoStartLsn;
/* shared hash table mapping BufferTag -> per-page list of logindex LSN ranges */
static HTAB *PageLogindexHash = NULL;
/*
 * MaxNum -- round num up to the next power of two.
 *
 * Returns 1 for non-positive input, num itself when it is already a power
 * of two, and otherwise the smallest power of two greater than num.
 *
 * The bit-smearing runs in unsigned arithmetic: the original signed version
 * evaluated num + 1 on a smeared value of INT_MAX for inputs above 2^30,
 * which is signed-overflow undefined behavior.
 */
static int MaxNum(int num) {
	unsigned int n;

	if (num <= 0)
		return 1;
	n = (unsigned int) num;
	if ((n & (n - 1)) == 0)
		return num;			/* already a power of two */
	/* smear the highest set bit into every lower position, then add one */
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	return (int) (n + 1);
}
/* number of independent free-list buckets (NOTE(review): "BRUCKET" is a
 * pre-existing typo for "BUCKET"; kept since the name is part of the API) */
#define FREELISTBRUCKET 32
/* One spinlock-protected free list of lsn_list_t nodes. */
typedef struct FreelistManage {
	slock_t mutex;		/* protects curNum and head */
	int curNum;		/* number of nodes currently popped out ("outstanding") */
	lsn_list_t*head;	/* singly linked chain of free nodes */
}FreelistManage;
/* base of the shared-memory region holding all FREELISTBRUCKET buckets */
static FreelistManage* FreeList;

const int multiple = 1;
/* cached per-bucket free-list length; 0 until FREELISTSIZE is first used */
static Size freesize = 0;
/* Per-bucket free-list length, rounded up to a power of two.
 * NOTE(review): this macro caches its result in freesize via an embedded
 * assignment (a side effect), so it must not be evaluated before NBuffers
 * has its final value. */
#define FREELISTSIZE (freesize?freesize:(freesize = MaxNum((NBuffers + NUM_BUFFER_PARTITIONS) * multiple)))
/*
 * Shared-memory footprint of one bucket's node array:
 * FREELISTSIZE slots of lsn_list_t.
 */
static Size
LogindexFreeListShmemSize(void)
{
	return mul_size(FREELISTSIZE, sizeof(lsn_list_t));
}

/*
 * Shared-memory footprint of one bucket's FreelistManage header.
 */
static Size
FreeListManageShmemSize(void)
{
	return sizeof(FreelistManage);
}

/*
 * Total shared memory for all free-list buckets: each bucket is a
 * FreelistManage header followed by its node array.
 */
static Size LogindexFreeListAllShmemSize(void) {
	Size		per_bucket;

	per_bucket = add_size(LogindexFreeListShmemSize(),
						  FreeListManageShmemSize());
	return mul_size(FREELISTBRUCKET, per_bucket);
}

/* Estimated shared-memory size of the PageLogindexHash table itself. */
static Size LogindexHashShmemSize(void) {
	return hash_estimate_size(NBuffers + NUM_BUFFER_PARTITIONS,sizeof(page_head_list_t));
}

/*
 * Total shared memory needed by the logindex subsystem: every free-list
 * bucket plus the page hash table.  (The original declared an unused
 * local variable `size`.)
 */
Size LogindexHashAllShmemSize(void) {
	return LogindexFreeListAllShmemSize() + LogindexHashShmemSize();
}


/*
 * Allocate and initialize the shared-memory free lists.
 *
 * Layout: FREELISTBRUCKET consecutive segments; each segment is a
 * FreelistManage header immediately followed by FREELISTSIZE lsn_list_t
 * nodes, all chained into that bucket's free list.  Only the first
 * process to attach (found == false) performs the carving/linking.
 */
static void
LogindexFreeListShmemInit(void)
{
	Size		size = LogindexFreeListAllShmemSize();
	bool		found;
	FreeList = (FreelistManage*)
		ShmemInitStruct("LogindexSpace",
						size,
						&found);
	if (!found)
	{
		for (Size i = 0; i < FREELISTBRUCKET;i++) {
			/* start of bucket i's segment within the region */
			FreelistManage* FreePos = (FreelistManage*)(((char*)FreeList)+ i * (LogindexFreeListShmemSize()+FreeListManageShmemSize()));
			/* its node array sits right after the header */
			lsn_list_t* begin = (lsn_list_t*)(((char*)FreePos) + FreeListManageShmemSize());
			FreePos->head = begin;
			FreePos->curNum = 0;	/* no nodes handed out yet */
			SpinLockInit(&FreePos->mutex);
			int j = 0;
			/* chain node j -> j+1; the final node terminates the list */
			for (;j < FREELISTSIZE-1; j++) {
					begin[j].next = &begin[j+1];				 
			}
			begin[j].next = NULL;
		}		
	}
	
}

/* Map a hash code to its free-list bucket within the shared region. */
static FreelistManage* getFreeList(uint32 hashcode) {
	uint32 idx = hashcode % FREELISTBRUCKET;
	return (FreelistManage*)(((char*)FreeList) + ((Size)idx) * (LogindexFreeListShmemSize()+FreeListManageShmemSize()));
}

/*
 * Pop one free lsn_list_t node from the bucket selected by hashcode.
 *
 * Returns 1 and stores the node in *data on success; returns 0 when the
 * bucket is exhausted (curNum == FREELISTSIZE means every node is out,
 * and head would be NULL).
 */
static int popLsnListElem(uint32 hashcode,lsn_list_t**data) {
	FreelistManage* curFreelist = getFreeList(hashcode);
	SpinLockAcquire(&curFreelist->mutex);
	if (curFreelist->curNum == FREELISTSIZE) {
		SpinLockRelease(&curFreelist->mutex);
		return 0;
	}
	curFreelist->curNum++;		/* one more node outstanding */
	*data = curFreelist->head;
	curFreelist->head = curFreelist->head->next;
	SpinLockRelease(&curFreelist->mutex);
	return 1;
}

/*
 * Return a chain of num nodes (head .. tail, linked via ->next) to the
 * bucket's free list.  Returns 1 on success, 0 when the bucket records no
 * outstanding nodes.
 *
 * NOTE(review): on the curNum == 0 path the nodes are NOT re-linked and
 * every caller ignores the return value, so the chain would be leaked
 * permanently.  Since each pushed node was previously popped from the same
 * bucket, curNum >= num should hold here -- confirm before relying on it.
 */
static int pushLsnListElemArr(uint32 hashcode,lsn_list_t*head,lsn_list_t*tail,int num) {
	FreelistManage* curFreelist = getFreeList(hashcode);
	SpinLockAcquire(&curFreelist->mutex);
	if (curFreelist->curNum == 0) {
		SpinLockRelease(&curFreelist->mutex);
    	return 0;
	}
	curFreelist->curNum -= num;	/* num fewer nodes outstanding */
	/* splice the returned chain in front of the current free list */
	tail->next = curFreelist->head;
	curFreelist->head = head;
	SpinLockRelease(&curFreelist->mutex);
	return 1;
}

/*
 * PageLogindexInsert -- append an (lsn, endlsn) node to the page's LSN list.
 *
 * tagPtr/hashcode identify the page's entry in PageLogindexHash; callers in
 * this file hold the matching partition lock exclusively around the call.
 *
 * Returns the page's list header, or NULL when the free-list bucket for
 * this hash code is exhausted.  An insert whose lsn does not exceed the
 * current tail's lsn is ignored (the list is kept in ascending LSN order).
 */
page_head_list_t*
PageLogindexInsert(BufferTag *tagPtr, uint32 hashcode, XLogRecPtr lsn,XLogRecPtr endlsn)
{
	page_head_list_t  *result;
	bool		found;
	lsn_list_t *data = NULL;

	/* grab a free node up front; bail out if the bucket is empty */
	if (popLsnListElem(hashcode, &data) == 0)
		return NULL;

	data->lsn = lsn;
	data->endlsn = endlsn;
	data->next = NULL;
	result = (page_head_list_t *)
		hash_search_with_hash_value(PageLogindexHash,
									(void *) tagPtr,
									hashcode,
									HASH_ENTER,
									&found);

	if (found && result->tail->lsn >= lsn) {
		/*
		 * Not appending: the existing tail already covers this LSN.  Return
		 * the node to the free list -- the original code leaked it here.
		 */
		pushLsnListElemArr(hashcode, data, data, 1);
		return result;
	}

	if (found) {
		/* existing page: append after the current tail */
		result->count++;
		result->tail->next = data;
		result->tail = data;
	} else {
		/* fresh entry: the node is both head and tail */
		result->count = 1;
		result->tail = data;
		result->head = data;
	}
	return result;
}

/*
 * PageLogindexDelete -- drop all list entries for this page whose lsn is
 * strictly below the cutoff, returning their nodes to the free list.
 * When the list drains completely the page's hash entry is removed too.
 * Callers in this file hold the partition lock exclusively around the call.
 */
void
PageLogindexDelete(BufferTag *tagPtr, uint32 hashcode,XLogRecPtr lsn)
{
	page_head_list_t *result;

	result = (page_head_list_t *)
		hash_search_with_hash_value(PageLogindexHash,
									(void *) tagPtr,
									hashcode,
									HASH_FIND,
									NULL);
	if (!result) {
		return;
	} else {
		lsn_list_t* tail,*next;	/* tail is assigned whenever delNum > 0 */
		next = result->head;
		int delNum = 0;
		/* walk the prefix of entries older than the cutoff */
		while(next != NULL && next->lsn < lsn) {
			delNum++;
			tail = next;
			next = tail->next;
			result->count--;
		}
		if (delNum != 0) {
			/* NOTE(review): return value ignored; see pushLsnListElemArr */
			pushLsnListElemArr(hashcode,result->head,tail,delNum);
		}
		if (next == NULL) {
			/* list fully drained: remove the page's hash entry */
			result = (page_head_list_t *)
			hash_search_with_hash_value(PageLogindexHash,
										(void *) tagPtr,
										hashcode,
										HASH_REMOVE,
										NULL);

			if (!result)				/* shouldn't happen */
				elog(ERROR, "PageLogindexHash hash table corrupted");
		} else {
			/* first surviving entry becomes the new head */
			result->head = next;
		}
	}
}

/* Compute the hash code used by all PageLogindexHash operations on tagPtr. */
uint32
PageLogindexHashCode(BufferTag *tagPtr)
{
	return get_hash_value(PageLogindexHash, (void *) tagPtr);
}

/*
 * Create the partitioned shared hash table for the page logindex and then
 * initialize the free lists that feed it.  Only the HASHCTL fields covered
 * by the flags passed to ShmemInitHash are filled in.
 */
void
InitLogindexHashBrucket(void)
{
	HASHCTL		info;
	long		init_table_size,
				max_table_size;
	info.keysize = sizeof(BufferTag);
	info.entrysize = sizeof(page_head_list_t);
	/* sized to match the buffer pool plus one slack entry per partition */
	init_table_size = NBuffers + NUM_BUFFER_PARTITIONS;
	max_table_size = NBuffers + NUM_BUFFER_PARTITIONS;
	info.num_partitions = NUM_BUFFER_PARTITIONS;
	PageLogindexHash = ShmemInitHash("PageLogindexHash",
									  init_table_size,
									  max_table_size,
									  &info,
									  HASH_ELEM | HASH_BLOBS| HASH_PARTITION);
	LogindexFreeListShmemInit();
}

/*
 * Look up the per-page LSN list for a buffer tag.
 * Returns NULL when the page has no logindex entry.  Callers in this file
 * take the logindex partition lock before calling.
 */
page_head_list_t *
PageLogindexLookup(BufferTag *tagPtr,uint32_t hashcode)
{
	return (page_head_list_t *)
		hash_search_with_hash_value(PageLogindexHash,
									(void *) tagPtr,
									hashcode,
									HASH_FIND,
									NULL);
}

/*
 * Remove all logindex entries for one page below cleanLsn, taking the
 * page's partition lock exclusively around the delete.
 */
void cleanOneList(BufferTag *tagPtr,XLogRecPtr cleanLsn) {
	uint32		code = PageLogindexHashCode(tagPtr);
	LWLock	   *lock = LOGIndexPartitionLock(code);

	LWLockAcquire(lock, LW_EXCLUSIVE);
	PageLogindexDelete(tagPtr, code, cleanLsn);
	LWLockRelease(lock);
}

/*
 * Scan the whole logindex hash and purge every page's entries below
 * cleanLsn, locking each partition around its delete.
 *
 * NOTE(review): currently unreferenced -- its only caller in this file is
 * commented out inside CleanLogIndexMain, so compilers may warn about an
 * unused static function.  The sequential scan itself runs without any
 * table-wide lock; hash_seq_search tolerates removal of the just-returned
 * entry, which is the only entry PageLogindexDelete can remove here.
 */
static void threadCleanLogIndex(XLogRecPtr cleanLsn)
{
	HASH_SEQ_STATUS scan_status;
	page_head_list_t  *item;
	hash_seq_init(&scan_status, PageLogindexHash);
	while ((item = (page_head_list_t *) hash_seq_search(&scan_status)) != NULL)
	{
		uint32 hash = PageLogindexHashCode(&item->tag);
		LWLock	   *partition_lock = LOGIndexPartitionLock(hash);
		LWLockAcquire(partition_lock, LW_EXCLUSIVE);
		PageLogindexDelete(&item->tag,hash,cleanLsn);
		LWLockRelease(partition_lock);
	}
}

/*
 * SIGTERM handler for the clean-logindex process: request shutdown and
 * wake the main loop.  Only async-signal-safe work is done; errno is
 * preserved because SetLatch may clobber it.
 */
static void
LogIndexProcShutdownHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;
	
	ShutdownRequestPending = true;
	
	SetLatch(MyLatch);

	errno = save_errno;
}

/*
 * CleanLogIndexMain
 *
 * Main entry of the clean-logindex auxiliary process.  Loops forever:
 * determines how far WAL has been pushed/replayed, feeds changed page tags
 * to the parallel flush workers on a hot standby, prunes the logindex up
 * to the consistent LSN, and sleeps on its latch between rounds.  Exits
 * via proc_exit() on SIGTERM (ShutdownRequestPending).
 */
void
CleanLogIndexMain(int argc, char *argv[])
{
	sigjmp_buf	local_sigjmp_buf;

	MyBackendType = B_CLEAN_LOGINDEX;
	MemoryContext CleanLogIndex_context;
	init_ps_display(NULL);

	SetProcessingMode(InitProcessing);

	/*
	 * Set up signal handlers.  We operate on databases much like a regular
	 * backend, so we use the same signal handling.  See equivalent code in
	 * tcop/postgres.c.
	 */
	pqsignal(SIGHUP, SIG_IGN);

	/*
	 * SIGINT is used to signal canceling the current table's vacuum; SIGTERM
	 * means abort and exit cleanly, and SIGQUIT means abandon ship.
	 */
	pqsignal(SIGINT, SIG_IGN);
	pqsignal(SIGTERM, LogIndexProcShutdownHandler);
	/* SIGQUIT handler was already set up by InitPostmasterChild */

	pqsignal(SIGPIPE, SIG_IGN);
	pqsignal(SIGUSR1, procsignal_sigusr1_handler);
	pqsignal(SIGUSR2, SIG_IGN);
	pqsignal(SIGCHLD, SIG_DFL);
	/*
	 * Create a memory context that we will do all our work in.  We do this so
	 * that we can reset the context during error recovery and thereby avoid
	 * possible memory leaks.  Formerly this code just ran in
	 * TopMemoryContext, but resetting that would be a really bad idea.
	 */
	CleanLogIndex_context = AllocSetContextCreate(TopMemoryContext,
												 "CleanLogIndexFlush",
												 ALLOCSET_DEFAULT_SIZES);
	MemoryContextSwitchTo(CleanLogIndex_context);

	/*
	 * If an exception is encountered, processing resumes here.
	 *
	 * Unlike most auxiliary processes, we don't attempt to continue
	 * processing after an error; we just clean up and exit.  The autovac
	 * launcher is responsible for spawning another worker later.
	 *
	 * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
	 * (to wit, BlockSig) will be restored when longjmp'ing to here.  Thus,
	 * signals other than SIGQUIT will be blocked until we exit.  It might
	 * seem that this policy makes the HOLD_INTERRUPTS() call redundant, but
	 * it is not since InterruptPending might be set already.
	 */
	if (sigsetjmp(local_sigjmp_buf, 1) != 0)
	{
		/* since not using PG_TRY, must reset error stack by hand */
		error_context_stack = NULL;

		/* Prevents interrupts while cleaning up */
		HOLD_INTERRUPTS();

		/* Report the error to the server log */
		EmitErrorReport();

		/*
		 * We can now go away.  Note that because we called InitProcess, a
		 * callback was registered to do ProcKill, which will clean up
		 * necessary state.
		 */
		proc_exit(0);
	}

	/* We can now handle ereport(ERROR) */
	PG_exception_stack = &local_sigjmp_buf;

	PG_SETMASK(&UnBlockSig);
	char strname[128];
	char *prefix = "clean logindex ";
	/*
	 * NOTE(review): prefix is passed as the FORMAT argument and
	 * strlen(prefix) as a (ignored) vararg.  It works only because prefix
	 * contains no '%'; should be pg_snprintf(strname, sizeof(strname),
	 * "%s", prefix).  n ends up as the length of the copied prefix.
	 */
	int n = pg_snprintf(strname,sizeof(strname),prefix,strlen(prefix));
	/*
	 * Loop forever
	 */
	SetProcessingMode(NormalProcessing);
	XLogRecPtr pushStandbyPoint = 0;
	XLogRecPtr pushStandbyPrePoint = 0;
	for (;;)
	{
		/* Clear any already-pending wakeups */
		ResetLatch(MyLatch);

		if (ShutdownRequestPending)
			proc_exit(0);
		int hasData = 0;
		
		pushStandbyPrePoint = pushStandbyPoint;
		if (push_standby == true || EnableHotStandby == false || *isPromoteIsTriggered) {
			/* push-standby / primary-like case: just track the push-to-disk LSN */
			pushStandbyPoint = GetXLogPushToDisk();
			if (pushStandbyPrePoint < pushStandbyPoint) {
				hasData++;
			}
		} else {
			/* hot-standby case: replay pages changed since the last push point */
			if (LastPushPoint == 0) {
				PrevPushPoint = *g_redoStartLsn;
			}
			if (PrevPushPoint != 0) {
				XLogRecPtr lastReplPtr = GetXLogReplayRecPtr(NULL);
//                elog(LOG, "deal page from %x to %x", PrevPushPoint, lastReplPtr);
				TagNode *tagList = GetBufTagByLsnRange(PrevPushPoint,lastReplPtr-1);
				if (tagList->next != NULL && tagList->tag.lsn >= PrevPushPoint) {
					LastPushPoint = tagList->tag.lsn;
					TagNode *next = tagList->next;
					int pageNum = 0;
					/* hand every changed page tag to the flush workers */
					while(next!=NULL) {
						// elog(LOG,"add tag rel %d, fork %d, blk %d",
						// 		next->tag.tag.rnode.relNode, next->tag.tag.forkNum, next->tag.tag.blockNum);
						addFileKey(&next->tag.tag);
						next = next->next;
						pageNum++;
					}
					FreeTagNode(tagList);
					/* blocks until all pageNum pages have been replayed/flushed */
					pushSlaveReplayQueue(pageNum);
					hasData++;
					PrevPushPoint = LastPushPoint+1;
					SetXLogPushToDisk(PrevPushPoint);
					pushStandbyPoint = GetConsistLsn(PrevPushPoint);
				} else {
					/* no tags in range: advance the push point to the replay LSN */
					LastPushPoint = PrevPushPoint  = lastReplPtr;
					if (pushStandbyPrePoint < PrevPushPoint) {
						SetXLogPushToDisk(PrevPushPoint);
						pushStandbyPoint = GetConsistLsn(PrevPushPoint+1);
					} 
				}
			}
		}
		int pos;
		if (pushStandbyPrePoint < pushStandbyPoint) {
			/* show the active LSN range in the ps title */
			pos = pg_snprintf(strname+n,sizeof(strname)-n,"lsn from %X/%X to %X/%X tasking",LSN_FORMAT_ARGS(pushStandbyPrePoint),LSN_FORMAT_ARGS(pushStandbyPoint));
			strname[n+pos] = '\0';
			set_ps_display(strname);
		}
		if (pushStandbyPrePoint < pushStandbyPoint) {
			elog(LOG,"start threadCleanLogIndex lsn from %X/%X to %X/%X",LSN_FORMAT_ARGS(pushStandbyPrePoint),LSN_FORMAT_ARGS(pushStandbyPoint));
			CleanLogIndexByPage(pushStandbyPoint);
			//threadCleanLogIndex(LastPushPoint);
			elog(LOG,"end threadCleanLogIndex lsn from %X/%X to %X/%X",LSN_FORMAT_ARGS(pushStandbyPrePoint),LSN_FORMAT_ARGS(pushStandbyPoint));
		}
		if (hasData != 0) {
			/* made progress this round -- poll again immediately */
			continue;
		}
		pos = pg_snprintf(strname+n,sizeof(strname)-n,"to lsn: %X/%X idle",LSN_FORMAT_ARGS(pushStandbyPoint));
		strname[n+pos] = '\0';
		set_ps_display(strname);
		/* idle: nap up to 50ms or until the latch is set */
		(void) WaitLatch(MyLatch,
		 	WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
		 	50L /* convert to ms */ ,
		 	WAIT_EVENT_CLEAN_LOGINDEX_MAIN);
	}
}

/* Ask the postmaster to launch the clean-logindex worker. */
void SignalStartCleanLogIndexWork(void) {
	SendPostmasterSignal(PMSIGNAL_CLEAN_LOGINDEX_WORKER);
}

/* One entry of PageCountHash: a page tag plus its reference count. */
typedef struct PageValue {
	BufferTag tag;		/* page identity (hash key) */
	uint16_t num;		/* times the page appeared in the current batch */
} PageValue;

/* shared hash counting page references for the current replay batch */
static HTAB *PageCountHash = NULL;
/* this worker's slot in pageFlushWakeupLatch[]; assigned at startup */
static uint32_t curLatchPos = 0;

/*
 * Shared state coordinating the producer (clean-logindex process) and the
 * PARALLEL_NUM page-flush workers.  mutex protects gpushpos and ready;
 * gpos is filled by the single producer before ready is set.
 */
typedef struct {
	slock_t mutex;					/* protects gpushpos and ready */
	volatile uint32 gpushpos;		/* next gtag[] index a worker will take */
	volatile bool ready;			/* true while workers may consume gtag[] */
	volatile uint32 gpos;			/* number of valid entries in gtag[] */
	pg_atomic_uint32 latchPos;		/* next worker latch slot to hand out */
	pg_atomic_uint32 taskNum;		/* pages completed in the current batch */
	uint32 modifyNum;				/* total addFileKey calls this batch */
	Latch		pageFlushWakeupLatch[PARALLEL_NUM];	/* one latch per worker */
	PageValue*gtag[G_QUEUE_LEN];	/* work queue of page entries */
}PageHashQueueShmemStruct;
/* singleton pointer into shared memory (set in PageHashQueueShmemInit) */
static PageHashQueueShmemStruct *PageHashQueueShmem;

/*
 * Publish the queued page batch to the flush workers and wait for it to
 * complete, then reset the batch state.
 *
 * pageNum is the number of pages the caller enqueued via addFileKey; the
 * wait is a 1ms polling loop on the workers' completion counter.
 */
void pushSlaveReplayQueue(int pageNum) {
	
	if (PageHashQueueShmem->gpos != 0 && PageHashQueueShmem->ready == false) {
		/* flip ready under the mutex, then kick every worker awake */
		SpinLockAcquire(&PageHashQueueShmem->mutex);
		PageHashQueueShmem->ready = true;
		SpinLockRelease(&PageHashQueueShmem->mutex);
		WakeupFlushWork();
	} 
	
	/* busy-wait (1ms naps) until the workers have processed all pages */
	while(pageNum > CompletedTaskNum()) {
		pg_usleep(1000L);
	}
	cleanMap();
}

/* Return the shared latch belonging to flush-worker slot pos. */
Latch* GetCurrentLatch(uint32_t pos) {
	return &PageHashQueueShmem->pageFlushWakeupLatch[pos];
}

/* Wake the single flush worker in slot pos. */
void WakeupOneFlushWork(uint32_t pos) {
	SetLatch(&PageHashQueueShmem->pageFlushWakeupLatch[pos]);
}

/* Set every flush worker's latch so all PARALLEL_NUM workers wake up. */
static void WakeupFlushWork(void)
{
	int			slot;

	for (slot = 0; slot < PARALLEL_NUM; slot++)
		SetLatch(&PageHashQueueShmem->pageFlushWakeupLatch[slot]);
}

/* Atomically claim the next worker latch slot; returns the claimed index. */
uint32_t AssignLatchPos(void) {
	return pg_atomic_fetch_add_u32(&PageHashQueueShmem->latchPos,1);
}

/* Clear any pending wakeup on flush-worker slot pos's latch. */
void ResetFlushLatch(uint32_t pos) {
	ResetLatch(&PageHashQueueShmem->pageFlushWakeupLatch[pos]);
}

/* Take ownership of slot pos's shared latch for the calling process. */
void OwnFlushLatch(uint32_t pos) {
	OwnLatch(&PageHashQueueShmem->pageFlushWakeupLatch[pos]);
}

/* Shared-memory size of the flush queue header plus its gtag[] pointers. */
Size
PageHashQueueShmemSize(void)
{
	Size		size;

	/*
	 * The gtag[] array holds G_QUEUE_LEN PageValue pointers.  (An earlier
	 * version of this comment said NBuffers; the code uses G_QUEUE_LEN.)
	 * This may prove too large or small ...
	 */
	size = offsetof(PageHashQueueShmemStruct, gtag);
	size = add_size(size, mul_size(G_QUEUE_LEN, sizeof(PageValue*)));

	return size;
}

/*
 * Allocate and (first-attacher only) initialize the shared flush-queue
 * state: spinlock, counters, atomics, and one shared latch per worker.
 */
void
PageHashQueueShmemInit(void)
{
	Size		size = PageHashQueueShmemSize();
	bool		found;

	PageHashQueueShmem = (PageHashQueueShmemStruct *)
		ShmemInitStruct("PageHashQueue",
						size,
						&found);

	if (!found)
	{
		SpinLockInit(&PageHashQueueShmem->mutex);
		SpinLockAcquire(&PageHashQueueShmem->mutex);
		PageHashQueueShmem->ready = false;
		PageHashQueueShmem->gpushpos = 0;
		SpinLockRelease(&PageHashQueueShmem->mutex);
		PageHashQueueShmem->gpos = 0;
		pg_atomic_init_u32(&PageHashQueueShmem->taskNum,0);
		pg_atomic_init_u32(&PageHashQueueShmem->latchPos, 0);
		PageHashQueueShmem->modifyNum = 0;
		for (int i = 0;i<PARALLEL_NUM;i++) {
			InitSharedLatch(&PageHashQueueShmem->pageFlushWakeupLatch[i]);
		}
	}
}

/* Shared-memory size of the redo start LSN (a single XLogRecPtr). */
static Size RedoStartPointSize(void) {
	return sizeof(XLogRecPtr);
}

/* Shared-memory size for the redo start LSN plus the page-count hash. */
Size PageHashMapSize(void) {
	return RedoStartPointSize() + hash_estimate_size(G_QUEUE_LEN,sizeof(PageValue));
}

/*
 * Create the shared page-count hash (BufferTag -> PageValue) and the
 * shared redo-start-LSN slot, zeroing the latter on first attach.
 */
void
InitBufferPoolHashMap(void)
{
	HASHCTL		info;
	long		init_table_size,
				max_table_size;
	info.keysize = sizeof(BufferTag);
	info.entrysize = sizeof(PageValue);
	init_table_size = G_QUEUE_LEN;
	max_table_size = G_QUEUE_LEN;
	PageCountHash = ShmemInitHash("PageHashCount",
									  init_table_size,
									  max_table_size,
									  &info,
									  HASH_ELEM | HASH_BLOBS);
	bool found;
	g_redoStartLsn = (XLogRecPtr*)ShmemInitStruct("redoStartPoint",
						RedoStartPointSize(),
						&found);
	if (!found)
	{
		/* first attacher: start with an invalid (zero) redo LSN */
		memset(g_redoStartLsn,0,RedoStartPointSize());
	}
}

/*
 * Record one reference to a page in the current batch.  A page seen for
 * the first time is also appended to the gtag[] work queue.  Returns the
 * running count of addFileKey calls this batch.
 *
 * NOTE(review): gpos++ and modifyNum++ are plain unsynchronized updates --
 * this appears to assume a single producer (the clean-logindex process);
 * confirm no concurrent callers exist.
 */
uint32_t  addFileKey(BufferTag*onePage) {
	PageValue *result;
	bool		found;
	uint32_t newHash = get_hash_value(PageCountHash,onePage);
	result = (PageValue*)
		hash_search_with_hash_value(PageCountHash,
									(void *) onePage,
									newHash,
									HASH_ENTER,
									&found);
	if (found == false) {
		/* first sighting: queue the page for the flush workers */
		result->num = 0;
		uint32_t gpos = PageHashQueueShmem->gpos++;
		PageHashQueueShmem->gtag[gpos] = result;
	}
	result->num++;
	PageHashQueueShmem->modifyNum++;
	return PageHashQueueShmem->modifyNum;
}

/*
 * Reset the batch: empty the page-count hash and zero all queue state so
 * the next batch starts clean.  Called once the workers have finished.
 */
void cleanMap(void) {
	HASH_SEQ_STATUS scan_status;
	PageValue  *item;

	/* drain every entry; removing the just-returned entry is scan-safe */
	hash_seq_init(&scan_status, PageCountHash);
	while ((item = (PageValue *) hash_seq_search(&scan_status)) != NULL)
	{
	
		if (hash_search(PageCountHash, (const void *) &item->tag,
						HASH_REMOVE, NULL) == NULL)
			elog(ERROR, "hash table corrupted");
	}
	SpinLockAcquire(&PageHashQueueShmem->mutex);
	PageHashQueueShmem->ready = false;
	PageHashQueueShmem->gpushpos = 0;
	SpinLockRelease(&PageHashQueueShmem->mutex);
	PageHashQueueShmem->gpos = 0;
	pg_atomic_init_u32(&PageHashQueueShmem->taskNum,0);
	PageHashQueueShmem->modifyNum = 0;
}

/* Number of entries in the page-count hash.  (hash_get_num_entries returns
 * long; the value is truncated to uint32_t, fine for G_QUEUE_LEN-bounded
 * tables.) */
uint32_t hashMapSize(void) {
	return hash_get_num_entries(PageCountHash);
}

static int cmp(const void* a,const void* b) {
	return (*((const PageValue**)b))->num - (*((const PageValue**)a))->num; 
}

/*
 * Sort the queued pages by descending reference count, mark the batch
 * ready, and wake the flush workers.  No-op if the queue is empty or a
 * batch is already in flight.
 *
 * NOTE(review): qsort runs on the shared gtag[] array before ready is set,
 * relying on workers not touching gtag[] until ready == true -- confirm.
 */
void SortPageQueue(void) {
	if (PageHashQueueShmem->gpos != 0 && PageHashQueueShmem->ready == false) {
		qsort(PageHashQueueShmem->gtag,PageHashQueueShmem->gpos,sizeof(PageValue*),cmp);
		SpinLockAcquire(&PageHashQueueShmem->mutex);
		PageHashQueueShmem->ready = true;
		SpinLockRelease(&PageHashQueueShmem->mutex);
		WakeupFlushWork();
		return;
	}
}

/*
 * Claim the next queued page for this worker.  Returns its BufferTag, or
 * NULL when the batch is not ready or the queue is exhausted.  The
 * unlocked ready pre-check is re-verified under the mutex before a slot
 * is claimed.
 */
BufferTag* QueuePushPage(void) {
	uint32_t gpushpos;
	bool hasData = false;
	if (PageHashQueueShmem->ready == true) {
		SpinLockAcquire(&PageHashQueueShmem->mutex);
		if (PageHashQueueShmem->ready == true && PageHashQueueShmem->gpushpos < PageHashQueueShmem->gpos) {
			hasData = true;
			gpushpos = PageHashQueueShmem->gpushpos++;		
		}
		SpinLockRelease(&PageHashQueueShmem->mutex);
	}
	if (hasData == false) {
		return NULL;
	} else {
		return &(PageHashQueueShmem->gtag[gpushpos]->tag);
	}
}

/*
 * Read (replay) one page into the buffer pool and, on a push standby,
 * flush it to disk if dirty.  Always bumps the shared completed-task
 * counter on the normal path.
 */
void ProcFlushBufferToDisk(BufferTag*tag) {
	Buffer buffer = XLogReadBufferExtended(tag->rnode, tag->forkNum, tag->blockNum,
										   RBM_NORMAL);
	if (!BufferIsValid(buffer))
	{	
		elog(PANIC,"ProcFlushBufferToDisk is invalid rel %d,flk %d,blk %d",tag->rnode.relNode,tag->forkNum,tag->blockNum);
		/* NOTE(review): unreachable -- elog(PANIC) does not return */
		pg_atomic_fetch_add_u32(&PageHashQueueShmem->taskNum,1);
		return;
	}
	
	// elog(LOG, "replay rel %d, fork %d, blkno %d, pagelsn %X/%X", tag->rnode.relNode,
	// 		tag->forkNum,tag->blockNum, LSN_FORMAT_ARGS(PageGetLSN(BufferGetPage(buffer))));
	//slave no need to flush disk
	if (push_standby == true) {
		BufferDesc *buf;
		buf = GetBufferDescriptor(buffer-1);
		uint32 buf_state = pg_atomic_read_u32(&buf->state);	
		if (buf_state  & BM_DIRTY) {
			/* flush under a shared content lock, then schedule writeback */
			LWLockAcquire(BufferDescriptorGetContentLock(buf),
											 	LW_SHARED);
			FlushOneBuffer(buffer);
			LWLockRelease(BufferDescriptorGetContentLock(buf));
			ScheduleBufferTagForWriteback(&BackendWritebackContext,
												  &buf->tag);
		}
	}
	ReleaseBuffer(buffer);
	/* signal one more completed page to the batch coordinator */
	pg_atomic_fetch_add_u32(&PageHashQueueShmem->taskNum,1);
}

/* Number of pages the flush workers have completed in the current batch. */
uint32_t CompletedTaskNum(void) {
	return pg_atomic_read_u32(&PageHashQueueShmem->taskNum);
}


/*
 * SIGTERM handler for a flush worker: request shutdown and wake this
 * worker's own latch (curLatchPos was assigned at startup).  errno is
 * preserved around the latch call.
 */
static void
ParallelFlushProcShutdownHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;
	
	ShutdownRequestPending = true;
	
	WakeupOneFlushWork(curLatchPos);

	errno = save_errno;
}

/*
 * PageFlushWorkerMain
 *
 * Main entry of one parallel page-flush worker.  After standard auxiliary
 * process setup it claims a latch slot, then loops: drain the shared page
 * queue via QueuePushPage/ProcFlushBufferToDisk, update the ps title, and
 * sleep on its latch (1s timeout) until more work or shutdown arrives.
 */
NON_EXEC_STATIC void
PageFlushWorkerMain(int argc, char *argv[])
{
	sigjmp_buf	local_sigjmp_buf;
	InRecovery = true;
	MyBackendType = B_PARALLEL_FLUSH;
	MemoryContext parallelflush_context;
	init_ps_display(NULL);

	SetProcessingMode(InitProcessing);

	/*
	 * Set up signal handlers.  We operate on databases much like a regular
	 * backend, so we use the same signal handling.  See equivalent code in
	 * tcop/postgres.c.
	 */
	pqsignal(SIGHUP, SIG_IGN);

	/*
	 * SIGINT is used to signal canceling the current table's vacuum; SIGTERM
	 * means abort and exit cleanly, and SIGQUIT means abandon ship.
	 */
	pqsignal(SIGINT, SIG_IGN);
	pqsignal(SIGTERM, ParallelFlushProcShutdownHandler);
	/* SIGQUIT handler was already set up by InitPostmasterChild */

	pqsignal(SIGPIPE, SIG_IGN);
	pqsignal(SIGUSR1, procsignal_sigusr1_handler);
	pqsignal(SIGUSR2, SIG_IGN);
	pqsignal(SIGCHLD, SIG_DFL);
	/*
	 * Create a memory context that we will do all our work in.  We do this so
	 * that we can reset the context during error recovery and thereby avoid
	 * possible memory leaks.  Formerly this code just ran in
	 * TopMemoryContext, but resetting that would be a really bad idea.
	 */
	parallelflush_context = AllocSetContextCreate(TopMemoryContext,
												 "ParallelFlush",
												 ALLOCSET_DEFAULT_SIZES);
	MemoryContextSwitchTo(parallelflush_context);

	/* Early initialization */
	BaseInit();

	/*
	 * Create a per-backend PGPROC struct in shared memory, except in the
	 * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
	 * this before we can use LWLocks (and in the EXEC_BACKEND case we already
	 * had to do some stuff with LWLocks).
	 */
#ifndef EXEC_BACKEND
	InitProcess();
#endif

	/*
	 * If an exception is encountered, processing resumes here.
	 *
	 * Unlike most auxiliary processes, we don't attempt to continue
	 * processing after an error; we just clean up and exit.  The autovac
	 * launcher is responsible for spawning another worker later.
	 *
	 * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
	 * (to wit, BlockSig) will be restored when longjmp'ing to here.  Thus,
	 * signals other than SIGQUIT will be blocked until we exit.  It might
	 * seem that this policy makes the HOLD_INTERRUPTS() call redundant, but
	 * it is not since InterruptPending might be set already.
	 */
	if (sigsetjmp(local_sigjmp_buf, 1) != 0)
	{
		/* since not using PG_TRY, must reset error stack by hand */
		error_context_stack = NULL;

		/* Prevents interrupts while cleaning up */
		HOLD_INTERRUPTS();

		/* Report the error to the server log */
		EmitErrorReport();

		/*
		 * We can now go away.  Note that because we called InitProcess, a
		 * callback was registered to do ProcKill, which will clean up
		 * necessary state.
		 */
		proc_exit(0);
	}

	/* We can now handle ereport(ERROR) */
	PG_exception_stack = &local_sigjmp_buf;

	PG_SETMASK(&UnBlockSig);

	/* claim and own this worker's wakeup latch slot */
	curLatchPos = AssignLatchPos();
	
	OwnFlushLatch(curLatchPos);
	char strname[64];
	char *prefix = "parallel flush workid: ";
	/*
	 * NOTE(review): prefix is passed as the FORMAT string with
	 * strlen(prefix) as an ignored vararg; works only because prefix has
	 * no '%'.  Should be pg_snprintf(strname, sizeof(strname), "%s", prefix).
	 */
	int n = pg_snprintf(strname,sizeof(strname),prefix,strlen(prefix));
	/*
	 * Loop forever
	 */
	for (;;)
	{
		/* Clear any already-pending wakeups */
		ResetFlushLatch(curLatchPos);

		if (ShutdownRequestPending)
			proc_exit(0);
		BufferTag *tag = NULL;
		SetProcessingMode(NormalProcessing);
		int pos = pg_snprintf(strname+n,sizeof(strname)-n,"%d tasking",curLatchPos);
		strname[n+pos] = '\0';
		set_ps_display(strname);
		/* drain the shared queue until empty */
		while((tag=QueuePushPage())!=NULL) {
			ProcFlushBufferToDisk(tag);
		}		
		pos = pg_snprintf(strname+n,sizeof(strname)-n,"%d idle",curLatchPos);
		strname[n+pos] = '\0';
		set_ps_display(strname);
		/* idle: nap up to 1s or until this worker's latch is set */
		(void) WaitLatch(GetCurrentLatch(curLatchPos),
		 	WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
		 	1000L /* convert to ms */ ,
		 	WAIT_EVENT_PAGEFLUSH_MAIN);
	}
}

/* Ask the postmaster to launch the parallel flush workers, then sleep a
 * fixed 200ms as a crude wait for them to come up. */
void SignalStartFlushWork(void) {
	SendPostmasterSignal(PMSIGNAL_PARALLEL_FLUSH_WORKER);
	//sleep wait flush work startup
	usleep(200000);
}

/*
 * StartPageFlushWorker
 *
 * Fork one parallel page-flush worker process.  Returns the child's PID
 * on success and 0 on fork failure.  (This code is heavily based on the
 * autovacuum worker startup path in pgarch.c/autovacuum.c.)
 */
int
StartPageFlushWorker(void)
{
	pid_t		worker_pid;

#ifdef EXEC_BACKEND
	switch ((worker_pid = avworker_forkexec()))
#else
	switch ((worker_pid = fork_process()))
#endif
	{
		case -1:
			/* message previously named the wrong process ("autovacuum") */
			ereport(LOG,
					(errmsg("could not fork page flush worker process: %m")));
			return 0;

#ifndef EXEC_BACKEND
		case 0:
			/* in postmaster child ... */
			InitPostmasterChild();

			/* Close the postmaster's sockets */
			ClosePostmasterPorts(false);

			CreateAuxProcessResourceOwner();

			//MyPMChildSlot = AssignPostmasterChildSlot();
			
			IsParallelFlushWorker = true;
			
			PageFlushWorkerMain(0, NULL);
			break;
#endif
		default:
			return (int) worker_pid;
	}

	/* shouldn't get here */
	return 0;
}


