#include "mq.h"
#include "th.h"

#include "mempool.h"
#include <assert.h>

C_CODE_BEGIN

/*
 * Per-message header. The user payload (objsize bytes) is stored
 * immediately after this header in the same pool slot (see the
 * `objsize+sizeof(struct msgblock)` slot size passed to mempool_create).
 */
struct msgblock {
	/* linked pointer to the next node in the queue, NULL at the tail */
	struct msgblock* next;
};

/*
 * Two-lock FIFO queue: enqueue only touches `tail` under `taillk`,
 * dequeue only touches `head` under `headlk`, and a permanent dummy
 * node keeps the two ends from ever colliding, so producers and
 * consumers do not contend on the same lock.
 */
struct msgqueue {
	struct msgblock* head;
	struct msgblock* tail;
	/* these 2 locks protect the head and the tail independently */
	fastlock headlk , taillk;
	/* size in bytes of one user object stored per node */
	size_t objsize;
	/* using a memory pool to handle dynamic memory allocation */
	struct mempool* mem;
	/* serializes mempool_fetch/mempool_return/mempool_clear calls */
	fastlock memlk;
};



/*
 * Create a message queue whose nodes hold `objsize` bytes of payload,
 * with `reserve_slots` slots pre-reserved in the backing memory pool.
 * Returns NULL on allocation failure (previously an unchecked malloc /
 * mempool_create result was dereferenced).
 * Ownership: caller must release the queue with msgqueue_destroy.
 */
EXPORT_FUNC struct msgqueue* msgqueue_create( size_t reserve_slots ,size_t objsize ) {
	struct msgqueue* mq = malloc( sizeof(struct msgqueue) );
	if( mq == NULL )
		return NULL;
	thread_fastlock_create( &(mq->headlk) );
	thread_fastlock_create( &(mq->taillk) );
	thread_fastlock_create( &(mq->memlk) );
	/* each pool slot = node header + payload */
	mq->mem = mempool_create( 4 , objsize+sizeof(struct msgblock) , reserve_slots );
	if( mq->mem == NULL ) {
		/* NOTE(review): assumes mempool_create reports failure via NULL -- confirm */
		thread_fastlock_destroy( &(mq->headlk) );
		thread_fastlock_destroy( &(mq->taillk) );
		thread_fastlock_destroy( &(mq->memlk) );
		free(mq);
		return NULL;
	}
	/*
	 * First of all, I will add a dummy element.
	 * This dummy element makes the msgqueue
	 * never "empty" internally, so head and tail
	 * operations never touch the same node.
	 */
	mq->head = mq->tail = (struct msgblock*)mempool_fetch(mq->mem);
	mq->tail->next = NULL;
	mq->objsize = objsize;

	return mq;
}

/*
 * Enqueue a copy of `obj` (mq->objsize bytes) at the tail of the queue.
 * Only `taillk` (plus `memlk` for the pool) is taken, so enqueue runs
 * concurrently with dequeue.
 */
EXPORT_FUNC void msgqueue_enqueue( struct msgqueue* mq, void* obj ) {
	struct msgblock* node;

	/* grab a slot (header + payload) from the shared pool */
	thread_fastlock_require( &(mq->memlk) , NULL );
	node = (struct msgblock*) mempool_fetch( mq->mem );
	thread_fastlock_release( &(mq->memlk) );

	/* the payload lives right behind the msgblock header */
	memcpy( ((char*)(node)) + sizeof(struct msgblock) , obj , mq->objsize );
	node->next = NULL;
	/* get the tail lock since we enqueue at the tail of the queue */
	thread_fastlock_require( &(mq->taillk) , NULL );

#ifdef _MSC_VER
	/*
	 * Publish the new node atomically so a concurrent dequeuer never
	 * observes a partially written next pointer.
	 * BUG FIX: the value argument was cast to LONG, which truncates the
	 * pointer to 32 bits on 64-bit Windows; pointers must be passed as PVOID.
	 */
	InterlockedExchangePointer( (PVOID volatile*)( &(mq->tail->next) ), (PVOID)node );
#elif defined(__GNUC__)
	/* BUG FIX: was `#elif __GUNC__` (misspelled), so this branch never compiled in */
	__sync_lock_test_and_set( &(mq->tail->next), node );
#else
	mq->tail->next = node;
#endif

	mq->tail = node;
	/* release lock */
	thread_fastlock_release( &(mq->taillk) );
}

/*
 * Discard the front element of the queue, if any.
 * The node after the dummy carries the front payload; popping means
 * recycling the current dummy and promoting that node to be the new dummy.
 * A no-op when the queue is empty.
 */
EXPORT_FUNC void msgqueue_dequeue( struct msgqueue* mq ) {
	struct msgblock* front;
	struct msgblock* old_dummy;

	thread_fastlock_require( &(mq->headlk) , NULL );
	assert( mq->head );
	/*
	 * Reading `next` here is safe: the enqueuer publishes the pointer
	 * in one write under its own lock, so we never see a partial value.
	 */
	front = mq->head->next;
	if( front == NULL ) {
		/* no node here -- queue is empty */
		thread_fastlock_release( &(mq->headlk) );
		return;
	}
	/* promote the front node to dummy, recycle the old dummy */
	old_dummy = mq->head;
	mq->head = front;
	thread_fastlock_require( &(mq->memlk) , NULL );
	mempool_return( mq->mem , old_dummy );
	thread_fastlock_release( &(mq->memlk) );
	thread_fastlock_release( &(mq->headlk) );
}

/*
 * Pop the front element and copy its payload (mq->objsize bytes) into
 * `buf`. Returns `buf` on success, NULL when the queue is empty.
 * Caller owns `buf`, which must hold at least mq->objsize bytes.
 */
EXPORT_FUNC void*  msgqueue_xdequeue( struct msgqueue* mq, void* buf ) {
	struct msgblock* front;
	struct msgblock* old_dummy;

	thread_fastlock_require( &(mq->headlk) , NULL );
	assert( mq->head );
	/*
	 * Reading `next` here is safe: the enqueuer publishes the pointer
	 * in one write under its own lock, so we never see a partial value.
	 */
	front = mq->head->next;
	if( front == NULL ) {
		/* no node here -- queue is empty */
		thread_fastlock_release( &(mq->headlk) );
		return NULL;
	}
	/* copy the payload out before recycling anything */
	memcpy( buf , (void*)((char*)front + sizeof(struct msgblock)) , mq->objsize );
	/* promote the front node to dummy, recycle the old dummy */
	old_dummy = mq->head;
	mq->head = front;
	thread_fastlock_require( &(mq->memlk) , NULL );
	mempool_return( mq->mem , old_dummy );
	thread_fastlock_release( &(mq->memlk) );
	thread_fastlock_release( &(mq->headlk) );
	return buf;
}

/*
 * Peek at the front payload without removing it.
 * Returns NULL when the queue is empty.
 * NOTE(review): the lock is dropped before returning, so a concurrent
 * dequeue may recycle the pointed-to node while the caller reads it --
 * callers must externally guarantee no racing dequeue.
 */
EXPORT_FUNC void* msgqueue_head( struct msgqueue* mq ) {
	struct msgblock* front;

	thread_fastlock_require( &(mq->headlk) , NULL );
	front = mq->head->next;
	thread_fastlock_release( &(mq->headlk) );

	return front ? (void*)((char*)front + sizeof(struct msgblock)) : NULL;
}

/*
 * Number of user objects currently queued. The pool always holds one
 * extra slot for the internal dummy node, hence the -1.
 * NOTE(review): reads the pool counter without taking memlk, so the
 * value may be momentarily stale under concurrent enqueue/dequeue.
 */
EXPORT_FUNC size_t msgqueue_size( struct msgqueue* mq ) {
	return mempool_get_usesize(mq->mem)-1;
}

/*
 * Remove every element from the queue.
 * BUG FIX: mempool_clear reclaims ALL slots -- including the dummy node
 * that head/tail point at -- so the old code left head/tail dangling and
 * broke the "queue is never internally empty" invariant (and the
 * usesize==1 emptiness accounting). We must re-install a fresh dummy,
 * and hold both end locks so no concurrent op observes the torn queue.
 */
EXPORT_FUNC void  msgqueue_clear( struct msgqueue* mq ) {
	thread_fastlock_require( &(mq->headlk) , NULL );
	thread_fastlock_require( &(mq->taillk) , NULL );
	thread_fastlock_require( &(mq->memlk) , NULL );
	mempool_clear( mq->mem );
	/* rebuild the dummy node exactly as msgqueue_create does */
	mq->head = mq->tail = (struct msgblock*)mempool_fetch(mq->mem);
	mq->tail->next = NULL;
	thread_fastlock_release( &(mq->memlk) );
	thread_fastlock_release( &(mq->taillk) );
	thread_fastlock_release( &(mq->headlk) );
}

/*
 * Free the queue, its locks, and the backing pool.
 * Caller must guarantee no other thread still uses `mq`.
 */
EXPORT_FUNC void  msgqueue_destroy( struct msgqueue* mq ) {
	thread_fastlock_destroy( &(mq->headlk) );
	thread_fastlock_destroy( &(mq->taillk) );
	thread_fastlock_destroy( &(mq->memlk) );
	mempool_destroy( mq->mem );
	free( mq );
}

/*
 * True when the queue holds no user objects, i.e. only the internal
 * dummy node is allocated from the pool.
 */
EXPORT_FUNC bool msgqueue_empty( struct msgqueue* mq) {
	size_t slots_in_use = mempool_get_usesize( mq->mem );
	return slots_in_use == 1;
}


C_CODE_END