/* memPartLibP.h - private memory management library header file */

/* 
 * Copyright (c) 1992-2005 Wind River Systems, Inc. 
 *
 * The right to copy, distribute, modify or otherwise make use 
 * of this software may be licensed only pursuant to the terms 
 * of an applicable Wind River license agreement. 
 */ 


/*
modification history
--------------------
01u,27sep05,zl   added ED&R option bits to MEM_OPTIONS_MASK.
01t,09aug05,zl   use semMTake() and semMGive() in the lock/unlock macros.
01s,26jul05,mil  Added prototype for memAllotLibInit.
01r,12jul05,mil  Added memAllot support to minimal kernel.
01q,14apr05,zl   changed CHUNK_MIN_SIZE to larger minimum block
01p,16feb05,aeg  added memBlockSizeGet() function prototype (SPR #106381).
01o,03may04,zl   made cumWordsAllocated 64-bit, removed minBlockWords.
01n,03feb04,zl   added allocated block guard signature support.
01m,03nov03,zl   added create, delete, alloc, free, realloc hooks.
01l,20oct03,zl   updated for new avlLib API.
01k,27jun03,tam  merge with latest VxWorks AE version
01i,14mar03,pch  runtime sizing of BLOCK_HDR for PPC32
01h,23oct01,tam  re-organized FREE_BLOCK
01g,19oct01,tam  added definition for KMEM_XXX macros
01f,25sep01,tam  added definition of the KHEAP_XXX macros
01e,19aug96,dbt  removed memPartAlignedAlloc prototype (fixed SPR #6898).
01e,05aug96,kkk  made padding in BLOCK_HDR generic to all arch with alignment
		 at 16.
01d,22sep92,rrr  added support for c++
01c,28jul92,jcf  added external declaration for memPartOptionsDefault.
01b,19jul92,pme  added external declarations for sm partition functions.
01a,01jul92,jcf  extracted from memLib v3r.
*/

#ifndef __INCmemPartLibPh
#define __INCmemPartLibPh

#ifdef __cplusplus
extern "C" {
#endif

#include <vxWorks.h>
#include <vwModNum.h>
#include <memLib.h>
#include <sllLib.h>
#include <avlUintLib.h>
#include <private/semLibP.h>
#include <classLib.h>
#include <private/objLibP.h>


/*
 * Kernel memory allocation macros (KHEAP and KMEM macros)
 *
 * NOTE: They should not be used by any application code.
 * They are also needed by code being back-ported from VxWorks AE 1.x to
 * VxWorks 5.5.x.
 */

/*
 * KHEAP macros:
 * These macros are defined simply for cosmetic reasons, to make it more
 * obvious that buffers are allocated or freed from the kernel heap, also
 * called the system memory partition.
 * No guarantee is given that the buffer allocated from the Kernel
 * Heap via KHEAP_ALLOC has a known and constant virtual to physical mapping
 * (1 to 1 mapping for instance) if virtual memory support is included (i.e.
 * INCLUDE_MMU_BASIC or INCLUDE_MMU_FULL components).
 *
 * NOTE: with the exception of KHEAP_REALLOC(), the definitions are based
 *       on API from memPartLib.c, to prevent any requirements on 
 *       INCLUDE_MEM_MGR_FULL (memLib.c).
 */

#define KHEAP_ALLOC(nBytes)                                             \
        memPartAlloc(memSysPartId, (nBytes))
#define KHEAP_FREE(pChar)                                               \
        memPartFree(memSysPartId, (char *)(pChar))
#define KHEAP_ALIGNED_ALLOC(nBytes, alignment)                          \
        memPartAlignedAlloc(memSysPartId, (nBytes), (alignment))
#define KHEAP_REALLOC(pChar, newSize)                                    \
        memPartRealloc(memSysPartId, (pChar), (newSize))


/*
 * KMEM macros:
 * If a known and constant virtual to physical mapping is required for buffer
 * allocated, KMEM macros should be used instead of KHEAP macros.
 * For instance memory blocks dynamically allocated that may be accessed when
 * the processor MMU is turned off or on should always be allocated using
 * KMEM_ALLOC() or KMEM_ALIGNED_ALLOC(). Good examples are memory blocks used
 * to store MMU translation information on some processors, or memory blocks
 * accessed by DMA devices.
 * Good care should be taken before using KMEM macros. If no constant virtual
 * to physical mapping is required, then KHEAP macros should always be used
 * instead.
 *
 * NOTE: With VxWorks 5.5.x, there is no difference between the KHEAP_XXX and
 * KMEM_XXX macros. However this will change in future releases.
 */

#ifdef INCLUDE_MEM_ALLOT

/* minimal kernel definitions, KMEM_FREE is not available */

#define KMEM_ALLOC(nBytes)                                              \
        memAllot((nBytes), 0)
#define KMEM_ALIGNED_ALLOC(nBytes, alignment)                           \
        memAllot((nBytes), (alignment))

/*
 * One-way pool allocator used by the minimal kernel: memory obtained via
 * memAllot() cannot be freed, hence no KMEM_FREE in this configuration.
 */

extern void * memAllot (unsigned size, unsigned alignment);
extern STATUS memAllotLibInit (char * pPool, unsigned poolSize);

#else /* INCLUDE_MEM_ALLOT */

#define KMEM_ALLOC(nBytes)                                              \
        memPartAlloc(memSysPartId, (nBytes))
#define KMEM_FREE(pChar)                                                \
        memPartFree(memSysPartId, (pChar))
#define KMEM_ALIGNED_ALLOC(nBytes, alignment)                           \
        memPartAlignedAlloc(memSysPartId, (nBytes), (alignment))

#endif  /* INCLUDE_MEM_ALLOT */


/*
 * For PowerPC, _ALLOC_ALIGN_SIZE is a variable whose runtime value is not
 * known when compiling generic code (i.e. with CPU=PPC32), so the conditional
 * padding in the ALLOC_CHUNK_HDR and MEM_SECTION_HDR definitions does not work.
 * The following definitions are based on the size of ALLOC_CHUNK_HDR being 16,
 * and the size of MEM_SECTION_HDR being 8.
 * NOTE: The assembly code will be slightly longer for PowerPC CPUs than
 * for architectures for which _ALLOC_ALIGN_SIZE value can be resolved by the
 * processor.
 */

#define ALLOC_CHUNK_HDR_SIZE	sizeof(ALLOC_CHUNK_HDR)
#define MEM_SECTION_HDR_SIZE	max(_ALLOC_ALIGN_SIZE, sizeof(MEM_SECTION_HDR))


/* bit 0 of a chunk's <size> field marks the previous chunk as free */

#define PREV_CHUNK_IS_FREE	1
#define CHUNK_SIZE_MASK 	(~(PREV_CHUNK_IS_FREE))

/*
 * macros for getting to next and previous blocks.
 * NOTE(review): the address arithmetic casts pointers through UINT32, which
 * assumes a 32-bit address space — confirm before reuse on 64-bit targets.
 */

#define NEXT_CHUNK_HDR(pHdr)	((FREE_CHUNK_HDR *) (((UINT32) (pHdr)) +    \
					((pHdr)->size & CHUNK_SIZE_MASK)))
#define PREV_CHUNK_HDR(pHdr)	((FREE_CHUNK_HDR *) (((UINT32) (pHdr)) -    \
					((pHdr)->prevSize)))


/* 
 * macros for converting between the "block" that the caller knows
 * (actual available data area) and the chunk header in front of it.
 * The memory chunk is the header plus the data block.
 */

#define BLOCK_TO_CHUNK_HDR(pBlock) ((ALLOC_CHUNK_HDR *) (((UINT32) (pBlock)) - \
						ALLOC_CHUNK_HDR_SIZE))
#define CHUNK_HDR_TO_BLOCK(pHdr)   ((char *) (((UINT32) (pHdr)) + 	      \
					        ALLOC_CHUNK_HDR_SIZE))


/* macro to get to the end guard placed at the tail of an allocated chunk */

#define BLOCK_END_GUARD(pHdr)	((UINT *) (((UINT32) (pHdr)) +    \
					((pHdr)->size & CHUNK_SIZE_MASK) - \
					allocChunkEndGuardSize))

/*
 * optimized version of NEXT_CHUNK_HDR for cases where we know that
 * the current block is free (so the previous one can not be free, and
 * the PREV_CHUNK_IS_FREE bit need not be masked off <size>).
 * NOTE(review): CHUNK_HDR is not declared in this header — presumably
 * defined elsewhere (memLib.h?); confirm before use.
 */

#define NEXT_CHUNK_HDR_FREE(pHdr) ((CHUNK_HDR *) (((UINT32) (pHdr)) + 	\
						  ((pHdr)->size)))


/* overhead of allocated, free chunks and region */

#define CHUNK_ALLOC_OVERHEAD 	(ALLOC_CHUNK_HDR_SIZE + allocChunkEndGuardSize)
#define CHUNK_FREE_OVERHEAD 	(sizeof(FREE_CHUNK_HDR))
#define MEM_SECTION_OVERHEAD 	(MEM_SECTION_HDR_SIZE)
#define MIN_BLOCK_WORDS		((CHUNK_FREE_OVERHEAD) >> 1)

/*
 * minimum meaningful size for a chunk. This is due to the fact that:
 *	- any allocated chunk must be convertible into a free chunk eventually.
 *	- a free chunk should accommodate allocation of at least 16 bytes to
 *        avoid creation of very small free blocks that could cause 
 *        fragmentation.
 */

#define CHUNK_MIN_SIZE 		(max (CHUNK_FREE_OVERHEAD, 	\
				      CHUNK_ALLOC_OVERHEAD + MEM_ROUND_UP(16)))

/*
 * minimum size of a memory region added via memPartAddToPool. This is due to 
 * the fact that any region has to host at least 3 allocated blocks of size 0
 * plus a free block of size 0 plus a region header. Assume the worst case,
 * with end guards enabled.
 */

#define REGION_START_OVERHEAD	(MEM_SECTION_OVERHEAD + ALLOC_CHUNK_HDR_SIZE + \
				 END_GUARD_WORDS * sizeof (UINT) + \
				 CHUNK_FREE_OVERHEAD)
#define REGION_END_OVERHEAD	(2 * (ALLOC_CHUNK_HDR_SIZE + \
				      END_GUARD_WORDS * sizeof (UINT)))

#define REGION_MIN_SIZE		(REGION_START_OVERHEAD + REGION_END_OVERHEAD)

/*
 * actual size of the data block within an allocated chunk, retrieved from
 * the chunk header address (BLOCK_SIZE) or the buffer address (MEM_BLOCK_SIZE)
 */

#define	BLOCK_SIZE(pHdr)	(((pHdr)->size & CHUNK_SIZE_MASK) - 	\
				 CHUNK_ALLOC_OVERHEAD)
#define	MEM_BLOCK_SIZE(pBlock)	memBlockSizeGet((void *)(pBlock))


/*
 * round <size> up to the next multiple of <align>; <align> must be a power
 * of two.  Both uses of <align> are parenthesized so that expression
 * arguments (e.g. ALIGN_NEXT(len, 1 << shift)) expand correctly; the
 * original expansion left <align> bare, which mis-evaluates for any
 * argument containing an operator of lower precedence than '-'.
 */

#define ALIGN_NEXT(size,align)	((((UINT)(size)) + (align) - 1) & (~((align) - 1)))



#define	MEM_PART_SHOW_LOG_LVL_1	1 /* memPartShow: display numbers of free    */
				  /* blocks of a given size                  */
#define	MEM_PART_SHOW_LOG_LVL_2	2 /* memPartShow: display numbers of free    */
				  /* blocks of a given size plus the address */
				  /* of each free block                      */

#define SIZE_NODE_MIN_NB	4	/* minimum number of available bin  */
					/* size nodes at any time           */
#define SIZE_NODE_INIT_NB	8	/* number of available bins at the  */
					/* creation of the partition should */
					/* NOT be LESS than                 */
					/* SIZE_NODE_MIN_NB                 */
#define SIZE_NODE_DEFAULT_NB	8	/* number of bin size nodes         */
					/* allocated at a time by default   */

/* partition option mask: set of all option bits accepted by a partition */

#define MEM_OPTIONS_MASK (MEM_BLOCK_CHECK |			\
			  MEM_ALLOC_ERROR_EDR_FATAL_FLAG |	\
			  MEM_ALLOC_ERROR_EDR_WARN_FLAG |	\
			  MEM_ALLOC_ERROR_LOG_FLAG | 		\
			  MEM_ALLOC_ERROR_SUSPEND_FLAG |	\
			  MEM_BLOCK_ERROR_EDR_FATAL_FLAG |	\
			  MEM_BLOCK_ERROR_EDR_WARN_FLAG |	\
			  MEM_BLOCK_ERROR_LOG_FLAG |		\
			  MEM_BLOCK_ERROR_SUSPEND_FLAG |	\
			  MEM_ALLOC_ERROR_MASK |		\
			  MEM_BLOCK_ERROR_MASK)


/* pattern to fill guard zones, and number of UINT words in an end guard */

#define GUARD_PATTERN			0xa110cb1c
#define END_GUARD_WORDS			4


#ifndef _ASMLANGUAGE

/* validate a partition ID: ERROR for NULL, otherwise class-verify the object */

#define MEM_PART_ID_VERIFY(partId)				\
		(((partId) == NULL) ? ERROR :  			\
		 OBJ_VERIFY ((partId), memPartClassId))

/* invalidate the object core so subsequent MEM_PART_ID_VERIFY calls fail */

#define MEM_PART_INVALIDATE(partId)				\
		objCoreInvalidate (&(partId)->objCore)

/* mutual exclusion on a partition via its mutex semaphore */

#define MEM_PART_LOCK(partId)					\
		semMTake (&(partId)->sem, WAIT_FOREVER)
#define MEM_PART_VERIFIED_LOCK(partId)				\
		memPartVerifiedLock(partId)
#define MEM_PART_UNLOCK(partId)					\
		semMGive (&(partId)->sem)

/* typedefs */

/* allocator/free/realloc function types used by the partition hooks */

typedef void * (*FUNC_ALLOC) (PART_ID partId, unsigned nBytes, 
			      unsigned alignment);
typedef STATUS (*FUNC_FREE) (PART_ID partId, char * pBlk);
typedef void * (*FUNC_REALLOC) (PART_ID partId, char * pBlk, unsigned nBytes);

/* hook called at partition creation; receives the default alloc routines */

typedef STATUS (*FUNC_CREATE_HOOK) (PART_ID partId, FUNC_ALLOC allocFunc,
				    FUNC_FREE freeFunc, 
				    FUNC_REALLOC reallocFunc, 
				    unsigned defAlign);

/* per-operation hooks; <arg> is the hookArg registered with the partition */

typedef void * (*FUNC_ALLOC_HOOK) (PART_ID partId, void * arg, unsigned nBytes,
				   unsigned alignment);
typedef STATUS (*FUNC_FREE_HOOK) (PART_ID partId, void * arg, char * ptr);
typedef void * (*FUNC_REALLOC_HOOK) (PART_ID partId, void * arg, char * ptr,
				     unsigned nBytes);
typedef STATUS (*FUNC_DELETE_HOOK) (PART_ID partId, void * arg);

/* 
 * Data structure for a memory section (i.e. piece of memory added to
 * a partition via memPartAddToPool)
 */

typedef struct mem_section_hdr
    {
    /* pointer to the next section part of the memory partition */

    SL_NODE 	sectionNode;

    /* size of this section */

    UINT	size;

    /* padding is done through MEM_SECTION_HDR_SIZE */

    } MEM_SECTION_HDR;

/* data structure for a free chunk */

typedef struct free_chunk_hdr
    {
    /* size of previous chunk */

    UINT			prevSize;

    /* size of current chunk + flag indicating if previous chunk is free */

    UINT			size;

    /*
     * pointers to previous and next free chunks of the same size
     * (doubly linked list headed by a MEM_PART_SIZE_NODE)
     */

    struct free_chunk_hdr *	next;
    struct free_chunk_hdr *	prev;

    } FREE_CHUNK_HDR;

/*
 * the order of the fields in MEM_PART_SIZE_NODE is important -
 * NOTE(review): presumably so that <next>/<prev> overlay the same-named
 * fields of FREE_CHUNK_HDR, letting the node act as a free-list head;
 * confirm against memPartLib.c before changing the layout.
 */

typedef struct
    { 
    AVLU_NODE    	avlSize;        /* AVL tree sorted by the size key  */
    FREE_CHUNK_HDR *	next;		/* pointer to the first free chunk  */
    FREE_CHUNK_HDR *	prev;		/* pointer to the last free chunk   */

    } MEM_PART_SIZE_NODE;

/* memory partition data structure */

typedef struct mem_part
    {
    OBJ_CORE	objCore;		/* object management */
    
    AVLU_TREE 	avlSize;		/* root of the size node AVL tree     */
    UINT	sizeNodeNb;		/* number of used avl size nodes      */

    MEM_PART_SIZE_NODE * freeSizeNode;	/* pointer to first free size node    */
    UINT	freeSizeNodeNb;		/* number of remaining free size nodes */

    SEMAPHORE	sem;			/* partition mutex semaphore (see     */
					/* MEM_PART_LOCK/MEM_PART_UNLOCK)     */
    unsigned	totalWords;		/* total number of words in partition */
    unsigned	options;		/* option bits (see MEM_OPTIONS_MASK) */
    SL_LIST	sectionHdrLst;		/* list of sections in partition      */

    /* alloc hooks (NULL when not installed; see memPartHooksInstall) */

    FUNC_ALLOC_HOOK   allocHook;	/* hook for memPartAlignedAlloc       */
    FUNC_FREE_HOOK    freeHook;		/* hook for memPartFree               */
    FUNC_REALLOC_HOOK reallocHook;	/* hook for memPartRealloc            */
    FUNC_DELETE_HOOK  deleteHook;	/* hook for memPartDelete             */
    void *	      hookArg;		/* argument for hooks                 */ 

    /* allocation statistics */

    UINT32	curBlocksFreed;		/* current # of blocks freed          */
    UINT32	curWordsFreed;		/* current # of words freed           */
    UINT32	curBlocksAllocated;	/* current # of blocks allocated      */
    UINT32	curWordsAllocated;	/* current # of words allocated       */
    UINT64	cumBlocksAllocated;	/* cumulative # of blocks allocated   */
    UINT64	cumWordsAllocated;	/* cumulative # of words allocated    */
    UINT32	maxWordsAllocated;	/* peak # of words allocated  	      */
    UINT32	curBlocksAllocatedInternal; /* current # of blocks allocated  */
    					/* for internal use		      */
    UINT32	curWordsAllocatedInternal; /* current # of words allocated    */
    					/* for internal use		      */

#ifdef ENABLE_LOCALITY_PRESERVATION
    /* locality preservation support: remembers the last split free chunk */

    FREE_CHUNK_HDR * lastSplittedChunkHdr;
    UINT     	     lastSplittedChunkSize;
#endif
    } PARTITION;


/* data structure for an allocated chunk (precedes the caller's data area) */

typedef struct alloc_chunk_hdr
    {
    /* size of previous chunk */

    UINT		prevSize;

    /* size of current chunk + flag indicating if previous chunk is free */

    UINT		size;

    /* partition ID this chunk was allocated from */

    PART_ID		memPartId;

    /* head guard (filled with GUARD_PATTERN when guards are enabled) */

    UINT		headGuard;

    } ALLOC_CHUNK_HDR;


/* definitions for backward compatibility with the pre-chunk naming scheme */

#define	BLOCK_HDR		ALLOC_CHUNK_HDR
#define	FREE_BLOCK		FREE_CHUNK_HDR
#define	BLOCK_TO_HDR(pBuf)	BLOCK_TO_CHUNK_HDR(pBuf)


/* variable declarations */

extern CLASS_ID memPartClassId;		/* memory partition class id */
extern FUNCPTR  memPartBlockErrorRtn;	/* block error method */
extern FUNCPTR  memPartAllocErrorRtn;	/* alloc error method */
extern FUNCPTR  memPartSemInitRtn;	/* partition semaphore init method */
extern unsigned	memPartOptionsDefault;	/* default partition options */
extern UINT	memDefaultAlignment;	/* default alignment */
extern int	mutexOptionsMemLib;	/* mutex options */

/* shared memory manager function pointers */

extern FUNCPTR  smMemPartAddToPoolRtn;
extern FUNCPTR  smMemPartFreeRtn;
extern FUNCPTR  smMemPartAllocRtn;


/* function declarations */

/* check a chunk header's consistency; <isFree> selects free/allocated rules */

extern BOOL	memPartBlockIsValid (PART_ID partId, FREE_CHUNK_HDR *pHdr, 
				     BOOL isFree);

/* validate the allocated block that <pBlock> points into */

extern STATUS   memPartBlockValidate (PART_ID partId, char * pBlock);

/* register the partition create hook; <guardEnable> turns on block guards */

extern STATUS	memPartCreateHookSet (FUNC_CREATE_HOOK createHook, 
				      BOOL guardEnable);

/* install the per-operation hooks stored in the PARTITION structure */

extern STATUS	memPartHooksInstall (PART_ID partId, void *  hookArg, 
				     FUNC_ALLOC_HOOK allocHook, 
				     FUNC_FREE_HOOK freeHook,
				     FUNC_REALLOC_HOOK reallocHook,
				     FUNC_DELETE_HOOK deleteHook);

/* verify <partId> then take its lock (see MEM_PART_VERIFIED_LOCK) */

extern STATUS	memPartVerifiedLock (PART_ID partId);

/* return the usable data size of an allocated block (see MEM_BLOCK_SIZE) */

extern UINT     memBlockSizeGet     (void * pBlock);

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* __INCmemPartLibPh */
