/*  REVIEWED_FOR_64BIT=YES  */
//
// Copyright (C) 1991, All Rights Reserved, by
// Digital Equipment Corporation, Maynard, Mass.
//
// This software is furnished under a license and may be used and copied
// only  in  accordance  with  the  terms  of such  license and with the
// inclusion of the above copyright notice. This software or  any  other
// copies thereof may not be provided or otherwise made available to any
// other person. No title to and ownership of  the  software  is  hereby
// transferred.
//
// The information in this software is subject to change without  notice
// and  should  not be  construed  as  a commitment by Digital Equipment
// Corporation.
//
// Digital assumes no responsibility for the use or  reliability  of its
// software on equipment which is not supplied by Digital.
//
//
//  FACILITY:
//
//  Calaveras mempool
//
//  ABSTRACT:
//
//  Pool based memory allocator
//
//  AUTHORS:
//
//  Uresh Vahalia
//
//  CREATION DATE:
// 
//  13 Oct 1992
//


//  MODIFICATION HISTORY:
//
// 	RCS Log Removed.  To view log, use: rlog <file>




#ifndef _MEMPOOL_HXX_
#define _MEMPOOL_HXX_

#if !defined(lint) && defined(INCLUDE_ALL_RCSID)
/* $Id: mempool.hxx,v 11.14.4.1 2012/02/13 23:14:44 mathec Exp $ */
/* $Source: /usr/local/cvs/archive/Dart/server/src/kernel/min_kernel/include/mempool.hxx,v $ */
#endif

#ifndef _STDLIB_H_
#include <stdlib.h>
#endif

#ifndef _STDIO_H_
#include <stdio.h>
#endif

#ifndef _SLOCK_HXX_
#include <slock.hxx>
#endif

#ifndef _MEMOWNER_HXX_
#include <memowner.hxx>
#endif

#ifndef _ADDRSPAC_HXX_
#include <addrspac.hxx>
#endif

#ifndef _KERNEL_H_
#include <kernel.h>
#endif

#ifndef _LOGGER_HXX_
#include <logger.hxx>
#endif

#ifndef _DQUEUE_HXX_
#include <dqueue.hxx>
#endif

#ifndef _BOOLEAN_H_
#include <boolean.h>
#endif

#ifdef DEBUG_ALL
#define Mempool_DEBUG
#endif

#ifdef Mempool_DEBUG
#include <stdio.h>
#endif

#ifdef Mempool_DEBUG
#define Mempool_assert(x) assert(x)
#endif

#ifdef Mempool_assert
#include <assert.h>
#endif

#ifndef Mempool_assert
#define Mempool_assert(ignore) ((void)0)
#endif

#ifdef DEBUG_ALL
#ifndef _STRING_H_
#include <string.h>
#endif
#endif

#include <generic.h>




// Memory pools are used when you need to do repeated allocation and freeing
// of fixed size objects.  They provide a more efficient replacement for
// new/delete or malloc/free, at the cost of reduced generality.
// The programmer needs to set up the pool as explained below, after which
// he can use new and delete to get and release the base objects, without
// being concerned with the underlying pool implementation.  The following 
// lines show how to set up the pool for a sample class foo:
//
// In the hxx file for class foo:
//      #include <pool.hxx>
//      declare(Pool,foo)
//      class foo   {
//          ...
//        public:
//          ...
//          POOL_MANAGE_MEMORY(foo)
//      };
//      declare(PoolMember,foo)
//
// In a .cxx file
//      implement(Pool,foo)
//      foo_Pool foo::thePool("some descriptive name");
//
// Finally, in some function that does a setup:
//      foo::thePool.initialize(nEntries);
// 
// The POOL_MANAGE_MEMORY macro declares new and delete functions for the
// class, and declares thePool as a static member.  The initialize function is
// a static member of the foo_PoolMember class, and must be called with an
// argument specifying the number of objects to initially allocate in the
// pool.
// 
// The pool will grow automatically whenever it runs out of entries unless
// the maximum number of entries allowed for the pool is set to the initial
// request (limit_maxEntries).  The pool will not shrink automatically.
// There are, however, management functions provided which will shrink a
// particular pool (freeChunk), or extract free memory out of any pool on
// the system that has surplus memory.
//
// The use of this class is encouraged for several reasons.  It is space
// efficient, as it avoids any power of two rounding.  The base object
// occupies only one extra longword of storage, as in the case of malloc.  The
// algorithms are faster than the regular new/delete, since it avoids the
// extra function call to malloc/free, and avoids the computation of the
// bucket size, etc.  In fact, the pool-based new is at least twice as fast as
// the regular new, and the bigger the base object, the greater the speed
// difference.

// Oct, 2003:  To solve the memory fragmentation issue, the initialization logic of
// mempool has been changed to allocate either 1 entry at a time or whatever we can
// fit in a single page.  That way, if some application initializes a mempool with
// objects which require the allocation of multiple contiguous pages, we won't
// panic with "out of memory" if we can't find that many contiguous pages
// in the system.  We now always allocate 1 page at a time or 1 entry at a time
// based on the size of the entry.



// It is also possible to use pools for objects whose size may take one of
// several values.  This is the case of the dcache objects, whose size depends
// on the type of device they are associated with.  In this case, we need a
// separate pool for each possible size, and a few changes on the way they are
// used.  The macro POOL_MANAGE_MEMORY should not be used, nor should you
// declare the fooPool.  Instead, you will need to create several pools, and
// initialize each of them with two arguments - nEntries and entrySize.
// Instead of using new and delete, you will need to directly call allocate
// and deallocate, or write wrappers for them.  All this is probably needed
// only by the dcache and no one else.
// 
// If you need to have a base class B and a derived class D both using pool
// based allocation, everything will still work, with one simple precaution:
// the base class needs to have a (possibly empty) virtual destructor.  This
// ensures that objects get returned to the correct pools no matter how they
// are referenced.

//
// The maximum number of entries in a pool has to be different than the pool
// initialCount in order to allow pool expansion. To disable
// pool expansion limit_maxEntries can be used after pool initialization to
// set maxEntries to the initialCount. 
#define MAX_OBJECTS 0x9999

#ifdef Mempool_DEBUG
const int Mempool_IdleData = 0xfd;
#endif

#ifndef MEMPOOL_OFFSETOF
#if defined(GCC34)
#define MEMPOOL_OFFSETOF(type, member) ((size_t)((uchar *)&(((type *)0x100000)->member) - (uchar *)0x100000))
#else
#ifdef WINDOWS_BUILD
#define MEMPOOL_OFFSETOF(type, member) ((size_t)((uchar *)&(((type *)0)->member)))
#else
#define MEMPOOL_OFFSETOF(typ, mem)  ((size_t)(&typ::mem))
#endif // WINDOWS_BUILD
#endif // GCC34
#endif // MEMPOOL_OFFSETOF

#define DATA_SIZE  (entrySize - MEMPOOL_OFFSETOF(MemPoolMember<typ>, body))

extern Memory_Owner PooledMemory;
#define PooledMemoryOwner   "PooledMemory"
class MemoryPool;
declare (DQueue, MemoryPool)

// Abstract base class for all memory pools.  Holds the bookkeeping counters
// and the global pool list used by the management functions; the actual
// allocation logic lives in the MemPool<typ> template below.
class MemoryPool {
protected:
	// for 64 bit builds, we have to change this alignment from 4 byte to 8 bytes
	// to fix CBFS windows memory corruption issue
	#ifndef __X86_64
	  enum {poolAlignmentMask = 0x03 };
	#else
	  enum {poolAlignmentMask = 0x07 };
	#endif

  static MemoryPool_Head  poolList;   // chain all pools for mgmt
  static Sthread_MutexSpl listMutex;  // protects poolList and nextPool
  MemoryPool_Link nextPool;           // link on poolList
  Sthread_MutexSpl mutex;             // protects all per-pool counters below
  const char  *const poolName;        // descriptive name, printed by dump()
  size_t initialCount;                // entries created by initialize(); 0 until then
  size_t totEntries;                  // entries currently carved into chunks
  size_t nFree;                       // entries currently on the free list
  size_t nChunks;                     // chunks currently allocated
  size_t entrySize;                   // per-entry size, rounded up to poolAlignmentMask
  size_t nAllocs;                     // cumulative count of successful allocations
  size_t maxEntries;                  // expansion cap (see MemPool::limit_maxEntries)
public:
  MemoryPool (const char *name);
  virtual ~MemoryPool() {}
  static void dumpAll();
  // dumps summary info about all pools
  virtual void dump (size_t count) = 0;
  // count is number of free entries to print
  virtual size_t freeChunk (size_t npages) = 0;
  // returns the number of memory pages freed
  virtual boolean_t scavange();
  // try to free entries, return TRUE if none were found, FALSE if at
  // least one was deallocated; called by allocate before expanding
  // the pool.  The default implementation of scavange() (sic) returns
  // TRUE, causing the pool to be expanded.
  virtual void removePool();
  // removes the pool from the pool list and frees all the pages associated
  // with it, if any.
  static size_t freePages (size_t npages);
  // tries to free up at least npages pages, taking from any pool that has
  // free pages.  If npages < 0, shrinks all pools to minimum size.
  // NOTE(review): npages is unsigned, so the "npages < 0" case cannot occur
  // as written -- confirm the intended sentinel value.
};

template<class typ>
class MemPoolMember;

template<class typ>
class MemPool;

// One contiguous run of pages owned by a MemPool<typ>.  The chunk header is
// followed immediately (at firstMember) by the pool entries carved from it.
template<class typ>
class MemPoolChunk {
public: // This is required for offsetof, i.e. class *must* be POD and can not have private data
  friend class MemPool<typ>;
  MemPoolChunk<typ>   *nextChunk;      // next chunk belonging to the same pool
  size_t      entriesInChunk;          // number of entries carved from this chunk
  size_t      nPages;                  // number of pages backing this chunk
  char        firstMember;    /* first entry starts here -- must be last */

  // Byte offset of the first entry from the start of the chunk header.
  static long_n  memberOffset() {
    return offsetof(MemPoolChunk<typ>,firstMember);
  }

  // If every entry in this chunk is on the pool's free list, unlink them all
  // and return entriesInChunk so the caller can release the pages; otherwise
  // return 0 and change nothing.
  // BUG FIX: freelist is now passed by reference.  The old by-value
  // parameter silently discarded the assignment made for the "first element
  // on free list" case, so when this chunk held the list head the pool's
  // freelist pointer was left dangling after the chunk's pages were freed.
  size_t recover (MemPoolMember<typ> *&freelist) {
    /* mutex on the pool must be locked */
    // Pass 1: an allocated entry has next == NULL; if any entry is in use,
    // the chunk cannot be recovered.
    // NOTE(review): the tail entry of the whole free list also has
    // next == NULL, so a chunk containing the free-list tail is
    // (conservatively) never recovered -- confirm this is acceptable.
    MemPoolMember<typ> *pm = (MemPoolMember<typ> *) &firstMember;
    while (pm < (MemPoolMember<typ> *) &firstMember + entriesInChunk)
      if ((pm++)->next == NULL)
	return 0;
    // Pass 2: unlink each of this chunk's entries from the doubly-linked
    // free list.
    pm = (MemPoolMember<typ> *) &firstMember;
    while (pm < (MemPoolMember<typ> *) &firstMember + entriesInChunk) {
      if (pm->next != NULL)
	pm->next->prev() = pm->prev();
      if (pm->prev() != NULL)
	pm->prev()->next = pm->next;
      else    /* first element on free list */
	freelist = pm->next;
      pm++;
    }
    return entriesInChunk;
  }
};

// Pool allocator for fixed-size objects of (at least) sizeof(typ) bytes.
// All public operations take the pool mutex; getChunk is the only internal
// helper and requires the mutex to be held already.
template<class typ>
class MemPool : public MemoryPool {
  MemPoolMember<typ>  *freelist;   // head of the doubly-linked free list
  MemPoolChunk<typ>   *chunkList;  // chunks of pages owned by this pool
  Memory_Owner        MPowner;     // owner charged for this pool's pages

  // Allocate a new chunk of pages, carve it into entries, and build the free
  // list from them, chaining 'list' (a previously built free list, possibly
  // NULL) behind the new entries.  Returns the number of entries added, or
  // 0 if no pages could be obtained.
  // pre: mutex locked, freelist == NULL, size rounded to poolAlignmentMask.
  size_t getChunk (size_t nEntries, int size, MemPoolMember<typ> * list) {
    Mempool_assert (mutex.assertLocked());
    Mempool_assert (freelist == NULL);
    Mempool_assert ((size & poolAlignmentMask) == 0);
    size_t pageCount = Memory_TheAddressSpace.nPages(nEntries * size + MemPoolChunk<typ>::memberOffset());
    MemPoolChunk<typ> *newChunk = (MemPoolChunk<typ> *)
      Memory_TheAddressSpace.allocPages (pageCount, MPowner);
    if (newChunk == NULL)
      return 0;
#ifdef Mempool_DEBUG
    memset((char*)newChunk , Mempool_IdleData, pageCount * Memory_PageSize);
#endif
    nChunks++;
    // Use every entry that fits in the pages we actually received, not just
    // the number requested.
    nEntries = (pageCount * Memory_PageSize - MemPoolChunk<typ>::memberOffset()) / size;
    newChunk->nextChunk = chunkList;
    chunkList = newChunk;
    nFree += nEntries;
    totEntries += chunkList->entriesInChunk = nEntries;
    chunkList->nPages = pageCount;
    // Thread the fresh entries into a doubly-linked free list.
    // NOTE(review): entries are linked at sizeof(MemPoolMember<typ>) spacing
    // (pm+1) while the chunk capacity is computed from 'size' (entrySize);
    // the two differ whenever initialize() is given a non-zero 'extra' --
    // confirm variable-size pools really get 'extra' usable bytes per entry.
    MemPoolMember<typ> *pm = freelist = (MemPoolMember<typ> *)&(chunkList->firstMember);
    MemPoolMember<typ> *prev = NULL;
    while (pm < freelist + nEntries - 1) {
      pm->prev() = prev;
      prev = pm;
      pm = pm->next = pm+1;
    }
    pm->prev() = prev;
    pm->next = list;
    if (list != NULL)
      list->prev() = pm;
    return nEntries;
  }

public:

  // Construct an empty pool; initialize() must be called before use.
  MemPool(const char *name, Memory_Owner owner = PooledMemory)
    : MemoryPool(name), freelist(NULL), chunkList(NULL), MPowner(owner)
  {}

  // Construct and immediately populate a pool of fixed-size entries.
  MemPool(const char *name, size_t nEntries, Memory_Owner owner = PooledMemory)
    : MemoryPool(name), freelist(NULL), chunkList(NULL), MPowner(owner)
  {
    initialize(nEntries,0);
  }

  // A pool must be emptied (removePool) before it may be destroyed.
  ~MemPool() {
    if (chunkList != NULL)
      panic ("deleting non-empty memory pool");
    listMutex.lock();
    nextPool.remove();
    listMutex.unlock();
  }

  // Populate the pool with at least nEntries entries, each occupying
  // sizeof(member) + extra bytes rounded up to the pool alignment.
  // Idempotent: a second call returns without effect.  Chunks are requested
  // one entry at a time (getChunk rounds up to whatever fits in the pages it
  // gets), per the Oct 2003 anti-fragmentation change noted above.
  void initialize (size_t nEntries, int extra = 0) {
    mutex.lock();
    if (initialCount > 0)
    {
        mutex.unlock();
        return ;
    }
    maxEntries = MAX_OBJECTS;
    entrySize = (extra + sizeof (MemPoolMember<typ>) + poolAlignmentMask) & ~poolAlignmentMask;
    nFree = 0;
    MemPoolMember<typ> *prev = NULL;
    while (initialCount < (size_t)nEntries) {
      // getChunk requires freelist == NULL; chain the list built so far
      // behind the entries of the next chunk.
      prev = freelist;
      freelist = NULL;
      initialCount += getChunk (1, (int)entrySize, prev);
    }
    mutex.unlock();
  }

  // Release whole chunks whose entries are all free until at least npages
  // pages have been freed, or no more chunks can be recovered.  Returns the
  // number of pages actually freed.
  // NOTE(review): npages is unsigned, so the documented "npages < 0 frees
  // all possible chunks" case cannot occur as written -- confirm intent.
  size_t freeChunk(size_t npages) {
    MemPoolChunk<typ> *cp;
    MemPoolChunk<typ> *next;
    MemPoolChunk<typ> *prev = NULL;
    size_t nfreed = 0;
    size_t i;
    mutex.lock();
    // BUG FIX: fetch nextChunk before the chunk may be freed; the old loop
    // read cp->nextChunk after freePages(cp) -- a use-after-free.
    for (cp = chunkList; cp != NULL; cp = next) {
      next = cp->nextChunk;
      if ((i = cp->recover (freelist)) != 0) {
	if (prev == NULL)
	  chunkList = next;
	else
	  prev->nextChunk = next;
	nfreed += cp->nPages;
	totEntries -= i;
	nFree -= i;
	nChunks--;
	// BUG FIX: free this chunk's own page count under this pool's owner
	// (as getChunk allocated and removePool frees them), not the caller's
	// requested total under the global PooledMemory owner.
	Memory_TheAddressSpace.freePages ((addr_t)cp, cp->nPages, MPowner);
	if (nfreed >= npages)
	  break;
      } else {
	// BUG FIX: only advance prev past chunks that stay on the list; the
	// old code advanced it onto freed chunks, so a later unlink wrote
	// through a dangling pointer.
	prev = cp;
      }
    }
    mutex.unlock();
    return nfreed;
  }

  // Log a summary of the pool plus up to 'count' chunk and free-list
  // addresses (count == 0 selects a default of 64).
  void dump (size_t count) {
    logIO::logmsg(LOG_LIB,LOG_DEBUG,"Pool %s at addr %p\n", poolName, this);
    logIO::logmsg(LOG_LIB,LOG_DEBUG,"initCnt %lx, totEntries %lx, nFree %lx\n",
		  initialCount, totEntries, nFree);
    logIO::logmsg(LOG_LIB,LOG_DEBUG,"nAllocs %lx, nChunks %lx, entrySize %lx\n    chunk list:\n",
		  nAllocs, nChunks, entrySize);
    size_t i = 0;
    if (count <= 0)   // count is unsigned, so this means count == 0
      count = 64;
    MemPoolChunk<typ> *ch;
    for (ch = chunkList; i < count && ch != NULL; i++, ch = ch->nextChunk) {
      logIO::logmsg(LOG_LIB,LOG_DEBUG,"%p ", ch);
      if (i%8 == 7)
	logIO::logmsg(LOG_LIB,LOG_DEBUG,"\n");
    }
    logIO::logmsg(LOG_LIB,LOG_DEBUG,"\n   free list:\n");
    MemPoolMember<typ> *pm;
    for (pm = freelist; i < count && pm != NULL; i++, pm = pm->next) {
      logIO::logmsg(LOG_LIB,LOG_DEBUG,"%p ", pm);
      if (i%8 == 7)
	logIO::logmsg(LOG_LIB,LOG_DEBUG,"\n");
    }
  }

  // Take an entry off the free list; when the list is empty, give scavange()
  // a chance to reclaim entries and otherwise grow the pool by one chunk.
  // Returns NULL if the pool is capped (limit_maxEntries) or out of memory.
  typ *allocate(void) {
    mutex.lock();
    typ *f;
    if ((freelist == NULL) && ((initialCount == maxEntries) ||
			       (scavange() &&
				(getChunk (1, (int)entrySize, NULL) == 0))))
      f = NULL;
    else {
      f = &(freelist->body);
      MemPoolMember<typ> *mp = freelist;
      freelist = freelist->next;
      if (freelist != NULL)
	freelist->prev() = NULL;
      mp->next = NULL;     // next == NULL marks the entry as allocated
      nAllocs++;
      nFree--;
#ifdef Mempool_DEBUG
      // The body (beyond the prev-pointer word) must still hold the idle
      // fill pattern written by deallocate/getChunk.
      for (u_int j = sizeof(MemPoolMember<typ> *); j < DATA_SIZE;j++)
	Mempool_assert(((u_char*)(f))[j] == Mempool_IdleData);
#endif
    }
    mutex.unlock();
    return f;
  }

  // Like allocate(), but never scavenges or grows the pool; returns NULL
  // when the free list is empty.
  typ *tryAllocate(void) {
    mutex.lock();
    typ *f;
    if (freelist == NULL) {
      f = NULL;
    } else {
      f = &(freelist->body);
      MemPoolMember<typ> *mp = freelist;
      freelist = freelist->next;
      if (freelist != NULL)
	freelist->prev() = NULL;
      mp->next = NULL;
      nAllocs++;
      nFree--;
    }
    mutex.unlock();
    return f;
  }

  // Release every chunk of pages owned by the pool and reset its counters.
  // NOTE(review): unlike the base-class description of removePool(), this
  // does not unlink the pool from the global poolList (the destructor does)
  // -- confirm that is intended.
  void removePool(void) {
    mutex.lock();
    MemPoolChunk<typ> *cp = chunkList;
    MemPoolChunk<typ> *nextEntry = NULL;
    while (cp != NULL) {
      nextEntry = cp->nextChunk;
      Memory_TheAddressSpace.freePages ((addr_t)cp, cp->nPages, MPowner);
      cp = nextEntry;
    }
    chunkList = NULL;
    freelist = NULL;
    totEntries = nFree = nChunks = 0;
    mutex.unlock();
  }

  // Return f's entry to the head of the free list.
  void deallocate (typ *f) {
    MemPoolMember<typ> *pm = MemPoolMember<typ>::toMember (f);
    Mempool_assert (pm->next == NULL);    // must currently be allocated
#ifdef Mempool_DEBUG
    memset((char*)f , Mempool_IdleData, DATA_SIZE);
#endif
    mutex.lock();
    pm->next = freelist;
    pm->prev() = NULL;
    if (freelist != NULL)
      freelist->prev() = pm;
    freelist = pm;
    nFree++;
    mutex.unlock();
  }

  /* set maxEntries to the initialCount so the pool cannot expand */
  void limit_maxEntries(void) {
    maxEntries = initialCount;
  }

  /* set maxEntries to nobjects - if used, check condition in allocate,
     which only blocks expansion when initialCount == maxEntries */
  void set_maxEntries (ulong32 nobjects) {
    maxEntries = nobjects;
  }

  //  void* operator new (size_t sz);
  //  void operator delete (void *p);
};

// Wrapper that prefixes each pooled object with the free-list link.  While an
// entry is free, the first word of the (unused) body doubles as the backward
// link of the doubly-linked free list.
template<class typ>
class MemPoolMember {
public:
  //  friend typ;
  friend class MemPool<typ>;
  friend class MemPoolChunk<typ>;
  MemPoolMember<typ>  *next;  // next free entry; NULL while allocated
  typ body;   /* first word overwritten by prev ptr when on freelist */
  // Backward free-list link, stored in the first word of the idle body.
  MemPoolMember<typ> *& prev(void) {
    return *(MemPoolMember<typ>**)&body;
  }
  // Address of the embedded body (retained for compatibility).
  MemPoolMember<typ> *bodyoffset (void) {
    return ((MemPoolMember<typ> *)&body);
  }
  // Map a body pointer back to its enclosing pool member.
  // BUG FIX: computed with offsetof (already used by
  // MemPoolChunk::memberOffset) instead of calling bodyoffset() through a
  // null pointer, which is undefined behavior.
  static MemPoolMember<typ> *toMember (typ *f) {
    return (MemPoolMember<typ> *)((char *)f - offsetof(MemPoolMember<typ>, body));
  }
};

// Class-body snippet: declares the per-class pool and routes the class's
// operator new/delete through it (see the usage example at the top of this
// file).  The class must also define its static thePool member in a .cxx.
#define POOL_MANAGE_MEMORY(typ) \
    friend class MemPoolMember<typ>; \
    static MemPool<typ> thePool;  \
    void* operator new (size_t ) \
        {   return thePool.allocate();  } \
    void operator delete (void *p) \
        {   thePool.deallocate ((typ *)p);  }

/* For backward compatibility */

// Old-style declare(Pool,typ) equivalent: forward-declares typ and creates
// the typ_PoolMember / typ_Pool / typ_PoolChunk typedefs.
#define Pooldeclare(typ) \
  class typ; \
  typedef MemPoolMember<typ> name2(typ,_PoolMember); \
  typedef MemPool<typ> name2(typ,_Pool); \
  typedef MemPoolChunk<typ> name2(typ,_PoolChunk);

// Templates need no separate implementation step; kept as no-ops.
#define Poolimplement(typ)

#define PoolMemberdeclare(typ) typedef MemPoolMember<typ> name2(typ,_PoolMember);
#define PoolMemberimplement(typ)

#endif  // _MEMPOOL_HXX_
