#ifndef PROTOMSG_CONFIG_H_
#define PROTOMSG_CONFIG_H_

// Protocol message is influenced by the Google protocol buffer
// library. It simply aims at a more straightforward API design for
// data serialization and deserialization. It is part of the project
// XGE (x-game-engine),
// home-page: http://code.google.com/p/x-game-engine/
// XGE is an experimental game engine that supports rapid development
// of STG (shoot-'em-up) games.
// Unlike protocol buffers, protomsg does not provide a compiler that
// generates C++ classes to integrate into the user's project.
// Instead, it provides an efficient way to obtain data from outside
// and then perform serialization and deserialization as fast as
// possible. It does not involve any dependency, including the STL
// and std::string: many engines (XGE included) do not use
// std::string directly, because its encapsulation makes the overhead
// of the code subtle and invisible. We therefore implement our own
// data structures and memory buffers (whereas the Google protocol
// buffer library builds on the STL containers and std::string).

// Disable the warning of the VS
#define _CRT_SECURE_NO_WARNINGS

#ifdef _MSC_VER
#include "msv/stdint.h"
#include <Windows.h>
#else
#error "Current version do not support,just waiting!"
#endif
#include <cassert>
#include <stdarg.h>
#include <string.h>
#include <string>

// The maximum length ( in characters ) of a protocol message field name .
#define MAX_PROTOMSG_FIELD_KEY_LENGTH 64
// Disallow evil copy and assign operation in c++ class .
// Place this macro in the private section : the two special members
// are declared but never defined , so any accidental copy fails to
// compile ( or link ) .
#define DISALLOW_COPY_AND_ASSIGN(ClassName) \
  ClassName& operator = ( const ClassName& ); \
  ClassName( const ClassName& );
// Silence the "unreferenced parameter" warning for a parameter .
// NOTE: the historical spelling ( PAREMETER ) is kept on purpose ,
// existing code already uses this name .
#define DUMMY_PAREMETER(Parameter) (void)(Parameter)
// DASSERT : just simply replace it with the standard assert in <assert.h>
#define DASSERT assert
// DVERIFY: the expression is ALWAYS evaluated , in RELEASE mode as well ;
// when assertions are enabled a false result additionally triggers DASSERT .
// BUG FIX: the expression is now parenthesized before applying '!' .
// The old expansion was if(!Expression) , so DVERIFY(a == b) became
// if( !a == b ) which tests (!a) == b instead of !(a == b) .
#define DVERIFY(Expression) do{ if(!(Expression)) DASSERT(Expression); } while(0)
// Log level definition ( bit flags : one message may target several levels )
#define INFO  1
#define WARNING 2
#define PANIC 4
#define FATAL 8
// Log stuff goes here .
// Before using the log stuff , we should call the initialize log
// routine explicitly . Usage :
// VLOG(INFO,"this is test %s.\n","hello world");
//   -> always recorded , output to the INFO level log file .
// DLOG(INFO,"this is test %s.\n","good job");
//   -> ONLY logged in a DEBUG build , useful for debug-time checks .
#ifdef _DEBUG
#define DLOG(Level,Format,...) protomsg::util::LogMessage_##Level(Level,__FILE__,__FUNCTION__,__LINE__,Format,__VA_ARGS__)
#else
#define DLOG(Level,Format,...) void(0)
#endif
// Verbose log : recorded in every build configuration .
#define VLOG(Level,Format,...) protomsg::util::LogMessage_##Level(Level,__FILE__,__FUNCTION__,__LINE__,Format,__VA_ARGS__)
// ASSERTION_LOG: performs the assertion and , when the expression is
// false , also records the message on the given log level .
// eg:
// const char* ptr = GetTimeString();
// ASSERTION_LOG( ptr != NULL , FATAL ,
// "This ptr can not be null , if so , there must be something evil");
// BUG FIX: the old expansion was
//   (void)( (Expression) || DLOG(...) ); DASSERT(Expression)
// which could not compile -- LogMessage is not convertible to bool so
// operator|| has no viable overload -- and the two statements were not
// wrapped in do/while , so the macro misbehaved inside an unbraced if .
#ifdef _DEBUG
#define ASSERTION_LOG(Expression,Level,Format,...) \
  do { \
    if( !(Expression) ) { \
      DLOG(Level,Format,__VA_ARGS__); \
      DASSERT(Expression); \
    } \
  } while(0)
#else
#define ASSERTION_LOG(Expression,Level,Format,...) void(0)
#endif

namespace protomsg {
  // 8 bit narrow char type ( the old comment said "byte" ; bits are meant )
  typedef char achar_t ; 
  // 16 bit wide char type -- wchar_t is 16 bit on Windows , the only
  // platform currently supported ( see the #error above )
  typedef wchar_t uchar_t;
  // Signatures for the user-replaceable memory management routines
  // ( malloc / free / realloc ) .
  typedef void* (*MallocService)( size_t size );
  typedef void  (*FreeService) (void* ptr);
  typedef void* (*ReallocService) (void* original ,size_t new_size);
  // Setting the environmental malloc free realloc function pointer .
  // If you do not call this specifically , the framework will work on a
  // default allocator .
  void SetMemoryService(
    MallocService malloc_func,
    FreeService   free_func ,
    ReallocService reallocate_func
    );
namespace util {
  // Currently installed memory service routines ; every internal
  // allocation in the library goes through these pointers .
  extern MallocService gMalloc;
  extern FreeService gFree;
  extern ReallocService gRealloc;
  // A simple mutex wrapper . On Windows it is backed by a
  // CRITICAL_SECTION ; a pthread_mutex implementation is planned for
  // posix-compatible systems but is not available yet .
  class Mutex {
  public:
    Mutex() {
      // Spin briefly before sleeping -- cheap for short critical sections .
      ::InitializeCriticalSectionAndSpinCount( &win32_mutex_ , 2000 );
    }
    ~Mutex() {
      ::DeleteCriticalSection(&win32_mutex_);
    }
    // Block until the calling thread owns the mutex .
    inline void AcquireMutex();
    // Try to take ownership without blocking ; true on success .
    inline bool TryAcquireMutex();
    // Release ownership .
    inline void DropMutex();
  private:
    ::CRITICAL_SECTION win32_mutex_;
  };

  inline void Mutex::AcquireMutex() {
    ::EnterCriticalSection( &win32_mutex_ );
  }
  inline bool Mutex::TryAcquireMutex() {
    return TRUE == ::TryEnterCriticalSection( &win32_mutex_ );
  }
  inline void Mutex::DropMutex() {
    ::LeaveCriticalSection( &win32_mutex_ );
  }
  // A simple mutex helper to lock the whole language block 
  class AutoMutex {
  public:
    AutoMutex( Mutex& mutex ) : mutex_(mutex) {
      mutex.AcquireMutex();
    }
    ~AutoMutex() {
      mutex_.DropMutex();
    }
  private:
    Mutex& mutex_;
  };
  // LogMessage represents a single log call : it carries the message
  // text plus the call-site information ( file / function / line ) and
  // the set of target log levels .
  // Typical use :
  // VLOG(INFO,"this is test %s.\n","hello world");
  // To flush the message immediately instead of waiting for the
  // internal buffer to fill :
  // VLOG(INFO,"this is test %s.\n","hello world").Flush();
  class LogMessage {
  protected:
    // For derived classes ( e.g. FatalLogMessage ) that initialize the
    // members by hand inside their own ctor .
    LogMessage(){}
  public:
    // log message ctor : captures the call site and formats the
    // printf-style message into msg_buffer_ right away .
    LogMessage( 
      int type , const char* file , const char* func , 
      int line , const char* format , ... ) 
      :ignore_(false),target_(type),file_(file),func_(func),line_(line) {
        // NOTE: the initializer list now follows the member declaration
        // order ( ignore_ , target_ , ... ) -- members are initialized
        // in declaration order regardless of list order .
        va_list vars;
        va_start( vars , format );
        Format2( format,vars );
        // BUG FIX: every va_start must be paired with va_end .
        va_end( vars );
    }
  public:
    // flush this log message directly into the device .
    void Flush() {
      Flush2(true);
    }
    // duplicate this log message to another log level ;
    // you can duplicate this message to FATAL as well .
    LogMessage& Duplicate( int level ) {
      DASSERT( level >= INFO && level <= FATAL );
      target_ |= level;
      return *this;
    }
    // ignore the appended time message for this log message .
    LogMessage& IgnoreAppending() {
      // BUG FIX: this used to assign false -- the ctor default --
      // which made calling IgnoreAppending a silent no-op .
      ignore_ = true;
      return *this;
    }
    // If nothing else happens , the log message is flushed to the
    // outside device when the dtor runs .
    ~LogMessage() { Flush2(); }
  protected:
    // Flush the buffered message ( defined out of line ) ;
    // bflush forces an immediate flush of the device .
    void Flush2( bool bflush = false );
    // Format the printf-style message into msg_buffer_ ( out of line ) .
    void Format2( const char* format ,va_list vars );
    bool ignore_;
    int target_;
    size_t msg_size_;
    const char* file_;
    const char* func_;
    int line_;
    char msg_buffer_[1024];
  };
  // Fatal callback : invoked before the process exits ; generally the
  // fatal path forces all buffered log output to the device at once .
  typedef void (*FatalCallback)();
  void SetFatalExitCallback( FatalCallback callback );
  // A fatal message implementation .
  // The fatal behaviour lives in the out-of-line destructor
  // ( defined elsewhere ) .
  class FatalLogMessage : public LogMessage{
  public:
    // Uses the protected LogMessage() default ctor and assigns the
    // members by hand , then formats the message .
    FatalLogMessage( int type , const char* file , const char* func , int line , const char* format , ... ) {
      file_ = file;
      func_ = func;
      line_ = line;
      ignore_ = false;
      target_ = type;
      va_list vars ;
      va_start(vars,format);
      Format2( format , vars );
      // BUG FIX: every va_start must be paired with va_end .
      va_end( vars );
    }
    ~FatalLogMessage();
  };

  // Map each log level name to the class that handles it ; the
  // DLOG/VLOG macros paste the level name onto "LogMessage_" .
  typedef LogMessage LogMessage_INFO ;
  typedef LogMessage LogMessage_WARNING ;
  typedef LogMessage LogMessage_PANIC ;
  typedef FatalLogMessage LogMessage_FATAL ;

  // Before any call to the log macros , we must initialize the logging
  // machinery by calling this function .
  // NOTE: the misspelling ( "Iniitialize" ) is kept because callers
  // already depend on this exact name .
  bool IniitializeLoggingService ( void* option_argument = NULL ) ;


}//namespace util

namespace util {
  // Memory-pool , using this memory pool to avoid 
  // the potential heap allocation . For fast fetching speed ,
  // the code is brought from XGE directly with very tiny adjustment.
  // The pool hands out fixed-size slots ( object_size bytes , padded
  // via Padding to alignment_size ) ; freed slots are chained on a
  // free list so Buy / Sell are O(1) pointer operations .
  class MemoryPool {
  public:
    // base_reserve_size : base number of objects reserved per Grow step .
    // alignment_size    : slot alignment ( Padding assumes a power of
    //                     two -- TODO confirm at call sites ) .
    // object_size       : size in bytes of one object .
    inline MemoryPool(
      size_t base_reserve_size,
      size_t alignment_size,
      size_t object_size
      ) ;
    inline ~MemoryPool();
  public:
    // This function may fail : when the 
    // reserved memory is not enough it returns NULL and
    // You Should Call The Grow explicitly 
    inline void* Buy();

    // This function will put the memory back into the memory pool 
    inline void  Sell( void* ptr );
    // Grow the internal pool ( defined out of line ) :
    // You should call this function explicitly 
    // when Buy returns NULL , e.g. :
    // T* ptr = mem.Buy() ;
    void  Grow( size_t ratio=1 );
    // Free all the memory block in the pool
    // You should notice that ALL the block will be
    // free , therefore , any allocated memory block
    // will be invalid !!!
    // If the force value is false , the pool will 
    // check if the size == 0 ; if so the pool will
    // do the free job and return true ,
    // otherwise it won't do the clear .
    // If the force is true 
    // it will do the clear anyway .
    bool Clear( bool force = false );
  public:
    // true when the free list is exhausted ( nothing left to Buy ) .
    bool   empty()        const {
      return next_ == NULL;
    }
    // number of slots currently handed out .
    size_t alloc_size()   const {
      return alloc_size_;
    }
    // number of slots still available on the free list .
    size_t residual_size() const {
      return residual_size_;
    }
    size_t base_reserve_size() const{
      return base_reserve_size_;
    }
    void set_base_reserve_size( size_t size ) {
      base_reserve_size_ = size;
    }
    size_t alignment_size() const {
      return alignment_size_;
    }
    size_t object_size() const {
      return object_size_;
    }
  private:
    // Round original up to the next multiple of pad .
    // Only correct when pad is a power of two .
    size_t Padding( size_t original , size_t pad ) {
      return static_cast<size_t> ( (original + pad-1)& ~(pad-1) );
    }
  private:
    // Free list node , overlaid on an unused slot .
    struct FreeList {
      // This pointer will point to the start of the next block of memory
      FreeList* next_addr;
    };
    // Linked list recording every chunk of memory the pool allocated .
    struct LinkedList {
      size_t block_size;
      LinkedList* next_block;
    };
    // The next available address ( head of the free list ) .
    FreeList* next_;
    // Number of slots currently handed out .
    size_t alloc_size_;
    // Head of the allocated-chunk list .
    LinkedList* block_ptr_  ;
    // Number of slots still free .
    size_t residual_size_;
    // Base reserve size per Grow step .
    size_t base_reserve_size_;
    // alignment size
    size_t alignment_size_;
    // object size
    size_t object_size_;
    // not allow to copy and assign
    DISALLOW_COPY_AND_ASSIGN( MemoryPool );
  };
  // Dynamic array interface , acting as a replacement for std::vector
  // INSIDE this project only -- do not use it in your own project ,
  // its internal implementation may lead to unexpected results .
  // Why a dynamic array ? We need it to represent the "repeated"
  // option of a protocol element , e.g. :
  // <Element Name="telephone-number" Type="string" Option="repeated" />
  // In fact we treat every field as a DynamicArray ; the array is
  // optimized so that the first few elements take no extra heap
  // allocation , which leads to nearly no overhead .
  template< typename T > class DynamicArray {
  public:
    // We only provide index operations ; iterate with an ordinary
    // index loop . No fancy iterator design pattern is needed here .
    const T& operator [] ( size_t index ) const {
      DASSERT( index < size_ );
      return ptr_[index];
    }
    T& operator[] ( size_t index ) {
      DASSERT( index < size_ );
      return ptr_[index];
    }
    // Push the given T object into the array and return a reference
    // to the stored copy .
    inline T& Push( const T& some_thing );
    // Pop the last element in the array .
    inline void Pop() ;
    // Index is slightly different from operator[] : operator[] only
    // touches already-constructed elements , while Index can expand
    // the array . E.g. Index( 999 , some_class ) on an array of real
    // size 10 expands it to hold index 999 and initializes elements
    // 10..999 with some_thing ( like stl-container::resize when the
    // default parameter is kept ) . If idx is inside [0,size_) the
    // existing element is returned and nothing changes .
    T& Index( size_t idx , const T& some_thing = T() );
    // Reserve enough memory in the array pool internally .
    inline bool Reserve( size_t size );
    // Clear the whole array .
    // With force_memory=true the internal heap memory is released as
    // well ; with the default ( false ) only the logical size is reset
    // so the array can be reused without reallocating .
    void Clear( bool force_memory = false );
    // Swap the arrays . Preferred over copying -- in fact no copy
    // operation is provided ; use Swap , which is very fast .
    void Swap( DynamicArray<T>& another_array );
    // Trim the internal memory to fit the current size . VERY
    // inefficient : it generally allocates a fitting buffer , copies
    // the elements over and frees the old buffer .
    void Trim();
  public:
    // Default ctor : holds nothing , only sets up the internal state .
    inline DynamicArray() ; 
    // Reserve reserve_size elements up front -- useful when the needed
    // capacity is known beforehand .
    inline explicit DynamicArray( size_t reserve_size );
    // Resize to resize_size elements , each initialized from some_thing .
    inline DynamicArray( size_t resize_size , const T& some_thing );
    // Dtor : release all internal memory .
    ~DynamicArray() {
      Clear(true);
    }
  public:
    // Capability will return the internal capacity of this array .
    size_t capability() const {
      return cap_;
    }
    // Size will return the used size of this array .
    size_t size() const {
      return size_;
    }
    // Testing if the container is empty or not .
    bool empty() const {
      return size()==0;
    }

  private:
    enum {
      // Strategy for the in-object storage buffer : if the object is
      // 128 bytes or larger we keep only 1 element off-heap ,
      // otherwise we keep 4 off-heap .
      DA_CAP_THRESHOLD = 128 ,
      // BUG FIX: this used to be computed with a nested class template
      // ( da_cap_size ) explicitly specialized INSIDE the class , which
      // is ill-formed in standard C++ ( an MSVC-only extension ) , and
      // the instantiation "da_cap_size< sizeof(T) < DA_CAP_THRESHOLD >"
      // also misparses because the comparison is not parenthesized .
      // A plain conditional expression is portable and equivalent .
      DA_CAP_NUM  = ( sizeof(T) < DA_CAP_THRESHOLD ) ? 4 : 1
    };
    // In-object storage used before any heap allocation happens .
    // NOTE(review): a raw uint8_t buffer does not guarantee alignment
    // for an arbitrary T -- pre-C++11 there is no portable alignas ;
    // confirm the use sites only store suitably-aligned types .
    uint8_t internal_storage_[sizeof(T)*DA_CAP_NUM];
    T* ptr_;
    // If size_ > DA_CAP_NUM then ptr_ points at a heap buffer instead
    // of internal_storage_ -- the code must take care of this .
    size_t size_;
    // the capacity of this array memory
    size_t cap_;
    // We do not provide a way to copy an array ; copying is so
    // inefficient that it is forbidden -- code it by hand if needed .
    DISALLOW_COPY_AND_ASSIGN(DynamicArray);
  };

  // Hash function 
  typedef size_t hash_value; 
  // Default string hasher : long strings are sampled with a stride
  // ( step ) instead of hashing every character .
  // BUG FIX: this template used to live in an anonymous namespace .
  // In a header , an anonymous namespace gives every translation unit
  // its own copy of the type , so HashMap's default Hasher argument
  // ( defualt_hasher<T> ) named a DIFFERENT type in every TU -- an ODR
  // violation . The template now lives directly in util .
  // NOTE: the misspelled name ( "defualt" ) is kept because existing
  // code references it .
  template< typename Str > struct defualt_hasher {
    hash_value operator() ( const Str& str ) const {
      typedef typename Str::value_type char_type;
      typedef typename Str::traits_type char_traits;
      const char_type* c_str = str.c_str();
      size_t len = char_traits::length(c_str);
      size_t h = len;
      // sample roughly 32 characters , from the tail towards the head
      size_t step = (len>>5)+1;
      size_t i;
      for (i=len; i>=step; i-=step)
        h = h ^ ((h<<5)+(h>>2)+(size_t)c_str[i-1]);
      return h;
    } 
  };
  // Map implementation , act as the replacement of the stl::map , the stl map using
  // red-black tree as their internal representation of a search map , we take advantage
  // of the hash implementation to implement a map . 
  // Hash map implementation is very common , we use the bucket way to resolve to collision 
  // In fact , we can use many existed algorithm like google-sparse-table , however , it is
  // too fancy for us to use , in addition , I do not want to have so many dependence on other
  // library , so we implement our own hash map.
  template < typename T , typename Val , typename Hasher = defualt_hasher<T> > class HashMap {
  public: 
    enum {
      HM_RESERVED_SIZE = 128 
    };
  public:
    HashMap( const size_t reserved_size = HM_RESERVED_SIZE ) ;
    ~HashMap();
    // Insert a key-value into the hash map
    bool Insert( const T& key , const Val& val ) {
      std::pair<T,Val>* objects = object_pool_.Fetch( std::pair<T,Val>(key,val) );
      if( !Insert( objects , Hasher()(key) ) ) {
        object_pool_.Payback(objects);
        return false;
      } 
      ++size_;
      return true;
    }
    // Query a key-value inside of the hash map
    // if it existed , it will return pointer , otherwise return NULL
    Val* Query( const T& key ) {
      size_t idx = QueryIndex(key);
      return idx == size_ ? NULL : &(slot_entry_[idx].object_ptr->second);
    }
    // Query the internal key-value pair instead of
    // only the value object
    std::pair<T,Val>* QueryPair( const T& key ) {
      size_t idx = QueryIndex(key);
      return idx == size_ ? NULL : &(slot_entry_[idx].object_ptr);
    }
    // Delete a key-value inside of the hash map 
    bool Delete( const T& key );
  public:
    // get the size of this hash_map
    size_t size() const {
      return size_;
    }
    // testing if this hash_map is empty
    bool empty() const {
      return size() ==0;
    }
  private:
    // Slot 
    struct hash_slot {
      // this is for internal use simply for the 
      // optimizing trick to reduce the complexity of
      // finding for a empty hash_slot , if this slot 
      // has been used , the next_free_slot will always be NULL .
      hash_slot* next_free_slot;
      // link to the previous free slot 
      hash_slot* previous_free_slot;
      // this will point to the object
      std::pair<T,Val>* object_ptr;
      // this will point to the next slot position
      // -1 means the end of the day 
      hash_slot* next ;
      // this is the hash related to this link( real hash value ) 
      hash_value hash_result;
    };
    // get an empty slot in the memory buffer
    inline hash_slot* NextFreeSlot();
    // free this slot from the empty list
    inline void FreeFromFreeSlot( hash_slot* slot );
    // put back a hash slot into the empty list
    inline void PutbackFreeSlot( hash_slot* slot );
    // rehash the whole hash map
    void Rehash ();
    // initialize the hash map internally 
    void InitializeHashSlot( size_t reserved_size );
    // internal insert operation
    bool Insert( std::pair<T,Val>* object_ptr , hash_value full_hash );
    // internal query
    size_t QueryIndex( const T& key );
    // query index without adjusting the hit_rcd 
    // this is typically used by the Delete routine
    bool QueryIndexWithoutAdjusting( const T& key , size_t* prev_index , size_t* this_index );
  private:
    // A tiny wrapper for the memory pool internally use
    template< typename T > class ObjectPool {
    public:
      T* Fetch( const T& some_thing = T() ) {
        void* ret = internal_pool_.Buy();
        if( ret== NULL ) {
          internal_pool_.Grow(1);
          ret = internal_pool_.Buy();
        }
        DASSERT( ret != NULL );
        return new(ret) T(some_thing);
      }
      void Payback( T* ptr ) {
        ptr->~T();
        internal_pool_.Sell(ptr);
      }
      ObjectPool( size_t reserved_size = 128 , size_t alignment = 8 ) 
        : internal_pool_(reserved_size,alignment,sizeof(T)) {}
    private:
      MemoryPool internal_pool_;
      DISALLOW_COPY_AND_ASSIGN(ObjectPool);
    };
    // using memory pool do the free stuff
    ObjectPool< std::pair<T,Val> > object_pool_;
    // we will use a union to make the code more clear
    // in fact , the both has the same ptr address , we
    // use slot_entry for array operation
    // use the free_slot_entry for the list operation .
    hash_slot* slot_entry_ ;
    // free hash slot entry :
    // this technique will allow us to get an empty slot in O(1) instead of
    // using rehashing which may leads to too much performance overhead 
    // once the slot_entry_ has run out of the memory the free_slot_entry will
    // point to NULL 
    hash_slot* free_slot_entry_;
    // size of the slot in used 
    size_t size_ ;
    // capacity of this hash slot entry
    size_t cap_  ;
  };

}// namespace util


// Field key stuff 
namespace util{
  // Fixed string implementation . 
  // Fixed string is not a dynamic string but a fixed-capacity string ,
  // for performance reasons . It is only used internally -- it is not
  // a String Container / String Builder and not a replacement for
  // std::string ; it is a very light-weight string providing only some
  // fundamental features . 'length' is the capacity of the internal
  // buffer in characters , including the terminator .
  template < 
    size_t length , 
    typename char_type = achar_t , 
    typename ct = std::char_traits<char_type> 
  > class FixedString {
  public:
    typedef char_type value_type;
    typedef ct traits_type;
    // Read-only element access ( the string is never modified in place ) .
    // BUG FIX: the valid range is [0,size_) ; the old check
    // ( idx < size_-1 ) rejected the last character and , for an empty
    // string , underflowed so that ANY index passed the assertion .
    char_type operator[]( size_t idx ) const {
      DASSERT( idx < size_ );
      return char_[idx];
    }
    const char_type* c_str() const {
      return char_; 
    }
    // Build from a terminated string .
    FixedString( const char_type* str ) {
      size_ = ct::length(str);
      Init(str,size_);
    }
    // Build from an explicit ( str , size ) pair .
    FixedString( const char_type* str , size_t size ):size_(size){
      Init(str,size);
    }
    // Reset to the empty string .
    void Clear() {
      // BUG FIX: terminate with char_type() -- the real NUL for this
      // character type -- instead of ct::eof() ( which is an int_type
      // sentinel , not a character ) , and reset the cached size so
      // size() and operator== observe an empty string .
      char_[0] = char_type();
      size_ = 0;
    }
    size_t size() const {
      return size_;
    }
  public:
    // do the comparison here : sizes first , then the character data .
    bool operator == ( const FixedString& another_string) const {
      if( another_string.size_ != size_ ) 
        return false;
      return ct::compare( char_ , another_string.char_ , size_ )==0;
    }
    bool operator != ( const FixedString& another_string) const {
      return !((*this) == another_string);
    }
  public:
    // copy and assign operator
    FixedString& operator = ( const FixedString& str ) {
      // BUG FIX: copy whole characters , not raw bytes -- the old code
      // copied 'length' BYTES , truncating wide-character buffers .
      // Copying size_+1 characters includes the terminator and never
      // reads the uninitialized tail of the source buffer .
      memcpy( char_ , str.char_ , (str.size_+1)*sizeof(char_type) );
      size_ = str.size_;
      return *this;
    }
    // copy-ctor ( delegates to the assignment operator )
    FixedString( const FixedString& str ) {
      *this = str;
    }
  private:
    // internal initialize function for the string 
    void Init( const char_type* str , size_t size ) {
      // 'size' characters plus the terminator must fit into char_ ;
      // BUG FIX: the old check ( size < length-1 ) was one stricter
      // than necessary -- size == length-1 fits exactly .
      DASSERT( size < length );
      memcpy( char_ , str , size*sizeof(char_type) );
      char_[size] = char_type();
    }
  private:
    char_type char_[length];
    size_t size_;
  };
}//namespace util


// Now let's start from the simple things : type , option , and name .
// FieldType holds one of the PROTOMSG_* type enumeration values below .
typedef int FieldType;
// the message-field-name is a little bit tricky to implement :
// all message-fields of a message are organized in a HashMap , so the
// message-field-name is really the key of that HashMap . Because the
// HashMap stores both the key and the object , a message-field object
// does not itself contain the name -- only a reference to it .
typedef util::FixedString< MAX_PROTOMSG_FIELD_KEY_LENGTH > FieldKey;
// FieldOption holds one of the PROTOMSG_REQUIRED / OPTIONAL / REPEATED
// enumeration values below .
typedef int FieldOption;
// Supported wire types in the protomsg lib .
// NOTE: PROTOMSG_UNKOWN keeps its misspelled name ( "UNKOWN" ) because
// existing code may reference it .
enum {
  PROTOMSG_UNKOWN   =0 ,
  PROTOMSG_STRING   =1 , // a utf-8 string 
  PROTOMSG_REAL     =2 , // a float number which takes 4 bytes exactly
  PROTOMSG_FINT32   =3 , // a fixed int 32 
  PROTOMSG_FINT64   =4 , // a fixed int 64
  PROTOMSG_INT      =5 , // an int encoded with the variable-length int technique
  PROTOMSG_BOOLEAN  =6 , // a boolean type which will be encoded with only one byte
  PROTOMSG_SCALAR=7 , // a non-enumeration scalar type
  PROTOMSG_MESSAGE  =8 , // a nested message type
  PROTOMSG_ENUM     =9   // an enumeration type ; this should always be declared last
};
// corresponding cpp type enumeration value
enum {
  CPP_STRING , // represent an immutable str
  CPP_FLOAT   , // represent a float value
  CPP_INT32  , // represent an int32 value
  CPP_INT64  , // represent an int64 value
  CPP_BOOLEAN, // represent a bool value
  CPP_VARINT , // represent a var-int value
  CPP_MESSAGE_CLASS , // represent a message class 
  CPP_ENUM_CLASS      // represent an enum class
};
class Message;
class MessageFieldEnumeration;
class Enumerator;
struct immutable_str;
// TargetCppType maps a PROTOMSG_* enumeration value to the C++ type
// used to represent it when reading a protocol file .
// NOTE: PROTOMSG_UNKOWN and PROTOMSG_SCALAR deliberately have no
// specialization -- presumably they are never materialized as a C++
// value ( TODO confirm ) .
template< size_t EnumType > struct TargetCppType {
  // if we reach here , an unsupported type was given ; the primary
  // template is intentionally empty so the use fails to compile .
};
// The PROTOMSG_STRING return type
template<> struct TargetCppType< PROTOMSG_STRING > {
  typedef immutable_str value_type;
  typedef immutable_str* ptr_type;
  static const int cpp_type = CPP_STRING;
};
// The PROTOMSG_REAL return type
template<> struct TargetCppType< PROTOMSG_REAL > {
  typedef float  value_type;
  typedef float* ptr_type;
  static const int cpp_type = CPP_FLOAT;
};
// The PROTOMSG_FINT32 return type
template<> struct TargetCppType< PROTOMSG_FINT32 > {
  typedef int32_t value_type;
  typedef int32_t* ptr_type;
  static const int cpp_type = CPP_INT32;
};
// The PROTOMSG_FINT64 return type
template<> struct TargetCppType< PROTOMSG_FINT64 > {
  typedef int64_t value_type;
  typedef int64_t* ptr_type;
  static const int cpp_type = CPP_INT64;
};
// The PROTOMSG_INT return type ( variable-length encoded on the wire ,
// an int32_t in memory ) .
template<> struct TargetCppType< PROTOMSG_INT > {
  typedef int32_t value_type;
  typedef int32_t* ptr_type;
  static const int cpp_type = CPP_VARINT;
};
// The PROTOMSG_BOOLEAN return type
template<> struct TargetCppType< PROTOMSG_BOOLEAN > {
  typedef bool value_type;
  typedef bool* ptr_type;
  static const int cpp_type = CPP_BOOLEAN;
};
// The PROTOMSG_ENUM return type
template<> struct TargetCppType< PROTOMSG_ENUM > {
  typedef Enumerator value_type;
  typedef Enumerator* ptr_type;
  static const int cpp_type = CPP_ENUM_CLASS;
};
// The PROTOMSG_MESSAGE return type
template<> struct TargetCppType< PROTOMSG_MESSAGE> {
  typedef Message value_type;
  typedef Message* ptr_type ;
  static const int cpp_type = CPP_MESSAGE_CLASS;
};
// option for a field descriptor 
enum {
  PROTOMSG_REQUIRED =1, // required option
  PROTOMSG_OPTIONAL =2, // optional option
  PROTOMSG_REPEATED =3  // repeated option
};
// the internal string buffer representation : a non-owning pointer
// plus a byte length .
struct immutable_str {
  // this string can be a utf-8 string , so there is no clue whether it
  // is a unicode string or a simple ansi string
  const char* str ;
  // length in bytes
  size_t length  ;
};
// An enumerator representation : the key-value pair of a single entry
// inside an enum type ( the internal representation of one enumeration
// constant ) . The ctors are defined out of line .
class Enumerator {
public:
  Enumerator( const FieldKey& key , int value );
  Enumerator( const char* key , int value );
  Enumerator( const char* key , size_t len , int value );
public:
  // the integral value of this enumeration constant
  int value() const {
    return value_;
  }
  // the name as a C string
  const char* c_str_key() const {
    return key_.c_str();
  }
  // the name as a FieldKey
  const FieldKey& key() const {
    return key_;
  }
private:
  FieldKey key_;
  int value_;
};

// Other useful technique detail 
namespace util {
  // detail information : implementation helper for binary_min /
  // binary_max below .
  // BUG FIX: these helpers used to live in an anonymous namespace .
  // In a header , an anonymous namespace gives every translation unit
  // its own copy of the type , so binary_min / binary_max instantiated
  // in different TUs referred to different types -- an ODR violation .
  // They now live directly in util .
  template< bool comparision , size_t left , size_t right > 
  struct binary_comp_imp {};
  template< size_t left, size_t right > 
  struct binary_comp_imp<true,left,right> {
    static const size_t value = left;
  };
  template< size_t left, size_t right > 
  struct binary_comp_imp<false,left,right> {
    static const size_t value = right;
  };
  // the following meta-programming is used to determine the min value of two 
  // compile time determined value , the usage is as below :
  // const size_t min_value = binary_min< 4,6 >::value;
  // the min_value will be 4 
  template< size_t left , size_t right > 
  struct binary_min {
    static const size_t value = binary_comp_imp< (left < right) , left , right >::value;
  };
  // the following meta-programming is used to determine the max value of two 
  // compile time determined value , the usage is as below :
  // const size_t max_value = binary_max< 4,6 >::value;
  // the max_value will be 6 
  template< size_t left , size_t right >
  struct binary_max {
    static const size_t value = binary_comp_imp< (left > right) , left , right >::value;
  };
  // the following meta-programming is used to determine the min value of three
  template< size_t value1 , size_t value2 , size_t value3 >
  struct triple_min {
    static const size_t value = binary_min<
      binary_min<value1,value2>::value , value3 > ::value;
  };
  // the following meta-programming is used to determine the max value of three
  template< size_t value1 , size_t value2 , size_t value3 >
  struct triple_max {
    static const size_t value = binary_max<
      binary_max<value1,value2>::value,value3>::value;
  };
}//namespace util
}//namespace protomsg




////////////////////////////////////////// Implementation of the Template Class //////////////////////////////////////////////

namespace protomsg {
namespace util{
  //------------------------------------------------------------------------------
  // Fixed-size-object pool constructor.
  //   base_reserve_size : how many objects each internal block reserves
  //   alignment_size    : alignment applied to each pooled object
  //   object_size       : size in bytes of a single pooled object
  // Grow() (defined elsewhere) allocates the first block so Buy() can
  // hand out memory immediately after construction.
  // NOTE(review): the member-initializer order here must match the member
  // declaration order in the class definition (not visible in this chunk)
  // or compilers will warn -- confirm against the class declaration.
  inline MemoryPool::MemoryPool( size_t base_reserve_size , size_t alignment_size, size_t object_size ):
  next_(NULL),
    block_ptr_(NULL),
    alloc_size_(0),
    residual_size_(0),
    base_reserve_size_(base_reserve_size),
    object_size_(object_size),
    alignment_size_(alignment_size){
      Grow();
  }
  //------------------------------------------------------------------------------
  // Destructor: Clear() (defined elsewhere) releases every block owned by
  // the pool.
  inline MemoryPool::~MemoryPool() {
    Clear();
  }
  //------------------------------------------------------------------------------
  inline void* MemoryPool::Buy() {
    if( next_ == NULL ) {
      return NULL;
    }
    void* ret = next_ ;
    next_ = next_->next_addr;
    ++alloc_size_;
    --residual_size_;
    return ret;
  }
  //------------------------------------------------------------------------------
  inline void MemoryPool::Sell( void* ptr ) {
    DASSERT(alloc_size_!=0);
    ((FreeList*)(ptr))->next_addr = next_;
    next_ = (FreeList*)(ptr);
    --alloc_size_;
    ++residual_size_;
  }
  //------------------------------------------------------------------------------
  // Grow the capacity to at least `size` elements. Returns false when the
  // current capacity already suffices (no work done), true after growing.
  template< typename T > 
  bool DynamicArray<T>::Reserve( size_t size ) {
    // nothing to do when the request fits into the current capacity
    if( cap_ >= size )
      return false;
    // allocate a fresh heap buffer and relocate the existing elements.
    // NOTE(review): elements are relocated with memcpy throughout this
    // container, which assumes T is trivially copyable -- confirm callers
    // only instantiate it with POD-like types.
    T* grown = (T*)gMalloc( size*sizeof(T) );
    memcpy( grown , ptr_ , sizeof(T)*size_ );
    // the old buffer lives on the heap only when cap_ exceeds the
    // internal (in-object) storage capacity
    if( cap_ > DA_CAP_NUM )
      gFree( ptr_ );
    ptr_ = grown;
    cap_ = size;
    return true;
  }
  //------------------------------------------------------------------------------
  // Construct an empty array whose capacity is at least reserve_size
  // elements (Reserve is a no-op when the internal storage suffices).
  template< typename T > 
  inline DynamicArray<T>::DynamicArray( size_t reserve_size ) 
    : cap_(DA_CAP_NUM) , size_(0) , ptr_( (T*)internal_storage_ ) {
    Reserve(reserve_size);
  }
  //------------------------------------------------------------------------------
  // Construct an array pre-filled with copies of some_thing.
  // FIX: the default argument "= T()" was repeated on this out-of-class
  // definition; C++ forbids that (a default argument belongs on the
  // in-class declaration only), so it is kept as a comment like the other
  // definitions in this file (see Index below).
  // NOTE(review): Index(resize_size,...) fills elements [0, resize_size]
  // inclusive, i.e. resize_size+1 elements -- if the intent was exactly
  // resize_size elements this is off by one; confirm against callers
  // before changing it.
  template < typename T > 
  inline DynamicArray<T>::DynamicArray( size_t resize_size , const T& some_thing /* = T() */ ) 
    : cap_(DA_CAP_NUM) , size_(0) , ptr_((T*)(internal_storage_)) {
    Index(resize_size,some_thing);
  }
  //------------------------------------------------------------------------------
  // Default-construct an empty array backed by the internal storage.
  template< typename T > 
  inline DynamicArray<T>::DynamicArray() : cap_(DA_CAP_NUM) , size_(0) , ptr_((T*)internal_storage_) {}
  //------------------------------------------------------------------------------
  // Destroy every element in the array. When force_memory is true the
  // heap buffer (if any) is released as well and the array falls back to
  // its internal storage.
  template< typename T > 
  void DynamicArray<T>::Clear( bool force_memory /* = false */ ) {
    // explicitly run the destructors, front to back
    for( size_t idx = 0 ; idx < size_ ; ++idx )
      ptr_[idx].~T();
    if( force_memory ) {
      // only a capacity beyond the internal storage lives on the heap
      if( cap_ > DA_CAP_NUM )
        gFree(ptr_);
      cap_ = DA_CAP_NUM;
      ptr_ = (T*)(internal_storage_);
    }
    size_ = 0;
  }
  //------------------------------------------------------------------------------
  // Append a copy of some_thing, doubling the capacity when the array is
  // full; returns a reference to the newly stored element.
  template< typename T > 
  inline T& DynamicArray<T>::Push( const T& some_thing ) {
    if( cap_ == size_ )
      DVERIFY( Reserve( size_*2 ) );
    // copy-construct in place at the end of the used range
    T& stored = *( new ( ptr_ + size_ ) T(some_thing) );
    ++size_;
    return stored;
  }
  //------------------------------------------------------------------------------
  // Return a reference to element idx, first growing the array (filling
  // the gap with copies of some_thing) until it holds at least idx+1
  // elements.
  template< typename T > 
  T& DynamicArray<T>::Index(size_t idx , const T& some_thing /* = T() */) {
    // get the whole backing store in one allocation up front
    Reserve( idx+1 );
    // copy-construct every element between the old size and idx, inclusive
    while( size_ <= idx ) {
      new (ptr_+size_) T(some_thing);
      ++size_;
    }
    return ptr_[idx];
  }
  //------------------------------------------------------------------------------
  // Exchange the contents of this array with another_array.
  // Three storage layouts must be handled:
  //   1. both arrays use the internal (in-object) storage
  //   2. exactly one of them owns a heap buffer
  //   3. both own heap buffers
  template< typename T > 
  void DynamicArray<T>::Swap( DynamicArray<T>& another_array ) {
    if( another_array.cap_ == DA_CAP_NUM &&
        cap_ == DA_CAP_NUM 
      ) {
        // 1. both internal: three-way byte copy through a stack buffer.
        // FIX: the original copied size_*sizeof(T) bytes (THIS array's
        // size) in all three directions, corrupting the exchange whenever
        // the two arrays had different sizes; copying the entire internal
        // storage is always correct and cheap (DA_CAP_NUM is small).
        uint8_t local_stack_buffer [ DA_CAP_NUM*sizeof(T) ];
        memcpy( local_stack_buffer , another_array.ptr_ , DA_CAP_NUM*sizeof(T) );
        memcpy( another_array.ptr_ , ptr_ , DA_CAP_NUM*sizeof(T) );
        memcpy( ptr_ , local_stack_buffer , DA_CAP_NUM*sizeof(T) );
        std::swap( size_ , another_array.size_ );
    } else if ( another_array.cap_ > DA_CAP_NUM && cap_ == DA_CAP_NUM ) {
      // 2a. the other array owns the heap buffer: steal it, and move our
      // size_ internal elements into its internal storage.
      ptr_ = another_array.ptr_ ; 
      cap_ = another_array.cap_ ;
      memcpy( another_array.internal_storage_ , internal_storage_ , size_ * sizeof(T) );
      std::swap( size_ , another_array.size_ );
      another_array.cap_ = DA_CAP_NUM;
      another_array.ptr_ = (T*)another_array.internal_storage_;
    } else if ( another_array.cap_ == DA_CAP_NUM && cap_ > DA_CAP_NUM  ) {
      // 2b. mirror image of case 2a.
      another_array.ptr_ = ptr_ ;
      another_array.cap_ = cap_ ;
      memcpy( internal_storage_ , another_array.internal_storage_ , another_array.size_ * sizeof(T) );
      std::swap( size_ , another_array.size_ );
      cap_ = DA_CAP_NUM;
      ptr_ = (T*)internal_storage_;
    } else {
      // 3. both on the heap: a plain pointer/bookkeeping swap suffices.
      std::swap( size_ , another_array.size_ );
      std::swap( cap_ , another_array.cap_ );
      std::swap( ptr_ , another_array.ptr_ );
    }
  }
  //------------------------------------------------------------------------------
  // Shrink a heap-allocated buffer so its capacity matches the current
  // size, releasing the slack memory. No-op for internal storage or an
  // already-tight buffer.
  template< typename T > 
  void DynamicArray<T>::Trim() {
    if( cap_ <= DA_CAP_NUM )
      return; // backed by internal storage: nothing to trim
    if( size_ == cap_ )
      return; // already exactly sized
    if( size_ <= DA_CAP_NUM ) {
      // FIX: the original would happily gMalloc(0) here when size_ == 0
      // and leave cap_ == 0 (a later Push would then write out of
      // bounds). Small contents fit back into the internal storage.
      memcpy( internal_storage_ , ptr_ , sizeof(T)*size_ );
      gFree(ptr_);
      ptr_ = (T*)(internal_storage_);
      cap_ = DA_CAP_NUM;
      return;
    }
    // FIX: the original allocated gMalloc(size_) -- an element COUNT, not
    // a byte count -- under-allocating for any T with sizeof(T) > 1.
    T* target_buffer = (T*)gMalloc( size_*sizeof(T) );
    memcpy( target_buffer , ptr_ , sizeof(T)*size_ );
    gFree(ptr_);
    ptr_ = target_buffer;
    cap_ = size_;
  }
  //------------------------------------------------------------------------------
  // Remove the last element, running its destructor; no-op when empty.
  template< typename T >
  inline void DynamicArray<T>::Pop() {
    if( !empty() ) {
      --size_;
      ptr_[size_].~T();
    }
  }
  //------------------------------------------------------------------------------
  // Allocate reserved_size hash slots and thread every one of them onto
  // the doubly linked free-slot list (object_ptr == NULL marks "unused").
  template< typename T , typename Val , typename Hasher > 
  void HashMap<T,Val,Hasher>::InitializeHashSlot( size_t reserved_size ) {
    slot_entry_ = (hash_slot*)gMalloc( sizeof(hash_slot)*reserved_size );
    for( size_t i = 0 ; i < reserved_size ; ++i ) {
      hash_slot& slot = slot_entry_[i];
      // link each slot to its neighbours; the first and last slots
      // terminate the list with NULL
      slot.previous_free_slot = ( i == 0 ) ? NULL : slot_entry_ + i - 1;
      slot.next_free_slot = ( i + 1 == reserved_size ) ? NULL : slot_entry_ + i + 1;
      slot.object_ptr = NULL;
    }
    // every slot starts out free, so the list head is the first slot
    free_slot_entry_ = slot_entry_;
  }
  //------------------------------------------------------------------------------
  // Insert an already-allocated key/value pair under its full hash.
  // Returns false when an equal key is already present (the pair is NOT
  // stored in that case), true on success.
  // NOTE(review): size_ is not incremented here -- presumably a public
  // wrapper does that (Rehash relies on size_ staying fixed while it
  // reinserts); confirm against the class declaration.
  template< typename T , typename Val , typename Hasher > 
  bool HashMap<T,Val,Hasher>::Insert( std::pair<T,Val>* object_pair_ptr , hash_value full_hash ) {
    if( size_ == cap_ ) {
      // table is full: double the capacity and reinsert everything
      Rehash();
    }
    // home slot for this hash (cap_ is used as a power-of-two mask)
    size_t idx = full_hash & ( cap_ -1 );
    hash_slot* slot = slot_entry_ + idx ;
    if( slot->object_ptr == NULL ) {
      // the home slot is empty: claim it from the free list
      FreeFromFreeSlot(slot);
      slot->hash_result = full_hash ;
      slot->object_ptr = object_pair_ptr;
      slot->next = NULL;
    } else {
      // FIX: the original never compared the home slot's own key, so a
      // duplicate key whose home slot was occupied by that same key was
      // inserted a second time. Reject it here like the chain entries.
      if( slot->object_ptr->first == object_pair_ptr->first )
        return false;
      // walk the collision chain looking for a duplicate key, keeping
      // track of the tail so we can append
      hash_slot* tail = slot;
      for( hash_slot* link = slot->next ; link != NULL ; link = link->next ) {
        if( link->object_ptr->first == object_pair_ptr->first )
          return false;
        tail = link;
      }
      // take a free slot and append it to the end of the chain
      hash_slot* tar_slot = NextFreeSlot();
      tar_slot->next = NULL;
      tail->next = tar_slot;
      tar_slot->hash_result = full_hash;
      tar_slot->object_ptr = object_pair_ptr;
    }
    return true;
  }
  //------------------------------------------------------------------------------
  // Detach slot from the doubly linked free-slot list because it is about
  // to be put into service. Handles slot being the current list head.
  template< typename T , typename Val , typename Hasher > 
  inline void HashMap<T,Val,Hasher>::FreeFromFreeSlot( hash_slot* slot ) {
    // advance the head first when we are removing the head itself
    if( free_slot_entry_ == slot )
      free_slot_entry_ = slot->next_free_slot;
    // splice the slot out of the chain
    hash_slot* after = slot->next_free_slot;
    hash_slot* before = slot->previous_free_slot;
    if( after != NULL )
      after->previous_free_slot = before;
    if( before != NULL )
      before->next_free_slot = after;
    slot->next_free_slot = NULL;
    slot->previous_free_slot = NULL;
  }
  //------------------------------------------------------------------------------
  // Pop and return the head of the free-slot list; asserts that at least
  // one free slot remains.
  template< typename T , typename Val , typename Hasher > 
  inline typename HashMap<T,Val,Hasher>::hash_slot* HashMap<T,Val,Hasher>::NextFreeSlot() {
    hash_slot* head = free_slot_entry_ ;
    DASSERT(head!=NULL);
    hash_slot* new_head = head->next_free_slot;
    if( new_head != NULL )
      new_head->previous_free_slot = NULL;
    free_slot_entry_ = new_head;
    // fully detach the returned slot from the list
    head->previous_free_slot = head->next_free_slot = NULL;
    return head;
  }
  //------------------------------------------------------------------------------
  // Push a no-longer-used slot back onto the front of the free-slot list.
  // (The empty-list and non-empty-list cases collapse into one path: when
  // the list is empty, next_free_slot is simply set to NULL.)
  template< typename T , typename Val , typename Hasher > 
  inline void HashMap<T,Val,Hasher>::PutbackFreeSlot( hash_slot* slot ) {
    slot->previous_free_slot = NULL;
    slot->next_free_slot = free_slot_entry_;
    if( free_slot_entry_ != NULL )
      free_slot_entry_->previous_free_slot = slot;
    free_slot_entry_ = slot;
  }
  //------------------------------------------------------------------------------
  // Double the capacity and reinsert every live pair into the new table.
  template< typename T , typename Val , typename Hasher > 
  inline void HashMap<T,Val,Hasher>::Rehash() {
    const size_t old_cap = cap_;
    cap_*=2;
    hash_slot* old_slot = slot_entry_;
    // allocates a fresh slot array and rebuilds the free-slot list
    InitializeHashSlot( cap_ );
    // FIX: the original looped i over [0, size_), but live entries are
    // scattered across the WHOLE old slot array, not packed into its
    // first size_ slots -- it reinserted arbitrary (possibly empty)
    // slots and silently dropped real entries. Scan the full old
    // capacity and reinsert only the occupied slots.
    for( size_t i = 0 ; i < old_cap ; ++i ) {
      if( old_slot[i].object_ptr != NULL )
        DVERIFY( Insert( old_slot[i].object_ptr , old_slot[i].hash_result ) );
    }
    // do not forget to release the old slot array
    gFree( old_slot );
  }
  //------------------------------------------------------------------------------
  // Look up key and return the index of its slot, or size_ (used
  // throughout this class as the "not found" sentinel) when absent.
  template< typename T , typename Val , typename Hasher > 
  size_t HashMap<T,Val,Hasher>::QueryIndex( const T& key ) {
    hash_value full_hash = Hasher()(key);
    hash_slot* entry_slot = slot_entry_ + ( full_hash & ( cap_ -1 ) );
    // an empty home slot means the key cannot be in the table
    if( entry_slot->object_ptr == NULL )
      return size_;
    // check the home slot itself first
    if( entry_slot->hash_result == full_hash &&
        entry_slot->object_ptr->first == key )
      return (size_t)(entry_slot - slot_entry_);
    // otherwise walk the collision chain
    for( hash_slot* link = entry_slot->next ; link != NULL ; link = link->next ) {
      if( link->hash_result == full_hash &&
          link->object_ptr->first == key )
        return (size_t)(link - slot_entry_);
    }
    // nothing in the chain matched
    return size_;
  }
  //------------------------------------------------------------------------------
  // Locate key and report both its slot index (*this_index) and the index
  // of the previous slot in its collision chain (*prev_index). When the
  // key sits in the chain's home slot there is no predecessor and
  // *prev_index is set to the sentinel size_. Returns false (leaving the
  // output parameters untouched) when the key is not present.
  template< typename T , typename Val , typename Hasher > 
  bool HashMap<T,Val,Hasher>::QueryIndexWithoutAdjusting( const T& key , size_t* prev_index , size_t* this_index ) {
    hash_value full_hash = Hasher()(key);
    size_t idx = full_hash & (cap_-1);
    hash_slot* home = slot_entry_+idx;
    if( home->object_ptr == NULL )
      return false;
    if( home->hash_result == full_hash &&
        home->object_ptr->first == key ) {
      // found in the home slot: no predecessor in the chain
      *prev_index = size_;
      *this_index = idx;
      return true;
    }
    // scan the collision chain, remembering the predecessor as we go
    hash_slot* prev = home;
    for( hash_slot* cur = home->next ; cur != NULL ; cur = cur->next ) {
      if( cur->hash_result == full_hash &&
          cur->object_ptr->first == key ) {
        *prev_index = prev - slot_entry_;
        *this_index = cur - slot_entry_;
        return true;
      }
      prev = cur;
    }
    return false;
  }
  //------------------------------------------------------------------------------
  // Remove the pair stored under key, returning its memory to
  // object_pool_. Returns false when the key is not present.
  template< typename T , typename Val , typename Hasher > 
  bool HashMap<T,Val,Hasher>::Delete( const T& key ) {
    size_t prev_index;
    size_t this_index;
    if( !QueryIndexWithoutAdjusting(key,&prev_index,&this_index) ) 
      return false;
    if( prev_index == size_ ) {
      // the deletion happens at the home slot of the chain
      hash_slot* entry_slot = slot_entry_ + this_index;
      object_pool_.Payback( entry_slot->object_ptr );
      if( entry_slot->next == NULL ) {
        // no chain behind it: the home slot simply becomes free again
        entry_slot->object_ptr = NULL;
        PutbackFreeSlot(entry_slot);
      } else {
        // promote the first chain element into the home slot
        hash_slot* next = entry_slot->next;
        entry_slot->next = next->next;
        entry_slot->object_ptr = next->object_ptr;
        // FIX: the original left the deleted element's hash_result in the
        // home slot, so later lookups of the promoted key failed the
        // hash comparison in QueryIndex. Move the hash with the object.
        entry_slot->hash_result = next->hash_result;
        next->object_ptr = NULL;
        PutbackFreeSlot(next);
      }
    } else {
      // deleting from the middle/end of a chain: unlink and free the slot
      hash_slot* this_slot = slot_entry_ + this_index;
      hash_slot* prev_slot= slot_entry_ + prev_index;
      prev_slot->next = this_slot->next;
      object_pool_.Payback( this_slot->object_ptr );
      // FIX: the original left object_ptr dangling on the freed slot,
      // which the destructor would Payback a second time (double free).
      this_slot->object_ptr = NULL;
      PutbackFreeSlot( this_slot );
    }
    --size_;
    return true;
  }
  //------------------------------------------------------------------------------
  // Destructor: hand every still-stored pair back to the object pool,
  // then release the slot array itself.
  template< typename T , typename Val , typename Hasher > 
  HashMap<T,Val,Hasher>::~HashMap() {
    for( size_t i = 0 ; i < cap_ ; ++i ) {
      hash_slot& slot = slot_entry_[i];
      if( slot.object_ptr != NULL )
        object_pool_.Payback( slot.object_ptr );
    }
    gFree(slot_entry_);
  }
  //------------------------------------------------------------------------------
  // Construct the map with room for reserved_size pairs; a request of 1
  // is bumped to 2 everywhere below (the repeated ternary is why the
  // original author called this ctor ugly -- the expression cannot be
  // hoisted into a local because it is needed inside the member
  // initializer list).
  // NOTE(review): slot lookup masks with (cap_ - 1), which only spreads
  // hashes correctly when the capacity is a power of two -- presumably
  // HM_RESERVED_SIZE is one; confirm callers never pass a
  // non-power-of-two reserved_size.
  template< typename T , typename Val , typename Hasher > 
  HashMap<T,Val,Hasher>::HashMap( const size_t reserved_size /* = HM_RESERVED_SIZE */ ) 
    :object_pool_(reserved_size==1?2:reserved_size),size_(0),cap_(reserved_size==1?2:reserved_size){
    InitializeHashSlot(reserved_size==1?2:reserved_size);
  }
  //------------------------------------------------------------------------------
}//namespace util
}//namespace protomsg




#endif// PROTOMSG_CONFIG_H_