
#ifndef SYS_BULK_STORAGE_H
#define SYS_BULK_STORAGE_H 1

#define _GNU_SOURCE
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <malloc.h>

/**

 \defgroup BulkStorage Bulk Storage

 The bulk_storage module provides the following functionality:

 Uniform interface for:

 Memory-based storage.
 Disk-based storage.
 Cached storage.

 The following methods control the different storage modules:

 This call is dependent on the storage method in use.  Each storage
 method defines its own dataStore struct.  See below.

 dataStore ds = open(filename, sizeof(struct_to_be_stored));
 close(ds);

 dataStoreAddress idx = getFreeBlock(ds);

 struct = readBlock(idx, int block);
 newBlock(idx, int block, &struct);
 updateBlock(idx, int block, &struct);
 freeBlock(idx, int block);

 I've been thinking about the exact semantics of each of the calls below, and this is
 what I think will work best / fastest, and still allow memory management to be handled
 by the bulk_storage implementation (necessary for caching):

 open:    Open or create a new data store.  (Dependent on store implementation)

 read:    Return a pointer to memory containing the requested block.  The client may write to
          this memory location, making this location "dirty".  However, the result is undefined
	  if read is called on a "dirty" block.  Read also locks the memory to prevent
	  deallocation by the bulk_storage library.  In general, calling read on a locked block
	  has undefined behavior.

 write:   Make a "dirty" block not-dirty.  Uses the copy of the block that read returned.

 release: Allow the memory region to be released by the library.  The behavior is undefined
          if release is called on a dirty block.

 close:   Release the resources needed by the data store, and make sure that any persistent
          backing structures are properly updated.  The behavior is undefined if the
          data store contains dirty or locked blocks.




*/
#include <stdio.h>

/* definitions for generic store usage. */

/**
   A store is the basic primitive for dealing with the bulk_storage
   library.  It contains an instance of one of the bulk storage
   implementations, and information that allows the generic store_*
   macros to call the functions in the appropriate
   implementation. (The two implementations are currently memStore and
   cacheStore)

   The performance of the store macros could be improved.  Currently,
   direct calls to memstore (which can be inlined) are about twice as
   fast as calls using the store macros (which can't be inlined.)
   This could be fixed by setting up the store macros to use
   if()...else... blocks instead of function pointers, but would make
   it more difficult to add new store implementations later on.

   @see store_read, store_write, store_release, store_free,
   store_close, store_open_cached, store_open_memory

   \ingroup BulkStorage
*/

struct store {
  /** Dispatch table for the underlying implementation
      (currently memStore or cacheStore). */
  const struct store_functions * functions;
  /** Opaque pointer to the implementation's private state; the
      store_* macros pass it as the first argument of every call
      through the dispatch table. */
  void * store_p;
};


/**
    Dispatch table of implementation callbacks for a store.  Each
    function receives the store's private state pointer (store_p) as
    its first argument.  See the corresponding store_* macros for the
    semantics of each operation.
    \ingroup BulkStorage
*/
struct store_functions {
  void   (*close)(void*);           /**< Release all resources; see store_close(). */
  void * (*read)(void*, off_t);     /**< Return (and lock) a pointer to a block; see store_read(). */
  void   (*write)(void*, off_t);    /**< Make a dirty block not-dirty; see store_write(). */
  off_t  (*new_store)(void*);       /**< Allocate a slot for a new block; see store_new(). */
  void   (*release)(void*, off_t);  /**< Unlock a block previously read; see store_release(). */
  void   (*free)(void*, off_t);     /**< Return a block to the free pool; see store_free(). */
  size_t (*blockSize)(void*);       /**< Block size (bytes) for this store; see store_blockSize(). */
};

//#define _store_close(store)        (*(store->functions->close))(store->store)


/**
   Read (and lock) a block from the store.

   @param stor (store *) The store you're reading from.
   @param block (off_t) The block number you are reading.

   @return (void *) A pointer to the block.  Do not call free() on
   this pointer, as memory management is handled by the store.
   Instead, call store_release() on the block number you passed into
   read.
    \ingroup BulkStorage
*/
/* Arguments are fully parenthesized so that expression arguments
   (e.g. store_read(stores + i, base + off)) expand correctly. */
#define store_read(stor, block)    (*((stor)->functions->read))     ((stor)->store_p, (block))

/**
   Write a block to the store.  If you modify the contents of a
   pointer returned by store_read, you must call store_write, or bad
   things can happen.  If you want to modify the data without
   affecting the store, make a local copy.

   @param stor (store *) The store you're writing to.
   @param block (off_t) The block number you are writing.
    \ingroup BulkStorage
 */
/* Arguments parenthesized for macro hygiene. */
#define store_write(stor, block)   (*((stor)->functions->write))    ((stor)->store_p, (block))
/**
    Allocate a slot in the store for a new block.

    @param stor (store *) The store to allocate from.
    @return (off_t) The block number of the new block.
    \ingroup BulkStorage
*/
/* Argument parenthesized for macro hygiene. */
#define store_new(stor)            (*((stor)->functions->new_store))      ((stor)->store_p)


/**
   Release the lock that store_read creates when it is called.  This
   function must be called once for every call to store_read, or
   unexpected behavior may result.  (Actually, memStore currently
   ignores all of this, so you can code using only store_read, and run
   against a memStore.  Then, you can run against a cacheStore, and
   add the store_release calls as necessary.  cacheStore will
   terminate your program when store_close() is called if you have
   unreleased blocks.  This prevents memory leaks and memory
   corruption.  It is also a good way to test your program for memory
   leaks.)

   @param stor (store *) The store containing the block.
   @param block (off_t) The block number being released.
    \ingroup BulkStorage
*/
/* Arguments parenthesized for macro hygiene. */
#define store_release(stor, block) (*((stor)->functions->release))  ((stor)->store_p, (block))

/**
   Frees a block from the store.  This means that the block
   may be reclaimed by future calls to store_new.

   @param stor (store *) The store containing the block to be freed.

   @param block (off_t) The number of the block to be freed.  The
   block must not be locked.  (That is, for every call to store_read
   for this block, there has been a corresponding call to
   store_release -- the release call is what drops the read lock; see
   the locking semantics at the top of this file.)  Otherwise, the
   library could free a pointer still in use by your program.
    \ingroup BulkStorage
*/
/* Arguments parenthesized for macro hygiene. */
#define store_free(stor, block)    (*((stor)->functions->free))     ((stor)->store_p, (block))

/**
   Get the block size for this store.  Each store has a fixed
   blocksize that was defined by the caller of store_open_*.

   @param stor (store *) The store we're interested in.

   @return the size of the blocks in store.  Corresponds to the result
   of the sizeof() macro.
    \ingroup BulkStorage
*/
/* Argument parenthesized for macro hygiene. */
#define store_blockSize(stor)      (*((stor)->functions->blockSize))((stor)->store_p)


/**
   Open a memStore.

   @param blockSize the size of the blocks to be kept in this store.
   (Use the standard sizeof() macro to determine this parameter)

   @param blockCount suggested initial size of the store.  The store
   will grow as necessary, and reclaim unused space, but will never shrink.

   @return A newly allocated store backed by memory; use it with the
   store_* macros and eventually pass it to store_close().

   \ingroup BulkStorage
   \ingroup MemStore
*/
struct store * store_open_memory(size_t blockSize, off_t blockCount);

/**
   Open a cacheStore.  @see store_open_memory

   @param storeFile The name of the file where the actual data is to
   be stored.  This file is blockSize * blockCount bytes long.

   @param fatFile The name of the file where the allocation table for
   the data is to be stored.  The allocation table is blockCount / 8
   bytes long.

   @param blockSize The size of the blocks to be kept in this store.
   (Use the standard sizeof() macro to determine this parameter.)

   @param blockCount The minimum initial size of the store.  The store
   will grow as necessary.  If you are opening an existing store, and
   this number is less than the size of the existing store, it will be
   ignored.

   @param cacheBlockCount The estimated maximum number of objects in
   cache.  Unreleased objects cannot be freed from cache, so this
   count may be exceeded if many objects are simultaneously locked by
   the application.

   @return A newly allocated store backed by the given files; use it
   with the store_* macros and eventually pass it to store_close().

   \ingroup BulkStorage
   \ingroup CacheStore

*/
struct store * store_open_cached(const char * storeFile, const char * fatFile, size_t blockSize, off_t blockCount, off_t cacheBlockCount);

/**
   Close a store that contains no locked blocks.  Failure to release
   blocks from a store before closing it could result in unexpected
   behavior.  For instance, cacheStore causes a core dump if it
   detects this situation.

   @param store The store to close.  Neither the store pointer nor
   any block pointers obtained from it may be used afterwards.

   \ingroup BulkStorage
*/
void store_close(struct store * store);
/* Definitions for specific store types... */

/**

   \defgroup FileStore FileStore

   File store backs cacheStore, and does not comply with the bulk
   storage api.  It is not generally useful, except to support cache
   store.  If you need an uncached store backed by a file, consider
   using a cacheStore with a small cache size.


   @todo  Provide "bulk read" and "bulk write" functions to read and
   write contiguous blocks on the disk.

   \ingroup BulkStorage

*/

struct fileStore;


/**
   \ingroup FileStore
*/
/*@{*/
/** Open (or create) the data file and the allocation-table ("fat")
    file backing a fileStore.  @see store_open_cached for the meaning
    of the parameters. */
struct fileStore * ds_open(const char * filename, const char * fatname, size_t blockSize, off_t blockCount);
/** Close both backing files and release the fileStore. */
void ds_close(struct fileStore *);

/** Read one block from disk.  The caller must hand the returned
    buffer back via ds_releaseBlock() (fileStore does not track
    allocations -- see ds_releaseBlock below). */
void* ds_readBlock(const struct fileStore * fs, off_t blockNumber);
/** Write the contents of value to the given block on disk. */
void ds_writeBlock(void * value, struct fileStore * fs, off_t blockNumber);
/** Write blockCount contiguous blocks starting at blockNumber from
    value in a single operation. */
void ds_writeBulk(void * value, struct fileStore * fs, off_t blockNumber, size_t blockCount);
/** Allocate a block and return its number. */
off_t ds_newBlock(struct fileStore * fs);
/** Mark a block as free so that it may be reused. */
void ds_freeBlock(struct fileStore * fs, off_t blockNumber);

/**
   Since fileStore doesn't track memory allocation, you need to pass a
   pointer to the block that you are releasing.  Otherwise, it
   would be unable to free the pointer.
 */
void ds_releaseBlock(const struct fileStore * fs, off_t blockNumber, void * block);
/** @return the block size (bytes) this fileStore was opened with. */
size_t ds_blockSize(const struct fileStore * fs);

/* Backup support -- NOTE(review): the exact backup protocol (what
   prepare does vs. dump, and whether data/fat are destination paths)
   is not visible in this header; confirm against the implementation. */
int ds_dumpBackup(struct fileStore * fs, const char * data, const char * fat) ;
int ds_prepareForBackup(struct fileStore * fs, const char * data, const char * fat);
/*@}*/

/**
   \defgroup MemStore MemStore

   An in-memory implementation of the store API.  Reads incur
   virtually no overhead over using fixed size arrays.

   No methods providing for saving the memStore to disk exist, but it
   is 2-3x faster than a cacheStore operating with 100% hit ratio.

   If you are sure that your application will never
   need a cacheStore, you can hard code it to use memStore by using
   the ms_* operations.

   This typically results in a further 2x speedup under the intel
   compiler, as the ms_* calls may be inlined, but the store_* macros
   result in a call to a function pointer and can never be inlined.

   I have not tried replacing the function pointer macros with a
   "store_type ? ms_ : cs_" style macro.  This may (will?) result in
   faster operation for memStore, and does not require hardcoding to a
   particular store type.


   \ingroup BulkStorage
*/
struct memStore;

/**
   \ingroup MemStore
*/
/*@{*/
/** Create an in-memory store; see store_open_memory for parameters. */
struct memStore * ms_open(size_t blockSize, off_t blockCount);
/** Release all memory held by the store. */
void   ms_close(struct memStore * ms);
/** Return a pointer to the block's storage; see store_read. */
void * ms_readBlock(struct memStore * ms, off_t blockNumber);
/** Release a block obtained via ms_readBlock.  (memStore currently
    ignores read locks -- see the store_release documentation above.) */
void   ms_releaseBlock(const struct memStore * ms, off_t blockNumber);
/** Make a dirty block not-dirty; see store_write. */
void   ms_writeBlock(const struct memStore * mem, off_t blockNumber);
/** Return a block to the free pool; see store_free. */
void   ms_freeBlock(struct memStore * ms, off_t blockNumber);
/** Allocate a slot for a new block and return its number. */
off_t    ms_newBlock(struct memStore * ms);

/** @return the block size (bytes) this store was opened with. */
size_t ms_blockSize(struct memStore * ms);
/** @return the current number of blocks in the store. */
off_t ms_blockCount(struct memStore * ms);
/** Debugging aid -- presumably prints the store's state; confirm
    against the implementation. */
void ms_print(struct memStore * ms);
/*@}*/

/**

   \defgroup CacheStore CacheStore

   The cacheStore provides a bulk_storage implementation backed by
   files on disk.  It caches the data in these files, and is in turn
   backed by a fileStore which handles all of the disk I/O.  fileStore
   uses 64bit file operations, and is not limited to 2 gigabytes of
   space.

   @see store

   @todo Aggregate write requests on contiguous blocks into a single write.

   @todo Prefetch nearby blocks in a bulk read, provided that the
   blocks don't overwrite existing cache entries.

   \ingroup BulkStorage
*/

/*@{*/
struct cacheStore;
struct cacheBlock;
/** Open a cacheStore; see store_open_cached for the parameters. */
struct cacheStore * cs_open(const char * filename,
			    const char * fatname,
			    size_t blockSize,
			    off_t blockCount,
			    off_t cacheBlockCount);
/** Close the store and its backing fileStore.  Terminates the
    program if unreleased blocks remain (see store_release). */
void cs_close(struct cacheStore * cs);
/** Read (and lock) a block; see store_read. */
void * cs_readBlock(struct cacheStore * cs, off_t blockNumber);
/** Unlock a block previously read; see store_release. */
void cs_releaseBlock(struct cacheStore * cs, off_t blockNumber);
/** Return a block to the free pool; see store_free. */
void cs_freeBlock(struct cacheStore * cs, off_t blockNumber);
/** Make a dirty block not-dirty; see store_write. */
void cs_writeBlock(struct cacheStore * cs, off_t blockNumber);
/** Allocate a slot for a new block and return its number. */
off_t cs_newBlock(struct cacheStore * cs);
/** @return the block size (bytes) this store was opened with. */
size_t cs_blockSize(struct cacheStore * cs);
/** Debugging aid -- presumably prints cache state/statistics;
    confirm against the implementation. */
void cs_print(struct cacheStore * cs);

/* Backup support -- NOTE(review): presumably forwarded to the
   fileStore backup calls (ds_prepareForBackup / ds_dumpBackup);
   confirm against the implementation. */
int cs_prepareForBackup(struct cacheStore * cs, const char * data, const char * fat);
int cs_dumpBackup(struct cacheStore * cs, const char * data, const char * fat) ;

/* Debug verbosity levels: a message is printed only when its level
   is >= INFO. */
#define TRIVIAL 0
#define INFO    10
/**
   Print a debug message when level >= INFO.  `arg` must be a
   parenthesized printf argument list, e.g. debug(INFO, ("x=%d\n", x)).

   Wrapped in do { ... } while (0) so the macro behaves as a single
   statement: the previous bare-`if` form silently captured a
   following `else` (dangling-else) when used in an unbraced
   if/else body.  `level` is parenthesized for macro hygiene.
*/
#define debug(level, arg) do { if( (level) >= INFO ) { printf arg; } } while (0)

/**
   Instead of writing on the write call, mark pages dirty.  Then, write them
   when they're freed.
*/

//#define FASTWRITES

/**
   Before flushing a block scheduled for writing to disk, check to see
   if neighboring blocks in cache belong in a contiguous region of the
   file, and are also scheduled for writing.  If so, copy them into a
   write buffer, and write all the blocks in a single set of system
   calls.

   This option requires FASTWRITES and conflicts with GOODHASH.
*/

//#define BULKWRITES

/**
    By default, contiguous blocks on disk are usually contiguous in
    cache.  This option applies a true hash function, and makes
    cacheStore behave like a traditional hash table.

    Unfortunately, GOODHASH degrades system performance on trees.  The
    BULKWRITES operation conflicts with GOODHASH.  It would probably
    fare better in some other circumstance.
*/
#ifndef BULKWRITES
//#define GOODHASH
#endif

/**
   Private state for a cacheStore.
   \ingroup CacheStore
*/
struct cacheStore {
  struct fileStore  * fileStore;   /**< Backing store that performs the actual disk I/O. */
  struct cacheBlock * cache;       /**< Cache entries -- presumably an array sized by cacheBlockCount; confirm. */
  off_t cacheBlockCount;           /**< Capacity of the cache, in blocks (see store_open_cached). */
  long hits;                       /**< Hit counter -- presumably reads satisfied from cache; confirm. */
  long total;                      /**< Total access counter (hits/total gives the hit ratio). */
#ifdef BULKWRITES
  /// writeBuffer is defined as a char * so we can do pointer arithmetic on it. (Assuming that sizeof(char) == 1)
  char * writeBuffer;
#endif
};
/*@}*/


/**
   Private state for a fileStore: the raw file descriptors plus the
   store geometry.

   Offsets and counts use off_t: this header defines
   _FILE_OFFSET_BITS 64 at the top, making off_t 64 bits wide and
   matching the ds_* prototypes above.  (The previous off64_t members
   were a glibc-only transitional type, redundant under
   _FILE_OFFSET_BITS=64 and inconsistent with the rest of this file.)

   \ingroup FileStore
*/
struct fileStore {
  int data_file;        /**< File descriptor of the data (block) file. */
  int fat_file;         /**< File descriptor of the allocation-table file. */
  off_t blockSize;      /**< Size of each block, in bytes. */
  off_t blockCount;     /**< Number of blocks in the store. */
  off_t newLastBlock;   /**< NOTE(review): presumably the highest block handed out by ds_newBlock; confirm. */
  int calledFree;       /**< NOTE(review): presumably a flag set once ds_freeBlock has run; confirm. */
};






#endif /*SYS_BULK_STORAGE_H*/
