/**
 * @file   zrena.cc
 * @author karpar Hu <huzhili@gmail.com>
 * @date   Tue May  5 02:53:34 2009
 * 
 * @brief  zrena implementation.
 * 
 * 
 */

#include "zrena.h"

#include <assert.h>    // assert (CHECK/DCHECK shims below)
#include <inttypes.h>
#include <stdlib.h>    // malloc, free
#include <string.h>    // memcpy

#include <vector>

using std::vector;

// Statistics hook: evaluates its argument (stats are always compiled in).
#define ZRENASET(x) (x)

// Number of elements in a statically-sized array.  The extra division by
// !(sizeof(a) % sizeof(*(a))) forces a divide-by-zero diagnostic when `a`
// is a pointer rather than a true array.
#define ARRAYSIZE(a)                                    \
  (sizeof(a) / sizeof((*a)) /                           \
   static_cast<size_t>(!(sizeof(a) % sizeof((*a)))))

// Lightweight CHECK/DCHECK shims over assert().  Arguments are
// parenthesized, and no trailing semicolon is baked in — the originals'
// embedded `;` expanded to double statements and broke unbraced if/else.
#define CHECK(x) assert(x)
#define DCHECK(x) assert(x)
#define CHECK_LT(x, y) assert((x) < (y))
#define DCHECK_LT(x, y) assert((x) < (y))
#define CHECK_GE(x, y) assert((x) >= (y))
#define DCHECK_GE(x, y) assert((x) >= (y))

// Constructs an arena whose blocks are `block_size` bytes.  If the caller
// supplies `first_block` (which must be at least block_size bytes), the arena
// uses it as block 0 and never frees it; otherwise the arena allocates and
// owns block 0 itself.
BaseZrena::BaseZrena(char *first_block, const size_t block_size)
 : first_block_own(first_block ? 1 : 0),
   block_size_(block_size),
   freestart_(NULL),
   last_alloc_(NULL),
   remaining_(0),
   blocks_alloced_(1),
   overflow_blocks_(NULL),
   handle_alignment_(1) {
  assert( block_size > kDefaultAlignment);
  if (first_block) {
    // Caller retains ownership; the destructor skips index 0 in this case.
    first_blocks_[0].mem = first_block;
  } else {
    // Allocate with malloc() to match the free() used by FreeBlocks() and the
    // destructor.  The previous ::operator new here paired with free(), which
    // is undefined behavior.
    first_blocks_[0].mem = reinterpret_cast<char*>(malloc(block_size));
  }
  // Record block 0's size: Reset() reads first_blocks_[0].size to seed
  // remaining_, and the previous code left it indeterminate.
  first_blocks_[0].size = block_size;
  Reset();
}

// Tears the arena down: releases every block except possibly block 0, then
// frees whichever first_blocks_ entries this arena owns.  When the caller
// supplied block 0 (first_block_own == 1), index 0 is left untouched.
BaseZrena::~BaseZrena() {
  FreeBlocks();
  assert(overflow_blocks_ == NULL);
  int i = first_block_own;
  while (i < blocks_alloced_) {
    free(first_blocks_[i].mem);
    ++i;
  }
}

// Frees every block except first_blocks_[0], which is kept so the arena can
// be reused after Reset().  Also discards the overflow vector, if any.
void BaseZrena::FreeBlocks() {
  int i = blocks_alloced_;
  while (--i >= 1) {          // never touch block 0
    free(first_blocks_[i].mem);
    first_blocks_[i].mem = NULL;
    first_blocks_[i].size = 0;
  }
  blocks_alloced_ = 1;

  // Blocks past the fixed array live in a heap-allocated vector; free each
  // block, then drop the vector itself.
  if (overflow_blocks_ != NULL) {
    const size_t n = overflow_blocks_->size();
    for (size_t j = 0; j < n; ++j) {
      free((*overflow_blocks_)[j].mem);
    }
    delete overflow_blocks_;
    overflow_blocks_ = NULL;
  }
}

// Returns the arena to its freshly-constructed state: all blocks but the
// first are freed and the bump pointer is rewound to the (aligned) start of
// block 0.
void BaseZrena::Reset() {
  FreeBlocks();
  freestart_ = first_blocks_[0].mem;
  remaining_ = first_blocks_[0].size;
  last_alloc_ = NULL;

  ZRENASET(status_.bytes_allocated_ = block_size_);

  // Round freestart_ up to the next kDefaultAlignment boundary, paying for
  // any padding out of remaining_.
  const int misalign = static_cast<int>(
      reinterpret_cast<uintptr_t>(freestart_) & (kDefaultAlignment - 1));
  if (misalign > 0) {
    const int pad = kDefaultAlignment - misalign;
    freestart_ += pad;
    remaining_ -= pad;
  }
  freestart_when_empty_ = freestart_;
  assert(!(reinterpret_cast<uintptr_t>(freestart_)&(kDefaultAlignment - 1)));
}

int BaseZrena::block_count() const {
  return (blocks_alloced_ +
          (overflow_blocks_ ? static_cast<int>(overflow_blocks_->size()) : 0));
}

// Allocates a fresh block of `block_size` bytes (which may differ from the
// arena's default block_size_ for oversized requests) and records it in the
// block list.  Returns the block descriptor.
BaseZrena::AllocatedBlock* BaseZrena::AllocNewBlock(const size_t block_size) {
  AllocatedBlock *block;
  // Find a slot for the descriptor: first the fixed first_blocks_ array,
  // then the lazily-created overflow vector.
  if (blocks_alloced_ < static_cast<int>(ARRAYSIZE(first_blocks_))) {
    block = &first_blocks_[blocks_alloced_++];
  } else {
    if (overflow_blocks_ == NULL) overflow_blocks_ = new vector<AllocatedBlock>;
    overflow_blocks_->resize(overflow_blocks_->size()+1);
    block = &overflow_blocks_->back();
  }
  block->mem = reinterpret_cast<char*>(malloc(block_size));
  block->size = block_size;

  // Account for the bytes actually allocated.  The previous code added the
  // member block_size_ here, under-counting whenever an oversized block
  // (block_size > block_size_) was requested by GetMemoryFallback().
  ZRENASET(status_.bytes_allocated_ += block_size);
  return block;
}

// Abandons whatever is left in the current block and switches the bump
// pointer to a brand-new block of the default size.
void BaseZrena::MakeNewBlock() {
  AllocatedBlock *block = AllocNewBlock(block_size_);
  freestart_ = block->mem;
  // remaining_ must be exactly the new block's size.  The previous
  // `remaining_ += block->size` carried the unused tail of the old block
  // forward, overstating the space available past block->mem and letting
  // GetMemoryFallback() hand out memory beyond the end of the new block.
  remaining_ = block->size;
}

// Maps a flat block index (as counted by block_count()) to its descriptor:
// indices below ARRAYSIZE(first_blocks_) live in the fixed array, the rest
// in overflow_blocks_.
const BaseZrena::AllocatedBlock* BaseZrena::IndexToBlock(int index) const {
  // Reject negative indices up front.  The previous unchecked comparison of
  // signed `index` against the unsigned ARRAYSIZE silently converted, so a
  // negative index was only caught later on the overflow path.
  DCHECK_GE(index, 0);
  if (index < static_cast<int>(ARRAYSIZE(first_blocks_))) {
    return &first_blocks_[index];
  }
  CHECK(overflow_blocks_ != NULL);
  const int index_in_overflow_blocks =
      index - static_cast<int>(ARRAYSIZE(first_blocks_));
  CHECK_GE(index_in_overflow_blocks, 0);
  CHECK_LT(static_cast<size_t>(index_in_overflow_blocks),
           overflow_blocks_->size());
  return &(*overflow_blocks_)[index_in_overflow_blocks];
}

// Slow path for allocation: returns `size` bytes aligned to `align_as_int`
// (which must be a positive power of two).  Oversized requests get a
// dedicated block; everything else is bump-allocated from the current block,
// starting a new block when the current one can't fit the request.
void* BaseZrena::GetMemoryFallback(const size_t size, const int align_as_int) {

  if (0 == size) {
    return NULL;
  }

  const size_t align = static_cast<size_t>(align_as_int);

  // Alignment must be a positive power of two.
  assert(align_as_int > 0 && 0 == (align & (align - 1)));

  // Requests larger than a quarter of a block get their own block so they
  // don't waste the tail of a shared one.  Such blocks come straight from
  // malloc, so only the default alignment can be honored.
  if (block_size_ == 0 || size > block_size_ / 4) {
    assert(align <= kDefaultAlignment);
    return AllocNewBlock(size)->mem;
  }

  // Advance freestart_ to the next `align` boundary, clamping remaining_ at
  // zero when the padding exceeds what's left (a fresh block follows below).
  const size_t overage = 
    (reinterpret_cast<uintptr_t>(freestart_) & (align - 1));
  if (overage) {
    const size_t waste = align - overage;
    freestart_ += waste;
    if (waste < remaining_) {
      remaining_ -= waste;
    } else {
      remaining_ = 0;
    }
  }
  // NOTE(review): after MakeNewBlock(), freestart_ is whatever malloc
  // returned; this assumes malloc's alignment satisfies `align` — confirm
  // align <= kDefaultAlignment holds on this path as well.
  if (size > remaining_) {
    MakeNewBlock();
  }
  remaining_ -= size;
  last_alloc_ = freestart_;   // remembered so AdjustLastAlloc() can resize it
  freestart_ += size;
  assert( 0 == (reinterpret_cast<uintptr_t>(last_alloc_) & (align - 1)));
  return reinterpret_cast<void*>(last_alloc_);
}

// Attempts to resize the most recent allocation in place.  Succeeds only
// when `last_alloc` is exactly the pointer returned by the latest
// allocation and `newsize` still fits inside the current block; returns
// false otherwise so the caller can fall back to allocate-and-copy.
bool BaseZrena::AdjustLastAlloc(void* last_alloc, const size_t newsize) {
  // Only the very last allocation can be adjusted.
  if (last_alloc == NULL || last_alloc != last_alloc_) return false;
  assert(freestart_ >= last_alloc_ && freestart_ <= last_alloc_ + block_size_);
  assert(remaining_ >= 0);  // NOTE(review): vacuous if remaining_ is unsigned
  // The grown allocation must not spill past the end of the current block:
  // available space is the allocation's current extent plus remaining_.
  if (newsize > (freestart_ - last_alloc_) + remaining_)
    return false;
  const char* old_freestart = freestart_;  // remembered for the delta below
  freestart_ = last_alloc_ + newsize;
  // When shrinking, this delta is negative and hands bytes back.
  remaining_ -= (freestart_ - old_freestart);
  return true;
}

// Allocates `size` bytes (aligned to handle_alignment_) and also produces a
// compact 32-bit Handle encoding the allocation's location as
// (block_index * block_size_ + offset) / handle_alignment_.  If that value
// doesn't fit in 32 bits, an invalid handle is stored instead, but the
// memory is still returned.
void* BaseZrena::GetMemoryWithHandle(
  const size_t size, BaseZrena::Handle* handle) {
  CHECK(handle != NULL);

  void* p = GetMemory(size, handle_alignment_);
  // Find the block containing p.  Scan newest-first, since the allocation
  // almost always came from the most recently added block.
  int block_index;
  const AllocatedBlock* block = NULL;
  for (block_index = block_count() - 1; block_index >= 0; --block_index) {
    block = IndexToBlock(block_index);
    if ((p >= block->mem) && (p < (block->mem + block->size))) {
      break;
    }
  }
  CHECK_GE(block_index, 0); // "Failed to find block that was allocated from"
  CHECK(block != NULL);  // "Failed to find block that was allocated from"
  const uint64_t offset = reinterpret_cast<char*>(p) - block->mem;
  // The encoding below assumes every block is block_size_ bytes and both the
  // offset and block size are multiples of handle_alignment_.
  DCHECK_LT(offset, block_size_);
  DCHECK((offset % handle_alignment_) == 0);
  DCHECK((block_size_ % handle_alignment_) == 0);
  uint64_t handle_value =
      (static_cast<uint64_t>(block_index) * block_size_ + offset) /
      handle_alignment_;
  if (handle_value >= static_cast<uint64_t>(0xFFFFFFFF)) {
    // We ran out of space to be able to return a handle, so return an invalid
    // handle.
    handle_value = Handle::kInvalidValue;
  }
  handle->handle_ = static_cast<uint32_t>(handle_value);
  return p;
}

void* BaseZrena::HandleToPointer(const Handle& h) const {
  CHECK(h.valid());
  uint64_t handle = static_cast<uint64_t>(h.handle_) * handle_alignment_;
  int block_index = static_cast<int>(handle / block_size_);
  size_t block_offset = static_cast<size_t>(handle % block_size_);
  const AllocatedBlock* block = IndexToBlock(block_index);
  CHECK(block != NULL);
  return reinterpret_cast<void*>(block->mem + block_offset);
}

// Resizes the region `s` of `oldsize` bytes to `newsize` bytes.  Tries an
// in-place adjustment first; otherwise the old region is kept (shrink) or a
// fresh region is allocated and the contents copied (grow).  The arena never
// reclaims memory, so shrinking is a no-op.
char* UnsafeZrena::Realloc(char* s, size_t oldsize, size_t newsize) {
  if ( AdjustLastAlloc(s, newsize) )             // in case s was last alloc
    return s;
  if ( newsize <= oldsize ) {
    return s;  // no need to do anything; we're ain't reclaiming any memory!
  }
  // Growing: newsize > oldsize is guaranteed here, so exactly oldsize bytes
  // are copied.  (The previous min(oldsize, newsize) was redundant, and the
  // assert(oldsize >= 0 && newsize >= 0) was a tautology on unsigned types.)
  char * newstr = Alloc(newsize);
  memcpy(newstr, s, oldsize);
  return newstr;
}
