#include "allocator_interface.h"
#include "allocator_defines.h"

/* All blocks must have a specified minimum alignment. */
// #define ALIGNMENT 8

/* Rounds up to the nearest multiple of ALIGNMENT. */
// #define ALIGN(size) (((size) + (ALIGNMENT-1)) & ~(ALIGNMENT-1))

/* The smallest aligned size that will hold a size_t value. */
// #define SIZE_T_SIZE (ALIGN(sizeof(size_t)))

/* Block header structure is as follows:
 * Allocated:
 *  - One size_t at start
 *  - One size_t at end
 * Both indicating the size of the block, to allow for O(1) coalescing.
 *
 * Free:
 *  - One size_t at start
 *  - One void * for pointer to previous free block
 *  - One void * for pointer to next free block
 *  - One size_t at end
 * Low bit of size_t representing block size indicates whether or not the block is in use.
 * The remaining (LOG_MIN_SIZE - 1) low-order bits are available for such status information :-D
 */

/*
 * Returns the floor of log2(v). Defined only for unsigned 32-bit integers with
 * value greater than 1.
 *
 * Implemented as an unrolled binary search over the bit positions: each step
 * tests whether any bit is set in the upper half of the remaining window and,
 * if so, shifts it down while accumulating the position.
 */
static inline int logbase2(uint32_t v) {
  int log = 0;
  if (v >> 16) { v >>= 16; log += 16; }
  if (v >> 8)  { v >>= 8;  log += 8; }
  if (v >> 4)  { v >>= 4;  log += 4; }
  if (v >> 2)  { v >>= 2;  log += 2; }
  if (v >> 1)  {           log += 1; }
  return log;
}

namespace my
{
  //Global (process-wide) segregated free lists; protected by listMutexes.
  void *allocator::freeLists[NUM_FREE_LISTS];
  //Per-thread caches: free-list heads, per-list block counts, and the
  //low-water marks consumed by garbage_collect() (TCMalloc-style).
  __thread void *allocator::threadFreeLists[NUM_THREAD_FREE_LISTS];
  __thread int allocator::threadCounts[NUM_THREAD_FREE_LISTS];
  __thread int allocator::threadCountMinima[NUM_THREAD_FREE_LISTS];
  //Total bytes currently held in this thread's cache; drives the GC trigger.
  __thread size_t allocator::threadCacheSize = 0;
  //One mutex per global free list.
  pthread_mutex_t allocator::listMutexes[NUM_FREE_LISTS];

  //Bumped by init(); each thread lazily re-runs thread_init() when its
  //versionID falls behind. NOTE(review): volatile is not a synchronization
  //primitive -- this assumes init() is not called concurrently; confirm.
  static volatile int version = 0;
  __thread int allocator::versionID = -1;
  /*
   * init - Initialize the malloc package. Called once before any other
   * calls are made. Bumps the version (so threads re-run thread_init()),
   * empties every global free list, and re-initializes the per-list mutexes.
   */
  int allocator::init()
  {
    ++version;
    int i = 0;
    while (i < NUM_FREE_LISTS) {
      pthread_mutex_init(&listMutexes[i], NULL);
      freeLists[i] = NULL;
      ++i;
    }
    //Sentinel: a non-NULL entry at the top index tells searches the heap
    //must be grown rather than walked further.
    freeLists[NUM_FREE_LISTS - 1] = (void *)-1;
    return 0;
  }

  /*
   * thread_init - Reset this thread's cache state: empties every thread free
   * list, zeroes the counts and low-water marks, and records the current
   * global version so the reset is not repeated.
   */
  void allocator::thread_init() {
    int i = NUM_THREAD_FREE_LISTS;
    while (i-- > 0) {
      threadCountMinima[i] = 0;
      threadCounts[i] = 0;
      threadFreeLists[i] = NULL;
    }
    //Sentinel: a non-NULL entry at the top index redirects lookups to the
    //global free lists.
    threadFreeLists[NUM_THREAD_FREE_LISTS - 1] = (void *)-1;
    threadCacheSize = 0;
    versionID = version;
  }

  /*
   * Grows the heap by the requested size or the minimum growth size (MIN_SBRK),
   * whichever is larger; inserts the newly retrieved memory as a single free
   * block and returns the index of the free list now containing it.
   *
   * Assumes freeLists at all indices above the index corresponding to the requested size
   * are empty. Reasonable assumption; heap only needs to be grown if this is the case.
   *
   * Returns -1 on error (in which case no lock is held).
   *
   * LOCKING: on success this returns WITH listMutexes[returned index] HELD.
   * The caller must consume the block and then unlock that mutex.
   */
  int allocator::grow_heap(size_t requested_size) {
      // Never extend by less than MIN_SBRK, to amortize the cost of mem_sbrk.
      size_t alloc_amt = (MIN_SBRK > requested_size) ? MIN_SBRK : requested_size;
      void *head = mem_sbrk(alloc_amt);
      if (head == (void *)-1) {
        //Whoops, an error of some sort occurred.  We return NULL to let
        //the client code know that we weren't able to allocate memory.
        return -1;
      }
      //Stamp both boundary tags as free, then publish the block at the list
      //index matching the retrieved amount. The lock is intentionally left
      //held for the caller (see LOCKING above).
      int current_index = FREE_LIST_INDEX(alloc_amt);
      *START_SIZE_ADDR(head) = SET_FREE(alloc_amt);
      *END_SIZE_ADDR(head) = SET_FREE(alloc_amt);
      pthread_mutex_lock(&listMutexes[current_index]);
      LIST_INSERT_HEAD(&freeLists[current_index], head);
      return current_index;
  }

  /*
   * Retrieves up to 2**(NUM_THREAD_FREE_LISTS - 1 - index) of the size corresponding to the given 
   * index from the global heap, as an intact free list; if that isn't possible, returns a 
   * larger block of the size corresponding to the largest size in thread free lists.
   *
   * If necessary, grows the global heap. Returns NULL on failure.
   *
   * Holds at most one global list mutex at a time; no lock is held on return.
   */
  void * allocator::grow_thread_heap(int index) {
    assert(index < NUM_THREAD_FREE_LISTS - 1);
    //Target batch: fewer, larger blocks as the size class grows.
    size_t nblocks = (size_t)1 << (NUM_THREAD_FREE_LISTS - 1 - index);
    int current_index = index;
    //Pass 1: scan thread-cacheable size classes for an existing global list.
    while (current_index < NUM_THREAD_FREE_LISTS - 1) {
      pthread_mutex_lock(&listMutexes[current_index]);
      if (freeLists[current_index] != NULL) {
        //We found blocks that should satisfy the conditions!
        size_t i = nblocks;
        void * cur_block = freeLists[current_index];
        void *ret;
        //Walk forward up to nblocks links to find where to cut the list.
        while ((i > 0) && (cur_block != NULL)) {
          cur_block = NEXT_PTR(cur_block);
          --i;
        }
        if (cur_block == NULL) {
          //We found fewer than nblocks blocks, but still enough to return.
          ret = freeLists[current_index];
          freeLists[current_index] = NULL;
        } else {
          //We found nblocks blocks. Fantastic. Detach the leading run
          //[head, prev(cur_block)] and leave the remainder on the global list.
          ret = freeLists[current_index];
          *NEXT_PTR_ADDR(PREV_PTR(cur_block)) = NULL;
          *PREV_PTR_ADDR(cur_block) = NULL;
          freeLists[current_index] = cur_block;
        }
        pthread_mutex_unlock(&listMutexes[current_index]);
        return ret;
      }
      //Move along to the next free list
      pthread_mutex_unlock(&listMutexes[current_index]);
      ++current_index;
      nblocks >>= 1;
    }
    //We didn't find blocks small enough. Split up bigger blocks to satisfy; maybe grow heap.
    //The sentinel at NUM_FREE_LISTS-1 is non-NULL, so this loop terminates.
    pthread_mutex_lock(&listMutexes[current_index]);
    while (freeLists[current_index] == NULL) {
      pthread_mutex_unlock(&listMutexes[current_index]);
      ++current_index;
      pthread_mutex_lock(&listMutexes[current_index]);
    }

    if (current_index == NUM_FREE_LISTS - 1) {
      //Only the sentinel remains: grow the heap. grow_heap returns with the
      //lock at the NEW current_index held (or -1 with nothing locked).
      pthread_mutex_unlock(&listMutexes[current_index]);
      current_index = grow_heap((size_t)1 << (NUM_THREAD_FREE_LISTS - 1 + LOG_MIN_SIZE));
      if (-1 == current_index) {
        return NULL;
      }
    }

    //current_index gives us a block that is bigger than the largest block for a thread free list.
    int requested_index = NUM_THREAD_FREE_LISTS - 1 - 1;
    assert(current_index > requested_index);
    //Split it up and return it as a valid free list corresponding to NUM_THREAD_FREE_LISTS - 1 - 1;
    //Copied straight from the one and only mallocator. FIXME Clean up before final project.
    /*
     * Removes the memory chunk from the free list (lock is held here, either
     * from the scan loop above or from grow_heap's return contract).
     */
    void *head = freeLists[current_index];
    LIST_REMOVE_HEAD(&(freeLists[current_index]));
    assert(freeLists[current_index] != head);
    pthread_mutex_unlock(&listMutexes[current_index]);

    /*
     * If the chunk is not a power of 2 size, split it up before doing any necessary binary
     * division.
     */
    size_t list_min = (size_t)1 << (current_index + LOG_MIN_SIZE);
    size_t current_size = START_SIZE(head);
    if (current_size != list_min) {
      assert(current_size > list_min);
      //Carve the excess off the top and return it to the matching global list.
      void *top = ((char *)head) + list_min;
      *START_SIZE_ADDR(top) = SET_FREE(current_size - list_min);
      *END_SIZE_ADDR(top) = SET_FREE(current_size - list_min);
      *START_SIZE_ADDR(head) = SET_FREE(list_min);
      *END_SIZE_ADDR(head) = SET_FREE(list_min);
      int index = FREE_LIST_INDEX(current_size - list_min);
      pthread_mutex_lock(&listMutexes[index]);
      LIST_INSERT_HEAD(&freeLists[index],top);
      assert(freeLists[index] == top);
      pthread_mutex_unlock(&listMutexes[index]);
    }

    /*
     * Binary division of head block until we have the right size; each split's
     * upper half goes back onto the corresponding global free list.
     */
    assert((START_SIZE(head) & (START_SIZE(head) - 1)) == 0);
    while (current_index > requested_index) {
      --current_index;
      list_min /= 2;
      void *top = (void *)(((char *)head) + list_min);
      *START_SIZE_ADDR(top) = SET_FREE(list_min);
      *END_SIZE_ADDR(top) = SET_FREE(list_min);
      pthread_mutex_lock(&listMutexes[current_index]);
      LIST_INSERT_HEAD(&freeLists[current_index], top);
      pthread_mutex_unlock(&listMutexes[current_index]);
    } 

    //Return a single-element free list (still marked free: the caller stores
    //it into its thread cache, not directly into an allocation).
    *START_SIZE_ADDR(head) = SET_FREE(list_min);
    *PREV_PTR_ADDR(head) = NULL;
    *NEXT_PTR_ADDR(head) = NULL;
    *END_SIZE_ADDR(head) = SET_FREE(list_min);
    assert(START_SIZE(head) == END_SIZE(head));
    return head;
  }

  /*
   * Allocates a block of at least the size class given by requested_index
   * straight from the global free lists, growing the heap when only the
   * sentinel remains. Returns a pointer just past the leading size_t header,
   * or NULL on failure. Holds at most one list mutex at a time.
   */
  void * allocator::global_malloc(int requested_index) {
    /*
     * Check lists for available blocks of necessary / splittable size.
     * The sentinel at NUM_FREE_LISTS-1 is non-NULL, so the scan terminates.
     */
    int current_index = requested_index;
    current_index = (current_index < NUM_FREE_LISTS - 1) ? current_index : NUM_FREE_LISTS - 1;
    pthread_mutex_lock(&listMutexes[current_index]);
    while (freeLists[current_index] == NULL) {
      pthread_mutex_unlock(&listMutexes[current_index]);
      ++current_index;
      pthread_mutex_lock(&listMutexes[current_index]);
    }

    /*
     * If necessary, grows the heap to make room for the new block.
     * grow_heap returns with the lock at the NEW current_index held
     * (or -1 with nothing locked).
     */
    if (current_index == (NUM_FREE_LISTS - 1)) {
      pthread_mutex_unlock(&listMutexes[current_index]);
      current_index = grow_heap((size_t)1 << (requested_index + LOG_MIN_SIZE));
      if (-1 == current_index) {
        return NULL;
      }
    }

    /*
     * Removes the memory chunk from the free list (lock held from the scan
     * or from grow_heap's return contract).
     */
    assert(freeLists[current_index] != NULL);
    void *head = freeLists[current_index];
    LIST_REMOVE_HEAD(&(freeLists[current_index]));
    pthread_mutex_unlock(&listMutexes[current_index]);

    /*
     * If the chunk is not a power of 2 size, split it up before doing any necessary binary
     * division.
     */
    size_t list_min = (size_t)1 << (current_index + LOG_MIN_SIZE);
    size_t current_size = START_SIZE(head);
    if (current_size != list_min) {
      assert(current_size > list_min);
      //Carve the excess off the top and return it to the matching global list.
      void *top = ((char *)head) + list_min;
      *START_SIZE_ADDR(top) = SET_FREE(current_size - list_min);
      *END_SIZE_ADDR(top) = SET_FREE(current_size - list_min);
      *START_SIZE_ADDR(head) = SET_FREE(list_min);
      *END_SIZE_ADDR(head) = SET_FREE(list_min);
      int index = FREE_LIST_INDEX(current_size - list_min);
      pthread_mutex_lock(&listMutexes[index]);
      LIST_INSERT_HEAD(&freeLists[index],top);
      assert(freeLists[index] == top);
      pthread_mutex_unlock(&listMutexes[index]);
    }

    /*
     * Binary division of head block until we have the right size; each split's
     * upper half goes back onto the corresponding global free list.
     */
    assert((START_SIZE(head) & (START_SIZE(head) - 1)) == 0);
    while (current_index > requested_index) {
      --current_index;
      list_min /= 2;
      void *top = (void *)(((char *)head) + list_min);
      *START_SIZE_ADDR(top) = SET_FREE(list_min);
      *END_SIZE_ADDR(top) = SET_FREE(list_min);
      pthread_mutex_lock(&listMutexes[current_index]);
      LIST_INSERT_HEAD(&freeLists[current_index], top);
      pthread_mutex_unlock(&listMutexes[current_index]);
    } 

    //Store the raw size (no SET_FREE) in both boundary tags, marking the
    //block allocated; hand out the address just past the leading header.
    *START_SIZE_ADDR(head) = list_min;
    *END_SIZE_ADDR(head) = list_min;
    assert(START_SIZE(head) == END_SIZE(head));
    assert(START_SIZE(head) >= ((size_t)1 << (requested_index + LOG_MIN_SIZE)));
    return (void *)((char *)head + sizeof(size_t));
  }

  /*
   * If the size requested is less than the maximum size of free block stored in the thread
   * cache, checks the local thread's free lists for a block of the appropriate size or larger;
   * if a larger block is found, splits the block up until an appropriate size is obtained.
   * If no such block is found, retrieves additional storage from the global free lists.
   *
   * Otherwise, retrieves directly from the global free lists.
   *
   * Returns NULL when size is 0, too large to represent, or the heap cannot
   * be grown. The returned pointer points just past the leading size_t header.
   */
  void * allocator::malloc(size_t size)
  {
    if (versionID != version) thread_init();

    if (size ==  0) return NULL;

    //The sizing math below runs through logbase2, which operates on 32-bit
    //values; reject requests whose padded size would not fit. This also fixes
    //the previous 'int aligned_size', which silently truncated/overflowed
    //(and could produce a negative list index) for size > INT_MAX.
    if (size > 0xFFFFFFFFu - HEADER_SIZE) return NULL;

    /* 
     * Finds the index of the list containing the next available block that can accommodate
     * the requested size + necessary headers.
     */
    size_t aligned_size = size + HEADER_SIZE;
    if (aligned_size < MIN_SIZE) aligned_size = MIN_SIZE;
    //Round UP to the containing power-of-two size class:
    //ceil(log2(aligned_size)) - LOG_MIN_SIZE.
    int requested_index = logbase2((uint32_t)(aligned_size - 1)) + 1 - LOG_MIN_SIZE; //Ceil, not floor like regular indexing.
    int current_index;

    //Requests too big for the thread cache go straight to the global lists.
    if (requested_index >= NUM_THREAD_FREE_LISTS - 1) {
      return global_malloc(requested_index);
    }

    /*
     * Check thread free lists for available blocks of necessary / splittable size.
     * The sentinel at the last index guarantees termination.
     */
    current_index = requested_index;
    while (threadFreeLists[current_index] == NULL) {
      ++current_index;
    }

    /*
     * If thread free lists don't have the right blocks, pull from the global free lists.
     */
    assert(current_index < NUM_THREAD_FREE_LISTS);
    assert(requested_index < (NUM_THREAD_FREE_LISTS - 1));
    if (current_index == NUM_THREAD_FREE_LISTS - 1) {
      //We know that requested_index is < NUM_THREAD_FREE_LISTS - 1 because of the global malloc
      void *block = grow_thread_heap(requested_index);
      if (block == NULL) {
        return NULL;
      }
      //Update thread cache size and counts with the retrieved linked list.
      assert(START_SIZE(block) >= (size + HEADER_SIZE));
      void *tmp = block;
      current_index = FREE_LIST_INDEX(START_SIZE(block));
      while (tmp != NULL) {
        assert(START_SIZE(tmp) == END_SIZE(tmp));
        threadCacheSize += START_SIZE(tmp);
        tmp = NEXT_PTR(tmp);
        ++threadCounts[current_index];
      }
      assert(threadFreeLists[current_index] == NULL);
      assert(current_index < (NUM_THREAD_FREE_LISTS - 1));
      threadFreeLists[current_index] = block;
    }

    /*
     * Removes the memory chunk from the free list, tracking the low-water mark
     * consumed by garbage_collect().
     */
    void *head = threadFreeLists[current_index];
    --threadCounts[current_index];
    if (threadCounts[current_index] < threadCountMinima[current_index]) {
      threadCountMinima[current_index] = threadCounts[current_index];
    }
    LIST_REMOVE_HEAD(&(threadFreeLists[current_index]));
    assert(threadFreeLists[current_index] != head);

    /*
     * If the chunk is not a power of 2 size, split it up before doing any necessary binary
     * division.
     */
    size_t list_min = (size_t)1 << (current_index + LOG_MIN_SIZE);
    size_t current_size = START_SIZE(head);
    if (current_size != list_min) {
      assert(current_size > list_min);
      //Carve the excess off the top and cache it at its own index.
      void *top = ((char *)head) + list_min;
      *START_SIZE_ADDR(top) = SET_FREE(current_size - list_min);
      *END_SIZE_ADDR(top) = SET_FREE(current_size - list_min);
      *START_SIZE_ADDR(head) = SET_FREE(list_min);
      *END_SIZE_ADDR(head) = SET_FREE(list_min);
      int index = FREE_LIST_INDEX(current_size - list_min);
      ++threadCounts[index];
      LIST_INSERT_HEAD(&threadFreeLists[index],top);
      assert(threadFreeLists[index] == top);
    }

    /*
     * Binary division of head block until we have the right size; each split's
     * upper half stays in this thread's cache.
     */
    //Check for power of two size
    assert((START_SIZE(head) & (START_SIZE(head) - 1)) == 0);
    while (current_index > requested_index) {
      --current_index;
      list_min /= 2;
      void *top = (void *)(((char *)head) + list_min);
      *START_SIZE_ADDR(top) = SET_FREE(list_min);
      *END_SIZE_ADDR(top) = SET_FREE(list_min);
      ++threadCounts[current_index];
      LIST_INSERT_HEAD(&threadFreeLists[current_index], top);
    } 

    //Store raw sizes (no SET_FREE) to mark the block allocated, and take it
    //out of the thread cache accounting.
    *START_SIZE_ADDR(head) = list_min;
    *END_SIZE_ADDR(head) = list_min;
    assert(START_SIZE(head) == END_SIZE(head));
    assert(threadCacheSize >= START_SIZE(head));
    threadCacheSize -= START_SIZE(head);
    assert(START_SIZE(head) >= (size + HEADER_SIZE));
    return (void *)((char *)head + sizeof(size_t));
  }

  /*
   * Return blocks to the global free lists.
   *
   * We maintain a count L of the 'low-water' mark for each thread free list between garbage collections,
   * as in TCMalloc; we move L_i/2 objects from thread free list i to global free list i on each garbage
   * collection. Nice and simple!
   * 
   * We do not perform any coalescing because it's hella hard to do without being able to inspect other
   * threads' free lists, which in itself would present quite the difficult problem and introduce way
   * too much synchronization.
   */
  void allocator::garbage_collect() {
    for (int i = 0; i < (NUM_THREAD_FREE_LISTS - 1); ++i) {
      //Remove threadCountMinima[i]/2 items from threadFreeLists[i]
      //Guaranteed to have that many items available.
      void *head = threadFreeLists[i];
      if ((head == NULL) || threadCountMinima[i] / 2 == 0) {
        continue;
      }
      void *tmp = head;
      for (int j = 0; j < threadCountMinima[i] / 2; ++j) {
        tmp = NEXT_PTR(tmp);
        assert(tmp != NULL);
      }
      threadFreeLists[i] = tmp;
      tmp = PREV_PTR(tmp);
      *PREV_PTR_ADDR(threadFreeLists[i]) = NULL;
      //Insert into global free list
      pthread_mutex_lock(&listMutexes[i]);
      *NEXT_PTR_ADDR(tmp) = freeLists[i];
      if (freeLists[i] != NULL) {
        *PREV_PTR_ADDR(freeLists[i]) = tmp;
      }
      freeLists[i] = head;
      pthread_mutex_unlock(&listMutexes[i]);
      //Reset the minima
      threadCounts[i] -= threadCounts[i] / 2;
      threadCountMinima[i] = threadCounts[i];
    }
  }
  
  /*
   * free - Returns a block to this thread's cache when it is small enough,
   * otherwise directly to the matching global free list. Triggers a garbage
   * collection once the thread cache reaches THREAD_GC_THRESHOLD bytes.
   * (The old header comment, "Freeing a block does nothing", was stale.)
   */
  void allocator::free(void *ptr)
  {
    if (versionID != version) thread_init();

    //Standard allocator contract: free(NULL) is a no-op. (Previously this
    //computed a bogus header address from NULL and crashed.)
    if (ptr == NULL) return;

    //Step back over the leading size_t header written by malloc, then mark
    //both boundary tags free.
    void *realptr = (void *)((char *)ptr - sizeof(size_t));
    assert(START_SIZE(realptr) == END_SIZE(realptr));
    *START_SIZE_ADDR(realptr) = SET_FREE(START_SIZE(realptr));
    *END_SIZE_ADDR(realptr) = SET_FREE(START_SIZE(realptr));
    int index = FREE_LIST_INDEX(START_SIZE(realptr));
    //If this block fits in the thread caches, put it there
    if (index < NUM_THREAD_FREE_LISTS - 1) {
      ++threadCounts[index];
      LIST_INSERT_HEAD(&threadFreeLists[index],realptr);
      threadCacheSize += START_SIZE(realptr);
      if (threadCacheSize >= THREAD_GC_THRESHOLD) {
        garbage_collect();
      }
      return;
    }
    //Otherwise, free it into the global free lists
    pthread_mutex_lock(&listMutexes[index]);
    LIST_INSERT_HEAD(&freeLists[index],realptr);
    pthread_mutex_unlock(&listMutexes[index]);
    return;
  }

  /*
   * realloc - Implemented simply in terms of malloc and free. Reuses the
   * existing block when it is already large enough; otherwise allocates a new
   * block, copies min(old payload, size) bytes, and frees the old block.
   * Returns NULL (leaving ptr valid) if the new allocation fails.
   */
  void * allocator::realloc(void *ptr, size_t size)
  {
    if (versionID != version) thread_init();

    //Standard allocator contract: realloc(NULL, size) acts like malloc(size).
    //(Previously this dereferenced the NULL pointer's "header".)
    if (ptr == NULL) return malloc(size);

    void *realptr = (void *)((size_t*)ptr - 1);
    assert(START_SIZE(realptr) == END_SIZE(realptr));
    //Reuse in place when the existing payload already suffices. Note <=, not
    //the previous <: a request exactly equal to the usable size also fits and
    //used to force a needless allocate-and-copy.
    if (size <= (START_SIZE(realptr) - HEADER_SIZE)) {
      return ptr;
    }

    size_t copy_size;
    /* Allocate a new chunk of memory, and fail if that allocation fails. */
    void *newptr = malloc(size);
    if (NULL == newptr)
      return NULL;

    /* Get the size of the old block of memory.  Take a peek at malloc(),
       where we stashed this in the SIZE_T_SIZE bytes directly before the
       address we returned.  Now we can back up by that many bytes and read
       the size. */
    copy_size = *(size_t*)((uint8_t*)ptr - sizeof(size_t)) - HEADER_SIZE;

    /* If the new block is smaller than the old one, we have to stop copying
       early so that we don't write off the end of the new block of memory. */
    if (size < copy_size)
      copy_size = size;

    /* This is a standard library call that performs a simple memory copy. */
    std::memcpy(newptr, ptr, copy_size);

    /* Release the old block. */
    free(ptr);

    /* Sanity-check the new block's boundary tags, then return it. */
    void *tmpptr = (void *)((size_t*)newptr - 1);
    assert(START_SIZE(tmpptr) == END_SIZE(tmpptr));
    return newptr;
  }

  /* Resets the managed heap break via mem_reset_brk (discards the heap). */
  void allocator::reset_brk()
  {
    mem_reset_brk() ;
  }

  /* Returns the lowest address of the managed heap (wraps mem_heap_lo). */
  void * allocator::heap_lo()
  {
    return mem_heap_lo() ;
  }

  /* Returns the highest address of the managed heap (wraps mem_heap_hi). */
  void * allocator::heap_hi()
  {
    return mem_heap_hi() ;
  }


};
