#include "PreCompile.hpp"
#include "AsyncRingBuffer.hpp"

#include <glib.h>

namespace BuildNG
{
  AsyncRingBuffer::AsyncRingBuffer(int size)
  {
    // The backing store must be a multiple of 4 bytes so that every
    // item's leading size int stays aligned.
    assert((size & 3) == 0);

    blob_size = size;
    blob = new guint8[blob_size];
    memset(blob, 0, blob_size);    // size == 0 marks a slot as empty.

    // All three cursors start at the beginning of the (empty) buffer.
    write_ptr = pop_ptr = free_ptr = 0;
  }

  AsyncRingBuffer::~AsyncRingBuffer()
  {
    // Drain every item still queued so each one's dtor gets a chance to
    // run before the backing storage disappears.
    for(void *item = pop_acquire(); item; item = pop_acquire())
      pop_release(item);

    delete [] blob;
  }

  // Copies a size-byte payload into the ring buffer.
  //
  //   ptr       - source data, handed to copy_func.
  //   size      - payload size in bytes (a Header is added on top of it).
  //   copy_func - copies the payload from src into the freshly claimed slot.
  //   dtor_func - stored in the slot; pop_release() runs it on the payload.
  //
  // Returns false when there is no room, true once the item is published.
  //
  // Lock-free multi-producer scheme: a producer claims its slot with a CAS
  // on write_ptr and only then fills it in; the final atomic store of the
  // slot's size field is what publishes the item to pop_acquire() (which
  // treats size == 0 as "not written yet").
  //
  // NOTE(review): item_size is not rounded up, so the next slot is only
  // int-aligned if callers always pass a size that is a multiple of 4
  // (matching the ctor's alignment assert) — confirm with the callers.
  bool AsyncRingBuffer::push(const void *ptr,int size,void (*copy_func)(void *dest,const void *src),void (dtor_func)(void *ptr))
  {
    // Total footprint of one slot: header plus payload.
    int item_size = size + sizeof(Header);

    while(1)
    {
      // Snapshot both cursors; the CAS below detects a stale write_ptr.
      // Both cursors grow monotonically, their actual buffer offset is
      // the remainder mod blob_size.
      int cur_write_ptr = write_ptr;
      int cur_free_ptr = free_ptr;

      int wrapped_write_ptr = cur_write_ptr % blob_size;
      int wrapped_free_ptr = cur_free_ptr % blob_size;

      if(wrapped_write_ptr >= wrapped_free_ptr)
      {
        // the write ptr is after the free ptr: free space is the tail of
        // the buffer plus (after wrapping) the region before free ptr.
        // NOTE(review): the == case is also taken here, i.e. equal cursors
        // are assumed to mean "empty", not "full" — presumably guaranteed
        // by the free scan in pop_release; verify.

        // see if there's actually space available at the 
        // beginning of the buffer.
        if(wrapped_free_ptr == 0 && 
          wrapped_write_ptr + item_size >= blob_size)
        {
          return false;
        }

        if(wrapped_write_ptr + item_size > blob_size)
        {
          // we have to wrap around. We insert a dummy node, that will tell the pop ptr
          // to wrap around, when it's here.

          // Advance write_ptr to the next multiple of blob_size.
          int new_write_ptr = cur_write_ptr + (blob_size - wrapped_write_ptr);

          if(g_atomic_int_compare_and_exchange(&write_ptr,cur_write_ptr,new_write_ptr))
          {
            Item *cur_item = (Item*)(blob + wrapped_write_ptr);

            // a size of INT_MAX tells the popper that it's a dummy node, and
            // that it should jump to the next "wrap-boundary".
            g_atomic_int_set(&cur_item->size,INT_MAX);
          }

          // Whether or not our CAS won, retry from a fresh snapshot.
          continue;
        }
      }
      else
      {
        // the write ptr is behind the free ptr.

        int available = wrapped_free_ptr - wrapped_write_ptr;
        if(item_size >= available)
        {
          // no space. (>= rather than >: the cursors must never become
          // equal through a push, since equality reads as "empty" above.)
          return false;
        }
      }

      // Claim [wrapped_write_ptr, wrapped_write_ptr + item_size) for us.
      if(g_atomic_int_compare_and_exchange(&write_ptr,cur_write_ptr,cur_write_ptr + item_size))
      {
        Item *cur_item = (Item*)(blob + wrapped_write_ptr);
        cur_item->dtor = dtor_func;
        copy_func(cur_item->data,ptr);

        // Publish: only now does pop_acquire() see a non-zero size.
        g_atomic_int_set(&cur_item->size,item_size);

        return true;
      }
      // CAS lost: another producer claimed the slot; retry.
    }
  }

  // Claims the oldest unconsumed item and returns a pointer to its payload,
  // or NULL when the buffer is currently empty.  The caller must hand the
  // pointer back via pop_release() once done with it.
  //
  // Lock-free: consumers race via a CAS on pop_ptr.  A slot whose size
  // field is still 0 has not been fully published by push() yet and is
  // treated as "buffer empty".
  void* AsyncRingBuffer::pop_acquire()
  {
    while(1)
    {
      int cur_pop_ptr = pop_ptr;
      int wrapped_pop_ptr = cur_pop_ptr % blob_size;

      Item *cur_item = (Item*)(blob + wrapped_pop_ptr);
      int item_size = g_atomic_int_get(&cur_item->size);
      if(item_size == 0)
        return NULL;    // BUGFIX: was `return false` in a void*-returning function.
      else if(item_size == INT_MAX)
      {
        // Dummy node inserted by push() at the end of the buffer: jump
        // ahead to the next wrap boundary.
        int new_pop_ptr = cur_pop_ptr + (blob_size - wrapped_pop_ptr);
        if( g_atomic_int_compare_and_exchange(&pop_ptr,cur_pop_ptr,new_pop_ptr))
        {
          // BUGFIX: store the padding size *negated*.  Negative size is the
          // file-wide convention for "released, reclaimable" (see
          // pop_release); the previous positive value made the free scan
          // treat the dummy as still acquired, so free_ptr could never
          // advance past a wrap boundary.
          g_atomic_int_set(&cur_item->size,-(new_pop_ptr - cur_pop_ptr));
        }
        continue;
      }

      // Claim this item by moving pop_ptr past it.
      if( g_atomic_int_compare_and_exchange(&pop_ptr,cur_pop_ptr,cur_pop_ptr + item_size))
      {
        return cur_item->data;
      }
      // CAS lost: another consumer took it; retry.
    }
  }

  // Returns a payload pointer obtained from pop_acquire() to the buffer:
  // runs the item's dtor, wipes the slot, marks it released (negative
  // size), and then opportunistically advances free_ptr over every
  // contiguous run of released slots.
  void AsyncRingBuffer::pop_release(void *ptr)
  {
    // Step back from the payload to the start of its slot.
    Item *item = (Item*)((guint8*)ptr - sizeof(Header));

    item->dtor(item->data);
    // Zero everything in the slot except the leading int, so that
    // pop_acquire keeps seeing it as occupied until the size is flipped
    // below.  (Assumes `size` is the first int of the slot — implied by
    // the (int*)item + 1 offset; confirm against the Item declaration.)
    memset((int*)item + 1,0,item->size - sizeof(int));

    // by making the size negative, we tell other threads that they can also try to free it,
    // so from now on, we have to be careful, to not have any race conditions.
    g_atomic_int_set(&item->size,-item->size);

    while(1)
    {
      // Plain (non-atomic) snapshots; the CAS at the bottom is what
      // serializes the actual free_ptr advance.
      gint cur_pop_ptr = pop_ptr;
      gint cur_free_ptr = free_ptr;

      gint wrapped_pop_ptr = cur_pop_ptr % blob_size;
      gint wrapped_free_ptr = cur_free_ptr % blob_size;

      // free_ptr has caught up with pop_ptr: nothing left to reclaim.
      if(wrapped_free_ptr == wrapped_pop_ptr)
        break;

      item = (Item*)(blob + wrapped_free_ptr);
      if(item->size > 0)
      {
        // this slot is still acquired.
        return;
      }

      int cur_item_size = -item->size;
      
      if(cur_item_size == 0)
      {
        // another thread is already taking care of cleaning up.
        return;
      }

      // Retire the slot (size 0 == "empty") and try to move free_ptr past
      // it; a failed CAS just means another releaser got there first.
      // NOTE(review): the read / negate / zero sequence here is not one
      // atomic step — a thread stalled between the size read above and
      // these stores could zero a slot that has meanwhile been reclaimed
      // and reused by push().  Worth auditing under heavy contention.
      g_atomic_int_set(&item->size,0);

      g_atomic_int_compare_and_exchange(&free_ptr,cur_free_ptr,cur_free_ptr + cur_item_size);
    }
  }
}
