#include <cassert>
#include <cstring>
#include <cstdlib>
#include <boost/interprocess/sync/interprocess_mutex.hpp>

#include <boost/interprocess/allocators/node_allocator.hpp>

#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/containers/list.hpp>

#include <boost/interprocess/sync/interprocess_condition.hpp>
#include <boost/interprocess/containers/map.hpp>


#include "native_shm.h"
#include "misc.h"
#include "dfruntime.h"

#define DFRT_SHM_NAME "DFRT_native_shm"
#define DFRT_SHM_SIZE 736870912 // NOTE(review): comment said "512MB", but 512MB is 536870912 bytes; 736870912 is ~702MB. Looks like a 7/5 typo -- confirm the intended segment size.
using namespace boost::interprocess;

typedef long PtrType;

typedef node_allocator<int, managed_shared_memory::segment_manager> OwnersAllocator;
typedef node_allocator
<dfrt_mem_release_mesg,
managed_shared_memory::segment_manager>
RlsListAllocator;

typedef char SmallType [768] ;


typedef node_allocator<SmallType,
                        managed_shared_memory::segment_manager>
                            SmallBlocksAllocator;


typedef list<int,OwnersAllocator> OwnersList;
typedef list<dfrt_mem_release_mesg,RlsListAllocator> RlsList;


// Structures for acquire/release.
static OwnersAllocator *alloc_own;

// Descriptor for one tracked shared-memory chunk in the acquire/release
// manager.  Instances live inside the shared segment, so the owners list
// uses the shared-memory node allocator (*alloc_own).
// NOTE: both constructors dereference the global alloc_own, so objects of
// this type may only be created after native_shm_init_last() set it up.
struct nshm_memchunck {
    nshm_memchunck() :
        address(0), size(0), exclusive(false), owners(*alloc_own) {}

    nshm_memchunck( PtrType _address, size_t _size, bool _exclusive ):
        address( _address), size(_size), exclusive(_exclusive), owners(*alloc_own) {}

    PtrType address;     // chunk start address, stored as an integer
    size_t size;         // chunk size in bytes
    bool exclusive;      // true when held for exclusive (write) access


    OwnersList owners;   // node ids owning the chunk (list lives in shm)
};

// Book-keeping entry for nodes waiting on a page in the acquire/release
// protocol; stored by value in a shared-memory map (WaitersMap).
struct waiting_token {
public :
    waiting_token()
        :address(0),
          size(0),
          async(false),
          owners(*alloc_own) {}

    waiting_token( PtrType _address,  size_t _size,  bool _async)
        :address(_address),
          size(_size),
          async(_async),
          owners(*alloc_own){}

    // Copy construction deliberately skips page_cond:
    // interprocess_condition is not copyable, so the copy gets a freshly
    // default-constructed condition (presumably intended, since a copied
    // token has no sleepers yet -- confirm with the protocol).
    waiting_token(const waiting_token &k)
        :address(k.address),
          size(k.size),
          async(k.async),
          owners(k.owners) {}

    waiting_token &operator=(const waiting_token &k) {
        address = k.address;
        size = k.size;
        async = k.async;
        owners = k.owners;
        // page_cond intentionally left untouched (non-assignable type).
        return *this;
    }

    PtrType address;   // page address being waited on
    size_t size;       // requested size
    bool async;        // true for asynchronous acquire attempts
    boost::interprocess::interprocess_condition  page_cond; // waiters sleep here
    OwnersList owners; // waiting node ids (list lives in shm)
};

// Shared-memory allocators for the manager/waiters maps.  A map's
// value_type is std::pair<const Key, Mapped>, so the allocator's
// value_type carries the const key too (ManagerAllocator previously used
// a non-const key, inconsistent with WaitersAllocator and the container).
typedef allocator<std::pair<const PtrType,nshm_memchunck>, managed_shared_memory::segment_manager>
ManagerAllocator;
typedef allocator<std::pair<const PtrType,waiting_token>, managed_shared_memory::segment_manager>
WaitersAllocator;

typedef map<PtrType, nshm_memchunck, std::less<PtrType>, ManagerAllocator> ManagerMap;
typedef map<PtrType, waiting_token, std::less<PtrType>, WaitersAllocator> WaitersMap;



static __thread ManagerMap *acquire_release_manager;
static __thread WaitersMap *acquire_release_waiters;
static bool __inited = false;
static __thread bool __postinited = false;
static __thread SmallBlocksAllocator *small_blocks_allocator;
static __thread interprocess_mutex *acquire_release_mutex;
static managed_shared_memory shm_region;


// Creates allocators :

static ManagerAllocator *alloc_man;
static WaitersAllocator *alloc_wai;
static RlsListAllocator *alloc_rls;

static __thread RlsList **rls_lists;
static __thread interprocess_mutex **rls_lists_mutexes;

// Returns the release-message list that belongs to node `who`.
static inline RlsList & get_rls_list( int who ) {
    CFATAL(__postinited == false, "Initialization incorrect.");
    ASSERT( who < dfrt_get_num_nodes());
    RlsList *node_list = rls_lists[who];
    return *node_list;
}

// Builds, for every node, a per-node release list plus its mutex inside
// the shared segment.  find_or_construct makes this idempotent across
// processes.  Must run after native_shm_init_last() set alloc_rls and
// flipped __postinited.
static void construct_rls_list() {
    CFATAL(__postinited == false, "Initialization incorrect.");
    const int num_nodes = dfrt_get_num_nodes();
    rls_lists = (RlsList **)
            calloc( num_nodes, sizeof( RlsList *));
    rls_lists_mutexes = (interprocess_mutex**)
            calloc( num_nodes, sizeof( interprocess_mutex *));
    // calloc can fail; the old code dereferenced the tables unchecked.
    CFATAL( rls_lists == NULL || rls_lists_mutexes == NULL,
            "Failed to allocate rls list tables.");

    for ( int node=0; node < num_nodes; ++ node) {
        char buffer[128];
        // snprintf (not sprintf) guards against name overflow.
        snprintf( buffer, sizeof(buffer), "Rls_list%d", node);
        rls_lists[node] = shm_region.find_or_construct<RlsList>
                (buffer, std::nothrow)
                ( *alloc_rls );
        CFATAL( rls_lists[node] ==  NULL, "Failed to init rls lists.");

        snprintf( buffer, sizeof(buffer), "Rls_list_mutex%d", node);
        rls_lists_mutexes[node] = shm_region.find_or_construct<interprocess_mutex>
                (buffer, std::nothrow) ();
        CFATAL( rls_lists_mutexes[node] ==  NULL, "Failed to init rls lists mutexes.");
    }
}

// First initialization stage, run before forking worker processes:
// wipes any stale segment left by a previous run, then (re)creates the
// shared-memory region that every later step relies on.
void native_shm_init_pre_fork() {
    // Remove leftovers first so open_or_create starts from a clean segment.
    shared_memory_object::remove(DFRT_SHM_NAME);
    shm_region = managed_shared_memory(open_or_create,
                                       DFRT_SHM_NAME,
                                       DFRT_SHM_SIZE);
    __inited=true;
}




// Final initialization stage: creates the shared-memory allocators and
// finds/constructs the acquire/release structures in the segment.
// Requires native_shm_init_pre_fork() to have created shm_region.
// The allocators are heap-allocated once and never freed: they live for
// the whole process lifetime.
void  native_shm_init_last() {


    alloc_man =  new ManagerAllocator(shm_region.get_segment_manager());
    alloc_wai =  new WaitersAllocator(shm_region.get_segment_manager());
    alloc_own =  new OwnersAllocator(shm_region.get_segment_manager());
    alloc_rls =  new RlsListAllocator(shm_region.get_segment_manager());
    small_blocks_allocator = new SmallBlocksAllocator(shm_region.get_segment_manager());
    //Construct objects
    acquire_release_mutex = shm_region.find_or_construct<interprocess_mutex>
            ("AcquireReleaseMutex",std::nothrow)();
    CFATAL( acquire_release_mutex == NULL, "Failed to create acquire mutex");

    acquire_release_manager  = shm_region.find_or_construct<ManagerMap>
            ("AcquireReleaseManager", std::nothrow)
            (std::less<PtrType>(), *alloc_man );
    CFATAL ( acquire_release_manager == NULL,"Coundln't initialize acquire_release_manager.");

    acquire_release_waiters = shm_region.find_or_construct<WaitersMap>
            ("AcquireReleaseWaiters", std::nothrow)
            (std::less<PtrType>(), *alloc_wai );
    CFATAL ( acquire_release_waiters == NULL,"Coundln't initialize waiters map.");
    // Get the release list :
    // __postinited must be set BEFORE construct_rls_list(): that helper
    // fatals when it is still false.
    __postinited = true;
    construct_rls_list();

    CFATAL( rls_lists == NULL, "Coulnd't get the release list.");
}



#ifdef DFRT_AR_SMALLIMPACT

#define ACC_R 1
#define ACC_W 2
#define ACC_RW 3
#define ACC_SYNCBLOCK 4


// Note about owner count :
// Owner count is not used in the exclusive case.
// In the shared case, it is atomically incremented before an attempt.
// If the acquire fails ( <=> exclusive posesses )
// Then his attempt will be followed by a notice.
// And it decreases the ownership counter after failure notification.

void native_shm_wake_async_from_prec( long prec, PtrType vptr );


// Allocates `size` usable bytes from the shared segment, preceded by a
// struct page_hdr used for acquire/release bookkeeping.  Returns the
// address just past the header.
void * native_shm_malloc(size_t size) {
    CFATAL(__inited == false, "Initialization incorrect.");

    struct page_hdr * retval = NULL;
    if (true ) {// for DEBUG size > 128 - sizeof( page_hdr)) {
        retval = (struct page_hdr *) shm_region.allocate(size +512+ sizeof(struct page_hdr),
            std::nothrow);
        // allocate(..., std::nothrow) returns NULL on exhaustion; the old
        // code fell straight into memset and crashed.
        CFATAL( retval == NULL, "Shared memory exhausted (requested %zu bytes).", size);
        memset( retval, 0, sizeof( struct page_hdr ) );
    } else {
        retval = (struct page_hdr *) &*small_blocks_allocator->allocate(1);
        CFATAL( retval == NULL, "Small-block allocation failed.");
        memset( retval, 0, sizeof( struct page_hdr ));
    }

    retval->size = size;
    SET_CANARIS( retval );
    __sync_synchronize();  // publish the header before handing out the pointer
    CHECK_CANARIS( retval );

    return (void*) ((intptr_t) retval + sizeof(struct page_hdr) );
}


// Tells whether the calling node currently owns the page backing vptr
// (its bit is set in the header's owner bitmask).
bool native_shm_is_acquired(void * vptr) {
    bool retval = false;

    struct page_hdr * phdr =
            (struct page_hdr *) ((intptr_t) vptr - sizeof(struct page_hdr));

    //CHECK_CANARIS( phdr );

    // field_nodes_to_notice / owners_field are long bitmasks: use the
    // long builtin and long shifts.  The previous __builtin_ctz / 1<<
    // versions were undefined for node numbers >= 32 while the node
    // limit is 64 (see native_shm_gen_acquire).
    CFATAL( phdr->field_nodes_to_notice != 0 && __builtin_ctzl( phdr->field_nodes_to_notice ) >= dfrt_get_num_nodes() , "Invalid field in %p : 0x%lx",vptr,phdr->field_nodes_to_notice);
    CFATAL( phdr->owners_field == 0 && phdr->counter != 0, "Incoherent state for is_acquired." );

    retval = ((1L<<dfrt_get_node_num())&phdr->owners_field) ? true : false;

    return retval;
}

void native_shm_free( void * vptr ) {
    CFATAL((__inited == false), "Initialization incorrect.");

    PtrType ptr = (PtrType) vptr;

    _DEBUG( "Calling free : %p", vptr);

    struct page_hdr * page_hdr = (struct page_hdr *) ((intptr_t) vptr - sizeof(struct page_hdr));
    CHECK_CANARIS( page_hdr );
    page_hdr->canari1= 0x43;
    page_hdr->canari2= 0x44;
    CFATAL( page_hdr->field_nodes_to_notice != 0, "Freeing unproperly released memory  %p (notice code : %ld )", vptr, page_hdr->field_nodes_to_notice);

    if ( true ) { // for debug page_hdr->size > 128 - sizeof(page_hdr) ) {
        shm_region.deallocate((void *)((intptr_t)ptr - sizeof(struct page_hdr) ));
    } else {
        small_blocks_allocator->deallocate(offset_ptr<SmallType>((SmallType*)page_hdr),1);
    }
}

// Core acquire primitive behind the blocking / try / async entry points.
// phdr->counter encodes the page state:
//   -1   : held exclusively (read/write)
//    0   : free
//   >= 1 : number of shared (read) owners.
// Returns true when access was granted.  blocking=true only returns after
// success; async=true registers this node for a wake-up on failure.
bool native_shm_gen_acquire( void *vptr,
                             size_t size,
                             bool exclusive,
                             bool blocking,
                             bool async ) {
    CFATAL(dfrt_get_num_nodes() >= 64, "Too many nodes (limit=64).");
    CFATAL(blocking && async, "Access cannot be async and blocking.");
    __DEBUG("Acquire attempt : %p", vptr);
begin:
    struct page_hdr * phdr =
            (struct page_hdr *) ((intptr_t) vptr - sizeof(struct page_hdr));
    CHECK_CANARIS( phdr );

    // If needed register for notification in case of failure.
    // 1L, not 1: node numbers may reach 63 and the mask is a long -- an
    // int shift is undefined for nodes >= 32.
    if ( async || blocking ) {
        __sync_fetch_and_or(&phdr->field_nodes_to_notice, 1L<<dfrt_get_node_num());
    }

    bool write_perms = false;
    // Then, tries to acquire :
    if ( ! exclusive ) {
        // Shared acquire: CAS-increment the reader count unless the page
        // is exclusively held (counter < 0).
        int counter_val = -1;
        do {
            counter_val = phdr->counter;
            if (counter_val <  0 ) {
                goto failure;
            }
            // The first shared owner is responsible for writing the perms.
            write_perms = ( counter_val == 0 );
        } while ( ! __sync_bool_compare_and_swap( &phdr->counter, counter_val, counter_val+1) );

        goto success; // We acquired successfully.

    } else {
        // Exclusive acquire: only the free (0) -> held (-1) transition.
        if ( __sync_bool_compare_and_swap( &phdr->counter, 0, -1 ) ) {
            write_perms = true;
            goto success;
        } else {
            goto failure;
        }
    }



success:
    if ( write_perms )  {
        __DEBUG("Acquire writing perms.");
        phdr->perm = 0;
        phdr->perm |= exclusive?ACC_RW:ACC_R;
        phdr->perm |=  ((!async)&&blocking)  ? ACC_SYNCBLOCK:0;
        phdr->owners_field = 0;
    }
    // 1L for the same >= 32-node reason as above.
    phdr->owners_field |= 1L<<dfrt_get_node_num();

    __DEBUG( "Acquire succeeds.");
    CHECK_CANARIS( phdr );
    return true;
failure:

    if ( async && !blocking ) {
        // Async acquire: nothing to do here -- the releaser will push a
        // notification (we registered in field_nodes_to_notice above).


    } else if ( blocking ) {
        int val = phdr->counter;
        // Blocking acquire: sleep until the counter changes, then retry.
        if ( (exclusive && val != 0)
             || (!exclusive && val < 0) ) {
            futex_wait(&phdr->counter, val);
        }
        // Try again.
        __DEBUG("Gen acquire looping.");
        CHECK_CANARIS( phdr );
        goto begin;

    } else if ( !async && !blocking) {
        // Simple try acquire: just report the failure.
    }
    _DEBUG("Acquire failed %p", vptr);
    CHECK_CANARIS( phdr );
    return false;
}


// Releases a page previously acquired via native_shm_gen_acquire():
// decrements (shared) or clears (exclusive) the owner counter and, when
// the page becomes free, wakes futex sleepers and/or notifies registered
// async waiters.
void native_shm_release( void * vptr ) {
    __DEBUG("Releasing %p", vptr);
    struct page_hdr * phdr =
            (struct page_hdr *) ((intptr_t) vptr - sizeof(struct page_hdr));
    CHECK_CANARIS( phdr );
    // ACC_SYNCBLOCK was set by a blocking acquirer => futex sleepers may exist.
    bool use_condition = (phdr->perm & ACC_SYNCBLOCK )?true:false;
    bool need_wake = false;

    long to_notice = 0;

    // todo : This assert failed : log : assert_408.log
    ASSERT( phdr->counter != 0 );
    // Examine the value :
    if ( phdr->counter == -1 ) {
        // Exclusive holder leaving: the page becomes free.
        need_wake = true;
        phdr->owners_field = 0;

        phdr->counter = 0; // No need for atomic here.
        // Atomically grab-and-clear the set of nodes waiting for notice.
        to_notice = __sync_fetch_and_and( &phdr->field_nodes_to_notice, 0 );


    } else {
        // Shared holder leaving: page is free once the last reader is gone.
        long new_val =  __sync_sub_and_fetch( &phdr->counter, 1 );
        if (new_val == 0) {
            phdr->owners_field = 0;
            need_wake = true;
            to_notice = __sync_fetch_and_and( &phdr->field_nodes_to_notice, 0 );

        }
    }

    if ( need_wake ) {
        if (use_condition) {
            // Wake local futex sleepers first, then remote async waiters.
            futex_wake( &phdr->counter, INT_MAX );
            native_shm_wake_async_from_prec( to_notice, (PtrType) vptr);

        } else {
            // Async case
            native_shm_wake_async_from_prec( to_notice, (PtrType) vptr);
        }


    }
}

// Pushes a "page released" message to every node whose bit is set in
// `prec` (the notify bitmask grabbed by native_shm_release), skipping
// the local node.
void native_shm_wake_async_from_prec( long prec, PtrType vptr ) {
    DEBUG( "In async waker for %p", (void*)vptr );
    struct page_hdr * phdr =
            (struct page_hdr *) ((intptr_t) vptr - sizeof(struct page_hdr));
    CHECK_CANARIS( phdr );
    while ( prec ) {
        // prec is a long bitmask: use the long builtin/shift.  The old
        // __builtin_ctz / ~(1<<waiter) versions were undefined for node
        // numbers >= 32 while the node limit is 64.
        int waiter = __builtin_ctzl(prec);
        CFATAL(waiter >= dfrt_get_num_nodes(), "Invalid waiter : (prec = %lx)",prec);
        prec &= ~(1L<<waiter);
        _DEBUG("Looping for advertisement : %p for %d",
              (void *) vptr, waiter);
        if ( waiter == dfrt_get_node_num() ) {
            // We don't need to warn ourselves (because pages are shared by node).
            continue;
        }

        // Per-destination mutex protects that node's release list.
        scoped_lock<interprocess_mutex>
                pushlock(*rls_lists_mutexes[waiter]);
        // Add notification :
        dfrt_mem_release_mesg msg;
        msg.pageaddr = (void *) vptr;
        msg.size = 1; // TODO use real size.
        rls_lists[waiter]->push_front(msg);
        CHECK_CANARIS( phdr );


    }

}

#endif
// Acquires vptr, sleeping (futex) until the access can be granted.
void native_shm_blocking_acquire(void * vptr, size_t size, bool exclusive ) {
    CFATAL(__postinited == false, "Initialization incorrect.");
    // blocking=true, async=false: gen_acquire loops until it succeeds,
    // so the boolean result carries no information here.
    (void) native_shm_gen_acquire( vptr, size, exclusive,
                                   /*blocking=*/true, /*async=*/false );
}

// Single non-blocking acquire attempt; returns whether it succeeded.
bool native_shm_try_acquire(void * vptr, size_t size, bool exclusive ) {
    CFATAL(__postinited == false, "Initialization incorrect.");
    const bool granted = native_shm_gen_acquire( vptr, size, exclusive,
                                                 /*blocking=*/false, /*async=*/false );
    return granted;
}
// Non-blocking acquire attempt that, on failure, registers this node for
// a release notification.  Returns whether the acquire succeeded now.
bool native_shm_async_acquire(void * vptr, size_t size, bool exclusive ) {
    CFATAL(__postinited == false, "Initialization incorrect.");
    const bool granted = native_shm_gen_acquire( vptr, size, exclusive,
                                                 /*blocking=*/false, /*async=*/true );
    return granted;
}


bool native_shm_next_rls( struct dfrt_mem_release_mesg * msgbuf ) {
    CFATAL(__postinited == false, "Initialization incorrect.");

    //if ( rls_lists[dfrt_get_node_num()]->empty() ) {
    //    return false;
    //}

    scoped_lock<interprocess_mutex>
            pushlock(*rls_lists_mutexes[dfrt_get_node_num()]);
    // TODO : add proper sync
    if (!rls_lists[dfrt_get_node_num()]->empty()) {
        *msgbuf = * rls_lists[dfrt_get_node_num()]->begin();
        _DEBUG( "Processing rls : %p", msgbuf->pageaddr );

        // To debug canari values :
        struct page_hdr * phdr =
                (struct page_hdr *) ((intptr_t) msgbuf->pageaddr - sizeof(struct page_hdr));
        CHECK_CANARIS( phdr );

        rls_lists[dfrt_get_node_num()]->pop_front();
        return true;
    } else {
        __DEBUG("Finished processing rls messages.");
        return false;
    }
}

#define SET_GLOBAL_STORAGE_NAME "DFRT_SetGlobal_Ptr"
static __thread PtrType *global_storage_addr;
static __thread bool global_storage_initiated = false;
void native_shm_set_global(void * ptr) {
    CFATAL(__inited == false, "Initialization incorrect.");

    if (! global_storage_initiated ) {
        global_storage_addr = shm_region.find_or_construct<PtrType>
                (SET_GLOBAL_STORAGE_NAME, std::nothrow) ();
        global_storage_initiated = true;
    }

    *global_storage_addr = (PtrType) ptr;

}


void * native_shm_get_global() {
    CFATAL(__inited == false, "Initialization incorrect.");

    if (! global_storage_initiated ) {
        global_storage_addr = shm_region.find_or_construct<PtrType>
                (SET_GLOBAL_STORAGE_NAME, std::nothrow) ();
        global_storage_initiated = true;
    }

    return (void*) *global_storage_addr;
}



void native_shm_allthread_barrier () {
    int * barrier  = shm_region.find_or_construct<int>
                ("BarrierInt", std::nothrow)
                ();

    int res = __sync_add_and_fetch( barrier, 1 );
    while ( true ) {
        int val = *barrier;
        if ( val < dfrt_get_num_nodes() ) {
            futex_wait( barrier, val );
        } else {
            break;
        }
    }

    futex_wake( barrier, INT_MAX);

}


// cleanup
// Final teardown hook.  Currently a stub: the shared segment is NOT
// unmapped or removed here yet (see TODO below).
void native_shm_fini() {
    CFATAL(__postinited == false, "Initialization incorrect.");

    // TODO
    // Teardown shm.
}
