#include <boost/unordered/unordered_map.hpp>
#include "dfruntime_private.h"
#include "global_queue.h"
#include "hooks.h"
#include "wish_queue.h"
#include "memory_management.h"
#include "asyncpages.h"

// from dfrt.c
extern "C" {
extern void release_created_frames();
}

// Number of tasks currently handed to the inner (OMP) scheduling layer.
// Updated with __sync_* atomics in schedule_forlocal()/executor().
static int inner_task_counter = 0;
// Additional inner queue :
// Singly linked overflow list used when the inner OMP queue is saturated
// (filled by add_to_alt_inner, drained by pop_alt_inner).
struct inner_queue {
    struct inner_queue * next;   // next node towards the tail, NULL at the tail
    struct frame_struct * frame; // parked dataflow frame
};


// Integer wide enough to hold a frame pointer, used as the map key.
typedef intptr_t PtrType;
// Maps a frame address to the number of delayed tdecrease operations
// recorded against it (see dfrt_delayed_tdecrease / perform_tdecs).
typedef boost::unordered_map<PtrType,int> DecMap;

// Per-thread store of pending tdecs; allocated by scheduling_init_tls().
static __thread DecMap * decmap = NULL;

void scheduling_init_tls() {
    decmap = new DecMap();
}


static inline void add_to_stored_tdecs( struct frame_struct * fptr ) {
    PtrType f = (PtrType) fptr;
    if ( decmap->find(f) != decmap->end() ) {
        (*decmap)[f] ++;
    } else {

        (*decmap)[f] = 1;
    }

}

void perform_tdecs(struct frame_struct * fptr) {
    PtrType f = (PtrType) fptr;

    // TODO : from several atomic decreases do only one ( benign )
    if ( decmap->find(f) == decmap->end() ) {
        // First task, because it is not created by a task but manually and
        // does its own release in the next tend ofthe thread, even though
        // it doens't tdec itself of course... : )

        DEBUG( "(WARNING : only for first task and potential errors) unregistered tdec on %p. ", fptr);
    }

    for ( int i = 0; i < (*decmap)[f]; ++ i ) {
        dfrt_effective_tdecrease(fptr);
    }

    decmap->erase(f);
}

// TODO : make it thread local ?
// Head/tail of the overflow list; every access goes through the
// 'alt_inner_queue' OMP critical section.
static struct inner_queue * alt_inner_head = NULL;
static struct inner_queue * alt_inner_tail = NULL;

// Set by schedule_has_proper_termination(); checked by executor() to
// detect tasks that returned without terminating properly.
static __thread bool properly_terminated = false;

static void add_to_alt_inner(struct frame_struct * frame) {

    struct inner_queue * new_node =
            (struct inner_queue*) malloc( sizeof(struct inner_queue) );
    new_node->frame = frame;
    new_node->next = NULL;

#pragma omp critical (alt_inner_queue)
    {
        if ( alt_inner_tail == NULL ) {
            alt_inner_tail = new_node;
            alt_inner_head = new_node;
        } else {
            alt_inner_tail->next = new_node;
            alt_inner_tail = new_node;
        }
    }

}

struct frame_struct * pop_alt_inner() {
    struct frame_struct * ret = NULL;
#pragma omp critical (alt_inner_queue)
{
    if ( alt_inner_tail == NULL ) {
        // Pass
    } else {
    ret = alt_inner_head->frame;
    void * next = alt_inner_head->next;
    free(alt_inner_head);
    alt_inner_head = (struct inner_queue *) next;
}

}
return ret;
}



/* Public entry point : do not apply the tdecrease on 'frame' now, just
 * record it in the per-thread map; perform_tdecs() replays it later. */
void dfrt_delayed_tdecrease(struct frame_struct *frame ) {
    add_to_stored_tdecs(frame);
}


/* True when the frame's task descriptor is flagged recursive. */
bool is_recursive( struct frame_struct * frame ) {
    return frame->infos->recursive != 0;
}

/* True when the inner OMP layer already holds its maximum task load.
 * NOTE(review): inner_task_counter is read without synchronization here
 * although it is updated with __sync atomics elsewhere — presumably a
 * deliberate benign race; confirm. */
static inline bool inner_queue_full() {
    // Cap of inner tasks per OMP thread — was a bare magic number.
    // TODO : inquire about this crucial value.
    enum { INNER_TASKS_PER_THREAD = 48 };
    return inner_task_counter >= INNER_TASKS_PER_THREAD * omp_get_num_threads();
}


/* Decide where a frame should be scheduled : leaf (non recursive) tasks
 * always go to the local OMP layer; recursive ones go to the global queue
 * once the inner layer holds enough work.
 * NOTE(review): since 5*N < 32*N for any N >= 1, the second test is always
 * true when the first one fails, so the FATAL below is unreachable — one
 * of the two thresholds looks unintended; confirm. */
void schedule_regulate(struct frame_struct * frame) {
    
    if (! is_recursive(frame) ) {
        DEBUG("%s is leaf : for local", frame->infos->misc?frame->infos->misc->fname:NULL);
        schedule_forlocal(frame);
        return;
    }



    // We will not add other recursive frames if we
    // are using the inner queue.
    if ( inner_task_counter >= 5*omp_get_num_threads()  ) {
        DEBUG("%s is branch, queue full : global", frame->infos->misc?frame->infos->misc->fname:NULL);

        schedule_forglobal(frame);
        return;
    }
    

    if ( inner_task_counter <= 32*omp_get_num_threads() ) {
        DEBUG("%s is branch, queue non full : local", frame->infos->misc?frame->infos->misc->fname:NULL);

        schedule_forlocal(frame);
        return;
    }

    FATAL( "Undecided scheduling.");
}


/* Called by a task body that reached its normal termination point;
 * clears the "non terminated task" check performed in executor(). */
void schedule_has_proper_termination() {
    properly_terminated = true;
}

/* Hand 'frame' to the inner OMP layer : either spawn an OMP task right
 * away, or park the frame in the overflow list when the layer is full. */
void schedule_forlocal(struct frame_struct * frame) {

    // The task is entering the inner layer : grow the counter first.
    __sync_add_and_fetch(&inner_task_counter, 1);

    // Run the pre-schedule hook.
    hook_before_local_schedule( frame );

    if ( ! inner_queue_full() ) {
        // Room available : schedule immediately.
#pragma omp task
        executor(frame);
    } else {
        // Layer saturated : keep the frame in the overflow queue.
        add_to_alt_inner(frame);
    }
}

/* Hand 'frame' to the process-wide global queue. */
void schedule_forglobal( struct frame_struct * frame ) {
    gqueue_append( frame );
}


/* Cleanup pseudo-task : drain every pending asynchronous page message,
 * then terminate through the normal dfrt_tend() path. */
void df_pending_writes() {
    DEBUG( "Inside DF_PENDING_WRITES");
    for (;;) {
        if ( wishlist_empty() ) {
            break;
        }
        async_process_all_messages();
    }
    dfrt_tend();
}

// Human-readable identification for the cleanup pseudo-task below.
struct misc_info df_pending_writes_misc_info =
{ (char*)"df_pending_writes", (char*)"Cleanup missing writes"};

// Task descriptor type for df_pending_writes; only the common
// DFRT_INFO_CONTENT fields are needed.
struct dfrt_info_pending_writes {
    DFRT_INFO_CONTENT;
    // No wrdesc necessary.
};

// Descriptor instance handed to the frame created in executor() when the
// wishlist is not empty at program end.
struct dfrt_info_pending_writes df_pending_writes_infos = {
    &df_pending_writes,
    1, // Immediate activation
    0, // zero ret places
    1, // nonrecursive
    sizeof(struct frame_struct),
    &df_pending_writes_misc_info
};

/* Function for OpenMP that takes care of updating the TLS current frame
 * pointer, runs the task body, verifies its termination protocol, frees
 * the frame, and refills the inner layer from the overflow queue. */

hidden_attr
void executor(struct frame_struct * frame_pointer) {
    // Sets the current_fp as provided by dfruntime.c
    current_fp = frame_pointer;

    // Accesses to the function pointer.
    // Hooking for before task.
    if ( hook_before_task() ) {
        void (*fn)() = current_fp->infos->fn;
        // The task body is expected to call schedule_has_proper_termination();
        // otherwise we abort right after it returns.
        properly_terminated = false;
        fn();
        if ( !properly_terminated ) {
            FATAL( "Error : non terminated task %s ", frame_pointer->infos->misc?
                       frame_pointer->infos->misc->fname:NULL );
        }
    } else {
        FATAL("Before schedule hook returning false is not supported yet.");
        // NOTE(review): assuming FATAL does not return, the two lines
        // below are unreachable — confirm.
        __sync_sub_and_fetch( &inner_task_counter, 1 );
        return;
    }

    // Hooking for after task.
    hook_after_task();

    // TODO : think about that :
    // Every delayed tdec must have been replayed before the task ends.
    CFATAL( !decmap->empty(), "Missed some tdecs.") ;

    // The frame must not be touched past this point.
    dfrt_mem_free( frame_pointer );
    // A task finished. Decrement the counter.
    int remaining = __sync_sub_and_fetch( &inner_task_counter, 1 );

    // Tries to schedule more tasks from alt_inner_queue.
    while (! inner_queue_full() ) {
        struct frame_struct * frame = pop_alt_inner();
        if ( frame == NULL ) {
            break;
        }

#pragma omp task
        executor(frame);
    }

    if ( remaining == 0 ) {
        // We cannot finish the program with pending pages :
        // We schedule a dataflow task to process the messages.
        // And return without ** nowork **.
        if ( ! wishlist_empty()) {
            struct frame_struct * pending_writes_frame =
                    dfrt_tcreate(1, sizeof( struct frame_struct));
            // That is it !
            pending_writes_frame->infos =
                    (struct dfrt_info*) &df_pending_writes_infos;
            release_created_frames();
            dfrt_effective_tdecrease( pending_writes_frame );

        } else {
            hook_no_work();
        }
    } else if (remaining <= omp_get_num_threads()  ) {
        hook_would_wait();
    }
}

