#ifdef TARG_SW5
#include <share.h>
#endif
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "aceMesh_concurrent_task.h"
#include "aceMesh_utils.h"
#if (defined TARG_SW5) || (defined TARG_SW9)
#include <stdbool.h>
#endif
#include <string.h>

#include "am_machine.h"
#include "utils/acelog.h"

#if defined(SUCC_BATCH) || defined(SEG_BUFF)
#include "athread.h"
#endif

#ifdef MEMORY_POOL
#include "MemPool.h"
#endif

extern __uncached volatile int master_lock;
extern int sche_num_threads;
extern int total_num_threads;
extern void *cur_super_taskptr;
extern int cgid;
#ifdef TARG_SW9
extern unsigned long seg_succ_addr[64];
extern int agent_id;
#ifdef GS_AGENT
#define thread_id 0
#else
#define thread_id total_num_threads - 1
#endif
#endif
#ifdef DEBUG
extern int my_mpi_rank;
#endif

#ifdef SUCC_BATCH
extern struct m2s_detail M2S_details_64[64];
extern struct m2s_detail M2S_detail_temp_64[64];
#ifdef LOCAL_FLAG_M2S
extern __thread volatile int m2s_flag;
#else
extern volatile int m2s_flag[64];
#endif
#endif

#ifdef ACEMESH_TIME
extern double blocking_pure_exec_time;
extern double all_pure_exec_time[65];
extern double all_task_time[65];
extern double all_pure_exec_time[65];
extern unsigned long master_cycle_time_start;
extern unsigned long master_cycle_time_end;
extern unsigned long blocking_cycle_time_end;
double temp_time = 0.0;
extern double master_seg_sche_time;
extern unsigned long rpcc();
//{
//  unsigned long time;
//  asm("rtc %0":"=r"(time):);
//  return time;
//}
#endif
#ifdef TARG_SW9
// #include "task_dag_graph.h"
extern __uncached volatile unsigned long con_status;
extern __uncached volatile unsigned long con_dest_ptr;
extern __uncached volatile int con_graph;
extern __uncached volatile unsigned long con_src_ptr;
// extern volatile bool con_is_nei;
extern __uncached volatile int con_sum_pre;
extern int total_num_threads;
extern __uncached unsigned long block_time, block_temp;
// extern __uncached volatile unsigned long main_task[BLOCK_SIZE], main_head, main_tail;
// extern __uncached volatile unsigned long dest_task[MULTI_TASK], dest_head, dest_tail;
//  aceMesh_task* dest, int type, tuple_rw_task& src, bool is_neighbor;
extern unsigned long add_time, temp_add_t;
int add_times;
#endif
#ifdef THREAD_TRACE
#include "thread_trace.h"
extern unsigned long trace_time;
extern unsigned long begin_time;
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
// extern int max_num_successor;
extern int num_successor[65];
/*
extern unsigned long execute_start;
extern unsigned long execute_end;
extern int total_num_threads;
*/
extern unsigned long total_nedges;
#endif
#ifdef ACEMESH_GRAPH_BUILD_PROFILING
extern unsigned long buil_prof[N_BUILD_PROF];
#endif

// #define decrement_ref_count(self) (--((struct task*)self)->ref_count_t)

// Decrement a task's reference count and publish the new value with a full
// memory barrier ("memb") so other cores observe it before any later stores.
// NOTE(review): the decrement itself is a plain --, not an atomic RMW; callers
// appear to serialize access via locks -- confirm there is no unlocked
// concurrent use of ref_count_t through this path.
inline void decrement_ref_count(struct task *self)
{
    --self->ref_count_t;     // plain decrement; the barrier gives ordering only
    asm volatile("memb\n");  // SW hardware memory barrier
}

// Hardware atomic read-modify-write helpers.  On SW5 these wrap the
// ldw_inc / ldw_dec / ldw_set instructions, which atomically fetch the old
// word at *(_addr_) while incrementing / decrementing / setting it in memory.
#ifdef TARG_SW5
// my_atomic_add: _new_ receives the incremented value (old + 1).
#define my_atomic_add(_new_, _addr_)                                                   \
    {                                                                                  \
        asm volatile("ldw_inc   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                       \
    }
// my_atomic_sub: _val_ receives the value fetched by ldw_dec (pre-decrement).
#define my_atomic_sub(_val_, _addr_)                                                   \
    {                                                                                  \
        asm volatile("ldw_dec  %0, 0(%1)\n\t" : "=r"(_val_) : "r"(_addr_) : "memory"); \
    }
#elif TARG_SW9
// NOTE(review): the "//" line below ends in a backslash, so line splicing
// makes the following three lines part of the comment too -- the entire
// CAS-based my_atomic_add is disabled here.  my_atomic_add IS used in SW9
// code paths below, so a definition is presumably supplied elsewhere; confirm.
//#define my_atomic_add(_new_, _val_)                                    \
    {                                                                  \
        _new_ = __sync_val_compare_and_swap(&(_val_), _val_, ++_val_); \
    }
#endif
// my_atomic_true: atomically set the word at *(_addr_); _val_ gets the old value.
#define my_atomic_true(_val_, _addr_)                                                 \
    {                                                                                 \
        asm volatile("ldw_set  %0,0(%1)\n\t" : "=r"(_val_) : "r"(_addr_) : "memory"); \
    }

#ifdef CONCURRENT_CONSTRUCT_GRAPH
// Publish a task-state value; the store is followed by a full memory barrier
// so it becomes visible to other cores before the caller proceeds (the
// store/barrier order is load-bearing -- do not reorder).
inline void set_task_state(int val, volatile int *state)
{
    *state = val;
    asm volatile("memb\n");  // SW memory barrier: flush the state store
}
#endif

#ifdef MAIN_MUTEX_PROFILING
// Lock-acquisition profiling counters for main_mutex_lock: failed vs.
// successful acquisition attempts.  The pointer aliases exist so the inline
// asm below can reference the counters through "m" constraints.
unsigned int fail_count = 0;
unsigned int success_count = 0;
unsigned int *fail = &fail_count;
unsigned int *success = &success_count;
#endif

// Acquire the spin lock at *my_lock; returns only once the lock is held.
//
// SW5 build:
//   MUTEX_BASIC  - spin directly on the atomic ldw_inc until it fetches 0
//                  (lock was free; our increment claimed it).
//   otherwise    - test-and-test-and-set: spin on a plain load first, back off
//                  through a fixed delay loop (50 << 4 iterations) while the
//                  lock looks held, then attempt the atomic ldw_inc.  The
//                  MAIN_MUTEX_PROFILING variant additionally counts failed and
//                  successful attempts in *fail / *success.
// SW9 build:
//   The lock request is delegated to an agent: the lock address is published
//   (con_src_ptr via LDM under SPEIO, else con_dest_ptr), con_status is
//   raised, and the caller spins until the agent clears it.  memb barriers
//   order the publishes; the SPEIO path also accumulates spin time into
//   block_time.
void main_mutex_lock(volatile int *my_lock)
{
#ifdef TARG_SW5
#ifdef MUTEX_BASIC
    unsigned int __addr, __tmpi;
    // Spin on ldw_inc until the fetched value is 0 (bne loops while non-zero).
    __asm__ __volatile__(
        "        memb\n"
        "        ldi      %1,%2\n"
        "1:      ldw_inc  %0,0(%1)\n"
        "        bne      %0,1b\n"
        "        memb\n"
        : "=&r"(__tmpi), "=&r"(__addr)
        : "m"(*my_lock)
        : "memory");
    return;
#else
    unsigned int __tmp = 0;
    unsigned int __cnt;
#ifdef MAIN_MUTEX_PROFILING
    unsigned int __fail = 0;
    unsigned int __success = 0;
    // NOTE(review): [fail]/[success] are both read (ldw) and written (stw) by
    // this asm but are declared as "=m" outputs; "+m" would describe that more
    // accurately -- confirm on the SW toolchain.
    __asm__ __volatile__(
        "0:     ldw     %[__tmp], %[my_lock]\n"
        "       beq     %[__tmp], 2f\n"
        "       ldw     %[__fail], %[fail]\n"
        "       addw    %[__fail], 1, %[__fail]\n"
        "       stw     %[__fail], %[fail]\n"
        "       ldi     %[__cnt], 50\n"
        "       sll     %[__cnt], 4, %[__cnt]\n"
        "1:     subw    %[__cnt], 1, %[__cnt]\n"
        "       bne     %[__cnt], 1b\n"
        "       br      0b\n"
        "2:     ldw_inc    %[__tmp], %[my_lock]\n"
        "       bne     %[__tmp], 3f\n"
        "       ldw     %[__success], %[success]\n"
        "       addw    %[__success], 1, %[__success]\n"
        "       stw     %[__success], %[success]\n"
        "       memb    \n"
        "       br      4f\n"
        "3:     ldw     %[__fail], %[fail]\n"
        "       addw    %[__fail], 1, %[__fail]\n"
        "       stw     %[__fail], %[fail]\n"
        "       br      0b\n"
        "4:     unop    \n"
        : [__tmp] "=&r"(__tmp), [__cnt] "=&r"(__cnt), [__fail] "=&r"(__fail),
          [__success] "=&r"(__success), [fail] "=m"(*(fail)), [success] "=m"(*(success))
        : [my_lock] "m"(*(my_lock))
        : "memory");
#else
    // Same TTAS loop without the profiling counters.
    __asm__ __volatile__(
        "0:     ldw     %[__tmp], %[my_lock]\n"
        "       beq     %[__tmp], 2f\n"
        "       ldi     %[__cnt], 50\n"
        "       sll     %[__cnt], 4, %[__cnt]\n"
        "1:     subw    %[__cnt], 1, %[__cnt]\n"
        "       bne     %[__cnt], 1b\n"
        "       br      0b\n"
        "2:     ldw_inc    %[__tmp], %[my_lock]\n"
        "       bne     %[__tmp], 0b\n"
        "       memb    \n"
        "       br      3f\n"
        "3:     unop    \n"
        : [__tmp] "=&r"(__tmp), [__cnt] "=&r"(__cnt)
        : [my_lock] "m"(*(my_lock))
        : "memory");
#endif
    return;
#endif
#elif TARG_SW9
#ifdef SPEIO
    // Publish the lock address into the agent's LDM, raise con_status, and
    // spin until the agent clears it; spin time is charged to block_time.
    my_h2ldm(con_src_ptr, thread_id, cgid, unsigned long) = (unsigned long)my_lock;
    // con_dest_ptr=(unsigned long)my_lock;
    asm volatile("memb\n");
    // con_status=1;
    my_h2ldm(con_status, thread_id, cgid, int) = 1;
    asm volatile("memb\n");
    // while(con_status);
    block_temp = rpcc();
    while (my_h2ldm(con_status, thread_id, cgid, int))
        ;
    block_time += rpcc() - block_temp;
    asm volatile("memb\n");
    return;
#else
    // Same handshake through the shared uncached globals.
    con_dest_ptr = (unsigned long)my_lock;
    asm volatile("memb\n");
    con_status = 1;
    asm volatile("memb\n");
    while (con_status)
        ;
    asm volatile("memb\n");
    return;
#endif
#endif
}
/*
void main_mutex_lock(volatile int* my_lock)
{
#ifdef TARG_SW5

#ifdef MUTEX_BASIC
    unsigned int __addr ;
    __asm__ __volatile__(
        "        memb\n"
        "        ldi      %0,%1\n"
        "        stw  $31,0(%0)\n"
        "        memb\n"
      : "=&r"  (__addr)
      : "m" (*my_lock)
      : "memory");
    return;
#else
    unsigned int __tmp = 0;
    __asm__ __volatile__ (
    "       memb    \n"
    "       mov     0, %[__tmp]\n"
    "       stw     %[__tmp], %[my_lock]\n"
    : [__tmp] "=&r"(__tmp)
    : [my_lock] "m"(*(my_lock))
    : "memory"
    );
    return;
#endif
#elif TARG_SW9
    assert(0);
#endif
}
*/

#ifdef MEMORY_POOL
extern struct MemPool pool;
#endif

/*void set_task_type(struct aceMesh_task* self,task_type type)
{
    self->my_type = type;
}*/
/* Accessor: report the task's type tag. */
task_type get_task_type(struct aceMesh_task *self)
{
    task_type current_type = self->my_type;
    return current_type;
}

#ifdef MASTER
#ifdef MPI_SURPPORT
/* Accessor: report whether the task is currently marked suspended. */
int get_task_suspend(struct aceMesh_task *self)
{
    return self->suspend;
}

/* Mutator: record the given suspend mode on the task. */
inline void set_task_suspend(struct aceMesh_task *self, int mode)
{
    self->suspend = mode;
}
#endif

#ifdef ACEMESH_PARA
#define mymin(a, b) ((a < b) ? (a) : (b))
#define mymax(a, b) ((a > b) ? (a) : (b))
extern int gid1, gid2, mpre_id;
extern char npar[65][MAXT];
extern char waitcomm[MAXT];

#endif

#if defined(SUCC_BATCH) && defined(CONCURRENT_CONSTRUCT_GRAPH) && (_SERIAL_QUEUE)
extern void emp_d_master_consum_all();
#endif

/*
 * Master-side completion hook for a finished task.
 *
 * Marks the task as over, decrements each successor's reference count, and
 * spawns every successor whose count reaches -1 (i.e. all predecessors done).
 * Under SUCC_BATCH, ready successors bound to a slave core are staged in
 * M2S_detail_temp_64 and flushed to the slaves in a batch.  In
 * _RETURN_VERTICAL builds a ready vertical task affine to this thread is
 * returned for the caller to run next; otherwise returns NULL.
 *
 * Locking: self->task_base.successor_lock protects the successor list against
 * concurrent add_successor/add_end_successor; with BIG_CR_REGION the whole
 * function runs inside it, otherwise only the snapshot of successor_count.
 */
struct task *execute(struct aceMesh_task *self)  // modify TODO
{
#ifdef THREAD_TRACE_LEVEL_1
    trace_time = rpcc() - begin_time;
    trace_print(trace_time, '0', RTS_execute_successor);
#endif

    int val = 0;
    /*
    main_mutex_lock(&(self->task_base.finished_lock));
    my_atomic_add(val,&(self->task_base.state));
    main_mutex_lock(&(self->task_base.successor_lock));
    my_mutex_unlock(&(self->task_base.finished_lock));
    */
    main_mutex_lock(&(self->task_base.successor_lock));
    unsigned int size = self->successor_count;
#ifndef BIG_CR_REGION
    /* Small critical region: publish "over" and drop the lock immediately;
     * later add_successor calls will see over==1 and skip the attach. */
    set_task_state(1, &(self->task_base.over));
    my_mutex_unlock(&(self->task_base.successor_lock));
    asm volatile("memb\n");
#endif
    int new = 0;

    int j, i, dist;
    struct task *t = NULL;
    struct task *vert = NULL;

#ifdef ACEMESH_SCHEDULER_PROFILING
    num_successor[total_num_threads] = mymax(num_successor[total_num_threads], size);
#endif
    alog_debug("master core task self addr: %x", self);
    // printf("master core task self addr: %x\n", self);
    // fflush(stdout);
#ifdef REUSE_GRAPH
    // dag reuse, restore ref_conunt_t
    if (self->task_base.reused) self->task_base.ref_count_t = self->task_base.backup_ref;
#endif

#ifdef SUCC_BATCH
    int destid;
    // struct m2s_detail M2S_detail_temp_64[64];
    // int t_num[16]={0};
    int k, flag_tmp;
    // int pending_pe[16]={-1};
    int total_task_num;
#endif

    /*#ifdef ACEMESH_SCHEDULER_PROFILING
            dist=self->reuse_distance;
            if (dist > 0)
            {
                    ++sum_vert_times;
                    if (dist > maxReuseChain)
                    {
                            maxReuseChain = dist;
                    }
            }
    #endif*/

#ifdef ACEMESH_TIME
    master_cycle_time_end = rpcc();
    temp_time = (double)(master_cycle_time_end - master_cycle_time_start) / MFREQ;
    // all_pure_exec_time[total_num_threads]+=(double)(master_cycle_time_end-master_cycle_time_start)/FREQ;
    all_pure_exec_time[total_num_threads] += temp_time;
    if (get_task_type(self) != BLOCKING_TASK) blocking_pure_exec_time += temp_time;
#endif

#ifdef ACEMESH_PARA
    //    printf("gid1:%d, gid23:%d\n",gid1,gid2);
    gid2 = mymin(gid2, MAXT - 1);
    if (gid2 > mpre_id) {
        gid1 = mymax(gid1, mpre_id + 1);
        if (get_task_type(self) != BLOCKING_TASK) {
            for (i = gid1; i <= gid2; i++) npar[total_num_threads][i] = 1;
        } else {
            for (i = gid1; i <= gid2; i++) {
                waitcomm[i] = 1;
                //                printf("wait_comm_id:%d\n",i);
            }
        }
        mpre_id = gid2;
    }
#endif

#ifdef MPI_SURPPORT  // TODO
#ifdef DEBUG
    alog_debug("task_execute :  type:%d,,suspend:%d\n", get_task_type(self),
               get_task_suspend(self));
#endif

    /* A suspended blocking task is handed back to the MPI layer instead of
     * releasing its successors here. */
    if (get_task_type(self) == BLOCKING_TASK && get_task_suspend(self)) {
#ifdef DEBUG
        alog_debug("rank:%d, suspend_spawn\n", my_mpi_rank);
#endif

        // dead lock test 0615
        // #ifdef TEMP_MODI_MPI
        mpi_spawn(self);
        my_mutex_unlock(&(self->task_base.successor_lock));
        return NULL;
        // #endif
    }
#endif

#ifdef SEG_BUFF
    int temp_seg = -1;
#ifdef ACEMESH_SCHEDULER_PROFILING
    unsigned long long master_cycle_start = rpcc();
#endif
    //    temp_seg=my_h2ldm(seg_succ_addr[agent_id],agent_id,cgid,int);
    //    printf("need_agent:%p,seg=%d\n",self,temp_seg);
    //    fflush(stdout);
    //  if(temp_seg==SEG_MASTER)
    //      spawn_to_pending_succ(self);
    //  else
    spawn_to_buff_succ(self);
#ifdef ACEMESH_SCHEDULER_PROFILING
    // master_seg_sche_time+=(float)(rpcc()-master_cycle_start)/MFREQ;
#endif
#else
#ifndef SEG_BUFF

    /*    if(size > 130){
            printf("size: %d\n",size);
        }
    */

    /* Release every recorded successor: decrement its ref count and spawn it
     * when the count reaches -1 (all predecessors complete). */
    for (j = 0; j < size; ++j) {
        t = self->successor_tasks[j];
#ifdef TARG_SW5
        my_atomic_add(new, &(t->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new, t->ref_count_t);
#endif
#ifdef DEBUG
        alog_debug("rank%d, execute ref_count:%d\n", my_mpi_rank, t->ref_count_t);
#endif
        if (new == -1) {
#ifdef SUCC_BATCH
            destid = t->affinity_id;
            if (destid == sche_num_threads) {
                spawn_to_id(t);
            } else {  // stage the successor for its slave core
                if (M2S_detail_temp_64[destid].task_num < PENDING_TASK_NUM) {
                    M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num] = t;
                    M2S_detail_temp_64[destid].task_num++;
                } else {
                    printf("m2s_temp[%d] is full\n", destid);
                    assert(0);
                }
            }
#else
            spawn_to_id(t);
#endif
        }
    }

    /*
    for( j = 0; j < size; ++j)
      if( t = self->successor_tasks[j])
      {
          main_mutex_lock(&(t->ref_lock));
#ifdef TARG_SW5
          my_atomic_add(new,&(t->ref_count_t));
#elif TARG_SW9
          my_atomic_add(new,t->ref_count_t);
#endif
          my_mutex_unlock(&(t->ref_lock));
#ifdef DEBUG
          alog_debug("rank%d, execute ref_count:%d\n",my_mpi_rank,t->ref_count_t);
#endif
          if(new==-1){
#ifdef SUCC_BATCH
              destid = t->affinity_id;
              if (t->state<1) {
                    my_mutex_unlock(&(self->task_base.successor_lock));
                    while (t->state<1) {
                    }
                    main_mutex_lock(&(self->task_base.successor_lock));
                    if (ref_count(t) == -1 && pre_count(t) != 0) {
                    if(destid==sche_num_threads){
                       spawn_to_id(t);
                    }else{//collect successor tasks
                       if(M2S_detail_temp_64[destid].task_num<PENDING_TASK_NUM){
                        M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num]=t;
                       M2S_detail_temp_64[destid].task_num++;
                     }else{
                    printf("m2s_temp[%d] is full\n",destid);
                    assert(0);
                  }
              }
                    }
                }else if (t->state==1&&ref_count(t) == -1 && pre_count(t) != 0) {
                    if(destid==sche_num_threads){
                       spawn_to_id(t);
                    }else{//collect successor tasks
                       if(M2S_detail_temp_64[destid].task_num<PENDING_TASK_NUM){
                        M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num]=t;
                       M2S_detail_temp_64[destid].task_num++;
                     }else{
                    printf("m2s_temp[%d] is full\n",destid);
                    assert(0);
                  }
              }
                }
#else
            if (t->state<1) {
                my_mutex_unlock(&(self->task_base.successor_lock));
                while (t->state<1) {
                }
                main_mutex_lock(&(self->task_base.successor_lock));
                if (ref_count(t) == -1 &&pre_count(t) != 0) {
                    spawn_to_id(t);
                }
            }
            else if (t->state==1 && ref_count(t) == -1 && pre_count(t) != 0) {
                    spawn_to_id(t);
            }
#endif
           }
      }
    */

    vert = self->vertical_task;
    //    alog_debug("self addr: %x", self);
    //    alog_debug("vert addr: %x", vert);
    //    //after composite execute reset build ctx
    //    if (self->my_type == COMPOSITE_TASK) {
    //        cur_super_taskptr = NULL;
    //    }

#ifdef _RETURN_VERTICAL
    if (vert) {
        main_mutex_lock(&(vert->ref_lock));
#ifdef TARG_SW5
        my_atomic_add(new, &(vert->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new, (vert->ref_count_t));
#endif
        my_mutex_unlock(&(vert->ref_lock));
        if (new == -1) {
            if (vert->state < 1) {
                my_mutex_unlock(&(self->task_base.successor_lock));
                while (vert->state < 1) {
                }
                main_mutex_lock(&(self->task_base.successor_lock));
                if (ref_count(vert) == -1 && pre_count(vert) != 0 &&
                    (vert->affinity_id) == total_num_threads) {
#ifdef ACEMESH_SCHEDULER_PROFILING
                    slave_inc_reuse_distance((struct aceMesh_task *)(vert), dist);
#endif
                    my_atomic_add(val, &(self->task_base.state));
                    my_mutex_unlock(&(self->task_base.successor_lock));
                    return vert;
                } else if (ref_count(vert) == -1 && pre_count(vert) != 0) {
                    spawn_to_id(vert);
                }
            } else if (vert->state == 1 && ref_count(vert) == -1 && pre_count(vert) != 0) {
                if ((vert->affinity_id) == total_num_threads) {
#ifdef ACEMESH_SCHEDULER_PROFILING
                    slave_inc_reuse_distance((struct aceMesh_task *)(vert), dist);
#endif
                    my_atomic_add(val, &(self->task_base.state));
                    my_mutex_unlock(&(self->task_base.successor_lock));
#ifdef THREAD_TRACE_LEVEL_1
                    trace_time = rpcc() - begin_time;
                    trace_print(trace_time, '0', RTS_event_end);
#endif

                    return vert;
                } else {
                    spawn_to_id(vert);
                }
            }
        }
    }
    // return NULL;
#else

    if (vert) {
        alog_debug("before my_atomic_add: %d", vert->ref_count_t);
#ifdef TARG_SW5
        my_atomic_add(new, &(vert->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new, (vert->ref_count_t));
#endif
        alog_debug("after my_atomic_add vert: %x", vert);
        if (new == -1) {
            /*#ifdef ACEMESH_SCHEDULER_PROFILING
                        slave_inc_reuse_distance((struct aceMesh_task*)(vert), dist);
            #endif*/

#ifdef SUCC_BATCH
            destid = vert->affinity_id;
            if (destid == sche_num_threads) {
                /* BUGFIX: was spawn_to_id(t) -- that spawned the stale loop
                 * variable from the successor loop above instead of the
                 * vertical task whose ref count was just consumed (compare
                 * the non-SUCC_BATCH branch below, which spawns vert). */
                spawn_to_id(vert);
            } else {  // stage the successor for its slave core
                if (M2S_detail_temp_64[destid].task_num < PENDING_TASK_NUM) {
                    M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num] =
                        vert;
                    M2S_detail_temp_64[destid].task_num++;
                } else {
                    printf("m2s_temp[%d] is full\n", destid);
                    assert(0);
                }
            }

#else
            spawn_to_id(vert);
#endif
        }
    }

    /*
    if(vert)
    {
        alog_debug("before my_atomic_add: %d", vert->ref_count_t);
       main_mutex_lock(&(vert->ref_lock));
#ifdef TARG_SW5
        my_atomic_add(new,&(vert->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new,(vert->ref_count_t));
#endif
        my_mutex_unlock(&(vert->ref_lock));
        alog_debug("after my_atomic_add vert: %x", vert);
        if(new==-1)
        {
    */
    /*#ifdef ACEMESH_SCHEDULER_PROFILING
                slave_inc_reuse_distance((struct aceMesh_task*)(vert), dist);
    #endif*/
    /*
#ifdef SUCC_BATCH
              destid = vert->affinity_id;
              if (vert->state<1) {
               my_mutex_unlock(&(self->task_base.successor_lock));
               while (vert->state<1) {
              }
               main_mutex_lock(&(self->task_base.successor_lock));
              if(ref_count(vert)==-1&&pre_count(vert)!=0){
                if(destid==sche_num_threads){
                    spawn_to_id(t);
              }else{//collect successor tasks
                  if(M2S_detail_temp_64[destid].task_num<PENDING_TASK_NUM){
                    M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num]=vert;
                    M2S_detail_temp_64[destid].task_num++;
                  }else{
                    printf("m2s_temp[%d] is full\n",destid);
                    assert(0);
                  }
              }
              }
              }else if (vert->state==1 && ref_count(vert) == -1 &&pre_count(vert) != 0) {
                  if(destid==sche_num_threads){
                         spawn_to_id(vert);
                    }else{//collect successor tasks
                  if(M2S_detail_temp_64[destid].task_num<PENDING_TASK_NUM){
                    M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num]=vert;
                    M2S_detail_temp_64[destid].task_num++;
                  }else{
                    printf("m2s_temp[%d] is full\n",destid);
                    assert(0);
                  }
              }
              }
#else
             if (vert->state<1) {
                my_mutex_unlock(&(self->task_base.successor_lock));
                while (vert->state<1) {
                }
                main_mutex_lock(&(self->task_base.successor_lock));
                if (ref_count(vert) == -1 &&pre_count(vert) != 0) {
                    spawn_to_id(vert);
                }
            }
            else if (vert->state==1 && ref_count(vert) == -1 && pre_count(vert) != 0) {
                    spawn_to_id(vert);
            }
#endif
        }
    }
    */
    // return NULL;
#endif

#ifdef SUCC_BATCH  // TODO

#ifdef SUCC_BATCH_V1
    /* Flush staged successors to each slave core whose mailbox is free
     * (m2s_flag == 1 means the slave has consumed the previous batch). */
    for (i = 0; i < sche_num_threads; i++) {
        total_task_num = M2S_detail_temp_64[i].task_num;
        if (total_task_num > 0) {  // this slave core has pending tasks
#ifdef LOCAL_FLAG_M2S
            if ((flag_tmp = h2ldm(m2s_flag, i, cgid)) == 1) {
#else
            if (m2s_flag[i] == 1) {
#endif
                for (k = 0; k < total_task_num; k++) {  // master publishes tasks
                    M2S_details_64[i].task_detail[k] = M2S_detail_temp_64[i].task_detail[k];
                }

                M2S_details_64[i].task_num = total_task_num;

                if (total_task_num > 0) {  // this slave core has pending tasks
                    asm volatile("memb\n");
#ifdef LOCAL_FLAG_M2S
                    h2ldm(m2s_flag, i, cgid) = 0;
#else
                    m2s_flag[i] = 0;
#endif
                    asm volatile("memb\n");
                    M2S_detail_temp_64[i].task_num = 0;
                    M2S_detail_temp_64[i].is_pending = 0;
                } else {
                    M2S_detail_temp_64[i].is_pending = 1;
                }
            }
        }
    }
#else
    emp_d_master_consum_all();
    /*for(i=0;i<sche_num_threads;i++){
        total_task_num=M2S_detail_temp_64[i].task_num;
        if(total_task_num>0){//this slave core has pending tasks
#ifdef LOCAL_FLAG_M2S
          while((flag_tmp=h2ldm(m2s_flag,i,cgid))==0);
#else
          while(m2s_flag[i]==0);
#endif
          for(k=0;k<total_task_num;k++){//master publishes tasks
              M2S_details_64[i].task_detail[k]
                    =M2S_detail_temp_64[i].task_detail[k];
          }
          M2S_details_64[i].task_num=total_task_num;
          asm volatile ("memb\n");
#ifdef LOCAL_FLAG_M2S
          h2ldm(m2s_flag,i,cgid)=0;
#else
          m2s_flag[i]=0;
#endif
          asm volatile ("memb\n");
          M2S_detail_temp_64[i].task_num=0;
       }
        }*/
#endif

#endif

#endif

#endif
    // my_atomic_add(val,&(self->task_base.state));
#ifdef BIG_CR_REGION
    /* BUGFIX: was "#ifdef BIG_CT_REGION", which mismatched the
     * "#ifndef BIG_CR_REGION" guard at the top of this function -- with
     * BIG_CR_REGION defined, "over" was never set and the successor lock was
     * never released (deadlock). */
    set_task_state(1, &(self->task_base.over));
    my_mutex_unlock(&(self->task_base.successor_lock));
#endif
#ifdef THREAD_TRACE_LEVEL_1
    trace_time = rpcc() - begin_time;
    trace_print(trace_time, '0', RTS_event_end);
#endif

    return NULL;
}

#endif

#ifdef SAVE_RW_INFO
/*
 * Record one read/write address entry on the task, growing the backing
 * rw_addrs array by ADDR_CHUNKMORE entries whenever it is full.  Aborts
 * the program when the grow fails.
 */
void add_addr(struct aceMesh_task *self, void *addr, int area_type, int rw_type, int is_neighbor)
{
    if (self->rw_addrs_count >= self->capacity_addrs) {
        /* Array is full: extend capacity and reallocate. */
        self->capacity_addrs += ADDR_CHUNKMORE;
        struct addr_info *grown = (struct addr_info *)realloc(
            self->rw_addrs, sizeof(struct addr_info) * (self->capacity_addrs));
        if (grown == NULL) {
            printf("cannot allocate space for new_rw_addrs\n");
            exit(1);
        }
        self->rw_addrs = grown;
    }

    /* Fill the next free slot and bump the count. */
    struct addr_info *slot = &(self->rw_addrs[self->rw_addrs_count]);
    slot->addr = addr;
    slot->area_type = area_type;
    slot->rw_type = rw_type;
    slot->is_neighbor = is_neighbor;

    ++(self->rw_addrs_count);
}
#endif

// struct aceMesh_task  aceMesh_task_constructor(aceMesh_task* self)
// Initialize an aceMesh_task in place: construct the embedded task base,
// reset type/links/counters, and allocate the initial successor array of
// ADDR_CHUNKINIT entries.  The allocator and alignment are selected by build
// flags (memory pool vs. heap, SIMD width, SW5 vs. SW9 target).
void aceMesh_task_constructor(aceMesh_task *self)
{
    // struct aceMesh_task self ;
    // self->task_base = task_constructor();
    task_constructor(&(self->task_base));
#ifdef REUSE_GRAPH
    self->task_base.is_base_task = false;
#endif
    self->vertical_task = NULL;
    self->my_type = NOT_SET;
    self->capacity_addrs = ADDR_CHUNKINIT;
    self->successor_tasks = NULL;
    self->last_successor = NULL;
    // Successor-array allocation: pool-backed when MEMORY_POOL is set, with
    // 32/64/16-byte alignment depending on SIMD width and target; otherwise
    // plain malloc (SW5) or an aligned allocator (SW9).
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) * ADDR_CHUNKINIT);
#elif TARG_SW9
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) * ADDR_CHUNKINIT);
#endif
#else
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) * ADDR_CHUNKINIT);
#endif
#else
#ifdef TARG_SW5
    self->successor_tasks = (struct task **)malloc(sizeof(struct task *) * ADDR_CHUNKINIT);
#elif TARG_SW9
    // self->successor_tasks=(struct task**)libc_uncached_aligned_malloc(sizeof(struct
    // task*)*ADDR_CHUNKINIT);
    self->successor_tasks =
        (struct task **)my_malloc_aligned(sizeof(struct task *) * ADDR_CHUNKINIT);
#else
    assert(0);  // no allocator for other targets without MEMORY_POOL
#endif
#endif
    self->successor_count = 0;
    // self->loop_count=0;	//add by gxr 2017/03/13
    self->loop_id = 0;
    self->task_id = 0;
#ifdef DEBUG_GRAPH
    self->ref_count_for_check = 0;
    self->is_joint = 0;
#endif
#ifdef MASTER
#ifdef MPI_SURPPORT
    self->suspend = 0;
#endif
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
    self->reuse_distance = 0;
#endif
#ifdef SAVE_RW_INFO
    // NOTE(review): unlike add_addr's realloc, this malloc result is not
    // checked for NULL before first use -- confirm whether that is acceptable.
    self->rw_addrs = NULL;
    self->rw_addrs = (struct addr_info *)malloc(sizeof(struct addr_info) * ADDR_CHUNKINIT);
    self->rw_addrs_count = 0;
#endif
#ifdef AUTO_PARTITION
    self->group_id = -1;
#endif
#ifdef SUPPORT_PARTITION
    self->my_part_id = -1;
#endif
    // return self;
}

// Release per-task resources allocated by aceMesh_task_constructor: the
// successor array (via the deallocator matching the build's allocator) and,
// under SAVE_RW_INFO, the rw_addrs array.
// NOTE(review): with MEMORY_POOL defined the successor array is not freed
// here (pool-owned memory -- TODO confirm); on targets other than SW5/SW9 the
// free is only logged as skipped.
void aceMesh_task_destructor(aceMesh_task *p_acemesh_task)
{
#ifndef MEMORY_POOL
#ifdef TARG_SW5
    free(p_acemesh_task->successor_tasks);
#elif TARG_SW9
    my_free_aligned(p_acemesh_task->successor_tasks);
#else
    printf("losing free successor\n");
#endif
#endif
#ifdef SAVE_RW_INFO
    free(p_acemesh_task->rw_addrs);
#endif
}

/* Total number of downstream tasks: the recorded successors plus the
 * vertical (chained) task when one is attached. */
int get_total_successor_sum(struct aceMesh_task *self)
{
    int total = self->successor_count;
    if (self->vertical_task != NULL) {
        total += 1;
    }
    return total;
}

// Attach task t as the "vertical" (chained) successor of self and consume one
// of t's references.  If self has already finished (over == 1) the attach is
// skipped entirely.  The over-check is performed once without the lock (fast
// path) and again under self->task_base.successor_lock to close the race with
// execute(), which sets over under the same lock.
void add_end_successor(struct aceMesh_task *self, struct task *t)
{
    /*
    self->vertical_task = t;
    main_mutex_lock(&(t->ref_lock));
    decrement_ref_count(t);
    my_mutex_unlock(&(t->ref_lock));
    */
#ifndef WITHOUT_CONCUR_OVER_STATE
    // Unlocked fast path: predecessor already finished, nothing to attach.
    if (self->task_base.over == 1) {
        return;
    }
#endif
//   printf("add_end_successor\n");
/*
#ifdef SECONDARY_LOCK
    while(self->task_base.slave);
    my_atomic_add(&(self->task_base.master));
    if(self->task_base.slave){
    my_atomic_sub(&(self->task_base.master));
    while(self->task_base.slave);
    my_atomic_add(&(self->task_base.master));
    }
#else
*/
#ifdef UNBLOCK_AGENT
    my_add_end_successor(self);

#else
    // main_mutex_lock(&(master_lock));
    main_mutex_lock(&(self->task_base.successor_lock));
    // #endif
    // Re-check under the lock: execute() may have finished self meanwhile.
    if (self->task_base.over == 1) {
        my_mutex_unlock(&(self->task_base.successor_lock));
        // my_mutex_unlock(&(master_lock));
        return;
    }
    self->vertical_task = t;
    int val;
#ifdef TARG_SW5
    my_atomic_sub(val, &(t->ref_count_t));
    my_mutex_unlock(&(self->task_base.successor_lock));
#else
    // printf("before_sub ref_count:%d,state:%d\n",t->ref_count_t,self->task_base.over);
    // main_mutex_lock(&(t->ref_lock));
    // NOTE(review): this one-argument my_atomic_sub does not match the
    // two-argument SW5 macro defined above -- a non-SW5 definition is
    // presumably provided elsewhere; TODO confirm.
    my_atomic_sub(&(t->ref_count_t));
    // t->ref_count_t=t->ref_count_t-1;
    // asm volatile("memb\n");
    // my_mutex_unlock(&(t->ref_lock));
    // printf("after_sub ref_count:%d\n",t->ref_count_t);
    my_mutex_unlock(&(self->task_base.successor_lock));
    // my_mutex_unlock(&(master_lock));
#endif
#endif
}
// unsigned long main_succ=0,main_temp=0;
// int main_num=0;
int add_successor(struct aceMesh_task *self, struct aceMesh_task *t)
{
#ifdef ACEMESH_GRAPH_BUILD_PROFILING
#ifdef FINE_PROF
    unsigned long add_start = rpcc();
#endif
#endif
// main_temp=rpcc();
#ifndef WITHOUT_CONCUR_OVER_STATE
    // if(self->task_base.state<2){
    if (self->task_base.over == 1) {
#ifdef ACEMESH_GRAPH_BUILD_PROFILING
#ifdef FINE_PROF
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
#endif
        return 0;
    }
#endif
#ifdef UNBLOCK_AGENT
    my_add_successor(self);
#else
    temp_add_t = rpcc();
    ++add_times;
    // printf("add_successor\n");
    main_mutex_lock(&(self->task_base.successor_lock));
    // main_mutex_lock(&(master_lock));
    /*
     while(self->task_base.slave);
      my_atomic_add(&(self->task_base.master));
      if(self->task_base.slave){
       my_atomic_sub(&(self->task_base.master));
       while(self->task_base.slave);
       my_atomic_add(&(self->task_base.master));
            }
    */
    if (self->task_base.over == 1) {
        my_mutex_unlock(&(self->task_base.successor_lock));
        //    my_mutex_unlock(&(master_lock));
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        add_time += rpcc() - temp_add_t;
        return 0;
    } else {
        int i;
        int val = 0;
        if (self->last_successor == t) {
            my_mutex_unlock(&(self->task_base.successor_lock));
            // my_mutex_unlock(&(master_lock));
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
            buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
            add_time += rpcc() - temp_add_t;
            return 0;
        }
        /*
        if (self->task_base.state<2){
        if(t == NULL)
        {
            my_mutex_unlock(&(self->task_base.successor_lock));
            return 0;
        }
        for(i = 0; i < self->successor_count; ++i)
        {
        struct task* itr = self->successor_tasks[i];
            if(itr == (struct task*)t)
            {
                my_mutex_unlock(&(self->task_base.successor_lock));
                return 0;
            }
        }
        */
        if (self->vertical_task == (struct task *)t) {
            // my_mutex_unlock(&(master_lock));
            my_mutex_unlock(&(self->task_base.successor_lock));
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
            buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
            add_time += rpcc() - temp_add_t;
            return 0;
        }
        if (self->successor_count >= self->capacity_addrs) {
            //        printf("\nsuccessor_count=%d,capacity=%d\t",self->successor_count,self->capacity_addrs);
            //        fflush(stdout);
            struct task **new_succ_task;
            self->capacity_addrs += ADDR_CHUNKMORE;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
            new_succ_task = (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) *
                                                                       self->capacity_addrs);
#elif TARG_SW9
            new_succ_task = (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) *
                                                                       self->capacity_addrs);
#endif
#else
            new_succ_task = (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) *
                                                                       self->capacity_addrs);
#endif
            memcpy(new_succ_task, self->successor_tasks,
                   sizeof(struct task *) * (self->capacity_addrs - ADDR_CHUNKMORE));
#else
            printf("need realloc successor\n");
            fflush(stdout);
            printf("realloc=%d\t", sizeof(struct task *) * self->capacity_addrs);
            fflush(stdout);
            new_succ_task = (struct task **)realloc(self->successor_tasks,
                                                    sizeof(struct task *) * self->capacity_addrs);
#endif
#ifdef DEBUG
            alog_debug("realloc successor\n");
#endif
            if (new_succ_task == NULL) {
                printf("cannot allocate enough space for addr info!\n");
                exit(1);
            }
            self->successor_tasks = new_succ_task;
        }
        self->successor_tasks[self->successor_count] = (struct task *)t;
        ++(self->successor_count);
        self->last_successor = t;
#ifdef ACEMESH_SCHEDULER_PROFILING
        total_nedges++;
//    max_num_successor=mymax(max_num_successor,self->successor_count);
#endif
        // my_atomic_true(val,&(t->task_base.edge));
        // my_my_mutex_unlock(&(t->task_base.endtask));
        // my_mutex_unlock(&(self->task_base.successor_lock));
        // main_mutex_lock(&(t->task_base.ref_lock));

        // my_mutex_unlock(&(t->task_base.ref_lock));
#ifdef TARG_SW5
        my_atomic_sub(val, &(t->task_base.ref_count_t));
        my_mutex_unlock(&(self->task_base.successor_lock));
#else
        //   printf("before_sub ref_count:%d\n",t->task_base.ref_count_t);
        // main_mutex_lock(&(self->task_base.ref_lock));
        // t->task_base.ref_count_t=t->task_base.ref_count_t-1;
        my_atomic_sub(&(t->task_base.ref_count_t));
        // asm volatile("memb\n");
        //  my_mutex_unlock(&(t->task_base.ref_lock));
        // printf("after_sub ref_count:%d\n",t->task_base.ref_count_t);
        // my_mutex_unlock(&(master_lock));
        my_mutex_unlock(&(self->task_base.successor_lock));
#endif
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        add_time += rpcc() - temp_add_t;
        return 1;
    }
#endif
}

/*
 * Remove the dependence edge self -> t.
 *
 * Edge deletion is currently disabled: the original implementation (which
 * restored t's ref count and compacted successor_tasks) was retired, and the
 * function now unconditionally reports "nothing removed".
 *
 * Returns 0 always (number of edges removed).
 */
int del_successor(struct aceMesh_task *self, struct aceMesh_task *t)
{
    (void)self;
    (void)t;
    return 0;
}

#ifdef REUSE_GRAPH
/*
 * Snapshot the dependence graph rooted at self so it can be replayed
 * (REUSE_GRAPH): save each reachable task's ref count and mark it as
 * reused/stored. Recurses through the vertical child and every horizontal
 * successor; the `stored` flag on task_base terminates the traversal, so
 * already-visited nodes are skipped.
 *
 * NOTE(review): base tasks (is_base_task == true) are snapshotted in place
 * rather than recursed into — presumably they have no outgoing edges of
 * their own; confirm against the task declarations.
 */
void store_info(aceMesh_task *self)
{
    /* Already snapshotted on a previous visit: nothing to do. */
    if (self->task_base.stored) return;
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    unsigned long prof_start = rpcc();
#endif
    store_ref_count(self);
    set_reused_flag(self, true);

    struct task *child = self->vertical_task;
    if (child != NULL) {
        if (!child->is_base_task) {
            store_info((struct aceMesh_task *)child);
        } else if (!child->stored) {
            /* Base task: snapshot it directly, no recursion. */
            store_ref_count(child);
            set_reused_flag(child, true);
            set_stored(child, true);
        }
    }

    int idx;
    for (idx = 0; idx < self->successor_count; ++idx) {
        struct task *succ = self->successor_tasks[idx];
        if (!succ->is_base_task) {
            store_info((struct aceMesh_task *)succ);
        }
    }

    /* Mark after the children so re-entry during recursion is still caught
       by the stored check at function entry. */
    set_stored(self, true);
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    buil_prof[RESTROE_GRAPH] += (rpcc() - prof_start);
#endif
}
#endif

/*
 * Register t as a dependent of self, preferring the dedicated "vertical"
 * child slot and falling back to the successor array when the slot is taken.
 *
 * Returns 1 when a new edge was recorded (and one of t's dependencies was
 * satisfied via my_atomic_sub on t's ref count); 0 when no edge is needed:
 * self already finished (task_base.over == 1), or the edge to t already
 * exists (vertical slot or most recent successor).
 *
 * Fixes vs. previous revision:
 *  - the inner `int val;` declarations shadowed the initialized outer `val`
 *    and were passed to my_atomic_sub uninitialized; the single initialized
 *    `val` is now used (matching the add_successor convention);
 *  - `%d` for a size_t expression in the realloc trace was undefined
 *    behavior on LP64 targets — now `%zu`;
 *  - `#elif TARG_SW9` hardened to `#elif defined(TARG_SW9)`;
 *  - the duplicated unlock/atomic/profiling tail of the two success paths
 *    is merged.
 */
int set_vertical_task(struct aceMesh_task *self, struct aceMesh_task *t)
{
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    unsigned long add_start = rpcc();
#endif
#ifndef WITHOUT_CONCUR_OVER_STATE
    /* Fast path: the predecessor already completed, no edge required. */
    if (self->task_base.over == 1) {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        return 0;
    }
#endif
    main_mutex_lock(&(self->task_base.successor_lock));
    /* Re-check under the lock: `over` may have flipped concurrently. */
    if (self->task_base.over == 1) {
        my_mutex_unlock(&(self->task_base.successor_lock));
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        return 0;
    }
    int val = 0;
    /* Duplicate-edge checks: t already in the vertical slot, or t was the
       most recently appended successor. */
    if (self->vertical_task == (struct task *)t || self->last_successor == t) {
        my_mutex_unlock(&(self->task_base.successor_lock));
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        return 0;
    }
    if (self->vertical_task != NULL) {
        /* Vertical slot occupied: append t to the successor array instead. */
        if (self->successor_count >= self->capacity_addrs) {
            struct task **new_succ_task;
            self->capacity_addrs += ADDR_CHUNKMORE;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
            new_succ_task = (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) *
                                                                       self->capacity_addrs);
#elif defined(TARG_SW9)
            new_succ_task = (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) *
                                                                       self->capacity_addrs);
#endif
#else
            new_succ_task = (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) *
                                                                       self->capacity_addrs);
#endif
            /* Pool allocations are fresh: copy the old (pre-grow) contents. */
            memcpy(new_succ_task, self->successor_tasks,
                   sizeof(struct task *) * (self->capacity_addrs - ADDR_CHUNKMORE));
#else
            printf("need realloc successor\n");
            fflush(stdout);
            /* %zu: sizeof yields size_t (previously %d — UB on 64-bit). */
            printf("realloc=%zu\t", sizeof(struct task *) * self->capacity_addrs);
            fflush(stdout);
            new_succ_task = (struct task **)realloc(
                self->successor_tasks, sizeof(struct task *) * self->capacity_addrs);
#endif
            if (new_succ_task == NULL) {
                printf("cannot allocate enough space for addr info!\n");
                exit(1);
            }
            self->successor_tasks = new_succ_task;
        }
        self->successor_tasks[self->successor_count] = (struct task *)t;
        ++(self->successor_count);
        self->last_successor = t;
#ifdef ACEMESH_SCHEDULER_PROFILING
        total_nedges++;
#endif
    } else {
        self->vertical_task = (struct task *)t;
    }
    /* Common tail: the new edge satisfies one of t's pending dependencies. */
    my_atomic_sub(val, &(t->task_base.ref_count_t));
    my_mutex_unlock(&(self->task_base.successor_lock));
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
    return 1;
}

#ifdef PAPI_PERFORMANCE
/* Stub: PAPI counter start not yet wired up. `(void)` gives a real prototype
 * (an empty `()` parameter list is obsolescent, unprototyped C). */
void papi_performance_start(void) {}
/* Stub: PAPI counter stop/readout not yet wired up. `(void)` gives a real
 * prototype (an empty `()` parameter list is obsolescent, unprototyped C). */
void papi_performance_end(void) {}
#endif

/* Accessor: id of the loop this task was generated from. */
int get_loop_id(struct aceMesh_task *self)
{
    return self->loop_id;
}

/* Accessor: this task's id. */
int get_task_id(struct aceMesh_task *self)
{
    return self->task_id;
}
/* Mutator: record the id of the loop this task was generated from. */
void set_loop_id(struct aceMesh_task *self, int id)
{
    self->loop_id = id;
}

/* Mutator: record this task's id. */
void set_task_id(struct aceMesh_task *self, int id)
{
    self->task_id = id;
}

#ifdef DEBUG_GRAPH
/* Accessor: the shadow ref count used by the DEBUG_GRAPH consistency check. */
int get_ref_count_for_check(struct aceMesh_task *self)
{
    return self->ref_count_for_check;
}

/* Increment the shadow ref count and return the new value. */
int inc_ref_count_for_check(struct aceMesh_task *self)
{
    return ++(self->ref_count_for_check);
}
/* Overwrite the shadow ref count used by the DEBUG_GRAPH consistency check. */
void set_ref_count_for_check(struct aceMesh_task *self, int value)
{
    self->ref_count_for_check = value;
}

/* Mark this task as a join/end point of the graph. */
void set_joint(struct aceMesh_task *self)
{
    self->is_joint = 1;
}
/* Nonzero when this task has been marked as a join/end point. */
int is_end_task(struct aceMesh_task *self)
{
    return self->is_joint;
}

/* Accessor: the vertical (dedicated-slot) child, cast to aceMesh_task. */
struct aceMesh_task *get_vertical_task(struct aceMesh_task *self)
{
    struct task *child = self->vertical_task;
    return (struct aceMesh_task *)child;
}
/* Accessor: the i-th horizontal successor, cast to aceMesh_task.
 * No bounds check — i must be < successor_count. */
struct aceMesh_task *get_successor_task(struct aceMesh_task *self, int i)
{
    struct task *succ = self->successor_tasks[i];
    return (struct aceMesh_task *)succ;
}

void dfs(struct aceMesh_task *self, int *task_nums, int deep_length)
{
    unsigned int i;
    if (self->is_joint == 0 && get_successor_sum(self) == 0) {
        printf("alone task! in deep length : %d ", deep_length);
        printf("warining! add end to this task");
        assert(0);
    }
    // assert(0);
    --task_nums;
    if (self->vertical_task != NULL) {
        struct aceMesh_task *t = (struct aceMesh_task *)(self->vertical_task);
        if (t->ref_count_for_check == -1) {
            t->ref_count_for_check = ref_count((t->task_base));
        } else {
            assert(t->ref_count_for_check > 0);
        }
        --(t->ref_count_for_check);
        if (t->ref_count_for_check == 0) {
            dfs(t, task_nums, deep_length + 1);
        }
    }
    for (i = 0; i < self->successor_count; ++i) {
        struct task *tmp;
        if (tmp = self->successor_tasks[i]) {
            struct aceMesh_task *t = (struct aceMesh_task *)(tmp);
            if (t->ref_count_for_check == -1) {
                t->ref_count_for_check = ref_count((t->task_base));
            } else {
                // std::cout<<"task ref_count :"<< t->ref_count() << std::endl;
                assert(t->ref_count_for_check > 0);
            }
            --(t->ref_count_for_check);
            if (t->ref_count_for_check == 0) {
                dfs(t, task_nums, deep_length + 1);
            }
        }
    }
}

/* Number of horizontal successors currently recorded on this task. */
int get_successor_sum(struct aceMesh_task *self)
{
    return self->successor_count;
}

#endif
