#include <assert.h>
#include <stdarg.h>
#include <time.h>

#include "aceMesh_clang.h"
#include "aceMesh_stack.h"
#include "aceMesh_task.h"
#include "aceMesh_utils.h"
#include "ldm_privatequeue.h"
#include "reg_transfer.h"
#include "scheduler.h"
#include "simd.h"
#include "slave.h"
#include "task.h"
//#include "args.h"
__uncached volatile  int slave_lock __attribute__((aligned(32)));
__uncached volatile int master_lock __attribute__((aligned(32)));
#ifdef THREAD_TRACE
#include "thread_trace.h"
extern unsigned long diff;
__thread_local_fix unsigned long begin_time_s;
__thread_local_fix unsigned long trace_time_s;
__thread_local_fix int trace_myid;
__thread_local_fix char *trace_id;
__thread_local_fix char traces_id_s[SLAVE_SIZE];
__thread_local_fix unsigned long traces_time_s[SLAVE_SIZE / 4];
__thread_local_fix int count_s = 0;
extern int total[64];
#endif
//extern volatile int eq_status[64];
//extern volatile uint64_t eq_ptr_slot[64];
#if defined(CONCURRENT_CONSTRUCT_GRAPH)&&defined(TARG_SW9)&&!defined(GS_AGENT)
// #include "task_dag_graph.h"
//__thread_local_fix volatile int con_close;
#include "MemPool.h"
extern struct MemPool pool;
extern void *acemesh_myalloc_aligned_64(int datasize);
extern __uncached volatile unsigned long con_status;
extern __uncached volatile unsigned long con_dest_ptr;
extern volatile unsigned long con_graph;
extern __uncached volatile unsigned long con_src_ptr;
//extern volatile bool con_is_nei;
extern __uncached volatile unsigned long con_task_ptr[MULTI_TASK];
extern __uncached volatile unsigned long temp_task[MULTI_TASK];
//extern __uncached volatile int spawn_status[MULTI_STATUS];
extern __uncached volatile int con_sum_pre;
extern __uncached volatile unsigned long con_task_num;
extern __uncached volatile unsigned long main_task[BLOCK_SIZE],main_head,main_tail;
// aceMesh_task* dest, int type, tuple_rw_task& src, bool is_neighbor;
#endif

#ifdef EMP_PROFILING
extern __thread_local_fix unsigned long once_send_time,once_recv_time,rma_time;
#endif  
#ifdef EMP_QUEUE
#include "emp_queue.h"
#ifdef TARG_SW9
//__thread_local_share  EQ_queue global_cent_queue; // __attribute__ ((aligned(64)));
//__thread_local_fix struct  task *buff=NULL;
//__thread_local_fix struct client_buff task_buff_client;
//__thread_local_fix struct chip_buff task_buff[64];
#endif
#endif
#ifdef MLOCK
#include "MS_final_fair.h"
#endif

#ifdef TARG_SW5
#include "ldm_malloc.h"
#endif

#if defined(TARG_SW5) || defined(TARG_SW9)
#include <stdbool.h>
#endif

#ifdef LOCAL_FLAG
#ifdef TARG_SW9
extern unsigned long close_all_threads_addr[64];
extern unsigned long is_run_addr[64];
#endif
__thread_local volatile int close_all_threads;
__thread_local volatile int is_run;
#else
#ifdef TARG_SW5
extern volatile int is_run;
extern volatile int close_all_threads;
#elif TARG_SW9
extern __uncached volatile int is_run;
extern __uncached volatile int close_all_threads;
#endif
#endif

#if defined(EMP_RANDOM_STEALING) && defined(TARG_SW9)
__thread_local_fix volatile int steal_id[3]={0};
__thread_local_fix volatile int steal_i=0;
#define remote_ldm(coreid,ldm_var) ((unsigned long)&ldm_var | ((unsigned long)coreid << 20 ) |(1ULL << 45))
#define remote_ldm_addr(coreid,ldm_var_addr) ((unsigned long)ldm_var_addr | ((unsigned long)coreid << 20 ) |(1ULL << 45))
__thread_local_fix int steal_status=1;
extern __thread_local_fix struct task_chip_buff all_chip_buff[64];
#endif
extern __thread_local_fix int recv_num;
#if (defined(_SERIAL_QUEUE) && defined(MASTER)) || (defined(_CIRCULAR_QUEUE) && defined(MASTER))
#ifdef LOCAL_FLAG_M2S
__thread_local_fix volatile struct task *from_master = NULL;
__thread_local_fix volatile int master_flag = 0;
#else
volatile struct task *from_master[64] = {NULL};
volatile int master_flag[64] = {0};
#endif
#endif

#ifdef SEG_BUFF
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
// status for pending_successor_task
__thread_local volatile int l_seg_succ = SEG_MASTER;
// status for pending_spawn_task
__thread_local volatile int l_seg_spawn = SEG_SLAVE;
#ifdef TARG_SW9
extern unsigned long seg_succ_addr[64];
extern unsigned long seg_spawn_addr[64];
#endif
#endif

#if defined(MMEM_SEG) || defined(TWIN_SEG)
extern __uncached volatile int seg_succ;
extern __uncached volatile int seg_spawn;
#endif
#endif

__thread_local_fix int total_tiles_num;
__thread_local_fix unsigned long cycle_times_start;
__thread_local_fix unsigned long cycle_times_end;
__thread_local_fix double pure_exec_times = 0.0;
__thread_local_fix double task_pure_exec_times = 0.0;
__thread_local_fix double pure_task_compute_times = 0.0;
__thread_local_fix double pure_task_dma_times = 0.0;
__thread_local_fix long pure_task_num = 0;
__thread_local_fix int no = 0;
//__thread_local_fix int reuse[LDM_DIRECTORY_SIZE];
//__thread_local_fix int varno[LDM_DIRECTORY_SIZE];
__thread_local_fix struct aceMesh_task *tmp;

#ifdef SUCC_BATCH
#ifdef LOCAL_FLAG_M2S
__thread_local_fix volatile int m2s_flag = 1;
#else
volatile int m2s_flag[64] = {1};
#endif

extern struct m2s_detail M2S_details_64[64];
#endif

extern struct task *slave_user_execute(ci_task *self);

extern __thread_local_fix int pop;
extern double all_pure_exec_time[65];
extern double all_task_pure_exec_time[65];
extern double all_pure_task_compute_time[65];
extern double all_pure_task_dma_time[65];
extern long all_pure_task_num[65];
extern int sche_num_threads;
extern int total_num_threads;
__thread_local_fix int local_sche_num_threads;
__thread_local_fix int local_total_num_threads;

#ifdef ACEMESH_SCHEDULER_PROFILING
extern __thread_local_fix long sum_vert_times;
extern __thread_local_fix int maxReuseChain;
extern __thread_local_fix long reuse_times;
extern long *all_threads_sum_vert_times;
extern long *all_threads_reuse_times;
extern int *all_threads_maxReuseChain;
extern __thread_local_fix unsigned short max_ldmq;
extern __thread_local_fix long num_fulls;
extern long *all_nfull;
extern __thread_local_fix int local_num_succ;
extern int num_successor[64];
#endif

#ifdef ACEMESH_PROFILING_INST
extern unsigned long inst_perf[64];
extern __thread_local unsigned long local_inst_perf;
#endif

#ifdef TEMP_AFFI
extern int my_mpi_rank;
#endif
#ifdef ACEMESH_PARA
__thread_local_fix unsigned long t_init;
__thread_local_fix unsigned long b_build;
__thread_local_fix char ldm_npar[BUF_SIZE];
__thread_local_fix int global_id1, global_id2;
__thread_local_fix int dma_id1, dma_id2;
__thread_local_fix int local_id1, local_id2;
__thread_local_fix int pre_id = -1;
__thread_local volatile unsigned long ace_put_reply;
extern unsigned long time_init, tt_build;
extern char npar[65][MAXT];
#endif

#ifdef _BLACKROW
extern int black_topo;
#endif

#define NO_AFFINITY -1

__thread_local_fix int my_id;

#if defined(_SERIAL_QUEUE)
#ifdef LOCAL_MULTI_PRIORI
// using multiple task queues to simulate multiple priorities queue
__thread_local_fix struct aceMesh_stack *my_private_queue[LOCAL_MULTI_SIZE];
#else
__thread_local_fix struct aceMesh_stack *my_private_queue;
#if defined(ASYN_INSERT_Q)
__thread_local_fix struct concurrent_aceMesh_stack *my_lower_private_queue;
#endif
#endif
#else

#if defined(_CIRCULAR_QUEUE)
__thread_local_fix struct task **my_mqueue_base;
__thread_local_fix int mqueue_top;
__thread_local_fix int mqueue_bottom;
__thread_local_fix volatile int init_flag = 0;
#else
__thread_local_fix struct concurrent_aceMesh_stack *my_private_queue;
#ifdef USE_PRIORITY_QUEUE
__thread_local_fix struct concurrent_aceMesh_stack *my_priority_queue;
#endif
#endif
#endif

#if defined(_CIRCULAR_QUEUE) || defined(_SERIAL_QUEUE)
#ifdef TARG_SW9
extern unsigned long mqueue_empty_addr[64];
#endif

#ifdef LOCAL_MULTI_PRIORI
extern __thread_local_fix int mqueue_empty[LOCAL_MULTI_SIZE];
extern __thread_local_fix unsigned short ldmq_full[LOCAL_MULTI_SIZE];
extern __thread_local_fix unsigned short top[LOCAL_MULTI_SIZE];  // available
extern __thread_local_fix unsigned short bottom[LOCAL_MULTI_SIZE];
extern __thread_local_fix struct task *ldmqueue[LOCAL_MULTI_SIZE][LDM_QUEUE_SIZE];
#else
extern __thread_local_fix int mqueue_empty;
extern __thread_local_fix unsigned short ldmq_full;
extern __thread_local_fix unsigned short top;  // available
extern __thread_local_fix unsigned short bottom;
extern __thread_local_fix struct task *ldmqueue[LDM_QUEUE_SIZE];
#endif
#endif

#ifdef TARG_SW5
#ifdef DISTRIBUTED_SCHEDULER
extern generic_scheduler schedulers[65];
#else
extern generic_scheduler schedulers[2];
#endif
#elif TARG_SW9
extern generic_scheduler *schedulers;
#endif
char init_trace[2] = "";

#ifdef LDMM
extern struct layout_info tilelayout[MAX_DTILEBOOK];
extern struct access_arrays *invalidateTab;
__thread_local_fix int tiles_num[MAX_DTILEBOOK];
__thread_local_fix region_3d rgn_pool[2 * MAX_3d_BUF];
__thread_local_fix region_3d *req[MAX_3d_BUF];
__thread_local_fix region_3d *dtiles[MAX_3d_BUF];
//__thread_local_fix struct slot3d dtiles[MAX_3d_BUF];
__thread_local_fix region_2d rgn_pool_2d[2 * MAX_2d_BUF];
__thread_local_fix region_2d *req_2d[MAX_2d_BUF];
__thread_local_fix region_3d *dtiles_2d[MAX_2d_BUF];
//__thread_local_fix slot2d dtiles_2d[MAX_2d_BUF];
__thread_local_fix int flag;
__thread_local_fix int req3d, req2d;

#ifdef SWF
/* Fortran (SWF) binding: trailing-underscore entry point that forwards to
 * the slave-side 3-D tile post-request routine.
 * NOTE(review): calls slave_ldm_postreq_tile while this file defines
 * ldm_postreq_tile -- presumably the slave_ prefix is applied by the
 * athread toolchain at link time; confirm against the build setup. */
void slave_ldm_postreq_tile_swf_(void *mem_addr, int ndim, int *tileno, int wr_info)
{
    slave_ldm_postreq_tile(mem_addr, ndim, tileno, wr_info);
}
/* Fortran (SWF) binding for the 3-D tile slot-assignment routine; see
 * ldm_getreq_tile for the matching logic. */
void slave_ldm_getreq_tile_swf_(int *ldm_varno, int *ldm_reuse)
{
    slave_ldm_getreq_tile(ldm_varno, ldm_reuse);
}
/* Fortran (SWF) binding: forwards to the slave-side 2-D tile
 * post-request routine. */
void slave_ldm_postreq_tile_2d_swf_(void *mem_addr, int ndim, int *tileno, int wr_info)
{
    slave_ldm_postreq_tile_2d(mem_addr, ndim, tileno, wr_info);
}
/* Fortran (SWF) binding for the 2-D tile slot-assignment routine; see
 * ldm_getreq_tile_2d for the matching logic. */
void slave_ldm_getreq_tile_2d_swf_(int *ldm_varno, int *ldm_reuse)
{
    slave_ldm_getreq_tile_2d(ldm_varno, ldm_reuse);
}
#endif
#endif

#ifdef EMP_STEALING_PROF
extern unsigned emp_stealing_fail[64];
extern unsigned emp_stealing_success[64];
extern __thread_local_fix unsigned steal_fail;
extern __thread_local_fix unsigned steal_success;
#endif

#ifdef EMP_RANDOM_STEALING
extern __thread_local_fix unsigned always_fail;
#endif

#ifdef THREADS_IDLING_PROF
__thread_local_fix unsigned total_get_count = 0;
__thread_local_fix unsigned total_idle_count = 0;
extern unsigned thread_get_count[64];
extern unsigned thread_idle_count[64];
#endif

#ifdef ACEMESH_PROFILING_SUCC
extern unsigned long all_spawn_slave_task_time[65];
__thread_local_fix unsigned long local_spawn_slave_task_time = 0;
extern unsigned long all_spawn_slave_trans1_time[65];
__thread_local_fix unsigned long local_spawn_slave_trans1_time = 0;
extern unsigned long all_spawn_slave_trans2_time[65];
__thread_local_fix unsigned long local_spawn_slave_trans2_time = 0;
extern unsigned long all_spawn_master_task_time[65];
__thread_local_fix unsigned long local_spawn_master_task_time = 0;
extern unsigned long all_reg_put_time[64];
__thread_local_fix unsigned long local_reg_put_time;  // cycles

extern unsigned int all_reg_put_full_times[64];
extern unsigned int all_total_reg_put_times[64];
__thread_local_fix unsigned int local_reg_put_full_times = 0, total_local_reg_put_times = 0;

extern unsigned int all_succ_reg_own_times[64];
extern unsigned int all_succ_reg_direct_times[64];
extern unsigned int all_succ_reg_transfer_times[64];
__thread_local_fix unsigned int local_reg_own_times = 0, local_reg_direct_times = 0,
                                local_reg_transfer_times = 0;
#endif

/* Query the athread runtime for this slave core's id, cache it in the
 * thread-local my_id, and return it. */
int get_my_id()
{
    int id = athread_get_id(-1);
    my_id = id;
    return id;
}

/* Pick a pseudo-random victim thread id in [0, local_sche_num_threads)
 * for work stealing, seeded from the cycle counter.
 * Fix: declared static inline.  A plain C99 "inline" definition provides
 * no external symbol, so any call the compiler chooses not to inline
 * becomes an undefined reference at link time; "static inline" always
 * yields a usable definition in this translation unit. */
static inline int get_random_stealing_destid()
{
    unsigned long seed = rtc();
    // unsigned long seed=rand();
    // srand((unsigned int)time(NULL));
    // long int seed=random();
    /* NOTE(review): plain modulo has slight bias and assumes
     * local_sche_num_threads > 0 -- acceptable for victim selection. */
    return seed % local_sche_num_threads;
}

#ifdef LDMM
/* Register a 3-D tile request for the current task.
 * mem_addr: host address of the requested array tile; ndim: number of
 * dimensions (<= MAX_DIM); tileno: per-dimension tile indices;
 * wr_info: read/write intent (e.g. IN).
 * On first use, lazily initializes the region pools shared by the 3-D
 * and 2-D request paths. */
void ldm_postreq_tile(void *mem_addr, int ndim, int *tileno, int wr_info)
{
    int i, j;
    if (!flag) {
        /* one-time initialization of tile bookkeeping (shared with the
         * 2-D path; whichever post routine runs first performs it) */
        for (i = 0; i < MAX_DTILEBOOK; i++) {
            tiles_num[i] = tilelayout[i].Ntiles;
        }
        flag = 1;
        req3d = req2d = 0;
        for (i = 0; i < 2 * MAX_3d_BUF; i++) {
            rgn_pool[i].mem_addr = NULL;
            rgn_pool[i].wr = -1;
            rgn_pool[i].loop_id = -1;
            rgn_pool[i].ndim = -1;
            for (j = 0; j < MAX_DIM; j++) {
                rgn_pool[i].tileno[j] = -1;
            }
        }
        /* first half of each pool backs pending requests, second half
         * the resident tile slots */
        for (i = 0; i < MAX_3d_BUF; i++) {
            req[i] = &rgn_pool[i];
            dtiles[i] = &rgn_pool[i + MAX_3d_BUF];
        }
        for (i = 0; i < 2 * MAX_2d_BUF; i++) {
            rgn_pool_2d[i].mem_addr = NULL;
            rgn_pool_2d[i].wr = -1;
            rgn_pool_2d[i].loop_id = -1;
            rgn_pool_2d[i].ndim = -1;
            for (j = 0; j < MAX_DIM; j++) {
                rgn_pool_2d[i].tileno[j] = -1;
            }
        }
        for (i = 0; i < MAX_2d_BUF; i++) {
            req_2d[i] = &rgn_pool_2d[i];
            dtiles_2d[i] = &rgn_pool_2d[i + MAX_2d_BUF];
        }
    }
    /* Fix: was "req3d > MAX_3d_BUF", which let req3d == MAX_3d_BUF through
     * and wrote one slot past the end of req[MAX_3d_BUF]. */
    if (req3d >= MAX_3d_BUF) {
        printf("req>MAX_3d_BUF!");
        exit(0);
    }
    req[req3d]->mem_addr = mem_addr;
    req[req3d]->loop_id = tmp->loop_id;
    req[req3d]->wr = wr_info;
    req[req3d]->ndim = ndim;
    for (j = 0; j < ndim; j++) req[req3d]->tileno[j] = tileno[j];
    req3d++;
}

/* Register a 2-D tile request for the current task; mirrors
 * ldm_postreq_tile but appends to the 2-D request list (req_2d/req2d).
 * On first use, lazily initializes the region pools shared by the 3-D
 * and 2-D request paths. */
void ldm_postreq_tile_2d(void *mem_addr, int ndim, int *tileno, int wr_info)
{
    int i, j;
    if (!flag) {
        /* one-time initialization of tile bookkeeping (shared with the
         * 3-D path; whichever post routine runs first performs it) */
        for (i = 0; i < MAX_DTILEBOOK; i++) {
            tiles_num[i] = tilelayout[i].Ntiles;
        }
        flag = 1;
        req3d = req2d = 0;
        for (i = 0; i < 2 * MAX_3d_BUF; i++) {
            rgn_pool[i].mem_addr = NULL;
            rgn_pool[i].wr = -1;
            rgn_pool[i].loop_id = -1;
            rgn_pool[i].ndim = -1;
            for (j = 0; j < MAX_DIM; j++) {
                rgn_pool[i].tileno[j] = -1;
            }
        }
        for (i = 0; i < MAX_3d_BUF; i++) {
            req[i] = &rgn_pool[i];
            dtiles[i] = &rgn_pool[i + MAX_3d_BUF];
        }
        for (i = 0; i < 2 * MAX_2d_BUF; i++) {
            rgn_pool_2d[i].mem_addr = NULL;
            rgn_pool_2d[i].wr = -1;
            rgn_pool_2d[i].loop_id = -1;
            rgn_pool_2d[i].ndim = -1;
            for (j = 0; j < MAX_DIM; j++) {
                rgn_pool_2d[i].tileno[j] = -1;
            }
        }
        for (i = 0; i < MAX_2d_BUF; i++) {
            req_2d[i] = &rgn_pool_2d[i];
            dtiles_2d[i] = &rgn_pool_2d[i + MAX_2d_BUF];
        }
    }
    /* Fix: was "req2d > MAX_2d_BUF", which let req2d == MAX_2d_BUF through
     * and wrote one slot past the end of req_2d[MAX_2d_BUF]. */
    if (req2d >= MAX_2d_BUF) {
        printf("req>MAX_2d_BUF!");
        exit(0);
    }
    req_2d[req2d]->mem_addr = mem_addr;
    /* NOTE(review): the 3-D path uses tmp->loop_id here; this one uses
     * tmp->loop_count -- confirm the asymmetry is intentional. */
    req_2d[req2d]->loop_id = tmp->loop_count;
    req_2d[req2d]->wr = wr_info;
    req_2d[req2d]->ndim = ndim;
    for (j = 0; j < ndim; j++) req_2d[req2d]->tileno[j] = tileno[j];
    req2d++;
}

/* Return 1 when the two MAX_DIM-element tile-index vectors are equal in
 * every dimension, 0 otherwise. */
int is_same_tileno(short *dtileno, short *rtileno)
{
    int idx = 0;
    while (idx < MAX_DIM) {
        if (dtileno[idx] != rtileno[idx]) return 0;
        ++idx;
    }
    return 1;
}

/* Match each posted 3-D tile request (req[0..req3d-1]) against the
 * resident LDM tile slots (dtiles[]).  For request i the chosen slot
 * index is written to ldm_varno[i]; ldm_reuse[i] is set to 1 only when
 * the resident copy holds the same tile and no intervening loop wrote
 * (invalidated) it, so the DMA refresh can be skipped.  Requests whose
 * array is not resident receive a victim slot: empty slots first, then
 * any still-unclaimed resident slot.  Resets req3d to 0 on exit. */
void ldm_getreq_tile(int *ldm_varno, int *ldm_reuse)
{
    int i, j, k, p, n, inside, inside2;
    short vict_num;
    short ndim;
    //    short emptylist[total_tiles_num];
    short available[tiles_num[0]];  /* VLA: 1 while slot j is resident and unclaimed */
    short need_victim[req3d];       /* VLA: indices of requests that need a victim slot */
    void *tmp_req = NULL;

    total_tiles_num = tiles_num[0];
    ndim = req[0]->ndim;
    vict_num = 0;
    for (i = 0; i < total_tiles_num; i++)  // cur_id
    {
        if (dtiles[i]->mem_addr == NULL) {
            available[i] = 0;
        } else {
            available[i] = 1;
        }
    }
    /* pass 1: pair each request with a resident slot holding the same array */
    for (i = 0; i < req3d; i++) {
        // if(_MYID==0)
        // printf("req[%d]->add=%p,dtile[0]=%p,dtile[1]=%p,dtile[2]=%p\n",i,req[i]->mem_addr,dtiles[0]->mem_addr,dtiles[1]->mem_addr,dtiles[2]->mem_addr);
        inside2 = 0;
        for (j = 0; j < total_tiles_num; j++) {
            if (available[j] == 1) {
                if (req[i]->mem_addr == dtiles[j]->mem_addr) {
                    inside2 = 1;
                    // ldm_varno[i]=dtiles[j].slot_id;
                    ldm_varno[i] = j;
                    available[j] = 0;
                    if (req[i]->wr == IN) {
                        if (is_same_tileno(dtiles[j]->tileno, req[i]->tileno)) {
                            if (dtiles[j]->loop_id < req[i]->loop_id) {
                                /* scan the loops between the cached copy and
                                 * this request for a write overlapping it */
                                inside = 0;
                                for (k = dtiles[j]->loop_id + 1; k < req[i]->loop_id - 1; k++) {
                                    /* NOTE(review): bound reads invalidateTab[j].num
                                     * (j is a slot index) but entries are read from
                                     * invalidateTab[k] -- invalidateTab[k].num looks
                                     * intended; confirm */
                                    for (p = 0; p < invalidateTab[j].num; p++) {
                                        if (req[i]->mem_addr == invalidateTab[k].arrList[p].array) {
                                            inside = 1;
                                            for (n = 0; n < ndim; n++) {
                                                short *cur_rgn;
                                                cur_rgn =
                                                    &(invalidateTab[k].arrList[p].region[n][0]);
                                                if (cur_rgn[0] == -1) continue;
                                                /* NOTE(review): tileno indexed by the
                                                 * request index i inside this loop over
                                                 * dimension n -- tileno[n] was probably
                                                 * intended; confirm */
                                                if (cur_rgn[0] > req[i]->tileno[i] ||
                                                    req[i]->tileno[i] > cur_rgn[1]) {
                                                    inside = 0;
                                                    break;
                                                }
                                            }
                                            if (inside) break;
                                        }
                                    }
                                    if (inside) break;
                                }
                                if (!inside) {
                                    /* no invalidating write found: reuse resident copy */
                                    ldm_reuse[i] = 1;
                                    // printf("1,");
#ifdef ACEMESH_SCHEDULER_PROFILING
                                    reuse_times++;
#endif
                                    dtiles[j]->loop_id = req[i]->loop_id;
                                    break;
                                }
                            } else {
                                // printf("2,");
                                ldm_reuse[i] = 0;
                                dtiles[j]->loop_id = req[i]->loop_id;
                                dtiles[j]->wr = req[i]->wr;
                                break;
                            }
                        } else {
                            /* same array but different tile: refresh slot via
                             * pointer swap (request records old slot contents) */
                            // printf("3,");
                            ldm_reuse[i] = 0;
                            // #ifdef _SWAPP
                            tmp_req = req[i];
                            req[i] = dtiles[j];
                            dtiles[j] = tmp_req;
#ifdef _NO_SWAPP
                            dtiles[j]->mem_addr = req[i]->mem_addr;
                            dtiles[j]->loop_id = req[i]->loop_id;
                            dtiles[j]->wr = req[i]->wr;
                            for (k = 0; k < MAX_DIM; k++) {
                                dtiles[j]->tileno[k] = req[i]->tileno[k];
                            }
#endif
                            break;
                        }
                    } else {
                        /* non-IN access: always refresh */
                        // printf("4,");
                        ldm_reuse[i] = 0;
                        // #ifdef _SWAPP
                        tmp_req = req[i];
                        req[i] = dtiles[j];
                        dtiles[j] = tmp_req;
#ifdef _NO_SWAPP
                        dtiles[j]->mem_addr = req[i]->mem_addr;
                        dtiles[j]->loop_id = req[i]->loop_id;
                        dtiles[j]->wr = req[i]->wr;
                        for (k = 0; k < MAX_DIM; k++) {
                            dtiles[j]->tileno[k] = req[i]->tileno[k];
                        }
#endif
                        break;
                    }
                }
            }
        }
        if (inside2 == 0) {
            need_victim[vict_num] = i;
            vict_num++;
        }
    }
    /* pass 2: assign victim slots to unmatched requests -- empty slots
     * first (scan resumes from k), then unclaimed resident slots */
    inside2 = 0;
    k = 0;
    for (i = 0; i < vict_num; ++i) {
        p = need_victim[i];
        inside = 0;
        if (inside2 == 0) {
            for (j = k; j < total_tiles_num; j++) {
                if (dtiles[j]->mem_addr == NULL) {
                    // #ifdef _SWAPP
                    tmp_req = req[p];
                    req[p] = dtiles[j];
                    dtiles[j] = tmp_req;
// #else
#ifdef _NO_SWAPP
                    dtiles[j]->mem_addr = req[p]->mem_addr;
                    dtiles[j]->wr = req[p]->wr;
                    dtiles[j]->loop_id = req[p]->loop_id;
                    for (k = 0; k < MAX_DIM; k++) {
                        dtiles[j]->tileno[k] = req[p]->tileno[k];
                    }
#endif
                    // ldm_varno[p]=dtiles[j].slot_id;
                    ldm_varno[p] = j;
                    ldm_reuse[p] = 0;
                    // printf("5,");
                    inside = 1;
                    k = j;
                    break;
                }
            }
        }
        if (inside == 1)
            continue;
        else
            inside2 = 1;

        /* NOTE(review): this loop does not break after claiming a slot, so
         * one request is swapped through every remaining available slot and
         * ldm_varno[p] ends at the last one -- confirm whether a break was
         * intended after the swap */
        for (j = 0; j < total_tiles_num; j++) {
            if (available[j]) {
                /*                dtiles[j].info->mem_addr=req[p]->mem_addr;
                                dtiles[j].info->wr=req[p]->wr;
                                dtiles[j].info->loop_id=req[p]->loop_id;
                                for(k=0;k<MAX_DIM;k++)
                                {
                                    dtiles[j].info->tileno[k]=req[p]->tileno[k];
                                }
                                ldm_varno[p]=dtiles[j].slot_id;
                                ldm_reuse[p]=0;*/
                printf("6,");
                tmp_req = req[p];
                req[p] = dtiles[j];
                dtiles[j] = tmp_req;
                available[j] = 0;
                // ldm_varno[p]=dtiles[j].slot_id;
                ldm_varno[p] = j;
                ldm_reuse[p] = 0;
            }
        }
    }

    req3d = 0;
}

/* 2-D counterpart of ldm_getreq_tile: match each posted 2-D request
 * (req_2d[0..req2d-1]) against the resident slots (dtiles_2d[]), filling
 * ldm_varno[] with slot indices and ldm_reuse[] with reuse flags, then
 * assign victim slots to unmatched requests.  Resets req2d on exit. */
void ldm_getreq_tile_2d(int *ldm_varno, int *ldm_reuse)
{
    int i, j, k, p, n, inside, inside2;
    short vict_num;
    short ndim;
    //    short emptylist[total_tiles_num];
    short available[tiles_num[1]];  /* VLA: 1 while slot j is resident and unclaimed */
    short need_victim[req2d];       /* VLA: indices of requests that need a victim slot */
    void *tmp_req = NULL;

    total_tiles_num = tiles_num[1];
    ndim = req_2d[0]->ndim;
    vict_num = 0;
    for (i = 0; i < total_tiles_num; i++)  // cur_id
    {
        if (dtiles_2d[i]->mem_addr == NULL) {
            available[i] = 0;
        } else {
            available[i] = 1;
        }
    }
    /* pass 1: pair each request with a resident slot holding the same array */
    for (i = 0; i < req2d; i++) {
        // if(_MYID==0)
        // printf("req[%d]->add=%p,dtile[0]=%p,dtile[1]=%p,dtile[2]=%p\n",i,req[i]->mem_addr,dtiles[0]->mem_addr,dtiles[1]->mem_addr,dtiles[2]->mem_addr);
        inside2 = 0;
        for (j = 0; j < total_tiles_num; j++) {
            if (available[j] == 1) {
                if (req_2d[i]->mem_addr == dtiles_2d[j]->mem_addr) {
                    inside2 = 1;
                    // ldm_varno[i]=dtiles[j].slot_id;
                    ldm_varno[i] = j;
                    available[j] = 0;
                    if (req_2d[i]->wr == IN) {
                        if (is_same_tileno(dtiles_2d[j]->tileno, req_2d[i]->tileno)) {
                            if (dtiles_2d[j]->loop_id < req_2d[i]->loop_id) {
                                /* scan the loops between the cached copy and
                                 * this request for a write overlapping it */
                                inside = 0;
                                for (k = dtiles_2d[j]->loop_id + 1; k < req_2d[i]->loop_id - 1;
                                     k++) {
                                    /* NOTE(review): bound reads invalidateTab[j].num
                                     * (j is a slot index) but entries are read from
                                     * invalidateTab[k] -- invalidateTab[k].num looks
                                     * intended; confirm */
                                    for (p = 0; p < invalidateTab[j].num; p++) {
                                        if (req_2d[i]->mem_addr ==
                                            invalidateTab[k].arrList[p].array) {
                                            inside = 1;
                                            for (n = 0; n < ndim; n++) {
                                                short *cur_rgn;
                                                cur_rgn =
                                                    &(invalidateTab[k].arrList[p].region[n][0]);
                                                if (cur_rgn[0] == -1) continue;
                                                /* NOTE(review): tileno indexed by the
                                                 * request index i inside this loop over
                                                 * dimension n -- tileno[n] was probably
                                                 * intended; confirm */
                                                if (cur_rgn[0] > req_2d[i]->tileno[i] ||
                                                    req_2d[i]->tileno[i] > cur_rgn[1]) {
                                                    inside = 0;
                                                    break;
                                                }
                                            }
                                            if (inside) break;
                                        }
                                    }
                                    if (inside) break;
                                }
                                if (!inside) {
                                    /* no invalidating write found: reuse resident copy */
                                    ldm_reuse[i] = 1;
//                                printf("1,");
#ifdef ACEMESH_SCHEDULER_PROFILING
                                    reuse_times++;
#endif
                                    dtiles_2d[j]->loop_id = req_2d[i]->loop_id;
                                    break;
                                }
                            } else {
                                //                            printf("2,");
                                ldm_reuse[i] = 0;
                                dtiles_2d[j]->loop_id = req_2d[i]->loop_id;
                                dtiles_2d[j]->wr = req_2d[i]->wr;
                                break;
                            }
                        } else {
                            /* same array but different tile: refresh slot via
                             * pointer swap (request records old slot contents) */
                            //                        printf("3,");
                            ldm_reuse[i] = 0;
                            // #ifdef _SWAPP
                            tmp_req = req_2d[i];
                            req_2d[i] = dtiles_2d[j];
                            dtiles_2d[j] = tmp_req;
#ifdef _NO_SWAPP
                            dtiles_2d[j]->mem_addr = req_2d[i]->mem_addr;
                            dtiles_2d[j]->loop_id = req_2d[i]->loop_id;
                            dtiles_2d[j]->wr = req_2d[i]->wr;
                            for (k = 0; k < MAX_DIM; k++) {
                                dtiles_2d[j]->tileno[k] = req_2d[i]->tileno[k];
                            }
#endif
                            break;
                        }
                    } else {
                        /* non-IN access: always refresh */
                        //                      printf("4,");
                        ldm_reuse[i] = 0;
                        // #ifdef _SWAPP
                        tmp_req = req_2d[i];
                        req_2d[i] = dtiles_2d[j];
                        dtiles_2d[j] = tmp_req;
#ifdef _NO_SWAPP
                        dtiles_2d[j]->mem_addr = req_2d[i]->mem_addr;
                        dtiles_2d[j]->loop_id = req_2d[i]->loop_id;
                        dtiles_2d[j]->wr = req_2d[i]->wr;
                        for (k = 0; k < MAX_DIM; k++) {
                            dtiles_2d[j]->tileno[k] = req_2d[i]->tileno[k];
                        }
#endif
                        break;
                    }
                }
            }
        }
        if (inside2 == 0) {
            need_victim[vict_num] = i;
            vict_num++;
        }
    }
    /* pass 2: assign victim slots to unmatched requests -- empty slots
     * first (scan resumes from k), then unclaimed resident slots */
    inside2 = 0;
    k = 0;
    for (i = 0; i < vict_num; ++i) {
        p = need_victim[i];
        inside = 0;
        if (inside2 == 0) {
            for (j = k; j < total_tiles_num; j++) {
                if (dtiles_2d[j]->mem_addr == NULL) {
                    // #ifdef _SWAPP
                    tmp_req = req_2d[p];
                    req_2d[p] = dtiles_2d[j];
                    dtiles_2d[j] = tmp_req;
// #else
#ifdef _NO_SWAPP
                    dtiles_2d[j]->mem_addr = req_2d[p]->mem_addr;
                    dtiles_2d[j]->wr = req_2d[p]->wr;
                    dtiles_2d[j]->loop_id = req_2d[p]->loop_id;
                    for (k = 0; k < MAX_DIM; k++) {
                        dtiles_2d[j]->tileno[k] = req_2d[p]->tileno[k];
                    }
#endif
                    // ldm_varno[p]=dtiles[j].slot_id;
                    ldm_varno[p] = j;
                    ldm_reuse[p] = 0;
                    //                printf("5,");
                    inside = 1;
                    k = j;
                    break;
                }
            }
        }
        if (inside == 1)
            continue;
        else
            inside2 = 1;

        /* NOTE(review): this loop does not break after claiming a slot, so
         * one request is swapped through every remaining available slot and
         * ldm_varno[p] ends at the last one -- confirm whether a break was
         * intended after the swap */
        for (j = 0; j < total_tiles_num; j++) {
            if (available[j]) {
                /*                dtiles[j].info->mem_addr=req[p]->mem_addr;
                                dtiles[j].info->wr=req[p]->wr;
                                dtiles[j].info->loop_id=req[p]->loop_id;
                                for(k=0;k<MAX_DIM;k++)
                                {
                                    dtiles[j].info->tileno[k]=req[p]->tileno[k];
                                }
                                ldm_varno[p]=dtiles[j].slot_id;
                                ldm_reuse[p]=0;*/
                printf("6,");
                tmp_req = req_2d[p];
                req_2d[p] = dtiles_2d[j];
                dtiles_2d[j] = tmp_req;
                available[j] = 0;
                // ldm_varno[p]=dtiles[j].slot_id;
                ldm_varno[p] = j;
                ldm_reuse[p] = 0;
            }
        }
    }

    req2d = 0;
}
#endif

// three types ready queue
// type 1. seq queue, cyclic array!
#if defined(_CIRCULAR_QUEUE)
/* Fetch the next runnable task for this slave (circular-queue build).
 * Sources are tried in order: a task handed over directly by the master
 * (MASTER), tasks arriving over register communication, the LDM-resident
 * fast queue (_LDMQ), the per-thread circular queue in main memory, and
 * the shared stack (USE_SHARED_STACK).  Returns NULL when nothing is
 * ready. */
struct task *get_next_task()
{
    struct task *t = NULL;
    int succ;   /* success flag written by slave_ldmq_trypop */
    int nrecv;  /* NOTE(review): value of recvtaskfromRegNT() is never
                 * checked here -- confirm intentional */
#ifdef MASTER
    /* NOTE(review): master_flag/from_master are declared as 64-element
     * arrays unless LOCAL_FLAG_M2S is defined (see declarations above);
     * this unindexed use only matches the scalar configuration --
     * confirm the build always pairs these options */
    if (master_flag != 0) {
        t = (struct task *)from_master;
        asm volatile("memb\n");  /* barrier: read task before releasing flag */
        master_flag = 0;
        asm volatile("memb\n");  /* barrier: publish the cleared flag */
        return t;
    }
#endif
    nrecv = recvtaskfromRegNT();
#ifdef _LDMQ
    // #ifdef LOCAL_MULTI_PRIORI
    //    //    slave_ldmq_trypop(t,succ,t->priority_id);
    // #else
    // used inlined version, but not better
    slave_ldmq_trypop(t, succ);
    // #endif
    if (succ) return t;
    if (mqueue_top != mqueue_bottom) {
        // once empty, no need to check private queue anymore!
#ifdef _BULKTRANS
        if (slave_try_bulkpop(my_mqueue_base)) {
            slave_ldmq_trypop(t, succ);
#else
        if (slave_try_pop(my_mqueue_base, &t)) {
#endif
            return t;
        } else {
            return t;
        }  // get null
    }      // go to mqueue

#else  // serial_circular_queue, no ldmq
    if (slave_try_pop(my_mqueue_base, &t)) return t;
#endif
#ifdef USE_SHARED_STACK
    if (shared_queue.slave_try_pop(&t))  // bug fix
        return t;
#endif
    return NULL;
}

// type 2. seq queue, linked list!

#elif defined(_SERIAL_QUEUE)  // linked list

/*
 * get_next_task: serial-queue (linked list) variant of the slave-side task
 * fetch.  Sources are polled in priority order:
 *   1. (TARG_SW5/MASTER) a task handed directly from the master core,
 *   2. tasks arriving over the register network (RegNT),
 *   3. the LDM queue / private queue (optionally multi-priority),
 *   4. optional lower-priority queue and work-stealing paths,
 *   5. the shared stack.
 * Returns the next runnable task, or NULL when every source is empty.
 */
struct task *get_next_task()
{
    struct task *t = NULL;
    int succ;
    int nrecv;
#ifdef LOCAL_MULTI_PRIORI
    int dest_qid;
#endif
#ifdef TARG_SW5
#ifdef MASTER

#ifdef SUCC_BATCH
    int i;
    struct task *temp = NULL;
    /* Batched master-to-slave delivery: when the m2s flag is clear the
     * master has published a batch descriptor; push every task it lists
     * into our local queue, then set the flag to acknowledge. */
#ifdef LOCAL_FLAG_M2S
    if (m2s_flag == 0) {
#else
    if (m2s_flag[_MYID] == 0) {
#endif
        for (i = 0; i < M2S_details_64[my_id].task_num; i++) {
            temp = (struct task *)(M2S_details_64[my_id].task_detail[i]);
#ifdef _LDMQ  // serial queue, with ldmq
            slave_ldmq_push(temp);
#else         // serial queue, no ldmq
            slave_push(my_private_queue, temp);
#endif
        }
        asm volatile("memb\n");  /* publish queue contents before the ack */
#ifdef LOCAL_FLAG_M2S
        m2s_flag = 1;
#else
        m2s_flag[_MYID] = 1;
#endif
        asm volatile("memb\n");
    }
#else  // of SUCC_BATCH
    // printf("slave_get_next,\n");

    /* Single-task master-to-slave handoff: consume the task the master
     * wrote for us and clear the flag (memb fences order the accesses). */
#ifdef LOCAL_FLAG_M2S
    if (master_flag != 0) {
        // printf("error???\n");
        // fflush(stdout);
        t = (struct task *)from_master;
        asm volatile("memb\n");
        master_flag = 0;
        asm volatile("memb\n");
        return t;
    }
#else
    if (master_flag[_MYID] != 0) {
        t = (struct task *)from_master[_MYID];
        asm volatile("memb\n");
        master_flag[_MYID] = 0;
        asm volatile("memb\n");
        return t;
    }

#endif

#endif

#endif

// #endif
#endif
    // hierarchical private queue

#ifdef SEG_BUFF

    /* Drain tasks delivered over the register network.  With SEG_BUFF,
     * the last N_SLAVE_AGENT cores act as agents and instead move RegNT
     * traffic into the need-spawn buffer. */
#ifdef AGENT_COMPU
    nrecv = recvtaskfromRegNT();
#else
    if (_MYID < local_total_num_threads - N_SLAVE_AGENT)
        nrecv = recvtaskfromRegNT();
    else {
        if (_MYID == local_total_num_threads - N_SLAVE_AGENT) slave_clear_RegNT_to_need_spawn();
#ifdef TEMP_ASSERT
        else
            assert(0);
#endif
    }
#endif

#else
    nrecv = recvtaskfromRegNT();
#endif

#ifdef _LDMQ
#ifdef LOCAL_MULTI_PRIORI
    /* Multi-priority LDM queues: scan from highest priority down; fall
     * back to the matching private queue until it is observed empty. */
    for (dest_qid = LOCAL_MULTI_SIZE - 1; dest_qid >= 0; dest_qid--) {
        slave_ldmq_trypop(t, succ, dest_qid);

        if (succ) return t;
        if (!mqueue_empty[dest_qid]) {
            // once empty, no need to check private queue anymore!
            // promote one currently
            if (slave_try_pop(my_private_queue[dest_qid], &t)) {
                return t;
            } else {
                mqueue_empty[dest_qid] = true;
                return t;
            }  // get null
        }
    }
#else
    slave_ldmq_trypop(t, succ);

    if (succ) return t;
    if (!mqueue_empty) {
        // once empty, no need to check private queue anymore!
        // promote one currently
        if (slave_try_pop(my_private_queue, &t)) {
            return t;
        } else {
            mqueue_empty = true;
            return t;
        }  // get null
    }
#endif

#else  // serial_queue, no ldmq
    if (slave_try_pop(my_private_queue, &t)) return t;
#endif

#if defined(ASYN_INSERT_Q)
    /* Lower-priority queue filled asynchronously by other cores. */
    if (slave_concur_try_pop(my_lower_private_queue, &t)) return t;
#endif

#ifdef EMP_D_RANDOM_STEALING
    /* Steal from a random victim's lower private queue. */
    int stealing_destid = get_random_stealing_destid();
    struct aceMesh_stack *dest_stealing_queue = &(schedulers[stealing_destid].lower_private_queue);
    if (slave_concur_try_pop(dest_stealing_queue, &t)) {
        return t;
    }
#endif

#if defined(EMP_RANDOM_STEALING) && defined(TARG_SW9)
    /* SW9 on-chip stealing: probe up to three candidate victims' chip
     * buffers via remote LDM access and take one task from the tail when
     * the victim holds more than two pending entries.
     * NOTE(review): both branches pass all_chip_buff[_MYID] (not destid)
     * to remote_ldm — confirm this is the intended addressing convention. */
    if(steal_status){
    int iter=0;
    int destid;
    struct task_chip_buff* self; 
    
    while (iter < 3) {
      destid=steal_id[iter];
      if (destid != _MYID) {
       self = remote_ldm(destid, (all_chip_buff[_MYID]));
       } else {
       destid=(destid + 1) % local_total_num_threads;
       self =remote_ldm(destid, (all_chip_buff[_MYID]));
       }
       int local_tail = (self->tail + CHIP_BUFF_SIZE - 1) % CHIP_BUFF_SIZE;
        if ((self->send_num-self->recv_num) >2) {
        self->tail = local_tail;
        self->send_num=self->send_num-1;
        return self->base[local_tail];
       }
       iter++;
            }
       steal_status=0;
       }
#endif


#ifdef USE_SHARED_STACK
    if (shared_queue.slave_try_pop(&t))  // bug fix
        return t;
#endif

    return NULL;
}

// type 3. concurrent queue, (stack)
#else

#ifdef MULTI_POP
/* Per-thread buffer holding up to 3 tasks fetched by one EQ_multi_pop
 * call, plus the index of the next entry to serve from it. */
__thread_local_fix struct task *pop_buff[3] = {NULL};
__thread_local_fix int pop_count = 0;
#endif

/*
 * get_next_task: concurrent-queue (stack) variant of the slave-side fetch.
 * Order: optional priority queue, EMP queue (single or batched pop) or the
 * private queue, optional random stealing, then the shared stack.
 * Returns the next runnable task, or NULL when nothing is available.
 */
struct task *get_next_task()
{
    struct task *t = NULL;

#ifdef USE_PRIORITY_QUEUE
    /* Highest priority: dedicated priority queue. */
    if (slave_try_pop(my_priority_queue, &t)) {
        return t;
    }
#endif
//    printf("t->affinity_id=%d\n",t->affinity_id);
#ifdef EMP_QUEUE
#ifdef MULTI_POP
    /* Batched pop: refill the 3-slot pop_buff when exhausted, then serve
     * one buffered task per call. */
    if (pop_count == 3 || pop_buff[pop_count] == NULL) {
        pop_count = 0;
        EQ_multi_pop(pop_buff);
    }
    t = pop_buff[pop_count];
    pop_count++;
#else
    t = EQ_pop();
#endif
    if (t)
#else
    //printf("before\n");
    if (slave_try_pop(my_private_queue, &t))
#endif
    {
#ifdef DEBUG
//       printf("t->affinity_id=%d\n",t->affinity_id);
//       if(t->affinity_id!=_MYID)
//         printf("my_id=%d,affi=%d\n",_MYID,t->affinity_id);
#endif
       // printf("get_next_task\n");
        return t;
    }

#ifdef CONCUR_Q_RANDOM_STEALING
    /* Work stealing: try a random victim's private queue. */
    int stealing_destid = get_random_stealing_destid();
    struct aceMesh_stack *dest_stealing_queue = &(schedulers[stealing_destid].private_queue);
    if (slave_try_pop(dest_stealing_queue, &t)) {
        //printf("stealing\n");
        return t;
    }
#endif

#ifdef USE_SHARED_STACK
    if (shared_queue.slave_try_pop(&t))  // bug fix
        return t;
#endif

    return NULL;
}

#endif

#ifdef SEG_BUFF
/*
 * Pop the next task from the given buffer stack.
 * Returns the popped task, or NULL when the stack is empty.
 */
struct task *get_next_task_from_buff(struct aceMesh_stack *self)
{
    struct task *popped = NULL;
    if (!slave_try_pop(self, &popped))
        return NULL;
    return popped;
}

/*
 * Agent-side drain of my_scheduler->pending_successor.
 * Each popped task is dispatched by affinity_id (TEMP_M2L builds):
 *   - a valid worker id: forwarded with slave_spawn_to_id;
 *   - local_total_num_threads: executed in place via slave_execute;
 *   - anything else: assertion failure under TEMP_ASSERT.
 * When the stack is empty and at least one task was handled, successor
 * segment ownership is handed back to the master (SEG_MASTER).
 */
void slave_agent_to_update_successor_from_pending(generic_scheduler *my_scheduler)
{
    int mask = 0;  /* set once any task has been handled */
    struct task *t = NULL;
    // while(t=get_next_task_from_buff(&(my_scheduler->pending_successor))!=NULL)
    // while(my_scheduler->pending_successor.task_pool!=NULL)
    while (1) {
#ifdef TEMP_DEBUG
        printf("slave_update_pending+pool=%p\n", my_scheduler->pending_successor.task_pool);
        fflush(stdout);
#endif
#ifdef ARRAY_STACK
        if (my_scheduler->pending_successor.top > 0)
#else
        if (my_scheduler->pending_successor.task_pool != NULL)
#endif
        {
            if (slave_ser_try_pop(&(my_scheduler->pending_successor), &t)) {
#ifdef TEMP_DEBUG
                printf("slave_pending_successor:%p,%d\n", t, t->affinity_id);
                fflush(stdout);
#endif
#ifdef TEMP_M2L
                /* Forward tasks bound to a worker core. */
#ifdef AGENT_COMPU
                if (t->affinity_id >= 0 &&
                    t->affinity_id <= local_total_num_threads - N_SLAVE_AGENT)
#else
                if (t->affinity_id >= 0 && t->affinity_id < local_total_num_threads - N_SLAVE_AGENT)
#endif
                {
                    slave_spawn_to_id(t);
                    mask = 1;
                    continue;
                } else if (t->affinity_id == local_total_num_threads) {
#endif
#ifdef ACEMESH_TIME
                    cycle_times_start = rtc();
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
                    pure_task_num++;
#endif
                    /* Task addressed to the agent itself: run it here. */
                    slave_execute((struct aceMesh_task *)t);
#ifdef ACEMESH_TIME
                    cycle_times_end = rtc();
                    task_pure_exec_times += (double)(cycle_times_end - cycle_times_start) / SFREQ;
#endif
                    mask = 1;
#ifdef TEMP_M2L
                }
#ifdef TEMP_ASSERT
                else {
                    printf("agent_push,task=%p,affinity=%d\n", t, t->affinity_id);
                    assert(0);
                }
#endif
#endif
            }
        } else {
            //            printf("pending_succ_is_null,mask=%d\n",mask);
            //            fflush(stdout);
            if (mask > 0) {
                /* Stack drained: give segment ownership back to the master. */
#if defined(MMEM_SEG) || defined(TWIN_SEG)
                seg_succ = SEG_MASTER;
#endif
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
                l_seg_succ = SEG_MASTER;
#endif
                //                printf("Slave: I've changed seg_succ from slave to master\n");
                //                fflush(stdout);
            }
            break;
        }
    }
}

/*
 * Agent-side transfer of tasks from the concurrent buff_spawn stack into
 * the serial pending_spawn stack.  When the buffer drains and at least one
 * task was moved, ownership of the spawn segment is handed back to the
 * master (SEG_MASTER).
 */
void slave_push_need_spawn_to_pending(generic_scheduler *my_scheduler)
{
    int mask = 0;  /* set once any task has been moved */
    struct task *t = NULL;
    // while(my_scheduler->buff_spawn.task_pool!=NULL)
    while (1)
    // while(t=get_next_task_from_buff(&(my_scheduler->buff_spawn))!=NULL)
    {
#ifdef LDMQ
        // firstly, clear RegNT to pending_spawn once more;
        // secondly, clear buff_spawn to pend_spawn;
#endif
        if (my_scheduler->buff_spawn.task_pool != NULL) {
            if (slave_try_pop(&(my_scheduler->buff_spawn), &t)) {
                slave_ser_push(&(my_scheduler->pending_spawn), t);
                mask = 1;
            }
        } else {
            if (mask > 0) {
                /* Buffer drained: return segment ownership to the master. */
#if defined(MMEM_SEG) || defined(TWIN_SEG)
                seg_spawn = SEG_MASTER;
#endif
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
                l_seg_spawn = SEG_MASTER;
#endif
            }
            break;
        }
    }
}
/*
 * Pick a random worker core (slave agents excluded) and try to steal one
 * task from its private queue.  Returns the stolen task, or NULL.
 * NOTE(review): rand() is used unseeded here; seeding was intentionally
 * left commented out.
 */
struct task *slave_agent_random_stealing_task()
{
    //    srand((unsigned int)time(NULL));
    int victim_id = rand() % (local_total_num_threads - N_SLAVE_AGENT);
    generic_scheduler *victim = &schedulers[victim_id];
    struct task *stolen = NULL;
    if (!slave_try_pop(&(victim->private_queue), &stolen))
        return NULL;
    return stolen;
}

#ifdef _LDMQ
extern __thread_local_fix struct task_chip_buff all_chip_buff[64];
#endif

#endif

#ifdef THREAD_TRACE
/*
 * Minimal integer-to-decimal-string conversion (lightweight sprintf
 * substitute for slave cores in the THREAD_TRACE path).
 * Writes the decimal digits of 'myid' into 'buff' and NUL-terminates it.
 * Returns the number of digits written (excluding the terminator), in the
 * style of sprintf.
 * NOTE(review): caller must provide room for the digits plus the
 * terminator, and 'myid' is assumed non-negative (trace ids are).
 */
int m_sprintf(char *buff, int myid)
{
    char d;
    int r = 10;
    int i = 0;
    char s[20];
    int j = 0;
    /* Extract digits, least-significant first. */
    do {
        d = (char)(myid % r);
        myid /= r;
        s[i++] = d + '0';
    } while (myid && i < sizeof(s));
    /* Emit them most-significant first. */
    do {
        buff[j++] = s[--i];
    } while (i);
    /* BUG FIX: the original appended the character '0' instead of the NUL
     * terminator, leaving the string unterminated for consumers of
     * trace_id. */
    buff[j] = '\0';
    /* BUG FIX: the function is declared int but had no return statement
     * (undefined behavior if the caller uses the value). */
    return j;
}
#endif

#if defined(TARG_SW9)&&defined(CONCURRENT_CONSTRUCT_GRAPH)
/*
inline int build_releationship
    (aceMesh_task* dest, int type, tuple_rw_task& src, bool is_neighbor) 
{

#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s=rtc()-begin_time_s;
    slave_trace_print(trace_time_s,trace_myid,RTS_build_releationship);
#endif 
    int ed1,ed2;   //carry return val of add or delete; 
    int res = 0;
    aceMesh_task* tmp = NULL;

    switch (type) {
                case IN:
                {
                    if (src.w_task.t!=NULL) {  //W-R
  #ifdef THREAD_TRACE_LEVEL_1
                    trace_time_s=rtc()-begin_time_s;
                    slave_trace_print(trace_time_s,trace_myid,RTS_set_vertical_task);
  #endif
                        add_successor(src.w_task.t,dest);   //W空时，构造wR边
  #ifdef THREAD_TRACE_LEVEL_1
                    trace_time_s=rtc()-begin_time_s;
                    slave_trace_print(trace_time_s,trace_myid,RTS_event_end);
  #endif
                        ++res;
                    }
#ifdef IMMEDIA_UPDATE
//push itself
                    task_significance t_s;
                    t_s.t = dest;
                    src.r_tasks.push_back(t_s);
#endif
                }
                    break;
                case OUT:
                case INOUT:
                {
                    if(src.r_tasks.size()!=0) {   //iterate build R-W, clear r_tasks
                        for(std::vector<task_significance>::iterator r_task_itr = src.r_tasks.begin();  //why without WAW??？
                            r_task_itr != src.r_tasks.end(); ++r_task_itr)
                        {                  
  #ifdef THREAD_TRACE_LEVEL_1
                    trace_time_s=rtc()-begin_time_s;
                    slave_trace_print(trace_time_s,trace_myid,RTS_set_vertical_task);
  #endif
                            add_successor(r_task_itr->t,dest);
  #ifdef THREAD_TRACE_LEVEL_1
                    trace_time_s=rtc()-begin_time_s;
                    slave_trace_print(trace_time_s,trace_myid,RTS_event_end);
  #endif
                            ++res;
                        }
#ifdef IMMEDIA_UPDATE
                        src.r_tasks.clear();
#endif
                    }
                    else {   //W-W
  #ifdef THREAD_TRACE_LEVEL_1
                    trace_time_s=rtc()-begin_time_s;
                    slave_trace_print(trace_time_s,trace_myid,RTS_set_vertical_task);
  #endif
                        add_successor(src.w_task.t,dest);
  #ifdef THREAD_TRACE_LEVEL_1
                    trace_time_s=rtc()-begin_time_s;
                    slave_trace_print(trace_time_s,trace_myid,RTS_event_end);
  #endif
                        ++res;
                    }
//update w task
#ifdef IMMEDIA_UPDATE
                    src.w_task.t = dest;
#endif
                }
                    break;
                default:
                    break;
            }


#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s=rtc()-begin_time_s;
    slave_trace_print(trace_time_s,trace_myid,RTS_event_end);
#endif 
    return res;
}
*/
 /*
  * add_successor: link task 't' as a successor of task 'self' under
  * self's successor lock.  Returns 0 when no edge is added (self already
  * finished, or t is already linked via last_successor / vertical_task),
  * otherwise appends t to self->successor_tasks (growing the array by
  * ADDR_CHUNKMORE slots when full), bumps t's predecessor reference count
  * via updt_subw, and returns 1.
  */
 int add_successor(struct aceMesh_task* self,struct aceMesh_task* t)
 {

 #ifndef WITHOUT_CONCUR_OVER_STATE
     /* Fast path: if 'self' already completed, no edge is needed. */
     if (self->task_base.over == 1) {
         return 0;
     }
 #endif
     slave_my_mutex_lock(&(self->task_base.successor_lock));
     /* Re-check under the lock: 'self' may have completed meanwhile. */
     if (self->task_base.over == 1) {
         slave_my_mutex_unlock(&(self->task_base.successor_lock));
         return 0;
     } else {
         int val = 1;
         /* Skip duplicate edges. */
         if (self->last_successor == t) {
             slave_my_mutex_unlock(&(self->task_base.successor_lock));
             return 0;
         }
         if (self->vertical_task == (struct task *)t) {
             slave_my_mutex_unlock(&(self->task_base.successor_lock));
             return 0;
         }
         if (self->successor_count >= self->capacity_addrs) {
             /* Grow the successor array. */
             struct task **new_succ_task;
             self->capacity_addrs += ADDR_CHUNKMORE;
 #ifdef MEMORY_POOL
 #ifdef USE_SIMD
 #ifdef TARG_SW5
             new_succ_task = (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) *
                                                                        self->capacity_addrs);
 #elif TARG_SW9
             /* BUG FIX: this allocation was commented out, so on the
              * SW9 + USE_SIMD build new_succ_task was used uninitialized
              * by the memcpy below (undefined behavior).  The matching
              * extern acemesh_myalloc_aligned_64 is declared above. */
             new_succ_task = (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) *
                                                                        self->capacity_addrs);
 #endif
 #else
             new_succ_task = (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) *
                                                                        self->capacity_addrs);
 #endif
             memcpy(new_succ_task, self->successor_tasks,
                    sizeof(struct task *) * (self->capacity_addrs - ADDR_CHUNKMORE));
 #else
             printf("need realloc successor\n");
             fflush(stdout);
             printf("realloc=%d\t", sizeof(struct task *) * self->capacity_addrs);
             fflush(stdout);
             new_succ_task = (struct task **)realloc(self->successor_tasks,
                                                     sizeof(struct task *) * self->capacity_addrs);
 #endif
             if (new_succ_task == NULL) {
                 printf("cannot allocate enough space for addr info!\n");
                 exit(1);
             }
             self->successor_tasks = new_succ_task;
         }
         self->successor_tasks[self->successor_count] = (struct task *)t;
         ++(self->successor_count);
         self->last_successor = t;
         /* Atomically bump t's predecessor reference count. */
         updt_subw(val, &(t->task_base.ref_count_t));
         slave_my_mutex_unlock(&(self->task_base.successor_lock));
         return 1;
     }
 }
/* my_atomic_add: SW inline-asm atomic on *_addr_ via the 'faal'
 * instruction, then increments the fetched value in _new_.
 * NOTE(review): presumably faal is a fetch-and-add returning the prior
 * value, making _new_ = old + 1 afterwards — confirm against the SW ISA
 * manual. */
#define my_atomic_add(_new_, _addr_)                                                \
    {                                                                               \
        asm volatile("faal   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                    \
    }

#endif

//__thread_local_fix unsigned long agent_time=0;

void *worker_kernel_func()
{
#ifdef THREAD_TRACE
#ifdef TARG_SW5
    begin_time_s = rtc() - diff;
#else
    begin_time_s = rtc() - diff/210*225;
#endif
    char buff[5];
    trace_myid = _MYID;
    m_sprintf(&buff, _MYID + 1);
    trace_id=&buff;
#endif
    slave_lock=0;
    master_lock=0;
    struct task *t;
    my_id = _MYID;
    local_total_num_threads = total_num_threads;
    local_sche_num_threads = sche_num_threads;
#ifdef EMP_LOCK
 int l=0;
 if(_MYID==0){
 for(l=0;l<8;l++)
   emp_lock[l]=0;
   }
#endif
//#ifdef EMP_QUEUE
  //  EQ_begin();
   // EQ_start();
//#endif
#if defined(TARG_SW9)&&defined(CONCURRENT_CONSTRUCT_GRAPH)&&!defined(GS_AGENT)
   if(_MYID==total_num_threads-1){
    int val=1;
    int sub=1;
    int i=0,j=0;
    //close=1;
    volatile int con_close;
    volatile unsigned long con_ptr;
    volatile unsigned long con_pre_ptr;
    volatile int status;
   
    #ifndef BLOCK_AGENT
    int sum_pre=0;
    volatile int head=0,tail=0;
    volatile unsigned long task[BLOCK_SIZE];
    main_head=&head;
    main_tail=&tail;
    for(i=0;i<BLOCK_SIZE;i++)
       main_task[i]=&task[i];
    #endif 
    con_graph=&con_close;
    con_status=&status;
    con_src_ptr=&con_ptr;
    con_dest_ptr=&con_pre_ptr;
    //printf("con_graph:%ld\n",con_graph);
    #if defined(MULTI_SPAWN) &&defined(BLOCK_AGENT)
    volatile unsigned long task_ptr[MULTI_TASK];
    volatile int task_num;
   // volatile int slave_status[MULTI_STATUS];
    volatile unsigned long get_reply;
   /*
   for(j=0;j<MULTI_STATUS;j++)
    {
    spawn_status[j]=&slave_status[j];
    slave_status[j]=0;
    */
    for(i=0;i<MULTI_TASK;i++)
     con_task_ptr[i]=&task_ptr[i];
    
   // }
    //con_task_ptr[1]=&task_ptr[1];
    //con_task_ptr[2]=&task_ptr[2];
    con_task_num=&task_num;
    task_num=0;
    #endif
    con_close=1;
    status=0;
    while(con_close){
    #ifdef BLOCK_AGENT
        switch(status){
            case 0:
                break;
            case 1:
                //con_sum_pre=build_releationship((aceMesh_task *)con_dest_ptr,con_type,(tuple_rw_task&)con_src_ptr,false);
                // add_successor((struct aceMesh_task *)con_src_ptr,(struct aceMesh_task *)con_dest_ptr);
               // printf("1:slave_lock_before\n");
                slave_my_mutex_lock((volatile int *)con_ptr);
               // printf("1:slave_lock_after\n");

                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 2:
                my_atomic_add(con_sum_pre,(volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 3:
                //int i;
                #ifdef MULTI_SPAWN
               /*
               j=0;
               while(status){
              if(slave_status[j]==-1)
                break;
              while(!slave_status[j]);
              task_num=slave_status[j];
              get_reply=0;
                athread_get(PE_MODE,&temp_task[j][0]
                               , &task_ptr[j][0]
                               , task_num*8, (void*)&get_reply
                               , 0, 0,0);
                while(get_reply!=1);
                slave_status[j]=0;
                asm volatile("memb\n");
                // j=(j+1)%MULTI_STATUS;
               */
               for(i=0;i<task_num;i++){
                slave_spawn_to_id((struct task *)task_ptr[i]);
                }
               // j=(j+1)%MULTI_STATUS;
               // }
                #else
                slave_spawn_to_id((struct task *)con_ptr);
                #endif
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 4:
                slave_my_mutex_unlock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 5:
                updt_subw(sub,(volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 6:
              //  printf("6:add_succ\n");
                /*
                task_num=0;
                while(status==6);
                agent_time=rtc();
                while(task_num<1000){
                */
                add_successor((struct aceMesh_task*)con_pre_ptr,(struct aceMesh_task*)con_ptr);
               // task_num++;
               // }
               // agent_time=rtc()-agent_time;
               // printf("agent time:%ld\n",agent_time);
                asm volatile("memb\n");
                status=0;
                asm volatile("memb\n");
                break;
            default:
                status=0;
                asm volatile("memb\n");
                break;

        }
    #else
    if(head!=tail){

        my_atomic_add(sum_pre,&(((struct aceMesh_task*)task[head])->task_base.ref_count_t));
        
        switch(status){
            case 0:
                break;
            case 1:   
                slave_my_mutex_lock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 4:
                slave_my_mutex_unlock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 5:
                updt_subw(sub,(volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            default:
                status=0;
                asm volatile("memb\n");
                break;  
        }
        if(sum_pre==-1){
       // #ifdef EMP_QUEUE
       //   printf("slave_spawn:%x\n",(struct task *)(&((struct aceMesh_task *)task[head])->task_base));
          slave_spawn_to_id((struct task *)(&((struct aceMesh_task *)task[head])->task_base));
        //#else  
        //  init_spawn((struct task *)(&((struct aceMesh_task *)task[head])->task_base));
        //#endif
        }
        head=(head+1)%BLOCK_SIZE;
        switch(status){
            case 0:
                break;
            case 1:   
                slave_my_mutex_lock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 4:
                slave_my_mutex_unlock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 5:
                updt_subw(sub,(volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            default:
                status=0;
                asm volatile("memb\n");
                break;  
        }
        
    }
    switch(status){
            case 0:
                break;
            case 1:   
                slave_my_mutex_lock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 4:
                slave_my_mutex_unlock((volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            case 5:
                updt_subw(sub,(volatile int *)con_ptr);
                asm volatile("memb\n"); 
                status=0;
                asm volatile("memb\n"); 
                break;
            default:
                status=0;
                asm volatile("memb\n");
                break;  
    }
    #endif
           
    }
#ifdef EMP_QUEUE
    EQ_exit();
#endif
}else{
#endif
#ifdef EMP_QUEUE
    EQ_begin();
    EQ_start();
#endif

#ifdef MLOCK
    MS_init();
    MS_begin();
#endif

#ifdef DISTRIBUTED_SCHEDULER
    generic_scheduler *my_scheduler = &schedulers[my_id];
#else
    generic_scheduler *my_scheduler = &schedulers[SL_SCHED];
#endif
    int window = 0;
#ifdef DEBUG

    printf("slave_worker_myid=%d,%d,%d\n", _MYID, _PEN, athread_get_id(-1));
    //printf("slave_queue=%p\n", &global_cent_queue);
    fflush(stdout);
#endif
#ifdef LOCAL_FLAG
#ifdef TARG_SW9
    is_run_addr[my_id] = &is_run;
    close_all_threads_addr[my_id] = &close_all_threads;
#endif
    is_run = 0;
#endif

#ifdef TARG_SW9
#ifdef SEG_BUFF
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
    seg_succ_addr[my_id] = &l_seg_succ;
    seg_spawn_addr[my_id] = &l_seg_spawn;
#endif
#endif
#ifdef _LDMQ
    mqueue_empty_addr[my_id] = &mqueue_empty;
    init_chip_queue();
#endif
#endif

#ifdef DEBUG
    // printf("I'm coming %d,%lu",_MYID,rtc());
#endif
//    my_scheduler = &schedulers[my_id];
#if defined(_BLACKROW)
    if (is_black(my_id)) {
        while (is_run == 0)
            ;
        while (1) {
            if (!csr_getr_empty() || !csr_getc_empty()) {
                if (!csr_getr_empty())
                    if (recv_one_task(ROW) != NULL) printf("error recv_task_ROW");
                //            }
                //            while(!csr_getc_empty())
                //            {
                if (!csr_getc_empty())
                    if (recv_one_task(COL) != NULL) printf("error recv_task_COL");
            }
            // recvtaskfromRegNT();
            window++;
            if (window == SCHE_POLL_INTERVAL) window = 0;
            if (window == SCHE_POLL_INTERVAL - 1 && close_all_threads) break;
        }
    } else {
#endif

#if defined(_MAP_ROWS) || defined(_MAP_COLM)
        if (is_blank(my_id)) {
        } else {
#endif

#if defined(_CIRCULAR_QUEUE)
            my_mqueue_base = my_scheduler->private_queue.base;
#else
#ifdef LOCAL_MULTI_PRIORI
    int i = 0;
    for (i = 0; i < LOCAL_MULTI_SIZE; i++) my_private_queue[i] = &(my_scheduler->private_queue[i]);
#else
    my_private_queue = &(my_scheduler->private_queue);
#if defined(ASYN_INSERT_Q)
    my_lower_private_queue = &(my_scheduler->lower_private_queue);
#endif
#endif
#ifdef USE_PRIORITY_QUEUE
    my_priority_queue = &(my_scheduler->priority_queue);
#endif
#endif

#ifdef DEBUG_GRAPH
            slave_init_trace_ctl(init_trace);
#endif

#ifdef DEBUG
            printf("XXXXbefore run in worker_kernel,%d,%d\n", is_run, _MYID);
#endif
/*
#if defined(EMP_QUEUE)&&!defined(CONCURRENT_CONSTRUCT_GRAPH)&& defined(TARG_SW9)       
        eq_ptr_slot[_MYID]=0;
        asm volatile("memb\n");
        eq_status[_MYID]=0;
        asm volatile("memb\n");
      //if(_MYID==total_num_threads-1){
          //printf("init_end:%d\n",init_end);
          //while(init_end==0)
          // printf("init_end\n");
          while(is_run==0||eq_status[_MYID]){
            
           
            if(eq_status[_MYID])
            {
               printf("slave_spawn:%d\n",_MYID);
              // slave_spawn_to_id((struct task *)eq_ptr_slot[_MYID]);
               EQ_push((struct task *)eq_ptr_slot[_MYID]);
               eq_status[_MYID]=0;
               asm volatile("memb\n");
              }

           }
        //   }
           
#else
*/
            while (is_run == 0)
                ;
//#endif
//printf("slave_is_run=%d\t",is_run);
// initialzation must be here
#if defined(_CIRCULAR_QUEUE)
            mqueue_top = my_scheduler->private_queue.top;
            mqueue_bottom = my_scheduler->private_queue.bottom;
#endif
#ifdef ACEMESH_PARA
            t_init = rtc();
//    printf("master_init:%ld,slave_init:%ld\n",time_init,t_init);
#endif
            t = NULL;
#ifdef TEMP_DEBUG
            printf("XXXXbefore get in worker_kernel,is_run=%d,id=%d\n", is_run, _MYID);
//    if(my_id==total_num_threads-1)
//    {
//      printf("init_seg_succ=%d, init_seg_spawn=%d\n",seg_succ,seg_spawn);
//      fflush(stdout);
//    }
#endif

#if defined(_SERIAL_QUEUE) && defined(EMP_RANDOM_STEALING) &&defined(TARG_SW5)
            int steal_allowed = 0;
            // int steal_allowed = 1;
            int empty_count = 0;
#endif

#ifdef THREADS_IDLING_PROF
            int thread_first_spawned = 0;
#endif

            while (1) {
#if defined(DELAYED_SEND)
                consume_delayed_buffer();
#endif
                while (is_run == 0 && close_all_threads == 0) {
#if defined(_CIRCULAR_QUEUE)
                    if (init_flag != 0) {
                        mqueue_top = my_scheduler->private_queue.top;
                        mqueue_bottom = my_scheduler->private_queue.bottom;
                    }
#endif
                    // printf("is_run=0");
                }

#ifdef SEG_BUFF
                //        printf("I'm going to clear pending_success,seg=%d,%d\n",l_seg_succ,_MYID);
                if (my_id >= local_total_num_threads - N_SLAVE_AGENT)  // for slave_agent
                {
                    // printf("I'm going to clear pending_success,seg=%d,%d\n",l_seg_succ,_MYID);
                    // fflush(stdout);
                    my_scheduler = &schedulers[local_total_num_threads];

#ifdef AGENT_COMPU
                    slave_recvtaskfromRegNT();
#else
                    slave_clear_RegNT_to_need_spawn();
#endif
#if defined(TWIN_SEG) || defined(LOCAL_SEG)
                    if (l_seg_succ == SEG_SLAVE)
#endif
#if defined(MMEM_SEG)  //|| defined(TWIN_SEG)
                        if (seg_succ == SEG_SLAVE)
#endif
                        {
#ifdef TEMP_DEBUG
                            printf("slave_I'm going to clear pending_success,%d\n", _MYID);
                            fflush(stdout);
#endif
                            slave_agent_to_update_successor_from_pending(my_scheduler);
                        }
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
                    if (l_seg_spawn == SEG_SLAVE)
#endif
#if defined(MMEM_SEG)  //||defined(TWIN_SEG)
                        if (seg_spawn == SEG_SLAVE)
#endif
                        {
                            slave_push_need_spawn_to_pending(my_scheduler);
                        }
#ifdef LDMQ
                        else {
                            // when seg_spawn=maser, can't control pending queue
                            // need clear RenNT to need_spawn
#ifdef AGENT_COMPU
                            recvtaskfromRegNT();
#else
                            slave_clear_RegNT_to_need_spawn();
#endif
                        }
#endif
#ifdef AGENT_RANDOM_STEALING
                    t = slave_agent_random_stealing_task();
#else
                    if (t == NULL) {
                        t = k ref_countet_next_task();
                        window++;
                        if (window == SCHE_POLL_INTERVAL) window = 0;
                    }

#endif
                    if (t != NULL) {
#ifdef TARG_SW9
#ifdef EMP_RANDOM_STEALING
    steal_status=1;
#endif
#ifdef _SERIAL_QUEUE
                    recv_num=0;
#endif
#endif
#ifdef TEMP_ASSERT
                        if (t->affinity_id < 0 || t->affinity_id > local_total_num_threads - 1)
                            assert(0);
#endif
                            //                assert(0);
#ifdef ACEMESH_SCHEDULER_PROFILING
                        pure_task_num++;
#ifdef ACEMESH_TIME
                        cycle_times_start = rtc();
#endif

#endif
#ifdef THREAD_TRACE
                        trace_time_s = rtc() - begin_time_s;
                        int task_type = ((aceMesh_task *)t)->my_type;
                        switch (task_type) {
                            case 0:
                                slave_trace_print(trace_time_s, trace_myid, RTS_execute_task);
                                break;
                            case 1:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_stencil_task);
                                break;
                            case 2:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_noaffinity_task);
                                break;
                            case 3:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_blocking_task);
                                break;
                            case 4:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_composite_task);
                                break;
                            case 5:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_composite_end_task);
                                break;
                            case 6:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_undefined_task_1);
                                break;
                            case 7:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_undefined_task_2);
                                break;
                            default:
                                slave_trace_print(trace_time_s, trace_myid, RTS_execute_task);
                                break;
                        }
#endif
                        t = slave_user_execute((ci_task *)t);
#ifdef THREAD_TRACE
                        trace_time_s = rtc() - begin_time_s;
                        slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
#ifdef ACEMESH_TIME
                        cycle_times_end = rtc();
                        task_pure_exec_times +=
                            (double)(cycle_times_end - cycle_times_start) / SFREQ;
#endif
                    }
#if defined(TARG_SW9)   && defined(_SERIAL_QUEUE)             
                else{
                    
             recv_num=1;
                }
#endif        
                    if (close_all_threads) break;
                } else {
#endif

#ifdef ACEMESH_PARA
                    //        t_init=time_init;
                    b_build = tt_build;
#ifdef DEBUG
                    // printf("t_init:%ld,b_build:%ld\n",t_init,b_build);
#endif
#endif
                    if (t == NULL) {
                        t = get_next_task();
#ifdef THREADS_IDLING_PROF
                        if (thread_first_spawned) {
                            total_get_count++;
                            if (t == NULL) total_idle_count++;
                        }
#endif
                        window++;
                        if (window == SCHE_POLL_INTERVAL) window = 0;
                    }

                    if (t != NULL) {
#ifdef TARG_SW9
#ifdef EMP_RANDOM_STEALING
    steal_status=1;
#endif
#ifdef _SERIAL_QUEUE
                    recv_num=0;
#endif
#endif
#ifdef THREADS_IDLING_PROF
                        if (!thread_first_spawned) thread_first_spawned = 1;
#endif
#if defined(_SERIAL_QUEUE) && defined(EMP_RANDOM_STEALING)  &&defined(TARG_SW5)
                        if (!steal_allowed) {
                            steal_allowed = 1;
                            always_fail = 0;
                        }
#endif
#ifdef DEBUG
                        if (t->affinity_id != _MYID)
                            printf("kenerl_my_id=%d,affi=%d\n", _MYID, t->affinity_id);
#endif
#ifdef TEMP_ASSERT
                        if (t->affinity_id < 0 || t->affinity_id > local_total_num_threads - 1) {
                            printf("error affinity:%d\n", t->affinity_id);
                            assert(0);
                        }
#endif
                        // struct task* tmp = t;
                        // tmp=(aceMesh_task*)t;
#ifdef ACEMESH_SCHEDULER_PROFILING
                        pure_task_num++;
#ifdef ACEMESH_TIME
                        cycle_times_start = rtc();
#endif

#endif
//             if(t!=NULL)
#ifdef THREAD_TRACE
                        trace_time_s = rtc() - begin_time_s;
                        int task_type = ((aceMesh_task *)t)->my_type;
                        switch (task_type) {
                            case 0:
                                slave_trace_print(trace_time_s, trace_myid, RTS_execute_task);
                                break;
                            case 1:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_stencil_task);
                                break;
                            case 2:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_noaffinity_task);
                                break;
                            case 3:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_blocking_task);
                                break;
                            case 4:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_composite_task);
                                break;
                            case 5:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_composite_end_task);
                                break;
                            case 6:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_undefined_task_1);
                                break;
                            case 7:
                                slave_trace_print(trace_time_s, trace_myid,
                                                  RTS_execute_undefined_task_2);
                                break;
                            default:
                                slave_trace_print(trace_time_s, trace_myid, RTS_execute_task);
                                break;
                        }
#endif
                        struct task *old_t = t;
                        t = slave_user_execute((ci_task *)t);
#ifdef THREAD_TRACE
                        trace_time_s = rtc() - begin_time_s;
                        slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
#ifdef DEBUG
                        printf("end of slave_execute %p,myid=%d\n", old_t, _MYID);
#endif
                        //             printf("\nloop_count:%d\n",tmp->loop_count);
                        /*inline is of no better*/
                        //((ci_task*)t)->slave_ci_task_func(((ci_task*)t)->ci_args);
                        // t=slave_execute(t);

                        /*CPE cannot use free heap  object! */
                        // TODO need free immediately!
                        // free(t);
                        // t=NULL;
#ifdef ACEMESH_TIME
                        cycle_times_end = rtc();
                        task_pure_exec_times +=
                            (double)(cycle_times_end - cycle_times_start) / SFREQ;
#endif
                    } else {
#if defined(TARG_SW9)   && defined(_SERIAL_QUEUE)                              
             recv_num=1;
#endif        
#if defined(_CIRCULAR_QUEUE)
                        if (mqueue_top == mqueue_bottom && ldmq_empty)
                            if (close_all_threads) break;
#else
#if defined(_SERIAL_QUEUE) && defined(EMP_RANDOM_STEALING)&&defined(TARG_SW5)
            /*XXX*/
            if (always_fail > 2) steal_allowed = 0;

            if (steal_allowed) {
                empty_count++;
                if (empty_count > 10) {
                    emp_random_stealing();
                    empty_count = 0;
                }
            }
#endif
            if (window == SCHE_POLL_INTERVAL - 1 && close_all_threads) break;
#endif
                    }
#ifdef SEG_BUFF
                }
#endif
            }
            //    printf("end_of_worker_kernel:%d\n",_MYID);
            //    fflush(stdout);
//endTask ref_count
#ifdef EMP_STEALING_PROF
            emp_stealing_fail[_MYID] = steal_fail;
            emp_stealing_success[_MYID] = steal_success;
#endif

#ifdef THREADS_IDLING_PROF
            thread_get_count[_MYID] = total_get_count;
            thread_idle_count[_MYID] = total_idle_count;
#endif

#ifdef EMP_QUEUE
            EQ_exit();
            EQ_end();
#endif

#ifdef MLOCK
            MS_end();
#endif
//#if defined(TARG_SW9)&&defined(CONCURRENT_CONSTRUCT_GRAPH)&&!defined(GS_AGENT)
//        }
//#endif
#ifdef ACEMESH_PROFILING_INST
            inst_perf[my_id] = local_inst_perf;
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
            all_threads_sum_vert_times[my_id] = sum_vert_times;
            // printf("vert=%ld,%ld",all_threads_sum_vert_times[my_id],sum_vert_times);
            all_threads_maxReuseChain[my_id] = maxReuseChain;
            all_pure_exec_time[my_id] = pure_exec_times;
            all_task_pure_exec_time[my_id] = task_pure_exec_times;

            all_pure_task_compute_time[my_id] = pure_task_compute_times;
            all_pure_task_dma_time[my_id] = pure_task_dma_times;

            all_pure_task_num[my_id] = pure_task_num;
            all_threads_reuse_times[my_id] = reuse_times;
            num_successor[my_id] = local_num_succ;

#if defined(ACEMESH_PROFILING_SUCC)
            all_spawn_slave_task_time[my_id] = local_spawn_slave_task_time;
            all_spawn_slave_trans1_time[my_id] = local_spawn_slave_trans1_time;
            all_spawn_slave_trans2_time[my_id] = local_spawn_slave_trans2_time;
            all_spawn_master_task_time[my_id] = local_spawn_master_task_time;

            all_reg_put_time[my_id] = local_reg_put_time;

            all_reg_put_full_times[my_id] = local_reg_put_full_times;
            all_total_reg_put_times[my_id] = total_local_reg_put_times;

            all_succ_reg_own_times[my_id] = local_reg_own_times;
            all_succ_reg_direct_times[my_id] = local_reg_direct_times;
            all_succ_reg_transfer_times[my_id] = local_reg_transfer_times;
#endif
#endif
//    }
#ifdef ACEMESH_PARA
            if (local_id2 != BUF_SIZE) {
                ace_put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id2 * BUF_SIZE], BUF_SIZE,
                            (void *)&ace_put_reply, 0, 0);
                while (ace_put_reply != 1)
                    ;
            }
#endif
#ifdef DEBUG_GRAPH
            end_file_spawn();
#endif
#if defined(ACEMESH_SCHEDULER_PROFILING) && defined(_LDMQ)
            //    printf("max=%d,full=%ld",max_ldmq,num_fulls);
            all_nfull[my_id] = num_fulls;
//    printf("%d,",all_nfull[my_id]);
#endif
#if defined(_BLACKROW) || defined(_MAP_ROWS) || defined(_MAP_COLM)
        }
#endif
#if defined(_BLACKROW) && defined(_MAP_ROWS)
    }
#endif
#ifdef DEBUG
    printf("end_of_worker_kernel2:%d\n", _MYID);
#endif
 #if defined(TARG_SW9)&&defined(CONCURRENT_CONSTRUCT_GRAPH)&&!defined(GS_AGENT)
            }
 #endif

#ifdef THREAD_TRACE
    slave_trace_print_final(trace_myid);
    if (trace_myid == 0) {
        printf("total_numeber_of_one_slave_trace:%d\n", total[trace_myid]);
    }
#endif
#if defined(EMP_PROFILING) && !defined(EMP_QUEUE)
    printf("id:%d,rma_time:%ld,once_send_time:%ld,once_recv_time:%ld\n",_MYID,rma_time,once_send_time,once_recv_time);
#endif
    return NULL;
}

/* Accessor: the worker id this task is pinned to (its affinity id). */
inline int slave_get_affinity_id(struct task *self)
{
    const int affinity = self->affinity_id;
    return affinity;
}

// three types ready queue
// type 1. seq queue, cyclic array!
#if defined(_CIRCULAR_QUEUE)
/*
 * _CIRCULAR_QUEUE variant: route a ready task `t` according to its affinity.
 * destid >= 0 names a specific worker; FOLLOW_AFFINITY keeps the task on the
 * current worker's queue; NO_SET_AFFINITY lets the generic scheduler place it.
 */
void spawn_to_id(struct task *t)
{
    int nrecv;
    int destid = slave_get_affinity_id(t);
    if (destid > -1) {
        if (my_id == destid) {
            /* Task is for this worker: push onto the local ready queue. */
#ifdef _LDMQ                     // serial queue, with ldmq
            slave_ldmq_push(t);  // TODO:
#else                            // serial queue, no ldmq
            slave_push(my_mqueue_base, t);
#endif
        }
#ifdef MASTER
        else if (destid == local_total_num_threads) {
            /* Task addressed to the management (master) thread.
             * NOTE(review): indexes schedulers[local_total_num_threads] --
             * presumably the array has one extra slot for the master; confirm. */
            generic_scheduler *that_scheduler = &schedulers[destid];
            slave_push_to_master(&(that_scheduler->master_private_queue), t);
        }
#endif
        else {
            /* Remote slave PE: drain our register channel first, then send.
             * NOTE(review): nrecv is written but never read. */
            nrecv = recvtaskfromRegNT();
            sendtask2PE(t, destid);
        }

    } else if (destid == FOLLOW_AFFINITY) {
        /* Inherit the spawning worker's queue. */
        slave_push(my_mqueue_base, t);
    } else if (destid == NO_SET_AFFINITY) {
        /* No affinity set: hand to the generic scheduler for placement. */
        slave_scheduler_enqueue(t);
    } else {
        assert(0); /* invalid affinity id */
    }
}
// type 2. seq queue, linked list!
#elif defined(_SERIAL_QUEUE)  // linked list

#if defined(ASYN_INSERT_Q) || defined(DELAYED_SEND)
__thread_local_fix int is_lower_empty = 1;
/*
 * Queue a task for asynchronous insertion instead of sending it to its
 * destination PE immediately.  The build configuration picks the target:
 * the destination scheduler's lower queue (ASYN_INSERT_REMOTE_Q), this
 * worker's lower queue (ASYN_INSERT_LOCAL_Q), or -- with DELAYED_SEND --
 * this worker's own lower_private_queue, to be flushed later by
 * consume_delayed_buffer().
 *
 * Fix: removed a leftover debug printf("push to own\n") from the
 * DELAYED_SEND spawn hot path.
 */
void asyn_insert_to_task_q(struct task *t)
{
    int destid = slave_get_affinity_id(t);
#ifdef ASSERT
    /* Sanity check: affinity must name a valid local thread. */
    if (destid >= local_total_num_threads || destid < 0) {
        printf("asyn_insert_to %d, wrong\n", destid);
        assert(0);
    }
#endif
#ifdef ASYN_INSERT_REMOTE_Q
    generic_scheduler *that_scheduler = &schedulers[destid];
#elif defined(ASYN_INSERT_LOCAL_Q)
    generic_scheduler *that_scheduler = &schedulers[my_id];
#elif defined(DELAYED_SEND)
    generic_scheduler *that_scheduler = &schedulers[my_id];
#endif
#if defined(DELAYED_SEND)
    /* Park the task locally; consume_delayed_buffer() forwards it later.
     * that_scheduler is intentionally unused on this path. */
    (void)that_scheduler;
    // slave_push_to_master(&(that_scheduler->lower_private_queue),t);
    slave_push_to_master(my_lower_private_queue, t);
    // is_lower_empty=0;
#else
    slave_push_to_master(&(that_scheduler->lower_private_queue), t);
#endif
}
#if defined(DELAYED_SEND)
extern unsigned int csr_put_full();
/*
 * Drain this worker's delayed-send buffer (DELAYED_SEND mode): pop parked
 * tasks and forward each to its affinity PE while the register-channel put
 * path has room.  Incoming tasks are drained from the register channel on
 * every step so the channel cannot back up while we send.
 *
 * Fixes: the original non-void function could fall off the end without a
 * return (undefined behavior if the caller uses the value, CERT MSC37-C);
 * two `return 0;` statements were unreachable after `break`; `t` was never
 * reset, so a failed pop could re-send the previously sent task if
 * slave_try_pop() leaves *t untouched on an empty queue; leftover debug
 * printfs removed.
 *
 * Returns 0 always (the buffer is either drained or the channel is full).
 */
int consume_delayed_buffer()
{
    // generic_scheduler  *that_scheduler = &schedulers[my_id];
    struct task *t;

    while (1) {
        t = NULL; /* defensive: never re-send a stale pointer */
        slave_try_pop(my_lower_private_queue, &t);
        recvtaskfromRegNT(); /* keep draining incoming tasks while we work */
        if (t == NULL)
            break; /* buffer empty */
        if (csr_put_full()) {
            /* Channel full: put the task back and retry on a later call. */
            slave_push_to_master(my_lower_private_queue, t);
            break;
        }
        recvtaskfromRegNT();
        sendtask2PE(t, slave_get_affinity_id(t));
    }
    // is_lower_empty=1;
    return 0;
}
#endif
#endif

/*
 * _SERIAL_QUEUE variant: route a ready task `t` to the worker named by its
 * affinity id.  The worker's own tasks go to its local (LDM or private)
 * queue; tasks for other slave PEs are sent over the register channel; with
 * MASTER, tasks addressed to the management thread go to its private queue
 * (or, under SEG_BUFF, are relayed through the last agent PE).
 */
void spawn_to_id(struct task *t)
{
    int nrecv;
    int destid = slave_get_affinity_id(t);
#ifdef TEMP_AFFI
    /* Debug guard: detect a task whose affinity changed after creation. */
    if (destid != t->bak_affinity_id) {
        printf("warning: slave_destid has changed!!!!!rankid=%d,%d,%d,task=%p\n", my_mpi_rank,
               destid, t->bak_affinity_id, t);
        fflush(stdout);
    }
#endif
    if (destid > -1) {
#ifdef ACEMESH_PROFILING_SUCC
        /* NOTE(review): spawn_end is written here but never read. */
        unsigned long spawn_end = 0;
        unsigned long spawn_start = rtc();
#endif

        ///*-----------------*/
        //        if(destid < local_total_num_threads && destid>-1)
        //        {
        //           slave_ldmq_push(t);
        //        }
        ////        else if(destid==local_total_num_threads)
        ////        {
        ////           generic_scheduler  *that_scheduler = &schedulers[destid];
        ////           slave_push_to_master(&(that_scheduler->master_private_queue),t);
        ////        }
        //        else
        //           assert(0);
        // #ifdef ACEMESH_PROFILING_SUCC
        //           all_spawn_slave_task_time[my_id]+=(rtc()-spawn_start);
        // #endif
        //
        //  }
        //}
        //
        ///*----------------------*/

        /* Drain the register channel before sending, so it cannot back up.
         * NOTE(review): nrecv is written but never read. */
        nrecv = recvtaskfromRegNT();

        if (my_id == destid) {
            /* Task for this worker: push onto the local ready queue. */
#ifdef _LDMQ  // serial queue, with ldmq
            slave_ldmq_push(t);
#else         // serial queue, no ldmq
            slave_push(my_private_queue, t);
#endif
#ifdef ACEMESH_PROFILING_SUCC
            local_spawn_slave_task_time += (rtc() - spawn_start);
            local_reg_own_times++;
#endif
        }  // my task
        /* Remote slave PE (upper bound depends on whether agent PEs also
         * compute). */
#ifdef SEG_BUFF
#ifdef AGENT_COMPU
        else if (destid < local_total_num_threads && destid > -1)
#else
        else if (destid < local_total_num_threads - N_SLAVE_AGENT && destid > -1)
#endif
#else
        else if (destid < local_total_num_threads && destid > -1)
#endif
        {
#ifdef SEG_BUFF
#ifdef AGENT_COMPU
            nrecv = recvtaskfromRegNT();
#else
            /* Agent PEs do not execute tasks: mark incoming register traffic
             * as needing re-spawn instead of receiving it. */
            if (_MYID >= local_total_num_threads - N_SLAVE_AGENT)
                slave_clear_RegNT_to_need_spawn();
            else
                nrecv = recvtaskfromRegNT();
#endif
#else
            // nrecv=recvtaskfromRegNT(); // move to the critical path(up), clear the RegNT in time
#endif
#ifdef DEBUG
            printf("%d,will send task(%p) to %d\n", _MYID, t, destid);
#endif
            /* Remember recent send targets so random stealing can bias
             * toward them (3-entry ring buffer). */
#if defined(TARG_SW9) && defined(EMP_RANDOM_STEALING)
        steal_id[steal_i]=destid;
        steal_i=(steal_i+1)%3;
#endif
            sendtask2PE(t, destid);
#ifdef ACEMESH_PROFILING_SUCC
            local_spawn_slave_trans1_time += (rtc() - spawn_start);
#endif
#ifdef DEBUG
            printf("%d,has sent task(%p) to %d\n", _MYID, t, total_num_threads - 1);
#endif
        }
#ifdef MASTER
        else if (destid == local_total_num_threads) {
            /* Task addressed to the management (master) thread.
             * NOTE(review): that_scheduler is unused on the SEG_BUFF path. */
            generic_scheduler *that_scheduler = &schedulers[destid];
#ifdef SEG_BUFF
#ifdef DEBUG
            printf("%d,will send master task(%p) to %d\n", _MYID, t,
                   total_num_threads - N_SLAVE_AGENT);
#endif
#if defined(TARG_SW9) && defined(EMP_RANDOM_STEALING)
       steal_id[steal_i]=destid;
        steal_i=(steal_i+1)%3;
#endif
            /* Relay through the last slave PE (agent) toward the master. */
            sendtask2PE(t, local_total_num_threads - 1);
#ifdef ACEMESH_PROFILING_SUCC
            local_spawn_master_task_time += (rtc() - spawn_start);
#endif
#ifdef DEBUG
            printf("%d,has sent master task(%p) to %d\n", _MYID, t,
                   total_num_threads - N_SLAVE_AGENT);
#endif
#else
#ifdef LOCAL_MULTI_PRIORI
            slave_push_to_master(&(that_scheduler->master_private_queue[t->priority_id]), t);
#else
            slave_push_to_master(&(that_scheduler->master_private_queue), t);
#ifdef ACEMESH_PROFILING_SUCC
            local_spawn_master_task_time += (rtc() - spawn_start);
#endif
#endif
#endif
            // assert(0);
        } else {
            assert(0);
        }
#endif
        /* NOTE(review): if MASTER and TEMP_ASSERT are both defined (without
         * AGENT_COMPU), this emits a second `else` after the one above and
         * will not compile -- the two diagnostics are mutually exclusive. */
#ifndef AGENT_COMPU
#ifdef TEMP_ASSERT
        else
        {
            printf("error destid=%d,total_num_thread=%d\n", destid, total_num_threads);
            assert(0);
        }
#endif
#endif
    }
    //    else if(destid == FOLLOW_AFFINITY ){
    //      // generic_scheduler  *my_scheduler = &schedulers[my_id];
    //       slave_push(my_private_queue,t);
    //    }
    //   else if(destid == NO_SET_AFFINITY){
    //        //generic_scheduler *my_scheduler = &schedulers[my_id];
    //        slave_scheduler_enqueue(t);
    //    }
    else {
        assert(0); /* FOLLOW/NO_SET affinity is not supported in this variant */
    }
}

// type 3. concurrent queue, (stack)
#else

#ifdef SEG_BUFF
/*
 * SEG_BUFF variant: enqueue a ready task directly onto the destination
 * scheduler's container -- buff_spawn for the master slot, private_queue
 * for a regular slave PE.  Aborts on a negative (unset) affinity.
 */
void slave_spawn_to_id(struct task *t)
{
    int destid = slave_get_affinity_id(t);
#ifdef TEMP_DEBUG
    printf("task=%p,dest_id=%d\n", t, destid);
#endif
#ifdef TEMP_AFFI
    /* Debug guard: detect a task whose affinity changed after creation. */
    if (destid != t->bak_affinity_id) {
        printf("warning: slave_destid has changed!!!!!rank_id=%d,%d,%d,task=%p\n", my_mpi_rank,
               destid, t->bak_affinity_id, t);
        fflush(stdout);
    }
#endif
    if (destid > -1) {
        generic_scheduler *that_scheduler = &schedulers[destid];
#ifdef MASTER
        if (destid == local_total_num_threads) {
            /* Master slot: goes to the spawn buffer, not a PE queue. */
            slave_push(&(that_scheduler->buff_spawn), t);
        }
        /* Regular slave PE; the bound includes the agent PEs only when
         * they also compute (AGENT_COMPU). */
#ifdef AGENT_COMPU
        else if (destid <= local_total_num_threads - N_SLAVE_AGENT && destid > -1)
#else
        else if (destid < local_total_num_threads - N_SLAVE_AGENT && destid > -1)
#endif
        {
#endif
        slave_push(&(that_scheduler->private_queue), t);
#ifdef MASTER
        }
#ifdef TEMP_ASSERT
        else {
            printf("error destid=%d,total_num_thread=%d\n", destid, total_num_threads);
            assert(0);
        }
#endif
#endif
    } else {
        assert(0); /* FOLLOW/NO_SET affinity is not supported in this variant */
    }
}

#else

/*
 * Default (concurrent-queue) variant: enqueue a ready task on the scheduler
 * selected by the task's affinity id.  Build options pick the concrete
 * container: EQ_push_master()/master_private_queue for the master slot,
 * priority_queue/private_queue or EQ_push() for a slave PE.
 */
void slave_spawn_to_id(struct task *t)
{
    // assert(0);
    //printf("spawn_to_id\n");
    int destid = slave_get_affinity_id(t);
#ifdef TEMP_AFFI
    /* Debug guard: detect a task whose affinity changed after creation. */
    if (destid != t->bak_affinity_id) {
        printf("warning: slave_destid has changed!!!!!rankid=%d,%d,%d,task=%p\n", my_mpi_rank,
               destid, t->bak_affinity_id, t);
        fflush(stdout);
    }
#endif
//printf("spawn_to_id:my_id=%d,destid=%d\n",_MYID,destid);
// printf("spawn_to_id:,destid=%d\n",destid);
// fflush(stdout);
// cannot print,
// slave core  0: ACV1, PC=0x4ffff0424a00: accessed wrong address
// ACV6, PC=0x4ffff0424a00: accessed cached memory space while cache disabled.
// assert(0);
#ifdef USE_PRIORITY_QUEUE
    int temp_pri_id = get_priority_id(t);
#endif
    if (destid > -1) {
#ifdef ACEMESH_PROFILING_SUCC
        unsigned long spawn_start = rtc();
#endif
        generic_scheduler *that_scheduler = &schedulers[destid];
#ifdef MASTER
        if (destid == local_total_num_threads) {
            /* Task addressed to the management (master) thread. */
#ifdef EMP_MASTER
            EQ_push_master(t);
#else
            slave_push(&(that_scheduler->master_private_queue), t);
#endif
#ifdef ACEMESH_PROFILING_SUCC
            local_spawn_master_task_time += (rtc() - spawn_start);
#endif
        } else {
#endif
#ifdef USE_PRIORITY_QUEUE
            /* priority id 0 -> normal queue, > 0 -> priority queue. */
            if (!temp_pri_id) {
                slave_push(&(that_scheduler->private_queue), t);
                // printf("1");
            } else if (temp_pri_id > 0) {
                slave_push(&(that_scheduler->priority_queue), t);
                // printf("0");
            } else
                assert(0);
#else
#ifdef EMP_QUEUE
        EQ_push(t);
#else
        //printf("slave_push\n");
        slave_push(&(that_scheduler->private_queue), t);
#endif
#endif
#ifdef ACEMESH_PROFILING_SUCC
            local_spawn_slave_task_time += (rtc() - spawn_start);
#endif
#ifdef MASTER
        }
#endif
    } else if (destid == FOLLOW_AFFINITY) {
        /* Dead branch: kept for symmetry, currently always aborts. */
        assert(0);
        slave_push(my_private_queue, t);
    } else if (destid == NO_SET_AFFINITY) {
        /* Dead branch: kept for symmetry, currently always aborts. */
        assert(0);
        slave_scheduler_enqueue(t);
    } else {
        assert(0); /* invalid affinity id */
    }
}
#endif
#endif
/*void slave_local_spawn(generic_scheduler *my_scheduler,struct task* first){
    slave_push(&(my_scheduler->private_queue), first);
}
*/

/*
 * Enqueue a task on the globally shared ready container.  The queue and
 * stack configurations use the same push entry point; any other build
 * has no shared container and must not reach here.
 */
void scheduler_enqueue(struct task *t)
{
#if defined(USE_SHARED_QUEUE) || defined(USE_SHARED_STACK)
    shared_queue.slave_push(t);
#else
    assert(0);
#endif
}
/*#ifdef MASTER
#ifdef MPI_SURPPORT
void slave_mpi_local_spawn(generic_scheduler *my_scheduler, struct task* first)
{
    push(&(my_scheduler->private_queue),first);
}

void slave_mpi_spawn(struct task* first)
{
    generic_scheduler* that_scheduler = &schedulers[total_num_threads];
    slave_mpi_local_spawn(that_scheduler,first);
}
#endif
#endif
*/

#ifdef _BLACKROW
/*
 * Return 1 when `id` lies on the reserved "black" row of the PE grid,
 * 0 otherwise.  Small runs (<= 8 scheduler threads) never reserve one.
 */
int is_black(int id)
{
    return (local_sche_num_threads > 8) && (id / PE_NCOL == black_topo);
}
#endif

#ifdef _MAP_ROWS
/*
 * Row-interleaved mapping: odd rows of the PE grid are "blank" (not used
 * by the scheduler).  With _BLACKROW, the reserved black row stays active
 * even when it falls on an odd row.  Returns 1 for a blank PE, 0 otherwise.
 */
int is_blank(int id)
{
    const int row = id / PE_NCOL;

    if (row % 2 != 1)
        return 0; /* even rows are always active */
#ifdef _BLACKROW
    if (row == black_topo)
        return 0; /* black row stays active despite being odd */
#endif
    return 1;
}
#endif

#ifdef USE_PRIORITY_QUEUE
/* Read a task's scheduling priority id. */
int get_priority_id(struct task *self)
{
    return self->priority_id;
}
/* Assign a task's scheduling priority id. */
void set_priority_id(struct task *self, int id)
{
    self->priority_id = id;
}
#endif

#ifdef _MAP_COLM
/*
 * Column mapping: PEs whose column index exceeds the number of scheduler
 * columns are "blank" (unused).  Returns 1 for a blank PE, 0 otherwise.
 */
int is_blank(int id)
{
    return (id % PE_NCOL > local_sche_num_threads / PE_NCOL) ? 1 : 0;
}
#endif
