#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#include "aceMesh_task.h"
#include "simd.h"
#include "slave.h"
#include "trace_out.h"
// #include "share.h"
#include "aceMesh_runtime.h"  //for LDM_NEIGHBOR
#include "aceMesh_utils.h"
#include "am_assert.h"
#include "am_machine.h"

#ifdef THREAD_TRACE
#include "thread_trace.h"
extern __thread_local_fix unsigned long begin_time_s;
extern __thread_local_fix unsigned long trace_time_s;
extern __thread_local_fix int trace_myid;
#endif

extern __thread_local_fix int local_sche_num_threads;
extern __thread_local_fix int local_total_num_threads;

#define decrement_ref_count(self) (--((struct task *)self)->ref_count_t)

extern __thread_local_fix unsigned long cycle_times_start;
extern __thread_local_fix unsigned long cycle_times_end;
extern __thread_local_fix double pure_exec_times;
extern volatile int total_exec_nums;
__thread_local_fix int pop = 0;
#ifdef TARG_SW5
/*
 * Atomic fetch-and-add on the 32-bit word at _addr_ (SW5 "faaw").
 * _new_ receives the fetched value plus one — presumably faaw returns the
 * pre-add value, so after the ++ _new_ holds the updated count; callers
 * compare it against -1 to detect readiness (TODO confirm against ISA docs).
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe in unbraced if/else bodies (the original bare { } block
 * would break `if (c) my_atomic_add(...); else ...`).
 */
#define my_atomic_add(_new_, _addr_)                                                \
    do {                                                                            \
        asm volatile("faaw   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                    \
    } while (0)
#elif TARG_SW9
/* SW9 variant: "faal" — same contract as the SW5 version above. */
#define my_atomic_add(_new_, _addr_)                                                \
    do {                                                                            \
        asm volatile("faal   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                    \
    } while (0)
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
__thread_local_fix long sum_vert_times;
__thread_local_fix int maxReuseChain;
__thread_local_fix long reuse_times;
__thread_local_fix int local_num_succ = 0;
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING

/* Accumulate the reuse-chain length: extend this task's recorded reuse
 * distance by the predecessor's distance plus one hop. */
void slave_inc_reuse_distance(struct aceMesh_task *self, int last_reuse_distance)
{
    int extension = last_reuse_distance + 1;
    self->reuse_distance += extension;
}

int slave_get_reuse_distance(struct aceMesh_task *self) { return self->reuse_distance; }
#endif
task_type get_task_type(struct aceMesh_task *self) { return self->my_type; }

#ifdef ACEMESH_PARA
//__threal_local_fix long cur_time;
extern __thread_local_fix unsigned long t_init;
extern __thread_local_fix unsigned long b_build;
extern __thread_local_fix int global_id1, global_id2, pre_id;
extern __thread_local_fix int dma_id1, dma_id2;
extern __thread_local_fix int local_id1, local_id2;
extern __thread_local_fix char ldm_npar[BUF_SIZE];
extern __thread_local volatile unsigned long ace_put_reply;
extern char npar[65][MAXT];
#define mymin(a, b) ((a < b) ? (a) : (b))
#define mymax(a, b) ((a > b) ? (a) : (b))
#endif

#ifdef TASK_LIFETIME_PROFILING
extern unsigned long avr_process_cycles[64];
#endif

extern void slave_spawn_to_id(struct task *t);

#ifdef TARG_SW9
extern void slave_clear_RegNT_to_need_spawn();
#endif

#ifdef MULTI_PUSH
extern void EQ_multi_push(struct task *t[3]);
#endif
// extern void EQ_pop_front();
/*
 * In-place initializer for an aceMesh_task.
 *
 * Always initializes the embedded base task.  Under FAT_TDG everything
 * else is compiled out (the fat-TDG build tracks dependences through
 * DataAccess records instead of explicit successor arrays).  Otherwise
 * this resets the successor bookkeeping and allocates the initial
 * successor array, aligned for the SIMD loads done in slave_execute
 * (32 bytes on SW5 / 64 bytes on SW9).
 *
 * NOTE(review): none of the malloc/aligned-alloc results below are
 * checked for NULL — an OOM here turns into a crash on first use.
 */
void slave_aceMesh_task_constructor(aceMesh_task *self)
{
    // struct aceMesh_task self ;
    // self->task_base = task_constructor();
    slave_task_constructor(&(self->task_base));
#ifndef FAT_TDG
#ifdef REUSE_GRAPH
    self->task_base.is_base_task = false;
#endif
    self->vertical_task = NULL;
    self->my_type = NOT_SET;
    self->capacity_addrs = ADDR_CHUNKINIT;
    self->successor_tasks = NULL;
    self->last_successor = NULL;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
    /* 32-byte alignment so doublev4 simd_load can read 4 pointers at once */
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) * ADDR_CHUNKINIT);
#elif TARG_SW9
    /* 64-byte alignment for the SW9 uint512 simd_load */
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) * ADDR_CHUNKINIT);
#endif
#else
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) * ADDR_CHUNKINIT);
#endif
#else
#ifdef TARG_SW5
    self->successor_tasks = (struct task **)malloc(sizeof(struct task *) * ADDR_CHUNKINIT);
#elif TARG_SW9
    // self->successor_tasks=(struct task**)libc_uncached_aligned_malloc(sizeof(struct
    // task*)*ADDR_CHUNKINIT);
    self->successor_tasks =
        (struct task **)slave_my_malloc_aligned(sizeof(struct task *) * ADDR_CHUNKINIT);
#else
    assert(0);  /* no allocator selected for this target — configuration error */
#endif
#endif
    self->successor_count = 0;
    // self->loop_count=0;	//add by gxr 2017/03/13
    self->loop_id = 0;
    self->task_id = 0;
#ifdef DEBUG_GRAPH
    self->ref_count_for_check = 0;
    self->is_joint = 0;
#endif
#ifdef MASTER
#ifdef MPI_SURPPORT
    self->suspend = 0;
#endif
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
    self->reuse_distance = 0;
#endif
#ifdef SAVE_RW_INFO
    /* record of this task's read/write address accesses (debug/graph dump) */
    self->rw_addrs = NULL;
    self->rw_addrs = (struct addr_info *)malloc(sizeof(struct addr_info) * ADDR_CHUNKINIT);
    self->rw_addrs_count = 0;
#endif
#ifdef AUTO_PARTITION
    self->group_id = -1;
#endif
#ifdef SUPPORT_PARTITION
    self->my_part_id = -1;
#endif
#endif
    // return self;
}

/*
 * Release the heap storage attached to a task by the constructor.
 * With MEMORY_POOL the successor array lives in the pool and is not
 * freed here; otherwise the free routine must match the allocator
 * chosen per target in slave_aceMesh_task_constructor.
 */
void aceMesh_task_destructor(aceMesh_task *t)
{
#ifndef MEMORY_POOL
#ifdef TARG_SW5
    /* allocated with plain malloc on SW5 */
    free(t->successor_tasks);
#elif TARG_SW9
    /* allocated with slave_my_malloc_aligned on SW9 */
    my_free_aligned(t->successor_tasks);
#else
    /* unknown target: no matching free routine — report the leak */
    printf("losing free successor\n");
#endif
#endif
#ifdef SAVE_RW_INFO
    free(t->rw_addrs);
#endif
}
/*
 * Publish `val` to a flag shared across cores: plain store followed by a
 * full memory barrier ("memb") so the store is globally visible before
 * any subsequent access on this core.  The order (store, then barrier)
 * is load-bearing — do not reorder.
 */
inline void slave_set_access_state(int val, volatile int * state)
{
    *state = val;
    asm volatile("memb\n");
}
__thread_local unsigned long handle_time = 0, t_time = 0;
/*
 * Post-execution successor release for a finished task.
 *
 * For each successor, atomically bumps its predecessor/reference count via
 * my_atomic_add; a count reaching -1 means the successor has no remaining
 * unfinished predecessors and it is spawned (or, on some paths, returned
 * directly for the caller to execute next).  Returns a ready task to run
 * (vertical fast path / FAT_TDG parent) or NULL.
 *
 * NOTE(review): the compiled behavior depends heavily on build flags
 * (FAT_TDG, USE_SIMD, ACEMESH_PARA, MULTI_PUSH, _RETURN_VERTICAL, ...);
 * only the branches enabled by the build apply.
 */
struct task *slave_execute(struct aceMesh_task *self)
{
#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s = rtc() - begin_time_s;
    slave_trace_print(trace_time_s, trace_myid, RTS_execute_successor);
#endif
#ifdef TASK_LIFETIME_PROFILING
    unsigned long process_start = rtc();
#endif
    int size = self->successor_count;  /* number of horizontal successors */
    int j, i, dist;
    int newt = 0;  /* receives the post-add counter value from my_atomic_add */
    int valtop =0;
    // EQ_pop_front();
    // printf("myid:%d\n",_MYID);
#ifdef _SERIAL_QUEUE
    int nrecv;
#endif
#ifdef ACEMESH_PARA
    int tsize;
    // volatile int reply;
#endif
#ifndef FAT_TDG
    struct task *t = NULL;
    struct task *vert, **first, *tmpt[VECTOR_DBL];
    struct task *succ[LDM_NEIGHBORS];  /* LDM staging buffer for SIMD-loaded successor pointers */
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
    local_num_succ = mymax(local_num_succ, size);
#endif
#if DEBUG_GRAPH
#ifdef SAVE_RW_INFO
    FILE *out = slave_get_file();
    if (out == NULL) {
        printf("error on file");
        fflush(stdout);
    }
#endif
#endif
    // dag reuse: restore ref_count_t from the backup taken at graph build time
#ifdef REUSE_GRAPH
    if (self->task_base.reused) self->task_base.ref_count_t = self->task_base.backup_ref;
#endif

#ifdef DEBUG
//    printf("successor_size:%d\n",size);
#endif
#if DEBUG_GRAPH
    slave_print_to_internal_thread_file("loop_id, %d, task_id, %d, task_type, %d, ", self->loop_id,
                                        self->task_id, self->my_type);
#ifdef SAVE_RW_INFO
    for (i = 0; i < self->rw_addrs_count; ++i) {
        struct addr_info *itr = &(self->rw_addrs[i]);
        fputs(" addr:", out);
        fprintf(out, "%p", itr->addr);
        fputs("   area_type:", out);
        fprintf(out, "%d", itr->area_type);
        fputs("  type:", out);
        fprintf(out, "%d", itr->rw_type);
        fputs("   is_neighbor:", out);
        fprintf(out, "%d", itr->is_neighbor);
    }
    fputs(", ", out);
#endif
    slave_print_long_long_thread_file(tick_count_now());
#endif
#ifdef ACEMESH_TIME
    cycle_times_end = rtc();
#ifdef SEG_BUFF
    if (_MYID < local_total_num_threads - N_SLAVE_AGENT)
#endif
        pure_exec_times += (double)(cycle_times_end - cycle_times_start) / SFREQ;
#endif
#ifdef ACEMESH_PARA
    /* Flush this thread's ldm_npar progress flags for ids (pre_id, global_id2]
     * out to the shared npar table, one BUF_SIZE-sized DMA chunk at a time. */
    global_id2 = mymin(global_id2, MAXT - 1);
#ifdef DEBUG
    printf("global_id2:%d\n", global_id2);
#endif
    if (global_id2 > pre_id) {
        dma_id2 = global_id2 / BUF_SIZE;
        local_id2 = global_id2 % BUF_SIZE;

        global_id1 = mymax(global_id1, pre_id + 1);
        dma_id1 = global_id1 / BUF_SIZE;
        local_id1 = global_id1 % BUF_SIZE;
#ifdef DEBUG
        printf("global_id1:%d,dma_id1:%d,local_id1:%d,dma_id2:%d,local_id2:%d\n", global_id1,
               dma_id1, local_id1, dma_id2, local_id2);
#endif

        if (dma_id1 == dma_id2) {
            /* whole range falls inside a single DMA chunk */
            for (i = local_id1; i <= local_id2; i++) ldm_npar[i] = 1;
            if (local_id2 == BUF_SIZE - 1) {
                ace_put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id1 * BUF_SIZE], BUF_SIZE,
                            (void *)&ace_put_reply, 0, 0);  // TODO
                while (ace_put_reply != 1)
                    ;
                // ldm_npar={0};
                for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            } else if (global_id2 == MAXT - 1) {
                tsize = local_id2 + 1;
                ace_put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id1 * BUF_SIZE], tsize,
                            (void *)&ace_put_reply, 0, 0);  // TODO
                while (ace_put_reply != 1)
                    ;
            }
        } else {
            /* range spans multiple chunks: flush first chunk, full middle
             * chunks of all-ones, then the partial last chunk */
            ace_put_reply = 0;
            athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id1 * BUF_SIZE], BUF_SIZE,
                        (void *)&ace_put_reply, 0, 0);  //
            while (ace_put_reply != 1)
                ;
#ifdef DEBUG
            printf("put_first_buf\n");
#endif
            // ldm_npar={1};
            for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 1;
            for (i = dma_id1 + 1; i < dma_id2; i++) {
                ace_put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][i * BUF_SIZE], BUF_SIZE,
                            (void *)&ace_put_reply, 0, 0);
                while (ace_put_reply != 1)
                    ;
#ifdef DEBUG
                printf("put_%d_buf\n", i);
#endif
            }
            // ldm_npar={0};
            for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            /* NOTE(review): this uses `< local_id2` while the single-chunk
             * branch above uses `<= local_id2` — possible off-by-one, confirm */
            for (i = 0; i < local_id2; i++) ldm_npar[i] = 1;
            if (local_id2 == (BUF_SIZE - 1)) {
                ace_put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id2 * BUF_SIZE], BUF_SIZE,
                            (void *)&ace_put_reply, 0, 0);
                while (ace_put_reply != 1)
                    ;
                // ldm_npar={0};
                for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            } else if (global_id2 == (MAXT - 1)) {
                tsize = local_id2 + 1;
                ace_put_reply = 0;
                /* NOTE(review): transfers BUF_SIZE here although tsize was just
                 * computed; the sibling branch above transfers tsize — confirm */
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id2 * BUF_SIZE], BUF_SIZE,
                            (void *)&ace_put_reply, 0, 0);
                while (ace_put_reply != 1)
                    ;
                // ldm_npar={0};
                for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            }
        }
        pre_id = global_id2;
#ifdef DEBUG
        printf("pre_id:%d\n", pre_id);
#endif
    }
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
    dist = self->reuse_distance;
    if (dist > 0) {
        // printf(",%ld,",sum_vert_times);
        ++sum_vert_times;
        if (dist > maxReuseChain) {
            maxReuseChain = dist;
        }
    }
#endif
    // #ifdef DEBUG
    //     printf("success_size1:%d\n",size);
    // #endif

#ifdef FAT_TDG
    /* FAT_TDG path: dependences are tracked per DataAccess record rather
     * than through an explicit successor array. */

    // struct TaskDataAccesses *accessStruct = &(getDataAccesses(&(self->task_base._dataAccesses)));
    // void *address=nullptr;
    // t_time=rtc();  
    //void *address = NULL;
    //int handle = 0;
   // printf("后继处理\n");  
    //struct DataAccess *parent_access = NULL;
    struct DataAccess *access =NULL;
    struct task *parent = getparent(&(self->task_base));
    // struct TaskDataAccesses *parentaccessStruct = getDataAccesses(parent);
    // task *realease_parent=(accessStruct._accessArray[0])._originator;
#ifdef DEBUG_PBH
 if(parent==NULL){
  
       printf("孩子计数为%d的任务%p后继处理\n",self->task_base._countdownToBeWokenUp,self);
       fflush(stdout);
 }
 #endif
//     if(parent==NULL){
//       printf("MYID%d孩子计数为%d的任务%p后继处理%d\n",_MYID,child_count(&(self->task_base)),self,self->task_base.countlock);
//       fflush(stdout);
// }
//    struct new_slave_table *addresses = self->task_base._dataAccesses._subaccessBottomMap;
//printf("后继处理1\n");  
    if (child_count(&(self->task_base)) == -1) {  /* leaf task: all children done */
        // slave_my_mutex_unlock(&(self->task_base.countlock));
    // if(parent==NULL){
    //   printf("before_houjichuli%p\n",self);
    //   fflush(stdout);
    // }
    // printf("before_houjichuli%p,length%d\n",self,self->task_base._dataAccesses._currentIndex);
    // fflush(stdout);
        for (int i = 0; i < self->task_base._dataAccesses._currentIndex; i++) {
            //address = self->task_base._dataAccesses._addressArray[i];
            access = &(self->task_base._dataAccesses._accessArray[i]);
            //slave_my_mutex_unlock(&(access->mutex_access));   // init lock; must stay commented for flat graphs — uncommenting deadlocks
            struct DataAccess *succ = NULL;
             succ = access->_successor;
            if(succ==NULL){continue;}
           // printf("cur_type%d,succ_type%d\n",access->_type,succ->_type);
            //fflush(stdout);
            if(access->_type==OUT||access->_type==INOUT){ // current access is a write: can release its successor directly, plus the whole read chain
             //printf("write_begin_houjichuli\n");
                if(succ->_type==OUT||succ->_type==INOUT){ // W-W: inherently ordered, no lock needed
                    my_atomic_add(newt, &((succ->_originator)->_predecessorCount));
                       //printf("任务%pnewt:%d\n",succ->_originator,newt);
                    if (newt == -1) {           
                       slave_spawn_to_id(succ->_originator);
                    }
                }else if(succ->_type==IN){ // W-R-R*: the W releases every R in order; unless an R returns before the W finishes releasing its successors, no one else can be mutating `top` concurrently — and only the first task seen is marked top, so an immediately-returning R cannot interfere.
                    slave_set_access_state(1,&(succ->top));
                   //succ->top=true; 
                   //my_atomic_add(valtop, &(succ->top));
// the first access after the W is marked top; in non-first-level DAs this is the head of the read chain
                    while (succ != NULL && succ->_type==IN) { // walk the read chain forward, releasing successors until a W is reached
                         my_atomic_add(newt, &((succ->_originator)->_predecessorCount));
                        // printf("newt1:%d\n",newt);
                         if (newt == -1) {
                             slave_spawn_to_id(succ->_originator);
                         }
                        succ = succ->_successor;
                    }
                }else{ // invalid access type
                //printf("后继任务为%p,%p地址%d\n",succ->_originator,address,succ->_type);
                    assert(0);
                }
              //printf("write_finish_houjichuli\n");
             // fflush(stdout);
            }else if(access->_type==IN){ 
                //printf("read_begin_houjichuli\n");
               slave_my_mutex_lock(&(access->mutex_access));
                slave_set_access_state(FINISHED,&(access->_accessFlags));
                //access->_accessFlags=FINISHED; // this DA's successor handling is done; DA.task may touch it again
                //printf("access->top%d\n",access->top);
                if(access->top== 1) {      // current access is a read AND it is top
                    slave_my_mutex_unlock(&(access->mutex_access));
                //  slave_my_mutex_unlock(&(self->task_base._dataAccesses._accessArray[i].mutex_access));
                    if(succ->_type==OUT||succ->_type==INOUT){// current is a read, successor is a write: release the W directly (W-R-W)
                     //printf("R-W\n"); 
                         my_atomic_add(newt, &((succ->_originator)->_predecessorCount));
                             // printf("任务%pnewt:%d\n",succ->_originator,newt);
                            if (newt == -1) {
                               slave_spawn_to_id(succ->_originator);
                            } 
                    }else if(succ->_type==IN){
                        while(true){
                            if(succ == NULL) break;   // graph is built serially: if absent at execution time it truly does not exist
                            if(succ -> _type == OUT || succ -> _type == INOUT) {
                                my_atomic_add(newt, &((succ->_originator)->_predecessorCount));
                                if (newt == -1) {
                                    slave_spawn_to_id(succ->_originator);
                                }
                                break;
                            }
                            if(succ -> _type == IN){
                               slave_my_mutex_lock(&(succ->mutex_access));
                                if(succ->_accessFlags == FINISHED){
                                   slave_my_mutex_unlock(&(succ->mutex_access));
                                  // printf("1end_succ_IN\n");
                                    succ =succ->_successor;
                                }else{ 
                                    slave_set_access_state(1,&(succ->top)); 
                                    //succ->top=true; // this R is marked as top
                                     // my_atomic_add(valtop, &(succ->top));
                                    slave_my_mutex_unlock(&(succ->mutex_access));
                                   // printf("2end_succ_IN\n");
                                    break;
                                }
                                 //printf("end_succ_IN\n");
                            }else{
                                assert(0);
                            }
                        }
                        //printf("end_while\n");
                    }else{  // unexpected access type
                       // printf("error_type%d\n",succ->_type);
                       /// fflush(stdout);
                        assert(0);
                    }
                }else{
                     // not top: just release the lock
                     slave_my_mutex_unlock(&(access->mutex_access));
                     //slave_my_mutex_unlock(&(self->task_base._dataAccesses._accessArray[i].mutex_access));
                }
            //printf("read_finish_houjichuli\n");
            //fflush(stdout);

            }            
        }
    //    printf("%ptask finish_houjichuli\n",self);
    //    fflush(stdout);
       
        if (parent != NULL) {        // !!!!! the parent's child-task count must be updated under protection here, otherwise execution never finishes
        // printf("父任务%p不为空\n",parent);
        // fflush(stdout);
            //  slave_my_mutex_lock(&(parent->countlock));
            my_atomic_add(newt, &(parent->_countdownToBeWokenUp));
            asm volatile ("memb\n");
            //printf("父任务%p孩子计数为%d\n",parent,newt);
             fflush(stdout);
            if (newt ==-1) { 
                //slave_my_mutex_unlock(&(parent->countlock));
                // fflush(stdout);
#ifdef DEBUG_PBH
                 printf("解锁前处理父任务%p的后继%d\n",parent,parent->_countdownToBeWokenUp);
#endif
                // slave_my_mutex_unlock(&(parent->countlock));
                /* last child of the parent: recurse to release the parent's successors */
                return slave_execute((struct aceMesh_task *)parent);
            }
            //   slave_my_mutex_unlock(&(parent->countlock));
           
        }
        
     //printf("finish_houjichuli\n");

    } else {
#ifdef DEBUG_PBH
        printf("复合任务%p后继处理\n",self);
#endif
        // int i;
        // for (i = 0; i < self->task_base._dataAccesses._currentIndex; i++) {
        //     access = &(self->task_base._dataAccesses._accessArray[i]);
        //     struct DataAccess *child = access->_child;
        //     if (child != NULL) {
        //         my_atomic_add(newt, &((child->_originator)->_predecessorCount));
        //         if (newt == -1) {
        //             slave_spawn_to_id(child->_originator);
        //         }

        //         access->_child = NULL;
        //     }
        // } 
        
    }
// t_time=rtc()-t_time;
// handle_time+=t_time;
#else
#ifdef USE_SIMD
    first = self->successor_tasks;
    if (size > 1) {  // more successors, use simd_load

        AM_assert1(((unsigned long)first) % (VECTOR_SIZEb / 8) == 0);
        while (size > 0) {
            int ub = mymin(size, LDM_NEIGHBORS);
            // printf("success_tasks_num=%d\n",ub);
            // simd loading: bulk-copy successor pointers into the LDM buffer
            for (i = 0; i < ub; i += VECTOR_DBL) {
#ifdef TARG_SW5
                simd_load(*((doublev4 *)(&succ[i])), first);
                first = (struct task *)((unsigned long)first + 32);  // skip 32 bytes
#elif TARG_SW9
                // simd_load(*((doublev8*)(&succ[i])),first);   //type conversion error with this
                // usage
                simd_load(*((uint512 *)(&succ[i])), first);
                first = (struct task *)((unsigned long)first + 64);  // skip 64 bytes
#endif
                size -= VECTOR_DBL;
            }

            // deal with successor_task

#ifdef MULTI_PUSH
            struct task *push_arr[3];
            int push_count = 0;
#endif

            for (i = 0; i < ub; i++) {
#ifdef _SERIAL_QUEUE
#ifdef TARG_SW9
                // #if defined(SEG_BUFF) && defined(MASTER)
                //           if(_MYID==local_total_num_threads-N_SLAVE_AGENT)
                //               slave_clear_RegNT_to_need_spawn();
                //           else
                // #endif
                nrecv = recvtaskfromRegNT();
#elif TARG_SW5
                nrecv = recvtaskfromRegNT();  // add by gjie
#endif
#endif
                //          printf("myid=%d,deal_with_success_tasks_size=%d,%d,%p\n",_MYID,i,ub,succ[0],succ[0]->ref_count_t);
                //          fflush(stdout);
                //          if( t=succ[i])
                //          {
                // #ifdef DEBUG
                // printf("successor_ref_count:%d,%p\n",t->ref_count_t,&(t->ref_count_t));
                //            printf("myid=%d,successor_ref_count:%d,%p\n",_MYID,succ[i]->ref_count_t,&(succ[i]->ref_count_t));
                //            fflush(stdout);
                // #endif
                // my_atomic_add(newt,&(t->ref_count_t));
                my_atomic_add(newt, &(succ[i]->ref_count_t));
                if (newt == -1) {
// slave_spawn_to_id(t);
#ifdef MULTI_PUSH
                    /* batch up to 3 ready tasks per queue push */
                    push_arr[push_count] = succ[i];
                    push_count++;
#else
                    slave_spawn_to_id(succ[i]);
#endif
                }
                //}

#ifdef MULTI_PUSH
                if (push_count == 3) {
                    EQ_multi_push(push_arr);
                    push_count = 0;
                } else if (i == ub - 1) {
                    /* end of batch: flush any leftover ready tasks one by one */
                    int j = 0;
                    for (j = 0; j < push_count; j++) {
                        slave_spawn_to_id(push_arr[j]);
                    }
                }
#endif
            }
        }
        /*      for(i=0;i<size;i+=VECTOR_DBL)
              {
                simd_load(*((doublev4*)(&succ[i])),first);
                first=(struct task*)((unsigned long)first+32); //skip 32 bytes
              }
              for( j = 0; j < size; ++j)
                if( t=succ[j]) //TODO: why null succ?
                {
        #ifdef DEBUG
                   printf("successor_ref_count:%d\n",t->ref_count_t);
        #endif
                   my_atomic_add(newt,&(t->ref_count_t));
                   if(newt==-1)
                       slave_spawn_to_id(t);
                }
              */
    }                      // more succ
    else if (size == 1) {  // one succ, no need to use simd
        if (t = *first)    // TODO: why introduce nul successor?
        {
#ifdef DEBUG
            printf("successor_ref_count:%d,%p\n", t->ref_count_t, &(t->ref_count_t));
#endif
            my_atomic_add(newt, &(t->ref_count_t));
            if (newt == -1) slave_spawn_to_id(t);
        }  // non null vertical succ

    }  // one succ

#else
    /* scalar (non-SIMD) successor walk */
    first = self->successor_tasks;
    if (size > 0) {
        // printf("no_simd
        // after_success_tasks_size=%d,%p,%p\n",size,self->successor_tasks[0],*(struct task*)first);
        //      printf("no_simd after_success_tasks_size=%d,",size);
        //      printf("%p,",self->successor_tasks[0]);
        //      printf("%p,",*(struct task*)first);
        //      printf("%p\n",succ[0]);
        for (j = 0; j < size; ++j)
            if (t = self->successor_tasks[j])  // TODO: why null succ?
            {
#ifdef DEBUG
//           printf("successor_ref_count:%d,%p\n",t->ref_count_t,t);
#endif
                my_atomic_add(newt, &(t->ref_count_t));
#ifdef DEBUG
                printf("after_successor_ref_count:%d,t=%p,self=%p\n", t->ref_count_t, t, self);
                fflush(stdout);
#endif
                if (newt == -1) {
                    // assert(0);
                    slave_spawn_to_id(t);
                }
            }
    }  // more succ
#endif
    // #if !defined(LIGHT_BUILD_GRAPH) && !defined(LIGHT_BUILD_GRAPH2)
    vert = self->vertical_task;
// #endif
#ifdef DEBUG
    printf("vert addr: %x,%pi,self=%p\n", vert, vert, self);
    fflush(stdout);
#endif

    // #if !defined(LIGHT_BUILD_GRAPH) && !defined(LIGHT_BUILD_GRAPH2)
    //         my_atomic_add(newt,&(vert->ref_count_t));
    //   #ifdef DEBUG
    //         printf("after_update_vert_successor_ref_count:%d,ver=%p,self=%p,MYid=%d\n",
    //                vert->ref_count_t, vert, self, _MYID);
    //         fflush(stdout);
    //   #endif
    //         if (newt == -1){
    //             slave_spawn_to_id(vert);
    //         }
    // #else

#ifdef _RETURN_VERTICAL
    /// there are some bug,
    /* If the vertical successor becomes ready and prefers a local thread,
     * return it to the caller for immediate execution instead of queueing. */
    if (vert) {
        my_atomic_add(newt, &(vert->ref_count_t));
        if (newt == -1) {
#ifdef _SERIAL_QUEUE
#ifdef TARG_SW9
            nrecv = recvtaskfromRegNT();

#elif TARG_SW5
            nrecv = recvtaskfromRegNT();  // add by gjie
#endif
#endif
#ifdef SEG_BUFF
            if ((vert->affinity_id) >= 0 &&
                (vert->affinity_id) < local_total_num_threads - N_SLAVE_AGENT)
#else
            if ((vert->affinity_id) >= 0 && (vert->affinity_id) < local_total_num_threads)
#endif
            {
#ifdef ACEMESH_SCHEDULER_PROFILING
                slave_inc_reuse_distance((struct aceMesh_task *)(vert), dist);
#endif
#ifdef THREAD_TRACE_LEVEL_1
                trace_time_s = rtc() - begin_time_s;
                slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
                return vert;
            } else {
                slave_spawn_to_id(vert);
            }
        }
    }

//  #ifdef TASK_LIFETIME_PROFILING
//    unsigned long process_end = rtc();
//    if (avr_process_cycles[_MYID] != 0) {
//        avr_process_cycles[_MYID] += (process_end - process_start);
//        avr_process_cycles[_MYID] /= 2;
//    }
//    else {
//        avr_process_cycles[_MYID] = process_end - process_start;
//    }
//  #endif
// #ifdef THREAD_TRACE_LEVEL_1
//    trace_time_s=rtc()-begin_time_s;
//    slave_trace_print(trace_time_s,trace_myid,RTS_event_end);
// #endif
//    return NULL;
#else
    if (vert) {
#ifdef DEBUG
        //        printf("vert_successor_ref_count:%d,%p\n",vert->ref_count_t,vert);
        //        fflush(stdout);
#endif

        my_atomic_add(newt, &(vert->ref_count_t));
#ifdef DEBUG
        printf("after_update_vert_successor_ref_count:%d,ver=%p,self=%p,MYid=%d\n",
               vert->ref_count_t, vert, self, _MYID);
        fflush(stdout);
#endif
        if (newt == -1) {
            slave_spawn_to_id(vert);
        }
    }
#endif
// #endif
#endif  // FAT_TDG
#ifdef TASK_LIFETIME_PROFILING
    /* running average of per-task successor-processing cycles */
    unsigned long process_end = rtc();
    if (avr_process_cycles[_MYID] != 0) {
        avr_process_cycles[_MYID] += (process_end - process_start);
        avr_process_cycles[_MYID] /= 2;
    } else {
        avr_process_cycles[_MYID] = process_end - process_start;
    }
#endif
#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s = rtc() - begin_time_s;
    slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
    return NULL;
}
