#ifdef TARG_SW5
#include <share.h>
#endif
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include "aceMesh_task.h"
#include "aceMesh_utils.h"
#if (defined TARG_SW5) || (defined TARG_SW9)
#include <stdbool.h>
#endif
#include <string.h>
#include "am_machine.h"
#include "utils/acelog.h"

#if defined(SUCC_BATCH) || defined(SEG_BUFF)
#include "athread.h"
#endif

#ifdef MEMORY_POOL
#include "MemPool.h"
#endif
#if defined(EMP_MASTER)&& defined(TARG_SW9)  
__uncached volatile unsigned long suc_buff;
__uncached volatile int suc_status=0;
__uncached volatile int suc_num=0;
#endif
extern int sche_num_threads;
extern int total_num_threads;
extern void *cur_super_taskptr;
extern int cgid;
#ifdef TARG_SW9
extern unsigned long seg_succ_addr[64];
extern int agent_id;
#endif
#ifdef DEBUG
extern int my_mpi_rank;
#endif

#ifdef SUCC_BATCH
extern struct m2s_detail M2S_details_64[64];
extern struct m2s_detail M2S_detail_temp_64[64];
#ifdef LOCAL_FLAG_M2S
extern __thread volatile int m2s_flag;
#else
extern volatile int m2s_flag[64];
#endif
#endif

#ifdef ACEMESH_TIME
extern double blocking_pure_exec_time;
extern double all_pure_exec_time[65];
extern double all_task_time[65];
extern double all_pure_exec_time[65];
extern unsigned long master_cycle_time_start;
extern unsigned long master_cycle_time_end;
extern unsigned long blocking_cycle_time_end;
double temp_time=0.0;
extern double master_seg_sche_time;
extern unsigned long rpcc();
//{
//  unsigned long time;
//  asm("rtc %0":"=r"(time):);
//  return time;
//}
#endif

#ifdef THREAD_TRACE                                                                                     
#include "thread_trace.h"
extern unsigned long trace_time;
extern unsigned long begin_time;
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
//extern int max_num_successor;
extern int num_successor[65];
/*
extern unsigned long execute_start;
extern unsigned long execute_end;
extern int total_num_threads;
*/
extern unsigned long total_nedges;
#endif
#ifdef ACEMESH_GRAPH_BUILD_PROFILING
extern unsigned long buil_prof[N_BUILD_PROF];
#endif

#define decrement_ref_count(self) (--((struct task*)self)->ref_count_t)
#ifdef TARG_SW5
#define my_atomic_add(_new_, _addr_)                                                   \
    {                                                                                  \
        asm volatile("ldw_inc   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                       \
    }
#elif TARG_SW9
#define my_atomic_add(_new_, _val_)                                    \
    {                                                                  \
        _new_ = __sync_val_compare_and_swap(&(_val_), _val_, ++_val_); \
    } 
//#define my_atomic_add(new, ref){\
     suc_buff=(unsigned long)&ref;\
     asm volatile("memb\n");\
     suc_status=1;\
    asm volatile("memb\n");\
    while(suc_status);\
    new=suc_num;\
}

#endif

#ifdef MEMORY_POOL
extern struct MemPool pool;
#endif

/*void set_task_type(struct aceMesh_task* self,task_type type)
{
    self->my_type = type;
}*/
/* Return the task-type tag recorded on this aceMesh task. */
task_type get_task_type(struct aceMesh_task *self)
{
    return self->my_type;
}

#ifdef MASTER
#ifdef MPI_SURPPORT
/* Query whether this task is currently marked as suspended (MPI mode). */
int get_task_suspend(struct aceMesh_task *self)
{
    return self->suspend;
}

/* Mark this task suspended (mode != 0) or runnable (mode == 0). */
inline void set_task_suspend(struct aceMesh_task *self, int mode)
{
    self->suspend = mode;
}
#endif

#ifdef ACEMESH_PARA
#define mymin(a,b) ((a<b)?(a):(b))
#define mymax(a,b) ((a>b)?(a):(b))
extern int gid1,gid2,mpre_id;
extern char npar[65][MAXT];
extern char waitcomm[MAXT];

#endif

#if defined(SUCC_BATCH) && (_SERIAL_QUEUE)
extern void emp_d_master_consum_all();
#endif
int num_execute=0;
/*
 * Master-core post-execution hook: after this task's work finishes, walk its
 * successor list, atomically bump each successor's ref_count_t, and release
 * every successor whose count reaches -1 (i.e. all predecessors are done).
 * Depending on build flags the ready successors are spawned directly
 * (spawn_to_id), batched per slave core (SUCC_BATCH), pushed through a
 * segment buffer (SEG_BUFF), or handed off to an agent core (EMP_MASTER).
 * With _RETURN_VERTICAL a ready vertical successor bound to this thread may
 * be returned for inline execution; otherwise the function returns NULL.
 */
struct task* execute(struct aceMesh_task* self)		//modify TODO
{
   // printf("begin_master_execute\n");
    unsigned int size = self->successor_count;  /* number of recorded successors */
	int new=0;   /* a successor's ref count as observed after the atomic add */
	int j,i,dist;
	struct task* t = NULL;
	struct task* vert=NULL;
#ifdef THREAD_TRACE_LEVEL_1
    trace_time=rpcc()-begin_time;
    trace_print(trace_time, '0', RTS_execute_successor);
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
    /* NOTE(review): mymax is only defined under ACEMESH_PARA in this file --
     * confirm this profiling flag is never enabled without ACEMESH_PARA. */
    num_successor[total_num_threads]=mymax(num_successor[total_num_threads],size);
#endif
    alog_debug("master core task self addr: %x", self);
    //printf("master core task self addr: %x\n", self);
    //fflush(stdout);
#ifdef REUSE_GRAPH
    /* DAG reuse: restore ref_count_t from the stored backup. */
    if (self->task_base.reused) self->task_base.ref_count_t = self->task_base.backup_ref;
#endif

#ifdef SUCC_BATCH
	int destid;
	// struct m2s_detail M2S_detail_temp_64[64];
	// int t_num[16]={0};
	int k,flag_tmp;
	// int pending_pe[16]={-1};
	int total_task_num;
#endif

/*#ifdef ACEMESH_SCHEDULER_PROFILING
	dist=self->reuse_distance;
	if (dist > 0)
	{
		++sum_vert_times;
		if (dist > maxReuseChain)
		{
			maxReuseChain = dist;
		}
	}
#endif*/

#ifdef ACEMESH_TIME
    /* Charge cycles since the last checkpoint to this thread's totals. */
    master_cycle_time_end = rpcc();
    temp_time = (double)(master_cycle_time_end - master_cycle_time_start) / MFREQ;
    // all_pure_exec_time[total_num_threads]+=(double)(master_cycle_time_end-master_cycle_time_start)/FREQ;
    all_pure_exec_time[total_num_threads] += temp_time;
    if (get_task_type(self) != BLOCKING_TASK) blocking_pure_exec_time += temp_time;
#endif

#ifdef ACEMESH_PARA
    //    printf("gid1:%d, gid23:%d\n",gid1,gid2);
    gid2 = mymin(gid2, MAXT - 1);
    if (gid2 > mpre_id) {
        gid1 = mymax(gid1, mpre_id + 1);
        if (get_task_type(self) != BLOCKING_TASK) {
            for (i = gid1; i <= gid2; i++) npar[total_num_threads][i] = 1;
        } else {
            /* Blocking tasks mark this id range as waiting on communication. */
            for (i = gid1; i <= gid2; i++) {
                waitcomm[i] = 1;
                //                printf("wait_comm_id:%d\n",i);
            }
        }
        mpre_id = gid2;
    }
#endif

#ifdef MPI_SURPPORT
#ifdef DEBUG
    alog_debug("task_execute :  type:%d,,suspend:%d\n", get_task_type(self),
               get_task_suspend(self));
#endif

    /* A suspended blocking task is re-queued through the MPI spawner instead
     * of having its successors released now. */
    if (get_task_type(self) == BLOCKING_TASK && get_task_suspend(self)) {
#ifdef DEBUG
        alog_debug("rank:%d, suspend_spawn\n",my_mpi_rank);
#endif

//dead lock test 0615
//#ifdef TEMP_MODI_MPI
        mpi_spawn(self);
        return NULL;
//#endif
    }
#endif

#if defined(EMP_MASTER)&&defined(TARG_SW9)
    /* Agent hand-off: publish self through the uncached mailbox (suc_buff /
     * suc_status) and let the agent core release the successors. */
//   printf("master_wait:%x\n",self);
    num_execute++;
    while(suc_status){
   // printf("master_wait\n");
    }
    
    suc_buff=(unsigned long)self;
    asm volatile("memb\n");
    suc_status=1;
    asm volatile("memb\n");
    printf("master_execute:%d,%x\n",num_execute,self);
    //fflush(stdout);
    return NULL;
#endif

#ifdef SEG_BUFF
    int temp_seg=-1;
#ifdef ACEMESH_SCHEDULER_PROFILING
    unsigned long long master_cycle_start=rpcc();
#endif
//    temp_seg=my_h2ldm(seg_succ_addr[agent_id],agent_id,cgid,int);
//    printf("need_agent:%p,seg=%d\n",self,temp_seg);   
//    fflush(stdout);
//  if(temp_seg==SEG_MASTER)
//      spawn_to_pending_succ(self);
//  else
        spawn_to_buff_succ(self);
#ifdef ACEMESH_SCHEDULER_PROFILING
    //master_seg_sche_time+=(float)(rpcc()-master_cycle_start)/MFREQ;
#endif
#else

#ifndef SEG_BUFF

/*    if(size > 130){
        printf("size: %d\n",size);
    }
*/
	
    /* Release every recorded successor: bump its ref count and spawn it once
     * the count reaches -1 (all predecessors have finished). */
    for (j = 0; j < size; ++j)
        if (t = self->successor_tasks[j]) {
#ifdef TARG_SW5
            my_atomic_add(new, &(t->ref_count_t));
#elif TARG_SW9
            my_atomic_add(new, t->ref_count_t);
#endif
#ifdef DEBUG
            alog_debug("rank%d, execute ref_count:%d\n", my_mpi_rank, t->ref_count_t);
#endif
            if (new == -1) {
#ifdef SUCC_BATCH
                destid = t->affinity_id;
                if (destid == sche_num_threads) {
                    spawn_to_id(t);
                } else {  // collect the successor for batched delivery to slave core destid
                    if (M2S_detail_temp_64[destid].task_num < PENDING_TASK_NUM) {
                        M2S_detail_temp_64[destid]
                            .task_detail[M2S_detail_temp_64[destid].task_num] = t;
                        M2S_detail_temp_64[destid].task_num++;
                    } else {
                        printf("m2s_temp[%d] is full\n", destid);
                        assert(0);
                    }
                }
#else
                spawn_to_id(t);
#endif
            }
        }

#if !defined(LIGHT_BUILD_GRAPH)
/* Full graph build: the vertical task (if any) already sits in the successor
 * array, so nothing extra to do here. */
#else
    vert=self->vertical_task;
//    alog_debug("self addr: %x", self);
//    alog_debug("vert addr: %x", vert);
//    //after composite execute reset build ctx
//    if (self->my_type == COMPOSITE_TASK) {
//        cur_super_taskptr = NULL;
//    }

  #ifdef _RETURN_VERTICAL
  /* NOTE(review): in this path vert is dereferenced before the NULL check
   * below, and its ref count is bumped a second time inside "if (vert)" --
   * looks suspicious; confirm the _RETURN_VERTICAL configuration. */
  #ifdef TARG_SW5
        my_atomic_add(new, &(vert->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new, (vert->ref_count_t));
#endif

        alog_debug("after my_atomic_add vert: %x", vert);
        if (new == -1) {
/*#ifdef ACEMESH_SCHEDULER_PROFILING
            slave_inc_reuse_distance((struct aceMesh_task*)(vert), dist);
#endif*/
#ifdef SUCC_BATCH
              destid = vert->affinity_id;
              if(destid==sche_num_threads){
                  spawn_to_id(vert);
              }else{// collect the successor for batched delivery
                  if(M2S_detail_temp_64[destid].task_num<PENDING_TASK_NUM){
                    M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num] =
                        vert;
                    M2S_detail_temp_64[destid].task_num++;
                  }else{
                    printf("m2s_temp[%d] is full\n",destid);
                    assert(0);
                  }
              }
#else
              spawn_to_id(vert);
#endif	
        }
    if (vert) {
#ifdef TARG_SW5
        my_atomic_add(new, &(vert->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new, (vert->ref_count_t));
#endif
        if (new == -1) {
            /* Vertical task bound to this thread: return it for inline
             * execution instead of spawning it. */
            if ((vert->affinity_id) == total_num_threads) {
#ifdef ACEMESH_SCHEDULER_PROFILING
                slave_inc_reuse_distance((struct aceMesh_task *)(vert), dist);
#endif
#ifdef THREAD_TRACE_LEVEL_1
                trace_time = rpcc() - begin_time;
                trace_print(trace_time, '0', RTS_event_end);
#endif
                return vert;
            } else {
                spawn_to_id(vert);
            }
        }
    }
    // return NULL;
  #else
    if (vert) {
        alog_debug("before my_atomic_add: %d", vert->ref_count_t);

#ifdef TARG_SW5
        my_atomic_add(new, &(vert->ref_count_t));
#elif TARG_SW9
        my_atomic_add(new, (vert->ref_count_t));
#endif

        alog_debug("after my_atomic_add vert: %x", vert);
        if (new == -1) {
/*#ifdef ACEMESH_SCHEDULER_PROFILING
            slave_inc_reuse_distance((struct aceMesh_task*)(vert), dist);
#endif*/
#ifdef SUCC_BATCH
              destid = vert->affinity_id;
              if(destid==sche_num_threads){
                  spawn_to_id(vert);
              }else{// collect the successor for batched delivery
                  if(M2S_detail_temp_64[destid].task_num<PENDING_TASK_NUM){
                    M2S_detail_temp_64[destid].task_detail[M2S_detail_temp_64[destid].task_num] =
                        vert;
                    M2S_detail_temp_64[destid].task_num++;
                  }else{
                    printf("m2s_temp[%d] is full\n",destid);
                    assert(0);
                  }
              }
#else
              spawn_to_id(vert);
#endif	
        }
    }
#endif
    //return NULL;
#endif

#ifdef SUCC_BATCH

#ifdef SUCC_BATCH_V1
    /* Flush the per-slave batches: for each slave core whose flag says it has
     * consumed the previous batch, publish the collected tasks to it. */
      for(i=0;i<sche_num_threads;i++){
          total_task_num=M2S_detail_temp_64[i].task_num;
          if(total_task_num>0){// this slave core has pending tasks
#ifdef LOCAL_FLAG_M2S
              if((flag_tmp=h2ldm(m2s_flag,i,cgid))==1){
#else
              if(m2s_flag[i]==1){
#endif
                  for(k=0;k<total_task_num;k++){// master publishes the batched tasks
                    M2S_details_64[i].task_detail[k] = M2S_detail_temp_64[i].task_detail[k];
                  }

                  M2S_details_64[i].task_num=total_task_num;

                  /* NOTE(review): total_task_num>0 was already checked above,
                   * so the else branch below looks unreachable -- confirm. */
                  if(total_task_num>0){// this slave core has pending tasks
                  asm volatile ("memb\n");
#ifdef LOCAL_FLAG_M2S
                  h2ldm(m2s_flag,i,cgid)=0;
#else
                  m2s_flag[i]=0;
#endif
                  asm volatile ("memb\n");
                  M2S_detail_temp_64[i].task_num=0;
                  M2S_detail_temp_64[i].is_pending=0;
               }else{				
                   M2S_detail_temp_64[i].is_pending=1;			
               }
            }
        }
    }
#else
       emp_d_master_consum_all();
       /*
       for(i=0;i<sche_num_threads;i++){
           total_task_num=M2S_detail_temp_64[i].task_num;
           if(total_task_num>0){// this slave core has pending tasks
#ifdef LOCAL_FLAG_M2S
              while((flag_tmp=h2ldm(m2s_flag,i,cgid))==0);
#else
              while(m2s_flag[i]==0);
#endif
              for(k=0;k<total_task_num;k++){// master publishes the batched tasks
                  M2S_details_64[i].task_detail[k]
                        =M2S_detail_temp_64[i].task_detail[k];
              }		
              M2S_details_64[i].task_num=total_task_num;
              asm volatile ("memb\n");
#ifdef LOCAL_FLAG_M2S
              h2ldm(m2s_flag,i,cgid)=0;
#else
              m2s_flag[i]=0;
#endif
              asm volatile ("memb\n");
              M2S_detail_temp_64[i].task_num=0;
           }
	    }
        */
#endif	

#endif

#endif

#endif
#ifdef THREAD_TRACE_LEVEL_1
    trace_time=rpcc()-begin_time;
    trace_print(trace_time, '0', RTS_event_end);
#endif
    return NULL;
}

#endif

#ifdef SAVE_RW_INFO
/* Append one read/write address record to the task's rw_addrs array,
 * growing the array by ADDR_CHUNKMORE entries when it is full.
 * Exits the process if the grow fails. */
void add_addr(struct aceMesh_task* self,void* addr, int area_type, int rw_type, int is_neighbor)
{
    if (self->rw_addrs_count >= self->capacity_addrs) {
        self->capacity_addrs += ADDR_CHUNKMORE;
        struct addr_info *grown = (struct addr_info *)realloc(
            self->rw_addrs, sizeof(struct addr_info) * (self->capacity_addrs));
        if (grown == NULL) {
            printf("cannot allocate space for new_rw_addrs\n");
            exit(1);
        }
        self->rw_addrs = grown;
    }
    /* Fill the next free slot and advance the count. */
    struct addr_info *slot = &self->rw_addrs[self->rw_addrs_count++];
    slot->addr = addr;
    slot->area_type = area_type;
    slot->rw_type = rw_type;
    slot->is_neighbor = is_neighbor;
}
#endif

//struct aceMesh_task  aceMesh_task_constructor(aceMesh_task* self)
/*
 * Initialize an aceMesh task in place: base-task state, type tag, and the
 * initial successor array (ADDR_CHUNKINIT slots; the allocator depends on
 * build flags), plus all optional bookkeeping fields enabled at compile time.
 */
void aceMesh_task_constructor(aceMesh_task* self)
{
    //struct aceMesh_task self ;
    //self->task_base = task_constructor();
    task_constructor(&(self->task_base));
#ifdef REUSE_GRAPH
    self->task_base.is_base_task = false;
#endif
    self->vertical_task = NULL;
    self->my_type = NOT_SET;
    self->capacity_addrs=ADDR_CHUNKINIT;
    self->successor_tasks = NULL;
    self->last_successor = NULL;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
    /* SIMD builds need wider alignment: 32 bytes on SW5, 64 on SW9. */
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) * ADDR_CHUNKINIT);
#elif TARG_SW9
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) * ADDR_CHUNKINIT);
#endif
#else
    self->successor_tasks =
        (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) * ADDR_CHUNKINIT);
#endif
#else
#ifdef TARG_SW5
    self->successor_tasks = (struct task **)malloc(sizeof(struct task *) * ADDR_CHUNKINIT);
#elif TARG_SW9
    // self->successor_tasks=(struct task**)libc_uncached_aligned_malloc(sizeof(struct
    // task*)*ADDR_CHUNKINIT);
    self->successor_tasks =
        (struct task **)my_malloc_aligned(sizeof(struct task *) * ADDR_CHUNKINIT);
#else
    /* No allocator configured for this target. */
    assert(0);
#endif
#endif
    self->successor_count = 0;
    //self->loop_count=0;	//add by gxr 2017/03/13
    self->loop_id = 0;
    self->task_id = 0;
#ifdef DEBUG_GRAPH
    self->ref_count_for_check = 0;
    self->is_joint = 0;
#endif
#ifdef MASTER
#ifdef MPI_SURPPORT
    self->suspend=0;
#endif
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
    self->reuse_distance = 0;
#endif	
#ifdef SAVE_RW_INFO
    self->rw_addrs = NULL; 
    self->rw_addrs= (struct addr_info *)malloc(sizeof(struct addr_info)*ADDR_CHUNKINIT);
    self->rw_addrs_count = 0;
#endif
#ifdef AUTO_PARTITION
    self->group_id = -1;
#endif
#ifdef SUPPORT_PARTITION
    self->my_part_id = -1;
#endif
    //return self;
}

/*
 * Release per-task storage allocated by aceMesh_task_constructor.
 * With MEMORY_POOL the successor array lives in the pool and is not freed
 * here; on unknown targets the array leaks (only a message is printed).
 */
void  aceMesh_task_destructor(aceMesh_task *p_acemesh_task)	
{
#ifndef MEMORY_POOL
#ifdef TARG_SW5
  free(p_acemesh_task->successor_tasks);
#elif TARG_SW9
  my_free_aligned(p_acemesh_task->successor_tasks);
#else
  /* No matching free routine known for this target: leaks. */
  printf("losing free successor\n");
#endif
#endif
#ifdef SAVE_RW_INFO
  free(p_acemesh_task->rw_addrs);   
#endif
}

/* Total number of outgoing edges: recorded successors plus one if a
 * vertical task is attached. */
int  get_total_successor_sum(struct aceMesh_task* self)
{
    int total = self->successor_count;
    if (self->vertical_task != NULL) {
        total += 1;
    }
    return total;
}

/* Attach the terminal task t: stored as a regular successor in the full
 * graph build, or as the vertical task under LIGHT_BUILD_GRAPH.  In both
 * cases t's dependency counter is adjusted for the new incoming edge. */
void add_end_successor(struct aceMesh_task* self,struct task* t)
{
#if !defined(LIGHT_BUILD_GRAPH)
    self->successor_tasks[self->successor_count] = (struct task*)t;
    ++(self->successor_count);
#else
    self->vertical_task = t;
#endif
    decrement_ref_count(t);
}


/*
 * Register t as a successor of self, skipping duplicate edges (full scan of
 * the successor array, plus the vertical task in full graph builds).  Grows
 * the successor array by ADDR_CHUNKMORE entries when it is full and adjusts
 * t's dependency counter for the new edge.
 * Returns self's current ref_count_t so the caller can test readiness.
 */
int add_successor_first_ret(struct aceMesh_task* self,struct aceMesh_task* t)
{
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    unsigned long add_start=rpcc();
#endif
    int i;
    /* Duplicate edge already recorded: nothing to do. */
    for(i = 0; i < self->successor_count; ++i)
    {
        struct task* itr = self->successor_tasks[i];
        if(itr == (struct task*)t)
        {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
            buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
            return self->task_base.ref_count_t;
        }
    }

#if !defined(LIGHT_BUILD_GRAPH)
    if (self->vertical_task == (struct task *)t) {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        return self->task_base.ref_count_t;
    }
#endif
    if (self->successor_count >= self->capacity_addrs) {
        /* Successor array full: grow by a fixed chunk. */
        struct task **new_succ_task;
        self->capacity_addrs+=ADDR_CHUNKMORE;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
        new_succ_task = (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) *
                                                                   self->capacity_addrs);
#elif TARG_SW9
        new_succ_task = (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) *
                                                                   self->capacity_addrs);
#endif
#else
        new_succ_task = (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) *
                                                                   self->capacity_addrs);
#endif
        /* Pool allocator cannot realloc: copy the old entries over. */
        memcpy(new_succ_task, self->successor_tasks,
               sizeof(struct task *) * (self->capacity_addrs - ADDR_CHUNKMORE));
#else
        printf("need realloc successor\n");
        fflush(stdout);
        /* BUGFIX: sizeof yields size_t; printing it with %d was undefined
         * behavior -- use %zu. */
        printf("realloc=%zu\t", sizeof(struct task *) * self->capacity_addrs);
        fflush(stdout);
        new_succ_task = (struct task **)realloc(self->successor_tasks,
                                                sizeof(struct task *) * self->capacity_addrs);
#endif
        if (new_succ_task == NULL) {
            printf("cannot allocate enough space for addr info!\n");
            exit(1);
        }
        self->successor_tasks=new_succ_task;
    }
    self->successor_tasks[self->successor_count] = (struct task*)t;
    ++(self->successor_count);
    self->last_successor = t;
#ifdef ACEMESH_SCHEDULER_PROFILING
    total_nedges++;
//    max_num_successor=mymax(max_num_successor,self->successor_count);
#endif
    /* Account for the new incoming edge on t. */
    decrement_ref_count((struct task*)t);
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
    return self->task_base.ref_count_t;
}

/*
 * Register t as a successor of self (void variant).  Without CLUSTERING only
 * the most recently added successor is checked for duplication; with
 * CLUSTERING the whole successor array is scanned.  Grows the successor
 * array by ADDR_CHUNKMORE entries when full and adjusts t's dependency
 * counter for the new edge.  t == NULL is a no-op.
 */
void add_successor(struct aceMesh_task* self,struct aceMesh_task* t)
{
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    unsigned long add_start=rpcc();
#endif
    int i;
    if(t == NULL) return ;
#ifndef CLUSTERING
    /* Fast duplicate check: the same edge added twice in a row. */
    if (self->last_successor == t) {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        return;
    }
#else
    /* Full duplicate scan over the successor array. */
    for(i = 0; i < self->successor_count; ++i)
    {
        struct task* itr = self->successor_tasks[i];
        if(itr == (struct task*)t)
        {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
            buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
            return;
        }
    }
#endif

#if !defined(LIGHT_BUILD_GRAPH)
    if (self->vertical_task == (struct task *)t) {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
        return;
    }
#endif
    if (self->successor_count >= self->capacity_addrs) {
        /* Successor array full: grow by a fixed chunk. */
        struct task **new_succ_task;
        self->capacity_addrs+=ADDR_CHUNKMORE;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
        new_succ_task = (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) *
                                                                   self->capacity_addrs);
#elif TARG_SW9
        new_succ_task = (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) *
                                                                   self->capacity_addrs);
#endif
#else
        new_succ_task = (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) *
                                                                   self->capacity_addrs);
#endif
        /* Pool allocator cannot realloc: copy the old entries over. */
        memcpy(new_succ_task, self->successor_tasks,
               sizeof(struct task *) * (self->capacity_addrs - ADDR_CHUNKMORE));
#else
        printf("need realloc successor\n");
        fflush(stdout);
        /* BUGFIX: sizeof yields size_t; printing it with %d was undefined
         * behavior -- use %zu. */
        printf("realloc=%zu\t", sizeof(struct task *) * self->capacity_addrs);
        fflush(stdout);
        new_succ_task = (struct task **)realloc(self->successor_tasks,
                                                sizeof(struct task *) * self->capacity_addrs);
#endif
#ifdef DEBUG
        alog_debug("realloc successor\n");
#endif
        if (new_succ_task == NULL) {
            printf("cannot allocate enough space for addr info!\n");
            exit(1);
        }
        self->successor_tasks=new_succ_task;
    }
    self->successor_tasks[self->successor_count] = (struct task*)t;
    ++(self->successor_count);
    self->last_successor = t;
#ifdef ACEMESH_SCHEDULER_PROFILING
    total_nedges++;
//    max_num_successor=mymax(max_num_successor,self->successor_count);
#endif
    /* Account for the new incoming edge on t. */
    decrement_ref_count((struct task*)t);
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
}

#ifdef REUSE_GRAPH
/*
 * Recursively snapshot ref counts of this task and everything reachable
 * from it (vertical task + successors) so the DAG can be replayed under
 * REUSE_GRAPH.  Each task is marked reused/stored at most once; base tasks
 * are snapshotted inline, non-base (aceMesh) tasks recurse.
 */
void store_info(aceMesh_task* self)
{
    /* Already snapshotted on a previous visit. */
    if (self->task_base.stored == true) return;

#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF) 
    unsigned long restore_start=rpcc();
#endif
    store_ref_count(self);
    set_reused_flag(self, true);
    struct task* vt = self->vertical_task;
    if (vt) {
        //printf("store info point 0\n");
        if (vt->is_base_task == false) {
            //printf("store info point 1\n");
            /* Composite task: recurse to snapshot its own subgraph. */
            store_info((struct aceMesh_task*) vt);
        } else if(vt->stored == false) {
            store_ref_count(vt);
            set_reused_flag(vt, true);
            set_stored(vt, true);
        }
    }
    int i;
    /* NOTE(review): base-task successors are not snapshotted here, unlike
     * the vertical-task branch above -- confirm that is intentional. */
    for (i = 0; i < self->successor_count; ++i) {
        struct task* task = self->successor_tasks[i];
        if (task->is_base_task == false) {
            store_info((struct aceMesh_task* )task);
        }
    }
    set_stored(self,true);
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    buil_prof[RESTROE_GRAPH]+=(rpcc()-restore_start);
#endif
}
#endif

/*
 * Record t as the vertical (same-thread) successor of self.  If the vertical
 * slot is already taken, t is stored as a regular successor instead.
 * Duplicate edges are skipped (last-successor fast check, or a full scan
 * with CLUSTERING).  Not supported in LIGHT_BUILD_GRAPH builds.
 * t == NULL is a no-op.
 */
void set_vertical_task(struct aceMesh_task* self,struct aceMesh_task* t)
{
#if defined(LIGHT_BUILD_GRAPH) || defined(LIGHT_BUILD_GRAPH2)
    /* Vertical edges are not modeled in light graph builds. */
    assert(0);
#endif
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    unsigned long add_start=rpcc();
#endif
    if( t == NULL ) return ;
    if(self->vertical_task == (struct task*)t)
    {
        /* Edge already recorded as the vertical task. */
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
        return;
    }
    else {
#ifndef CLUSTERING
        /* Fast duplicate check: the same edge added twice in a row. */
        if (self->last_successor == t) {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
            buil_prof[ADD_SUCC] += (rpcc() - add_start);
#endif
            return ;
        }
#else
        /* Full duplicate scan over the successor array. */
        int i;
        for( i = 0; i < self->successor_count; ++i)
        {
            struct  task* itr = self->successor_tasks[i];
            if(itr == (struct task*)t)
            {
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
                buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
                return;
            }
        }
#endif
        if (self->vertical_task != NULL) {
            /* Vertical slot taken: fall back to a regular successor. */
            if (self->successor_count >= self->capacity_addrs) {
                struct task **new_succ_task;
                self->capacity_addrs+=ADDR_CHUNKMORE;
#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
                new_succ_task = (struct task **)acemesh_myalloc_aligned_32(sizeof(struct task *) *
                                                                           self->capacity_addrs);
#elif TARG_SW9
                new_succ_task = (struct task **)acemesh_myalloc_aligned_64(sizeof(struct task *) *
                                                                           self->capacity_addrs);
#endif
#else
                new_succ_task = (struct task **)acemesh_myalloc_aligned_16(sizeof(struct task *) *
                                                                           self->capacity_addrs);
#endif
                /* Pool allocator cannot realloc: copy the old entries over. */
                memcpy(new_succ_task, self->successor_tasks,
                       sizeof(struct task *) * (self->capacity_addrs - ADDR_CHUNKMORE));
#else
                printf("need realloc successor\n");
                fflush(stdout);
                /* BUGFIX: sizeof yields size_t; printing it with %d was
                 * undefined behavior -- use %zu. */
                printf("realloc=%zu\t", sizeof(struct task *) * self->capacity_addrs);
                fflush(stdout);
                new_succ_task = (struct task **)realloc(
                    self->successor_tasks, sizeof(struct task *) * self->capacity_addrs);
#endif
                if (new_succ_task == NULL) {
                    printf("cannot allocate enough space for addr info!\n");
                    exit(1);
                }
                self->successor_tasks=new_succ_task;
            }
            self->successor_tasks[self->successor_count] = (struct task*)t;
            ++(self->successor_count);
            self->last_successor = t;
            decrement_ref_count((struct task*)t);
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
            buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
            total_nedges++;
#endif
            return ;
        }
        /* Vertical slot free: take it and account for the new edge on t. */
        self->vertical_task = (struct task*)t;
        decrement_ref_count((struct task*)t); 
#ifdef ACEMESH_SCHEDULER_PROFILING
        total_nedges++;
#endif
    }
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    buil_prof[ADD_SUCC]+=(rpcc()-add_start);
#endif
}

#ifdef PAPI_PERFORMANCE
/* PAPI instrumentation hooks: compiled as no-ops in this build. */
void papi_performance_start() {}
void papi_performance_end() {}
#endif

/* Record the loop id this task belongs to. */
void set_loop_id(struct aceMesh_task *self, int id)
{
    self->loop_id = id;
}

/* Record this task's numeric id. */
void set_task_id(struct aceMesh_task *self, int id)
{
    self->task_id = id;
}


/* Return the loop id recorded on this task. */
int get_loop_id(struct aceMesh_task *self)
{
    return self->loop_id;
}

/* Return this task's numeric id. */
int get_task_id(struct aceMesh_task *self)
{
    return self->task_id;
}

#ifdef DEBUG_GRAPH
/* Return the graph-validation reference counter (DEBUG_GRAPH only). */
int get_ref_count_for_check(struct aceMesh_task *self)
{
    return self->ref_count_for_check;
}

/* Bump the graph-validation reference counter and return its new value. */
int inc_ref_count_for_check(struct aceMesh_task* self)
{
    return ++(self->ref_count_for_check);
}
/* Overwrite the graph-validation reference counter. */
void set_ref_count_for_check(struct aceMesh_task* self,int value)
{
    self->ref_count_for_check = value;
}

/* Flag this task as an end (joint) task for graph validation. */
void set_joint(struct aceMesh_task *self)
{
    self->is_joint = 1;
}
/* Nonzero if this task has been flagged as an end (joint) task. */
int is_end_task(struct aceMesh_task *self)
{
    return self->is_joint;
}



/* Return the vertical successor, downcast to an aceMesh task (may be NULL). */
struct aceMesh_task *get_vertical_task(struct aceMesh_task *self)
{
    struct task *vt = self->vertical_task;
    return (struct aceMesh_task *)vt;
}
/* Return the i-th recorded successor, downcast to an aceMesh task. */
struct aceMesh_task *get_successor_task(struct aceMesh_task *self, int i)
{
    struct task *succ = self->successor_tasks[i];
    return (struct aceMesh_task *)succ;
}

/*
 * Debug-only DFS over the task graph: simulates ref-count propagation to
 * detect orphan tasks (no successors and not flagged as an end task).
 * *task_nums is decremented once per task visited so the caller can verify
 * that every task was reached.
 */
void dfs(struct aceMesh_task *self, int *task_nums, int deep_length)
{
    unsigned int i;
    if (self->is_joint == 0 && get_successor_sum(self) == 0) {
        printf("alone task! in deep length : %d ", deep_length);
        printf("warining! add end to this task");
        assert(0);
    }
    /* BUGFIX: the original "--task_nums;" decremented the local pointer,
     * which has no effect for the caller; decrement the counter it points
     * to instead. */
    --(*task_nums);
    if (self->vertical_task != NULL) {
        struct aceMesh_task *t = (struct aceMesh_task *)(self->vertical_task);
        /* -1 marks "not yet visited": seed the check counter lazily. */
        if (t->ref_count_for_check == -1) {
            t->ref_count_for_check = ref_count((t->task_base));
        } else {
            assert(t->ref_count_for_check > 0);
        }
        --(t->ref_count_for_check);
        /* All incoming edges consumed: descend into the subtree. */
        if (t->ref_count_for_check == 0) {
            dfs(t, task_nums, deep_length + 1);
        }
    }
    for (i = 0; i < self->successor_count; ++i) {
        struct task *tmp;
        if ((tmp = self->successor_tasks[i])) {
            struct aceMesh_task *t = (struct aceMesh_task *)(tmp);
            if (t->ref_count_for_check == -1) {
                t->ref_count_for_check = ref_count((t->task_base));
            } else {
                assert(t->ref_count_for_check > 0);
            }
            --(t->ref_count_for_check);
            if (t->ref_count_for_check == 0) {
                dfs(t, task_nums, deep_length + 1);
            }
        }
    }
}

/* Number of successors in the successor array (vertical task excluded). */
int get_successor_sum(struct aceMesh_task *self)
{
    return self->successor_count;
}

#endif
