#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#include "aceMesh_concurrent_task.h"
#include "simd.h"
#include "slave.h"
#include "trace_out.h"
// #include "share.h"
#include "aceMesh_runtime.h"  //for LDM_NEIGHBOR
#include "aceMesh_utils.h"
#include "am_assert.h"
#include "am_machine.h"

#ifdef THREAD_TRACE_LEVEL_1
#include "thread_trace.h"
extern __thread_local_fix unsigned long begin_time_s;
extern __thread_local_fix unsigned long trace_time_s;
extern __thread_local_fix int trace_myid;
#endif

extern __uncached volatile  int slave_lock;
extern __uncached volatile int master_lock;
extern __thread_local_fix int local_sche_num_threads;
extern __thread_local_fix int local_total_num_threads;

#define decrement_ref_count(self) (--((struct task *)self)->ref_count_t)

extern __thread_local_fix unsigned long cycle_times_start;
extern __thread_local_fix unsigned long cycle_times_end;
extern __thread_local_fix double pure_exec_times;
extern volatile int total_exec_nums;
__thread_local_fix int pop = 0;
#ifdef TARG_SW5
/* Atomic fetch-and-add on the 32-bit word at _addr_ using the SW5 "faaw"
 * instruction; _new_ ends up as the fetched value plus one, apparently the
 * post-increment counter value -- TODO confirm exact faaw semantics against
 * the SW5 ISA manual. */
#define my_atomic_add(_new_, _addr_)                                                \
    {                                                                               \
        asm volatile("faaw   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                    \
    }
/* Atomic update of the word at _addr_ by _n_, fenced with "memb" barriers on
 * both sides.  NOTE(review): the vshff/ldi/updt sequence is opaque from here
 * -- verify against the SW5 ISA manual that it really performs the intended
 * atomic subtraction. */
#define my_atomic_sub(_n_, _addr_)      \
    {                                     \
        unsigned long __tmp__;            \
        asm volatile(                     \
            " memb\n\t"                   \
            "vshff $31, %1, 0x01, %1\n\t" \
            "ldi    %0, 6(%1)\n\t"        \
            "updt   %0, 0(%2)\n\t"        \
            " memb\n\t"                   \
            : "=r"(__tmp__)               \
            : "r"(_n_), "r"(_addr_)       \
            : "memory");                  \
    }
#elif TARG_SW9
/* SW9 variant: 64-bit fetch-and-add ("faal"); same post-increment convention
 * as the SW5 version above.
 * NOTE(review): "#elif TARG_SW9" requires TARG_SW9 to be defined to a nonzero
 * value (e.g. -DTARG_SW9=1); "#elif defined(TARG_SW9)" would be more robust. */
#define my_atomic_add(_new_, _addr_)                                                \
    {                                                                               \
        asm volatile("faal   %0,0(%1)\n\t" : "=r"(_new_) : "r"(_addr_) : "memory"); \
        _new_++;                                                                    \
    }
#endif


#ifdef CONCURRENT_CONSTRUCT_GRAPH
/* Publish a new value into a task-state flag, then issue a full memory
 * barrier ("memb") so the store becomes visible to other cores before any
 * subsequent accesses from this core.  The store-then-barrier order is
 * essential; do not reorder. */
inline void slave_set_task_state(int val, volatile int *state)
{
    *state = val;
    asm volatile("memb\n");
}
#endif

/*void slave_my_mutex_lock(volatile int* my_lock)
{
#ifdef TARG_SW5

#ifdef MUTEX_BASIC
    unsigned int __addr,__tmpi;
    __asm__ __volatile__(
        "        memb\n"
        "        ldi      %1,%2\n"
        "1:      faaw     %0,0(%1)\n"
        "        bne      %0,1b\n"
        "        memb\n"
      : "=&r"  (__tmpi),
        "=&r"  (__addr)
      :  "m" (*my_lock)
      : "memory");
    return;

#else

    unsigned int __tmp = 0;
    unsigned int __cnt;
    __asm__ __volatile__ (
    "0:     ldw     %[__tmp], %[my_lock]\n"
    "       beq     %[__tmp], 2f\n"
    "       ldi     %[__cnt], 50\n"
    "       sll     %[__cnt], 4, %[__cnt]\n"
    "1:     subw    %[__cnt], 1, %[__cnt]\n"
    "       bne     %[__cnt], 1b\n"
    "       br      0b\n"
    "2:     faaw    %[__tmp], %[my_lock]\n"
    "       bne     %[__tmp], 0b\n"
    "       memb    \n"
    "       br      3f\n"
    "3:     unop    \n"
    : [__tmp] "=&r"(__tmp), [__cnt] "=&r"(__cnt)
    : [my_lock] "m"(*(my_lock))
    : "memory");
    return;
#endif
#else
    assert(0);
#endif
}
int slave_my_mutex_unlock(volatile int* my_lock)
{
#ifdef TARG_SW5

#ifdef MUTEX_BASIC
    unsigned int __addr ;
    __asm__ __volatile__(
        "        memb\n"
        "        ldi      %0,%1\n"
        "        stw      $31,0(%0)\n"
        "        memb\n"
      : "=&r"  (__addr)
      : "m" (*my_lock)
      : "memory");
    return;
#else

    unsigned int __tmp = 0;
    __asm__ __volatile__ (
    "       memb    \n"
    "       mov     0, %[__tmp]\n"
    "       stw     %[__tmp], %[my_lock]\n"
    : [__tmp] "=&r"(__tmp)
    : [my_lock] "m"(*(my_lock))
    : "memory"
    );
    return;
#endif
#else
    assert(0);
#endif
}
*/
#ifdef ACEMESH_SCHEDULER_PROFILING
__thread_local_fix long sum_vert_times;
__thread_local_fix int maxReuseChain;
__thread_local_fix long reuse_times;
__thread_local_fix int local_num_succ = 0;
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING

/* Extend this task's recorded reuse distance: the chain grows by the
 * predecessor's distance plus one hop. */
void slave_inc_reuse_distance(struct aceMesh_task *self, int last_reuse_distance)
{
    int step = last_reuse_distance + 1;
    self->reuse_distance = self->reuse_distance + step;
}

int slave_get_reuse_distance(struct aceMesh_task *self) { return self->reuse_distance; }
#endif
task_type get_task_type(struct aceMesh_task *self) { return self->my_type; }

#ifdef ACEMESH_PARA
//__threal_local_fix long cur_time;
extern __thread_local_fix unsigned long t_init;
extern __thread_local_fix unsigned long b_build;
extern __thread_local_fix int global_id1, global_id2, pre_id;
extern __thread_local_fix int dma_id1, dma_id2;
extern __thread_local_fix int local_id1, local_id2;
extern __thread_local_fix char ldm_npar[BUF_SIZE];
extern __thread_local volatile unsigned long put_reply;
extern char npar[65][MAXT];
#define mymin(a, b) ((a < b) ? (a) : (b))
#define mymax(a, b) ((a > b) ? (a) : (b))
#endif

#ifdef TASK_LIFETIME_PROFILING
extern unsigned long avr_process_cycles[64];
#endif

extern void slave_spawn_to_id(struct task *t);

#ifdef TARG_SW9
extern void slave_clear_RegNT_to_need_spawn();
#endif

#ifdef MULTI_PUSH
extern void EQ_multi_push(struct task *t[3]);
#endif

/*
 * slave_execute: post-completion bookkeeping for task *self, run on a slave
 * core.  It atomically bumps the reference count of every successor task and
 * spawns those that become ready (an updated count of -1 appears to mean
 * "all predecessors finished" -- TODO confirm this ref-count convention),
 * then does the same for the optional vertical (chained) task.  Under
 * _RETURN_VERTICAL, a ready vertical task whose affinity maps to a local
 * thread is returned to the caller for immediate execution; in every other
 * case the function returns NULL.
 */
struct task *slave_execute(struct aceMesh_task *self)
{
#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s = rtc() - begin_time_s;
    slave_trace_print(trace_time_s, trace_myid, RTS_execute_successor);
#endif
#ifdef TASK_LIFETIME_PROFILING
    unsigned long process_start = rtc();
#endif
    int val = 0;
    int updt_val=1;
    /*
    slave_my_mutex_lock(&(self->task_base.finished_lock));
    my_atomic_add(val,&(self->task_base.state));
    slave_my_mutex_lock(&(self->task_base.successor_lock));
    slave_my_mutex_unlock(&(self->task_base.finished_lock));
    */
   // printf("%d_before_execute:%p\n",_MYID,self);
  /* Serialize against concurrent graph construction / other readers of this
   * task's successor list. */
  slave_my_mutex_lock(&(self->task_base.successor_lock));

   //printf("%d_after_lock\n",_MYID);
    int size = self->successor_count;
#ifndef BIG_CR_REGION
    /* Mark the task finished and drop the lock early; under BIG_CR_REGION the
     * whole routine instead stays inside the critical section (see the
     * matching unlock near the end of the function). */
    slave_set_task_state(1, &(self->task_base.over));
    slave_my_mutex_unlock(&(self->task_base.successor_lock));
    asm volatile("memb\n");
#endif
   // printf("%d_in_execute\n",_MYID);
    int j, i, dist;
    int newt = 0;
#ifdef _SERIAL_QUEUE
    int nrecv;
#endif
#ifdef ACEMESH_PARA
    int tsize;
    // volatile int reply;
#endif
    struct task *t = NULL;
    struct task *vert, **first, *tmpt[VECTOR_DBL]; /* NOTE(review): tmpt appears unused */
    struct task *succ[LDM_NEIGHBORS];
#ifdef ACEMESH_SCHEDULER_PROFILING
    local_num_succ = mymax(local_num_succ, size);
#endif
#if DEBUG_GRAPH
#ifdef SAVE_RW_INFO
    FILE *out = slave_get_file();
    if (out == NULL) {
        printf("error on file");
        fflush(stdout);
    }
#endif
#endif
    // DAG reuse: restore ref_count_t from the backup saved when the graph
    // was first built, so the reused vertex starts from the right count.
#ifdef REUSE_GRAPH
    if (self->task_base.reused) self->task_base.ref_count_t = self->task_base.backup_ref;
#endif

#ifdef DEBUG
//    printf("successor_size:%d\n",size);
#endif
#if DEBUG_GRAPH
    slave_print_to_internal_thread_file("loop_id, %d, task_id, %d, task_type, %d, ", self->loop_id,
                                        self->task_id, self->my_type);
#ifdef SAVE_RW_INFO
    for (i = 0; i < self->rw_addrs_count; ++i) {
        struct addr_info *itr = &(self->rw_addrs[i]);
        fputs(" addr:", out);
        fprintf(out, "%p", itr->addr);
        fputs("   area_type:", out);
        fprintf(out, "%d", itr->area_type);
        fputs("  type:", out);
        fprintf(out, "%d", itr->rw_type);
        fputs("   is_neighbor:", out);
        fprintf(out, "%d", itr->is_neighbor);
    }
    fputs(", ", out);
#endif
    slave_print_long_long_thread_file(tick_count_now());
#endif
#ifdef ACEMESH_TIME
    /* Accumulate pure task-execution time since cycle_times_start. */
    cycle_times_end = rtc();
#ifdef SEG_BUFF
    if (_MYID < local_total_num_threads - N_SLAVE_AGENT)
#endif
        pure_exec_times += (double)(cycle_times_end - cycle_times_start) / SFREQ;
#endif
#ifdef ACEMESH_PARA
    /* Flush the per-thread "task done" flags for ids (pre_id, global_id2]
     * from the LDM buffer ldm_npar back to the shared npar[_MYID][] array
     * via DMA, one BUF_SIZE-sized buffer at a time.  The single-buffer and
     * multi-buffer cases are handled separately below. */
    global_id2 = mymin(global_id2, MAXT - 1);
#ifdef DEBUG
    printf("global_id2:%d\n", global_id2);
#endif
    if (global_id2 > pre_id) {
        dma_id2 = global_id2 / BUF_SIZE;
        local_id2 = global_id2 % BUF_SIZE;

        global_id1 = mymax(global_id1, pre_id + 1);
        dma_id1 = global_id1 / BUF_SIZE;
        local_id1 = global_id1 % BUF_SIZE;
#ifdef DEBUG
        printf("global_id1:%d,dma_id1:%d,local_id1:%d,dma_id2:%d,local_id2:%d\n", global_id1,
               dma_id1, local_id1, dma_id2, local_id2);
#endif

        if (dma_id1 == dma_id2) {
            /* Whole range falls inside one DMA buffer. */
            for (i = local_id1; i <= local_id2; i++) ldm_npar[i] = 1;
            if (local_id2 == BUF_SIZE - 1) {
                put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id1 * BUF_SIZE], BUF_SIZE,
                            (void *)&put_reply, 0, 0);  // TODO
                while (put_reply != 1)
                    ;
                // ldm_npar={0};
                for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            } else if (global_id2 == MAXT - 1) {
                /* Last, partially filled buffer: put only tsize bytes. */
                tsize = local_id2 + 1;
                put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id1 * BUF_SIZE], tsize,
                            (void *)&put_reply, 0, 0);  // TODO
                while (put_reply != 1)
                    ;
            }
        } else {
            /* Range spans several buffers: flush the first (partial) buffer,
             * then any fully-set middle buffers, then the last buffer. */
            put_reply = 0;
            athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id1 * BUF_SIZE], BUF_SIZE,
                        (void *)&put_reply, 0, 0);  //
            while (put_reply != 1)
                ;
#ifdef DEBUG
            printf("put_first_buf\n");
#endif
            // ldm_npar={1};
            for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 1;
            for (i = dma_id1 + 1; i < dma_id2; i++) {
                put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][i * BUF_SIZE], BUF_SIZE,
                            (void *)&put_reply, 0, 0);
                while (put_reply != 1)
                    ;
#ifdef DEBUG
                printf("put_%d_buf\n", i);
#endif
            }
            // ldm_npar={0};
            for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            /* NOTE(review): the single-buffer path above marks with
             * "i <= local_id2" but this one uses "i < local_id2" -- verify
             * that id local_id2 is not being dropped here. */
            for (i = 0; i < local_id2; i++) ldm_npar[i] = 1;
            if (local_id2 == (BUF_SIZE - 1)) {
                put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id2 * BUF_SIZE], BUF_SIZE,
                            (void *)&put_reply, 0, 0);
                while (put_reply != 1)
                    ;
                // ldm_npar={0};
                for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            } else if (global_id2 == (MAXT - 1)) {
                /* NOTE(review): tsize is computed but the put below transfers
                 * BUF_SIZE, unlike the single-buffer branch which puts tsize
                 * -- confirm whether this over-writes past the valid range. */
                tsize = local_id2 + 1;
                put_reply = 0;
                athread_put(PE_MODE, &ldm_npar[0], &npar[_MYID][dma_id2 * BUF_SIZE], BUF_SIZE,
                            (void *)&put_reply, 0, 0);
                while (put_reply != 1)
                    ;
                // ldm_npar={0};
                for (i = 0; i < BUF_SIZE; i++) ldm_npar[i] = 0;
            }
        }
        pre_id = global_id2;
#ifdef DEBUG
        printf("pre_id:%d\n", pre_id);
#endif
    }
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
    /* Track the longest reuse chain seen and how many reused vertices ran. */
    dist = self->reuse_distance;
    if (dist > 0) {
        // printf(",%ld,",sum_vert_times);
        ++sum_vert_times;
        if (dist > maxReuseChain) {
            maxReuseChain = dist;
        }
    }
#endif
// #ifdef DEBUG
//     printf("success_size1:%d\n",size);
// #endif
    /* Refresh the successor count before walking the list. */
    size = self->successor_count;
#ifdef USE_SIMD
  //  printf("%d_use_simd\n",_MYID);
    /* SIMD path: bulk-load successor pointers into LDM, VECTOR_DBL at a time
     * (32-byte doublev4 on SW5, 64-byte uint512 on SW9), then update each
     * successor's ref count and spawn the ones that became ready. */
    first = self->successor_tasks;
    if (size > 1) {  // more sucessors, use simd_load

        /* simd_load requires the successor array to be vector-aligned. */
        AM_assert1(((unsigned long)first) % (VECTOR_SIZEb / 8) == 0);
        while (size > 0) {
            /* Process at most LDM_NEIGHBORS successors per outer iteration
             * (bounded by the succ[] LDM staging buffer). */
            int ub = mymin(size, LDM_NEIGHBORS);
            // printf("success_tasks_num=%d\n",ub);
            // simd loading
            for (i = 0; i < ub; i += VECTOR_DBL) {
#ifdef TARG_SW5
                simd_load(*((doublev4 *)(&succ[i])), first);
                first = (struct task *)((unsigned long)first + 32);  // skip 32 bytes
#elif TARG_SW9
                // simd_load(*((doublev8*)(&succ[i])),first);   //type conversion error with this
                // usage
                simd_load(*((uint512 *)(&succ[i])), first);
                first = (struct task *)((unsigned long)first + 64);  // skip 64 bytes
#endif
                size -= VECTOR_DBL;
            }

            // deal with successor_task

#ifdef MULTI_PUSH
            struct task *push_arr[3];
            int push_count = 0;
#endif

            for (i = 0; i < ub; i++) {
#ifdef _SERIAL_QUEUE
#ifdef TARG_SW9
                // #if defined(SEG_BUFF) && defined(MASTER)
                //           if(_MYID==local_total_num_threads-N_SLAVE_AGENT)
                //               slave_clear_RegNT_to_need_spawn();
                //           else
                // #endif
                /* Drain any tasks already queued through the register
                 * network while we work, to avoid backpressure. */
                nrecv = recvtaskfromRegNT();
#elif TARG_SW5
                nrecv = recvtaskfromRegNT();  // add by gjie
#endif
#endif
                //          printf("myid=%d,deal_with_success_tasks_size=%d,%d,%p\n",_MYID,i,ub,succ[0],succ[0]->ref_count_t);
                //          fflush(stdout);
                //          if( t=succ[i])
                //          {
                // #ifdef DEBUG
                // printf("successor_ref_count:%d,%p\n",t->ref_count_t,&(t->ref_count_t));
                //            printf("myid=%d,successor_ref_count:%d,%p\n",_MYID,succ[i]->ref_count_t,&(succ[i]->ref_count_t));
                //            fflush(stdout);
                // #endif
                // my_atomic_add(newt,&(t->ref_count_t));
                /* NOTE(review): both branches of this #ifdef are identical;
                 * the conditional can be collapsed. */
                #ifdef TARG_SW5
                my_atomic_add(newt, &(succ[i]->ref_count_t));
                #else
             //   printf("%d_before_atomic_add\n",_MYID);
                my_atomic_add(newt, &(succ[i]->ref_count_t));
                //printf("");
                #endif
                /* newt == -1 appears to mean the successor's predecessors
                 * are all done, so it is ready to run -- TODO confirm. */
                if (newt == -1) {
// slave_spawn_to_id(t);
#ifdef MULTI_PUSH
                    push_arr[push_count] = succ[i];
                    push_count++;
#else
           //          printf("%d_before_spawn\n",_MYID);
                    slave_spawn_to_id(succ[i]);
#endif
                }
                //}

#ifdef MULTI_PUSH
                /* Batch ready tasks three at a time; flush any remainder
                 * one-by-one on the last iteration. */
                if (push_count == 3) {
                    EQ_multi_push(push_arr);
                    push_count = 0;
                } else if (i == ub - 1) {
                    int j = 0;
                    for (j = 0; j < push_count; j++) {
                        slave_spawn_to_id(push_arr[j]);
                    }
                }
#endif
            }
        }
        /*      for(i=0;i<size;i+=VECTOR_DBL)
              {
                simd_load(*((doublev4*)(&succ[i])),first);
                first=(struct task*)((unsigned long)first+32); //skip 32 bytes
              }
              for( j = 0; j < size; ++j)
                if( t=succ[j]) //TODO: why null succ?
                {
        #ifdef DEBUG
                   printf("successor_ref_count:%d\n",t->ref_count_t);
        #endif
                   my_atomic_add(newt,&(t->ref_count_t));
                   if(newt==-1)
                       slave_spawn_to_id(t);
                }
              */
    }                      // more succ
    else if (size == 1) {  // one succ, no need to use simd
        if (t = *first)    // TODO: why introduce nul successor?
        {
#ifdef DEBUG
            printf("successor_ref_count:%d,%p\n", t->ref_count_t, &(t->ref_count_t));
#endif
            /* NOTE(review): identical branches again -- collapsible. */
            #ifdef TARG_SW5
                my_atomic_add(newt, &(t->ref_count_t));
                #else
     //           printf("%d_before_atomic\n",_MYID);
                my_atomic_add(newt, &(t->ref_count_t));
                #endif
            //my_atomic_add(newt, &(t->ref_count_t));
      //        printf("%d_before_spawn\n",_MYID);
            if (newt == -1) slave_spawn_to_id(t);
        }  // non null vertical succ

    }  // one succ

#else
    /* Scalar fallback path: walk the successor list one pointer at a time. */
    first = self->successor_tasks;
    if (size > 0) {
        // printf("no_simd
        // after_success_tasks_size=%d,%p,%p\n",size,self->successor_tasks[0],*(struct task*)first);
        //      printf("no_simd after_success_tasks_size=%d,",size);
        //      printf("%p,",self->successor_tasks[0]);
        //      printf("%p,",*(struct task*)first);
        //      printf("%p\n",succ[0]);

        for (j = 0; j < size; ++j) {
            t = self->successor_tasks[j];
#ifdef DEBUG
//           printf("successor_ref_count:%d,%p\n",t->ref_count_t,t);
#endif
            /* NOTE(review): identical branches -- collapsible. */
             #ifdef TARG_SW5
                my_atomic_add(newt, &(t->ref_count_t));
                #else
                my_atomic_add(newt, &(t->ref_count_t));
                #endif
            //my_atomic_add(newt, &(t->ref_count_t));
#ifdef DEBUG
            printf("after_successor_ref_count:%d,t=%p,self=%p\n", t->ref_count_t, t, self);
            fflush(stdout);
#endif
            if (newt == -1) {
                slave_spawn_to_id(t);
            }
        }

        /*
          for( j = 0; j < size; ++j){
            if( t=self->successor_tasks[j]) //TODO: why null succ?
            {
                my_atomic_false(val,&(t->edge));
    #ifdef DEBUG
    //           printf("successor_ref_count:%d,%p\n",t->ref_count_t,t);
    #endif
               slave_my_mutex_lock(&(t->ref_lock));
               my_atomic_add(newt,&(t->ref_count_t));
               slave_my_mutex_unlock(&(t->ref_lock));
    #ifdef DEBUG
               printf("after_successor_ref_count:%d,t=%p,self=%p\n",t->ref_count_t,t,self);
               fflush(stdout);
    #endif
               if(newt==-1)
               {
                    if (t->state<1) {
                        slave_my_mutex_unlock(&(self->task_base.successor_lock));
                        while (t->state<1) {
                        }
                        slave_my_mutex_lock(&(self->task_base.successor_lock));
                        if (ref_count(t) == -1&& pre_count(t) != 0) {
                           slave_spawn_to_id(t);
                        }
                    }else if(t->state==1 && ref_count(t) == -1&& pre_count(t) != 0){
                            slave_spawn_to_id(t);
                    }
               }
            }
         }
         */
    }  // more succ
#endif
//#if !defined(LIGHT_BUILD_GRAPH) && !defined(LIGHT_BUILD_GRAPH2)
   // printf("%d_before_vert\n",_MYID);
    /* Now handle the vertical (chained) task, if any. */
    vert = self->vertical_task;
//#endif
#ifdef DEBUG
    printf("vert addr: %x,%pi,self=%p\n", vert, vert, self);
    fflush(stdout);
#endif
#ifdef _RETURN_VERTICAL
    /// there are some bug,
    /* If the vertical task becomes ready and its affinity maps to a local
     * worker thread, return it so the caller executes it directly (cache
     * reuse); otherwise push it to the scheduler like any other task.
     * NOTE(review): this path guards the ref-count update with ref_lock,
     * while the non-_RETURN_VERTICAL path below does not -- confirm which
     * locking discipline is intended. */
    if (vert) {
        slave_my_mutex_lock(&(vert->ref_lock));
        my_atomic_add(newt, &(vert->ref_count_t));
        slave_my_mutex_unlock(&(vert->ref_lock));
        if (newt == -1) {
#ifdef _SERIAL_QUEUE
#ifdef TARG_SW9
            // #if defined(SEG_BUFF) && defined(MASTER)
            //           if(_MYID==local_total_num_threads-N_SLAVE_AGENT)
            //               slave_clear_RegNT_to_need_spawn();
            //           else
            // #endif
            nrecv = recvtaskfromRegNT();

#elif TARG_SW5
            nrecv = recvtaskfromRegNT();      // add by gjie
#endif
#endif
#ifdef SEG_BUFF
            if ((vert->affinity_id) >= 0 &&
                (vert->affinity_id) < local_total_num_threads - N_SLAVE_AGENT)
#else
            if ((vert->affinity_id) >= 0 && (vert->affinity_id) < local_total_num_threads)
#endif
            {
#ifdef ACEMESH_SCHEDULER_PROFILING
                slave_inc_reuse_distance((struct aceMesh_task *)(vert), dist);
#endif
#ifdef THREAD_TRACE_LEVEL_1
                trace_time_s = rtc() - begin_time_s;
                slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
                return vert;
            } else {
                slave_spawn_to_id(vert);
            }
        }
    }
#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s = rtc() - begin_time_s;
    slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
    return NULL;
#else
    if (vert) {
#ifdef DEBUG
        printf("vert_successor_ref_count:%d,%p\n", vert->ref_count_t, vert);
        fflush(stdout);
#endif

        /* NOTE(review): identical branches -- collapsible. */
        #ifdef TARG_SW5
                my_atomic_add(newt, &(vert->ref_count_t));
                #else
               my_atomic_add(newt, &(vert->ref_count_t));
                #endif
#ifdef DEBUG
        printf("after_update_vert_successor_ref_count:%d,ver=%p,self=%p,MYid=%d\n",
               vert->ref_count_t, vert, self, _MYID);
        fflush(stdout);
#endif
        if (newt == -1) {
            slave_spawn_to_id(vert);
        }
    }

    /*
    if(vert)
    {
       my_atomic_false(val,&(vert->edge));
#ifdef DEBUG
       printf("vert_successor_ref_count:%d,%p\n",vert->ref_count_t,vert);
       fflush(stdout);
#endif
       slave_my_mutex_lock(&(vert->ref_lock));
        my_atomic_add(newt,&(vert->ref_count_t));
       slave_my_mutex_unlock(&(vert->ref_lock));
#ifdef DEBUG
        printf("after_update_vert_successor_ref_count:%d,ver=%p,self=%p,MYid=%d\n",vert->ref_count_t,vert,self,_MYID);
        fflush(stdout);
#endif
        if(newt==-1)
        {
            if (vert->state<1) {
           slave_my_mutex_unlock(&(self->task_base.successor_lock));
            while (vert->state<1) {
            }
           slave_my_mutex_lock(&(self->task_base.successor_lock));
           if(ref_count(vert)==-1&&pre_count(vert)!=0)
            {
             slave_spawn_to_id(vert);
            }
          }else if(vert->state==1&&ref_count(vert)==-1&&pre_count(vert)!=0){
             slave_spawn_to_id(vert);
          }
        }
    }
    */
    // my_atomic_add(val,&(self->task_base.state));
#ifdef BIG_CR_REGION
    //slave_set_task_state(1, &(self->task_base.over));
    //self->task_base.over=1;
    /* NOTE(review): this takes a lock on the `over` field and never unlocks
     * it -- presumably abusing lock acquisition to set `over` nonzero as the
     * "finished" marker.  Verify this is intentional. */
    slave_my_mutex_lock(&(self->task_base.over));
    asm volatile("memb\n");
    slave_my_mutex_unlock(&(self->task_base.successor_lock));
   // updt_subw(updt_val,&(self->task_base.slave));
    asm volatile("memb\n");
#endif
 //printf("%d_after_execute,state:%d\n",_MYID,self->task_base.over);

#ifdef TASK_LIFETIME_PROFILING
    /* Crude running average of per-task processing cycles: halve-and-add. */
    unsigned long process_end = rtc();
    if (avr_process_cycles[_MYID] != 0) {
        avr_process_cycles[_MYID] += (process_end - process_start);
        avr_process_cycles[_MYID] /= 2;
    } else {
        avr_process_cycles[_MYID] = process_end - process_start;
    }
#endif
#ifdef THREAD_TRACE_LEVEL_1
    trace_time_s = rtc() - begin_time_s;
    slave_trace_print(trace_time_s, trace_myid, RTS_event_end);
#endif
  //printf("%d_before_return\n",_MYID);
    return NULL;
#endif
}
