#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

// #include "aceMesh_stack.h"
#include "aceMesh_clang.h"
#include "aceMesh_task.h"
#include "athread.h"
#include "scheduler.h"
#include "utils/acelog.h"

#ifdef MTEST_LIGHT
#include "mpi.h"
#endif
#ifdef MEMORY_POOL
#include "MemPool.h"
#endif
// #ifdef ACEMESH_SCHEDULER_PROFILING
#include "aceMesh_utils.h"
// #endif



#ifdef THREAD_TRACE
#include "thread_trace.h"
extern unsigned long trace_time;
extern unsigned long begin_time;
extern unsigned long diff;
#endif
// ---- runtime-wide scheduler globals ---------------------------------------
extern aceMesh_task *local_parent_main;
// #define NO_AFFINITY -1
// Core-group id of this process; used as the cg argument of h2ldm accesses.
int cgid;
// Number of scheduling (slave) threads; defaults to 1 until init.
int sche_num_threads = 1;
// Total thread count; stays 0 until the runtime is initialized.
int total_num_threads = 0;
#if defined(TARG_SW9) && defined(CONCURRENT_CONSTRUCT_GRAPH)
extern volatile unsigned long con_graph;
#endif
#ifdef MTEST_LIGHT
#ifdef NO_PARALLEL
int *mtest_handle1;
int *mtest_handle2;
int mtest_kind1;  // 1:isend 2:irecv 3:icollect
int mtest_kind2;  // 1:isend 2:irecv 3:icollect
#else
// NOTE(review): tbb::enumerable_thread_specific is C++-only, so this
// configuration must be compiled as C++ -- confirm against the build system.
tbb::enumerable_thread_specific<int *> mtest_handle1;
tbb::enumerable_thread_specific<int *> mtest_handle2;
tbb::enumerable_thread_specific<int> mtest_kind1;
tbb::enumerable_thread_specific<int> mtest_kind2;
#endif
#endif

// Per-thread scheduler table: statically sized on SW5 (64 workers + master,
// or 2 for the centralized build), allocated elsewhere at runtime on SW9.
#ifdef TARG_SW5
#ifdef DISTRIBUTED_SCHEDULER
struct generic_scheduler schedulers[65];
#else
struct generic_scheduler schedulers[2];
#endif
#elif TARG_SW9
struct generic_scheduler *schedulers = NULL;
#endif

// ---- EMP queue instances and profiling counters ---------------------------
#ifdef EMP_QUEUE
#if defined(EMP_MASTER) && defined(EMP_CONCURRENT_Q)
struct EQ_master_queue master_queue;
struct EQ_master_queue *my_master_queue;
#endif
#if defined(EMP_CONCURRENT_Q)
struct EQ_queue global_cent_queue;
struct EQ_queue *my_global_cent_queue;
#endif
#ifdef TARG_SW9
volatile unsigned long emp_close_master[64];
#endif
#endif
#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
    double all_pure_register_time[64] = {0.0};
    double all_pure_addsucc_time[64] = {0.0};
#endif
#ifdef ACEMESH_TIME
double all_pure_exec_time[65] = {0.0};
double all_task_pure_exec_time[65] = {0.0};
double all_pure_task_compute_time[65] = {0.0};
double all_pure_task_dma_time[65] = {0.0};
// NOTE(review): sized [66] while sibling arrays are [65] -- confirm whether
// the extra slot is intentional (it is indexed with total_num_threads + 1).
long all_pure_task_num[66] = {0};
double blocking_pure_exec_time = 0.0;
// long all_task_nums[65]={0};
#ifdef ACEMESH_PROFILING_SUCC
// NOTE(review): {0.0} initializers on unsigned long arrays are converted to
// 0 but read oddly; plain {0} would express the intent better.
unsigned long all_spawn_slave_task_time[65] = {0.0};
unsigned long all_spawn_slave_trans1_time[65] = {0.0};
unsigned long all_spawn_slave_trans2_time[65] = {0.0};
unsigned long all_spawn_master_task_time[65] = {0.0};
unsigned long all_reg_put_time[64] = {0.0};
unsigned int all_total_reg_put_times[64] = {0};
unsigned int all_reg_put_full_times[64];

unsigned int all_succ_reg_own_times[64] = {0};
unsigned int all_succ_reg_direct_times[64] = {0};
unsigned int all_succ_reg_transfer_times[64] = {0};
#endif
unsigned long master_cycle_time_start;
unsigned long master_cycle_time_end;
unsigned long blocking_cycle_time_end;

double master_seg_sche_time = 0.0;
double master_seg_sche_time2 = 0.0;
#endif

#ifdef ACEMESH_PROFILING_INST
unsigned long inst_perf[64] = {0};
#endif

#ifdef ACEMESH_PROFILING_CLASSIFY
#define MAX_PROF_RECORD 20
double classify_exec_time[MAX_PROF_RECORD] = {0};
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
#ifdef MEMORY_POOL
extern unsigned long mempool_alloc_block_num;
#endif
int max_num_neigh = 0;
int max_num_successor = 0;
int num_successor[65] = {0};
#endif

#ifdef ACEMESH_PARA
char npar[65][MAXT];        // first data, every sample-thread-active_task
char npar_sum[MAXT];        // reduce npar
char waitcomm[MAXT] = {0};  //
unsigned long time_init;
unsigned long exec_from;
unsigned long exec_to;
unsigned long tt_build = 0;
int gid1, gid2, gid3;
int mpre_id, last_interval;
float fpar;
int nidle, nidle_wait_comm;
int interval = MFREQ / 100000;
#endif

// ---- master->slave hand-off flags and segment-token state -----------------
#if (defined(_SERIAL_QUEUE) && defined(MASTER)) || (defined(_CIRCULAR_QUEUE) && defined(MASTER))

#ifdef LOCAL_FLAG_M2S
extern __thread volatile struct task *from_master;
extern __thread volatile int master_flag;
#else
extern volatile struct task *from_master[64];
extern volatile int master_flag[64];
#endif
int pre_affinity_id = -1;
#endif

#ifdef SEG_BUFF
// Set by spawn_to_buff_succ() when staged successors await flushing.
int is_need_pending_succ = 0;
#if defined(LOCAL_SEG) || defined(TWIN_SEG)

#ifdef TARG_SW5
extern __thread volatile int seg_spawn;
extern __thread volatile int seg_succ;
#elif TARG_SW9
// LDM addresses of the per-agent segment tokens (written via my_h2ldm).
unsigned long seg_spawn_addr[64];
unsigned long seg_succ_addr[64];
#endif
#endif

#if defined(MMEM_SEG) || defined(TWIN_SEG)
// Segment ownership tokens: SEG_MASTER / SEG_SLAVE indicate which side may
// currently touch the succ/spawn buffers.
volatile int seg_succ = SEG_MASTER;
volatile int seg_spawn = SEG_SLAVE;
#endif

// Index of the agent slave core used for segment hand-offs (SW9).
int agent_id;
#endif

// int main_spawn_num = 0;
// int worker_spawn_num = 0;
volatile int total_exec_nums = 0;
#ifdef LOCAL_FLAG
#ifdef TARG_SW5
extern volatile int __thread close_all_threads;
#elif TARG_SW9
unsigned long close_all_threads_addr[64];
__uncached unsigned long is_run_addr[64];
#endif
#else
volatile int is_run = 0;
volatile int close_all_threads = 0;
// volatile int mpi_close_all_threads=0;
#endif
// extern int cgid;
extern int my_mpi_rank;
// volatile int is_run = 0;
// volatile int mpi_is_run;
// volatile int mpi_close_all_threads;
// int index1 = 0;
#ifdef DEBUG
extern int my_mpi_rank;
#endif
extern int total_exec_array[64];
/*#ifdef MPI_SURPPORT
pthread_t ntid;
#endif
*/

#ifdef SUCC_BATCH
// #ifdef SUCC_BATCH_V1
// Shared slots (master-visible) and master-side staging copies of the
// per-slave batched task details.
struct m2s_detail M2S_details_64[64] = {0};
struct m2s_detail M2S_detail_temp_64[64] = {0};
// #endif
#ifdef LOCAL_FLAG_M2S
extern __thread volatile int m2s_flag;
#else
extern volatile int m2s_flag[64];
#endif
#endif

// ---- EMP mailbox protocol state (master side) -----------------------------
#ifdef EMP_QUEUE
#include "args.h"
#include "emp_basic.h"

#ifdef EMP_MAIN_PUSH_PROFILING
int main_push_count = 0;
unsigned long main_push_cycles = 0;
unsigned long main_push_while_cycles = 0;
extern unsigned long rpcc();
#endif

// extern __thread volatile int eq_status;
// extern __thread volatile uint64_t eq_ptr_slot;
// volatile int eq_status[64]={1};
// volatile uint64_t eq_ptr_slot[64];
// volatile int eq_num=1;
// One-slot mailbox shared with the queue-server core: eq_status carries the
// request kind, eq_ptr_slot carries the task pointer.
extern volatile int eq_status;
extern volatile uint64_t eq_ptr_slot;
#ifdef EMP_MASTER
// extern __thread volatile int m2s_flag;
extern volatile int m2s_flag;
// volatile master_buff ready_succ_buff;
// Staging buffer of ready successors and the shared pending buffer the
// slave side drains (see master_buff_consume()).
master_buff ready_succ_buff;
volatile master_buff m2s_pending;
#endif

/*
 * EQ_main_push - hand one task to the queue-server core through the
 * one-slot mailbox (eq_ptr_slot / eq_status).
 *
 * Protocol: spin until the slot is idle, write the task pointer, fence
 * ("memb"), publish the request code, fence again.  The EMP_MASTER build
 * lets the caller choose the request code via push_req.
 * NOTE(review): correctness relies on the memb barriers ordering the slot
 * write before the status write -- do not reorder.
 */
#ifdef EMP_MASTER
void EQ_main_push(struct task *t, int push_req)
#else
void EQ_main_push(struct task *t)
#endif
{
    // printf("EQ_main_push\n");
#ifdef EMP_MAIN_PUSH_PROFILING
    main_push_count++;
    unsigned long push_start = rpcc();
#endif
    // while (h2ldm(eq_status,0,cgid) != STATUS_IDLE)
    //  printf("eq_status:%d\n",eq_status);
    // Wait for the server to finish consuming the previous request.
    while (eq_status != STATUS_IDLE)
        // eq_num=63;
        // while (eq_status[eq_num])
        ;
        // printf("eq_status:%d\n",eq_status);
#ifdef EMP_MAIN_PUSH_PROFILING
    unsigned long while_end = rpcc();
    main_push_while_cycles += while_end - push_start;
#endif
    // h2ldm(eq_ptr_slot,0,cgid) = (uint64_t)t;
    eq_ptr_slot = (uint64_t)t;
    //  printf("eq_ptr_slot:%x\n",eq_ptr_slot);
    asm volatile("memb\n");
#ifdef EMP_MASTER
    // h2ldm(eq_status,0,cgid) = push_req;
    eq_status = push_req;
#else
    // h2ldm(eq_status,0,cgid) = STATUS_PUSH_REQ;
    eq_status = STATUS_PUSH_REQ;
    // eq_status[eq_num]=1;
#endif
    asm volatile("memb\n");
    // while(eq_status[eq_num]);
    // if(eq_num==63)
    // eq_num=1;
    // else
    // eq_num++;
#ifdef EMP_MAIN_PUSH_PROFILING
    unsigned long push_end = rpcc();
    main_push_cycles += push_end - push_start;
#endif
}

/*
 * EQ_main_pop - request one task back from the queue-server core.
 *
 * Spins until the mailbox is idle, posts STATUS_POP_REQ, then spins until
 * the server returns to idle and reads the task pointer out of the slot.
 * May return NULL (whatever the server left in eq_ptr_slot).
 */
struct task *EQ_main_pop()
{
    // while (h2ldm(eq_status,0,cgid) != STATUS_IDLE)
    while (eq_status != STATUS_IDLE)
        ;
    // h2ldm(eq_status,0,cgid) = STATUS_POP_REQ;
    eq_status = STATUS_POP_REQ;
    asm volatile("memb\n");
    // while (h2ldm(eq_status,0,cgid) != STATUS_IDLE)
    while (eq_status != STATUS_IDLE)
        ;
    // return (struct task *)h2ldm(eq_ptr_slot,0,cgid);
    return (struct task *)eq_ptr_slot;
}

#ifdef EMP_POP_REC
/*
 * EQ_main_wakeup_cores - drain the mailbox before shutdown.
 * NOTE(review): the STATUS_EXIT write is commented out, so this currently
 * only waits for the mailbox to go idle and fences -- confirm whether the
 * exit broadcast was intentionally disabled.
 */
void EQ_main_wakeup_cores()
{
    while (eq_status != STATUS_IDLE)
        ;
    // h2ldm(eq_status,0,cgid) = STATUS_POP_REQ;
    //    eq_status = STATUS_EXIT;
    asm volatile("memb\n");
}
#endif

#ifdef EMP_MASTER
/* True once the fill index has reached the fixed capacity BUFF_SIZE. */
inline int master_buff_full(master_buff *buff)
{
    return BUFF_SIZE == buff->idx;
}

/* True while no tasks have been staged in the buffer. */
inline int master_buff_empty(master_buff *buff)
{
    return 0 == buff->idx;
}

/*
inline int master_buff_space_enough(master_buff * buff, int length)
{
    return BUFF_SIZE - buff->idx >= length;
}
*/

/*
 * Append one task to the staging buffer.
 * Returns 1 on success, 0 when the buffer is already at capacity.
 */
inline int master_buff_push(master_buff *buff, struct task *t)
{
    /* Guard clause: refuse the push when full. */
    if (master_buff_full(buff)) {
        return 0;
    }
    buff->buff[buff->idx++] = t;
    return 1;
}

/*
 * master_buff_consume - hand the whole batch of ready successor tasks over
 * to the shared m2s_pending buffer in one memcpy, then mark the staging
 * buffer empty so it can be refilled.
 * Caller must hold the m2s_flag hand-shake (see main_kernel_func) so the
 * slave side is not reading m2s_pending concurrently.
 */
inline void master_buff_consume()
{
    memcpy(m2s_pending.buff, ready_succ_buff.buff, ready_succ_buff.idx * sizeof(struct task *));
    m2s_pending.idx = ready_succ_buff.idx;
    ready_succ_buff.idx = 0;
}
#endif

#endif

#ifdef MASTER

/*
 * get_random_stealing_destid - pick a pseudo-random victim scheduler id
 * for work stealing.  The cycle counter (rpcc) is cheap and varies between
 * calls, which is sufficient randomness for victim selection.
 * Returns a value in [0, total_num_threads).
 */
inline int get_random_stealing_destid()
{
    unsigned long seed = rpcc();
    // unsigned long seed=rand();
    // srand((unsigned int)time(NULL));
    // long int seed=random();
    /* total_num_threads starts at 0 before runtime init; avoid the
     * division-by-zero UB that `seed % 0` would be. */
    if (total_num_threads <= 0) return 0;
    return seed % total_num_threads;
}

/*
 * get_next_task - fetch the next runnable task for the master thread.
 *
 * Search order (configuration dependent):
 *   1. the master private queue(s) -- highest priority first when
 *      LOCAL_MULTI_PRIORI is on, or via the EMP mailbox/concurrent queue
 *      when EMP_MASTER is on;
 *   2. the blocking (MPI) queue under MPI_SURPPORT;
 *   3. under DOUBLE_FUNC, steal from a slave's private queue (random victim
 *      for the distributed scheduler, SL_SCHED for the centralized one).
 * Returns NULL when no work is available.
 */
struct task *get_next_task(generic_scheduler *my_scheduler)
{
    struct task *t = NULL;
    int flag = 0;
    //    return NULL;
    // flag=try_pop(&(my_scheduler->private_queue), &t);

    // #ifdef MPI_SURPPORT
    //     if(q_try_pop(&(my_scheduler->blocking_queue), &t))
    //     {
    //         return t;
    //     }
    // #endif
#ifdef LOCAL_MULTI_PRIORI
    // Scan priority sub-queues from highest index downwards.
    int dest_qid = 0;
    for (dest_qid = LOCAL_MULTI_SIZE - 1; dest_qid >= 0; dest_qid--) {
#ifdef ARRAY_STACK
        if (ser_try_pop(&(my_scheduler->master_private_queue[dest_qid]), &t))
#else
        if (try_pop(&(my_scheduler->master_private_queue[dest_qid]), &t))
#endif
        {
#ifdef ACEMESH_SCHEDULER_PROFILING
            // don't include suspend task
            if (t != NULL) all_pure_task_num[total_num_threads]++;
#endif
            return t;
        }
    }
#else
#ifdef ARRAY_STACK
    if (ser_try_pop(&(my_scheduler->master_private_queue), &t))
#else
#ifdef EMP_MASTER
    // t = EQ_main_pop();
#ifdef EMP_CONCURRENT_Q
    t = EQ_queue_master_pop(&master_queue);
#else
    t = EQ_main_pop(); //
#endif
    if (t != NULL)
#else
    if (try_pop(&(my_scheduler->master_private_queue), &t))
#endif
#endif
    {
#ifdef ACEMESH_SCHEDULER_PROFILING
        // don't include suspend task
        if (t != NULL) all_pure_task_num[total_num_threads]++;
#endif
        return t;
    }
#endif
#ifdef MPI_SURPPORT
    // Fall back to tasks whose MPI communication has completed.
    if (q_try_pop(&(my_scheduler->blocking_queue), &t)) {
        return t;
    }
#endif

#ifdef DOUBLE_FUNC
// #ifdef CONCUR_Q_RANDOM_STEALING
#ifdef DISTRIBUTED_SCHEDULER
    // only for SHM-D
    int stealing_destid = get_random_stealing_destid();
    // printf("%d,",stealing_destid);
    struct aceMesh_stack *dest_stealing_queue = &(schedulers[stealing_destid].private_queue);
#else  // CENTRALIZED
    struct aceMesh_stack *dest_stealing_queue = &(schedulers[SL_SCHED].private_queue);
#endif
    if (try_pop(dest_stealing_queue, &t)) {
        // printf("got a slave task=%p\n",t);
        // fflush(stdout);
        return t;
    }
    // #endif
#endif

#ifdef DEBUG
    // else
    //     printf("rank:%d, get_next_task is NULL\n",my_mpi_rank);
#endif
    return NULL;
}
#ifdef MTEST_LIGHT
/*
 * get_next_mtest_task - pop the next task awaiting an MPI completion test
 * from this scheduler's mtest blocking queue.
 * Returns NULL when the queue is empty.
 */
struct mtest_task *get_next_mtest_task(generic_scheduler *my_scheduler)
{
    struct mtest_task *t = NULL;

    if (q_try_pop_test(&(my_scheduler->mtest_blocking_queue), &t)) {
        return t;
    }

#ifdef DEBUG
    // else
    //     printf("rank:%d, get_next_task is NULL\n",my_mpi_rank);
#endif
    return NULL;
}

#endif

/*#ifdef MPI_SURPPORT
struct task* mpi_get_next_task(generic_scheduler *my_scheduler)
{
    struct task* t = NULL;
    int flag=0;
    //flag=try_pop(&(my_scheduler->private_queue), &t);
    if(try_pop(&(my_scheduler->private_queue), &t))
    {
#ifdef DEBUG
        //printf("rank:%d, scheduler.c mpi_get_next_task from private_queue
:t->affinity:%d\n",my_mpi_rank, t->affinity_id); #endif return t;
    }
    else if(q_try_pop(&(my_scheduler->blocking_queue), &t))
    {
#ifdef DEBUG
        //printf("rank:%d, scheduler.c mpi_get_next_task from blocking queue
:t->affinity:%d\n",my_mpi_rank, t->affinity_id); #endif return t;
    }
#ifdef DEBUG
    else
        printf("rank:%d, mpi_get_next_task is NULL\n",my_mpi_rank);
#endif
    return NULL;
}
void mpi_worker_kernel_func()
{
    struct task* t = NULL;
    generic_scheduler *my_scheduler=&schedulers[total_num_threads+1];
#ifdef DEBUG
    printf("rank:%d, before mpi kernel fun :%d\n",my_mpi_rank,total_num_threads+1);
#endif
    while( mpi_is_run == 0);
#ifdef DEBUG
    printf("rank:%d, after mpi kernel fun :%d\n",my_mpi_rank,total_num_threads+1);
#endif
    while(!close_all_threads)
    {
        while( mpi_is_run == 0 && mpi_close_all_threads == 0);
        if(t == NULL)
        {
            t=mpi_get_next_task(my_scheduler);
#ifdef DEBUG
            //printf("rank:%d, schedeule.c t==NULL\n", my_mpi_rank);
#endif
        }
        if(t != NULL)
        {
#ifdef ACEMESH_SCHEDULER_PROFILING
            all_pure_task_num[total_num_threads+1]++;
#endif
#ifdef DEBUG
            //printf("rank:%d, schedeule.c before mpi_task_execute :t->affinity:%d\n",
my_mpi_rank,t->affinity_id); #endif t = user_execute((ci_task*)t); t = NULL; #ifdef DEBUG
            //printf("rank:%d,end mpi_task_execute\n",my_mpi_rank);
#endif
        }
        //if(close_all_threads) break;
    }
}
#endif
*/
#endif

#ifdef SUCC_BATCH
/*
 * emp_d_master_consum_all - flush every slave's staged master-to-slave task
 * batch from the master-side staging copy (M2S_detail_temp_64) into the
 * shared slots (M2S_details_64), but only for slaves that have acknowledged
 * the previous batch (m2s_flag == 1).  memb barriers order the detail
 * writes before the flag hand-back.
 */
void emp_d_master_consum_all()
{
    int total_task_num = 0;
    int i, k, flag_tmp;
    for (i = 0; i < sche_num_threads; i++) {
        total_task_num = M2S_detail_temp_64[i].task_num;
        // printf("%d,",total_task_num);
        if (total_task_num > 0) {  // this slave core has staged tasks
#ifdef LOCAL_FLAG_M2S
            if ((flag_tmp = h2ldm(m2s_flag, i, cgid)) == 1)
#else
            if (m2s_flag[i] == 1)
#endif
            {
                for (k = 0; k < total_task_num; k++) {  // master writes the task details
                    M2S_details_64[i].task_detail[k] = M2S_detail_temp_64[i].task_detail[k];
                }
                M2S_details_64[i].task_num = total_task_num;
                asm volatile("memb\n");
#ifdef LOCAL_FLAG_M2S
                h2ldm(m2s_flag, i, cgid) = 0;
#else
                m2s_flag[i] = 0;
#endif
                asm volatile("memb\n");
                M2S_detail_temp_64[i].task_num = 0;
            }
        }
    }
}
#endif

/*
 * main_kernel_func - the master thread's scheduling loop.
 *
 * Runs until wait_task's reference count (predecessor count under FAT_TDG)
 * crosses the sentinel; each iteration (configuration dependent):
 *   - MTEST_LIGHT: polls queued MPI requests and re-queues or releases the
 *     associated tasks;
 *   - SUCC_BATCH / EMP_MASTER: flushes staged master-to-slave task batches;
 *   - pops and executes one master task (user_execute may return a
 *     continuation task which is executed on the next iteration);
 *   - SEG_BUFF: services the succ/spawn segment-token hand-off.
 * NOTE(review): the loop condition compares the count against -2 -- this
 * looks like a completion sentinel; confirm against ref_count()'s contract.
 */
void main_kernel_func(struct task *wait_task)
{
    printf("entered main_kernel_func\n");
    fflush(stdout);
    //    int t0,dt,i;
    //    int total_pop_task = 0;
    //    int total_pool_task = 0;
#ifdef FAT_TDG
    local_parent_main = NULL;
#endif
    struct task *t;
#ifdef MTEST_LIGHT
    int flag, flag1, flag2;
    MPI_Status status1, status2;
    struct mtest_task *mt;
    struct mtest_task *old_mt;
    int mret;
    int test_count, test_max_count;
#endif
#ifdef SUCC_BATCH
    int i, k, flag_tmp;
#endif
    generic_scheduler *my_scheduler;
#ifdef DISTRIBUTED_SCHEDULER
    my_scheduler = &schedulers[total_num_threads];
#else
    my_scheduler = &schedulers[MS_SCHED];
#endif

#ifdef SEG_BUFF
#ifdef TARG_SW9
    // The last worker acts as the segment-token agent on SW9.
    agent_id = total_num_threads - 1;
#ifdef DEBUG
    printf("agentid=%d,seg_addr=%p,%p\n", agent_id, seg_succ_addr[agent_id],
           seg_spawn_addr[agent_id]);
    printf("master_init_seg_succ=%d,init_seg_spawn=%d\n",
           my_h2ldm(seg_succ_addr[agent_id], agent_id, cgid, int),
           my_h2ldm(seg_spawn_addr[agent_id], agent_id, cgid, int));
    fflush(stdout);
#endif
#endif
#endif

#ifdef DEBUG
    printf("main kernel func scheduler:%d\n", total_num_threads);
#endif
    t = NULL;
    //    t0 = clock();
    int num = 0;
    alog_debug("wait_task: %x", wait_task);
    alog_debug("wait_task ref: %d", ref_count(wait_task));
    //  while(0){
#ifdef FAT_TDG
 printf("current predec_count= %d\n",predec_count(wait_task));
    while (predec_count(wait_task) <-2){
        // printf("current predec_count= %d\n",predec_count(wait_task));
#else
 printf("current ref_count = %d\n", ref_count(wait_task));
    while (ref_count(wait_task) < -2) {
#endif
// num++;
// if(num==1000000){
// break;
// }
#ifdef MTEST_LIGHT
        // Poll pending MPI requests; a task whose request(s) completed is
        // released to the blocking queue, otherwise it is re-queued.  The
        // old_mt marker stops the scan after one full pass over the queue.
        if (t == NULL) {
            mt = get_next_mtest_task(my_scheduler);
            old_mt = NULL;
            while (mt != NULL) {
                flag1 = 0;
                if (mt->comm_handle1 != NULL) {
                    test_count = 0;
                    if (mt->comm_kind1 != 3) {
                        test_max_count = 1;
                    } else {
                        test_max_count = 100000;
                    }
                    while (test_count < test_max_count && flag1 == 0) {
                        // mret = MPI_Test(mt->comm_handle1, &flag1, &status1);
                        // NOTE(review): MPI_Wait blocks until completion, so
                        // flag1 is forced to 1 here -- confirm the switch from
                        // MPI_Test was intentional.
                        mret = MPI_Wait(mt->comm_handle1, &status1);
                        flag1 = 1;
                        test_count++;
                    }
                    if (flag1 == 0 && test_count == test_max_count) {
                        // printf("beyond 8\n");
                    }
                    if (mret != MPI_SUCCESS) {
                    }
                    if (flag1 != 0) {
                    }
                } else {
                    flag1 = 1;
                }

                flag2 = 0;
                if (mt->comm_handle2 != NULL) {
                    flag2 = 0;
                    test_count = 0;
                    if (mt->comm_kind2 != 3) {
                        test_max_count = 1;
                    } else {
                        test_max_count = 1;
                    }
                    while (test_count < test_max_count && flag2 == 0) {
                        mret = MPI_Test(mt->comm_handle2, &flag2, &status2);
                        test_count++;
                    }
                    if (flag2 == 0 && test_count == test_max_count) {
                        // printf("beyond 8\n");
                    }
                    if (mret != MPI_SUCCESS) {
                    }
                    if (flag2 != 0) {
                    }
                } else {
                    flag2 = 1;
                }

                flag = (flag1 == 1) && (flag2 == 1);

                if (flag == 0) {  // faill
                    // Not done yet: re-queue and remember the first re-queued
                    // task so we stop after cycling through the whole queue.
                    q_push_mtest(&my_scheduler->mtest_blocking_queue, mt);
                    if (old_mt == NULL) {
                        old_mt = mt;
                    }
                } else {
                    q_push(&my_scheduler->blocking_queue, mt->comm_task);
                    // free(mt);
                }
                mt = get_next_mtest_task(my_scheduler);

                if (mt == old_mt) {
                    if (mt != NULL) {
                        q_push_mtest(&my_scheduler->mtest_blocking_queue, mt);
                    }
                    break;
                }
            }
        }

#endif

#ifdef MASTER
    if (t == NULL) {
        // printf("master_task_NULL\n");

#ifdef SUCC_BATCH
#ifdef SUCC_BATCH_V1  // disable
        for (i = 0; i < sche_num_threads; i++) {
            if (M2S_detail_temp_64[i].is_pending) {  // this slave core has staged tasks
#ifdef LOCAL_FLAG_M2S
                if ((flag_tmp = h2ldm(m2s_flag, i, cgid)) == 1) {
#else
                if (m2s_flag[i] == 1) {
#endif
                    for (k = 0; k < M2S_detail_temp_64[i].task_num; k++) {  // master writes the task details
                        M2S_details_64[i].task_detail[k] = M2S_detail_temp_64[i].task_detail[k];
                    }
                    M2S_details_64[i].task_num = M2S_detail_temp_64[i].task_num;
                    asm volatile("memb\n");
#ifdef LOCAL_FLAG_M2S
                    h2ldm(m2s_flag, i, cgid) = 0;
#else
                    m2s_flag[i] = 0;
#endif
                    asm volatile("memb\n");
                    M2S_detail_temp_64[i].task_num = 0;
                    M2S_detail_temp_64[i].is_pending = 0;
                }
            }
        }
#else
        emp_d_master_consum_all();
#endif
#endif
#if defined(EMP_MASTER) && defined(TARG_SW5)
#ifndef EMP_CONCURRENT_Q
        // Hand the batch of ready successors to the slave side once it has
        // released the shared buffer (m2s_flag back to BUFF_STATUS_MS).
        if (!master_buff_empty(&ready_succ_buff)) {
            // if (h2ldm(m2s_flag, 0, cgid) == BUFF_STATUS_MS) {
            if (m2s_flag == BUFF_STATUS_MS) {
                master_buff_consume();
                asm volatile("memb\n");
                // h2ldm(m2s_flag, 0, cgid) = BUFF_STATUS_SL;
                m2s_flag = BUFF_STATUS_SL;
                asm volatile("memb\n");
            }
        }
#endif
#endif

        t = get_next_task(my_scheduler);
    }
#ifdef FAT_TDG
    if (t != NULL) {
        // printf("&&&&&&&&&&&&&&&&&&%d\n",t->_endExecute.load(std::memory_order_seq_cst));
        // Skip tasks that already finished executing elsewhere.
        if (t->_endExecute == true) {
            printf("master任务已经执行完不需要再执行\n");
            t = NULL;
        }
    }
#endif
    if (t != NULL) {
        printf("master_task_!NULL\n");
#ifndef DOUBLE_FUNC
        if (t->affinity_id != total_num_threads) assert(0);
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
        // including suspend task
        all_pure_task_num[total_num_threads + 1]++;
#ifdef ACEMESH_TIME
        master_cycle_time_start = rpcc();
#endif
#endif

#ifdef THREAD_TRACE
        trace_time = rpcc() - begin_time;
        int task_type = ((aceMesh_task *)t)->my_type;
        switch (task_type) {
            case 0:
                trace_print(trace_time, '0', RTS_execute_task);
                break;
            case 1:
                trace_print(trace_time, '0', RTS_execute_stencil_task);
                break;
            case 2:
                trace_print(trace_time, '0', RTS_execute_noaffinity_task);
                break;
            case 3:
                trace_print(trace_time, '0', RTS_execute_blocking_task);
                break;
            case 4:
                trace_print(trace_time, '0', RTS_execute_composite_task);
                break;
            case 5:
                trace_print(trace_time, '0', RTS_execute_composite_end_task);
                break;
            case 6:
                trace_print(trace_time, '0', RTS_execute_undefined_task_1);
                break;
            case 7:
                trace_print(trace_time, '0', RTS_execute_undefined_task_2);
                break;
            default:
                trace_print(trace_time, '0', RTS_execute_task);
                break;
        }
#endif

#ifdef FAT_TDG
        // printf("local_parent_main:%p %p\n",local_parent,t);

        local_parent_main = (aceMesh_task *)t;

#endif
        // user_execute may return a continuation task; it is executed on the
        // next loop iteration (the t == NULL checks above are then skipped).
        t = user_execute((ci_task *)t);
#ifdef THREAD_TRACE
        trace_time = rpcc() - begin_time;
        trace_print(trace_time, '0', RTS_event_end);
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
#ifdef ACEMESH_TIME
        master_cycle_time_end = rpcc();
        all_task_pure_exec_time[total_num_threads] +=
            (double)(master_cycle_time_end - master_cycle_time_start) / MFREQ;
#endif
        // not including suspend task
        if (t != NULL) all_pure_task_num[total_num_threads]++;
#endif
    }

#ifdef SEG_BUFF
#ifdef LOCAL_SEG
    // Segment-token service: when the agent hands the succ/spawn token to
    // SEG_MASTER, flush staged successors / collect pending spawns.
    int temp_seg = -1;
#ifdef ACEMESH_SCHEDULER_PROFILING
    long master_sche_cycle_start = rpcc();
#endif
    if (is_need_pending_succ == 1) {
#ifdef ACEMESH_SCHEDULER_PROFILING
        master_sche_cycle_start = rpcc();
#endif
        temp_seg = my_h2ldm(seg_succ_addr[agent_id], agent_id, cgid, int);
#ifdef ACEMESH_SCHEDULER_PROFILING
        master_seg_sche_time2 += (float)(rpcc() - master_sche_cycle_start) / MFREQ;
        master_sche_cycle_start = rpcc();
#endif
        // printf("master_temp_seg=%d\n",temp_seg);
        // fflush(stdout);
        if (temp_seg == SEG_MASTER) spawn_succ_from_buff_to_pending();
#ifdef ACEMESH_SCHEDULER_PROFILING
        master_seg_sche_time += (float)(rpcc() - master_sche_cycle_start) / MFREQ;
#endif
    }

#ifdef ACEMESH_SCHEDULER_PROFILING
    master_sche_cycle_start = rpcc();
#endif
    temp_seg = my_h2ldm(seg_spawn_addr[agent_id], agent_id, cgid, int);
#ifdef ACEMESH_SCHEDULER_PROFILING
    master_seg_sche_time2 += (float)(rpcc() - master_sche_cycle_start) / MFREQ;
    master_sche_cycle_start = rpcc();
#endif
    if (temp_seg == SEG_MASTER) get_need_spawn_task_from_pending();
#ifdef ACEMESH_SCHEDULER_PROFILING
    master_seg_sche_time += (float)(rpcc() - master_sche_cycle_start) / MFREQ;
#endif

#else  // for MMEM_SEG & TWIN_SEG

#ifdef ACEMESH_SCHEDULER_PROFILING
    long master_sche_cycle_start = rpcc();
#endif
    if (is_need_pending_succ == 1 && seg_succ == SEG_MASTER) spawn_succ_from_buff_to_pending();
#ifdef ACEMESH_SCHEDULER_PROFILING
    master_seg_sche_time2 += (float)(rpcc() - master_sche_cycle_start) / MFREQ;
    master_sche_cycle_start = rpcc();
#endif
    if (seg_spawn == SEG_MASTER) get_need_spawn_task_from_pending();
#ifdef ACEMESH_SCHEDULER_PROFILING
    master_seg_sche_time += (float)(rpcc() - master_sche_cycle_start) / MFREQ;
#endif

#endif

#endif

#endif
#ifdef DEBUG
//        printf("rank:%d, wait_task_ref:%d\n",my_mpi_rank, ref_count(wait_task));
#endif
}
#if defined(EMP_QUEUE) && defined(EMP_POP_REC)
EQ_main_wakeup_cores();
#endif
// printf("bye bye. I'm leaving!!\n");
// TODO mem leak
// free(wait_task);
wait_task = NULL;
}

#ifdef ACEMESH_SCHEDULER_PROFILING
// Per-thread reuse-statistics arrays (allocated elsewhere) and the
// main-thread aggregates computed by print_and_reset_reuse_statistics().
long *all_threads_sum_vert_times = NULL;
long main_sum_vert_times = 0;
long *all_nfull = NULL;
int main_maxReuseChain = 0;
int *all_threads_maxReuseChain = NULL;
long *all_threads_reuse_times;
long main_reuse_times = 0;
long total_vert_times = 0;
long total_ldm_num_fulls = 0;

extern unsigned long total_nedges;
extern unsigned long total_ntasks_register;

/*
 * print_and_reset_reuse_statistics - fold the per-thread reuse statistics
 * into the main-thread aggregates and print a summary.
 * NOTE(review): despite the name, the per-region reset is still a TODO
 * (see the trailing comment).
 */
void print_and_reset_reuse_statistics()
{
    int i;

    /* Sum vertex-visit counts across all threads. */
    for (i = 0; i < total_num_threads; ++i) {
        main_sum_vert_times += all_threads_sum_vert_times[i];
    }

    /* Longest reuse chain observed on any thread. */
    for (i = 0; i < total_num_threads; ++i) {
        if (all_threads_maxReuseChain[i] > main_maxReuseChain)
            main_maxReuseChain = all_threads_maxReuseChain[i];
    }

    /* Total reuse events across all threads. */
    for (i = 0; i < total_num_threads; ++i) {
        main_reuse_times += all_threads_reuse_times[i];
    }

    printf("sum vert times : %ld\n", main_sum_vert_times);
    printf("max reuse chain: %d\n", main_maxReuseChain);
    printf("sum reuse times: %ld\n", main_reuse_times);

    total_vert_times += main_sum_vert_times;
    // main_sum_vert_times = 0;
    // main_maxReuseChain = 0;
    // main_reuse_times = 0;
    // TODO :how to reset for the next DAG region
    // total_ldm_num_fulls=0;
}
#endif

/* Accessor: the worker id this task is pinned to. */
inline int get_affinity_id(struct task *self)
{
    return self->affinity_id;
}

/*
 * local_spawn - make a ready task runnable on its owning scheduler.
 *
 * The destination depends on the build: the slave-push path for the
 * serial/circular master queues, a priority or plain private queue, or the
 * EMP mailbox/concurrent queue when EMP_QUEUE is enabled.
 */
void local_spawn(generic_scheduler *my_scheduler, struct task *first)
{
    //printf("local_spawn%p\n",first);
    alog_debug("local spawn: %x", first);
#if (defined(_SERIAL_QUEUE) && defined(MASTER)) || (defined(_CIRCULAR_QUEUE) && defined(MASTER))
    push_to_slave(&(my_scheduler->private_queue), first);
#else
#ifdef USE_PRIORITY_QUEUE
    // Route by the task's priority id; negative ids are invalid.
    int temp_pri_id = get_priority_id(first);
    if (!temp_pri_id)
        push(&(my_scheduler->private_queue), first);
    else if (temp_pri_id > 0)
        push(&(my_scheduler->priority_queue), first);
    else
        assert(0);
#else

#ifdef EMP_QUEUE
#ifdef EMP_MASTER
#ifdef EMP_CONCURRENT_Q
    EQ_queue_master_push(my_global_cent_queue, first, 0);
#else
    EQ_main_push(first, STATUS_PUSH_SL);  // send to g-server
#endif
#else
#ifdef EMP_CONCURRENT_Q
    EQ_queue_master_push(my_global_cent_queue, first);
#else
// printf("before_EQ_main_push:%p, affinity;%d\n",first,get_affinity_id(first));
    EQ_main_push(first);  // send to g-server
    // printf("after_local_spawn_try_pop:%p, affinity;%d\n",first,get_affinity_id(first));
#endif
#endif
#else
    // #ifdef MLOCK
    // init_push(&(my_scheduler->private_queue), first);
    // #else
    push(&(my_scheduler->private_queue), first);
    // #endif
#endif
#ifdef DEBUG
    printf("local_spawn:%p, affinity;%d\n", first, get_affinity_id(first));
#endif
    // first=NULL;
    // try_pop(&(my_scheduler->private_queue),&first);

#endif
#endif
}

#ifdef MASTER

#ifdef SEG_BUFF
/* Non-blocking pop from a task stack: the top task, or NULL when empty. */
struct task *get_next_task_from_buff(struct aceMesh_stack *self)
{
    struct task *popped = NULL;
    return try_pop(self, &popped) ? popped : NULL;
}

/*
 * spawn_to_buff_succ - stage a newly ready successor task in the master
 * scheduler's buff_successor queue.  The actual hand-off to the pending
 * queue happens later in spawn_succ_from_buff_to_pending(), driven by the
 * segment token in main_kernel_func.
 */
void spawn_to_buff_succ(struct task *first)
{
    generic_scheduler *temp_scheduler = &schedulers[total_num_threads];

#ifdef TEMP_DEBUG
    printf("push_task(%p)_to_buff_successor\n", first);
    fflush(stdout);
#endif
#ifdef ARRAY_STACK  // and here, why different interfaces? TODO
    ser_push(&(temp_scheduler->buff_successor), first);
#else
    push(&(temp_scheduler->buff_successor), first);
#endif
#ifdef TEMP_DEBUG
    printf("after_push_buff_succ,pool=%p\n", temp_scheduler->buff_successor.task_pool);
    fflush(stdout);
#endif
    /* Tell the master loop there are staged successors to flush. */
    is_need_pending_succ = 1;
}
/*
 * spawn_to_pending_succ - push a successor task directly onto the master
 * scheduler's pending_successor queue (bypassing the staging buffer).
 */
void spawn_to_pending_succ(struct task *first)
{
    generic_scheduler *temp_scheduler = &schedulers[total_num_threads];

#ifdef ARRAY_STACK
    ser_push(&(temp_scheduler->pending_successor), first);
#else
    push(&(temp_scheduler->pending_successor), first);
#endif
}
/*
 * spawn_succ_from_buff_to_pending - move every staged successor task from
 * buff_successor to pending_successor, then (if anything was moved) hand
 * the succ segment token back to the slave side (SEG_SLAVE) so slaves can
 * stage new successors.  Clears is_need_pending_succ when done.
 */
void spawn_succ_from_buff_to_pending()
{
    generic_scheduler *temp_scheduler = &schedulers[total_num_threads];
    struct task *t = NULL;
    // copy buff_successor to pending_successor
    // while(temp_scheduler->buff_successor.task_pool!=NULL)
    int mask = 0;
    while (1)
    // while(t=get_next_task_from_buff(&(temp_scheduler->buff_successor))!=NULL)
    {
#ifdef ARRAY_STACK
        if (temp_scheduler->buff_successor.top > 0) {
            // copy_task_queue(&(temp_scheduler->buff_successor),&(temp_scheduler->pending_successor));
            if (ser_try_pop(&(temp_scheduler->buff_successor), &t)) {
                ser_push(&(temp_scheduler->pending_successor), t);
                mask = 1;
            }
        }
#else
#ifdef TEMP_DEBUG
        printf("buff_succ_pool=%p\n", temp_scheduler->buff_successor.task_pool);
        fflush(stdout);
#endif
        if (temp_scheduler->buff_successor.task_pool != NULL) {
            if (try_pop(&(temp_scheduler->buff_successor), &t)) {
#ifdef TEMP_DEBUG
                printf("get_task_from_buff_successor:%p\n", t);
                fflush(stdout);
#endif
                push(&(temp_scheduler->pending_successor), t);
#ifdef TEMP_DEBUG
                printf("after_push_pend_succ,pool=%p\n",
                       temp_scheduler->pending_successor.task_pool);
                fflush(stdout);
#endif
                mask = 1;
            }
        }
#endif
        // The else binds to the "buffer non-empty" if of whichever branch
        // was compiled: buffer drained, so return the token if we moved work.
        else {
            if (mask > 0) {
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
#ifdef TARG_SW5
                h2ldm(seg_succ, total_num_threads - 1, cgid) = SEG_SLAVE;
#elif TARG_SW9
                my_h2ldm(seg_succ_addr[agent_id], agent_id, cgid, int) = SEG_SLAVE;
#ifdef TEMP_DEBUG
                printf("buff_succ_is_NULL,seg_succ=%d\n",
                       my_h2ldm(seg_succ_addr[agent_id], agent_id, cgid, int));
                fflush(stdout);
#endif
#endif
#endif
#if defined(MMEM_SEG) || defined(TWIN_SEG)
                seg_succ = SEG_SLAVE;
#endif
            }
            break;
        }
    }  // end of while
    is_need_pending_succ = 0;
    //        printf("end_of_spawn_to_buff_succ\n");
    //        fflush(stdout);
    //        h2ldm(seg_succ,sche_num_threads-1,cgid)=SEG_SLAVE;
}
// Drain the master's pending_spawn buffer into its private run queue(s).
// Each parked task is popped from pending_spawn and re-pushed onto
// master_private_queue (per-priority queue when LOCAL_MULTI_PRIORI is set).
// Once the buffer is empty, and at least one task was actually moved,
// the spawn segment flag is handed back to the slave side (SEG_SLAVE).
void get_need_spawn_task_from_pending()
{
    generic_scheduler *temp_scheduler = &schedulers[total_num_threads];
    struct task *t = NULL;
    int mask = 0;  // becomes 1 once any task has been transferred
    // traverse pending_spawn
    while (1) {
#ifdef ARRAY_STACK
        if (temp_scheduler->pending_spawn.top > 0) {
            if (ser_try_pop(&(temp_scheduler->pending_spawn), &t)) {
#ifdef LOCAL_MULTI_PRIORI
                ser_push(&(temp_scheduler->master_private_queue[t->priority_id]), t);
#else
                ser_push(&(temp_scheduler->master_private_queue), t);
#endif
                // Bug fix: mask was never set in the ARRAY_STACK variant, so the
                // seg_spawn hand-back below could never trigger. The non-ARRAY_STACK
                // variant and the matching buff_successor routine both set it here.
                mask = 1;
            }
        }
#else
        if (temp_scheduler->pending_spawn.task_pool != NULL) {
            if (try_pop(&(temp_scheduler->pending_spawn), &t)) {
#ifdef LOCAL_MULTI_PRIORI
                push(&(temp_scheduler->master_private_queue[t->priority_id]), t);
#else
                push(&(temp_scheduler->master_private_queue), t);
#endif
                mask = 1;
            }
        }
#endif
        else {
            // Buffer is empty: optionally return the segment flag, then stop.
            if (mask > 0) {
#if defined(LOCAL_SEG) || defined(TWIN_SEG)
#ifdef TARG_SW5
                h2ldm(seg_spawn, total_num_threads - 1, cgid) = SEG_SLAVE;
#elif TARG_SW9
                my_h2ldm(seg_spawn_addr[agent_id], agent_id, cgid, int) = SEG_SLAVE;
#endif
#endif
#if defined(MMEM_SEG) || defined(TWIN_SEG)
                seg_spawn = SEG_SLAVE;
#endif
            }
            break;
        }
    }  // end of while
}

#endif

// Push a task whose affinity is the master scheduler onto the master's
// private queue. The concrete queue and push operation are selected at
// compile time:
//   ARRAY_STACK                       -> serial stack interface (ser_push)
//   LOCAL_MULTI_PRIORI                -> one private queue per priority level,
//                                        indexed by first->priority_id
//   EMP_MASTER (+EMP_CONCURRENT_Q)    -> emperor-queue path: push to the
//                                        global concurrent queue, or hand off
//                                        to the g-server via EQ_main_push
//   default                           -> plain push onto master_private_queue
void local_spawn_master(generic_scheduler *my_scheduler, struct task *first)
{
#ifdef ARRAY_STACK  // why different interfaces? TODO
#ifdef LOCAL_MULTI_PRIORI
    ser_push(&(my_scheduler->master_private_queue[first->priority_id]), first);
#else
    ser_push(&(my_scheduler->master_private_queue), first);
#endif
#else
#ifdef LOCAL_MULTI_PRIORI
    push(&(my_scheduler->master_private_queue[first->priority_id]), first);
#else
#ifdef EMP_MASTER
#ifdef EMP_CONCURRENT_Q
    EQ_queue_master_push(&global_cent_queue, first, 1);
#else
    EQ_main_push(first, STATUS_PUSH_MS);  // send to g-server
#endif
#else
    push(&(my_scheduler->master_private_queue), first);
#endif
#endif
#endif
}

#endif

// Dispatch a ready task to the scheduler chosen by its affinity id.
// destid == total_num_threads selects the master scheduler (MASTER build);
// any other non-negative destid selects slave thread destid. The transport
// used to deliver the task to a slave depends on the build configuration:
// per-slave register/flag handshake (_SERIAL_QUEUE/_CIRCULAR_QUEUE), segment
// buffers (SEG_BUFF/TEMP_M2L), emperor queues (EMP_QUEUE), or a direct push
// onto the destination scheduler's queue. A negative destid is a hard error.
void spawn_to_id(struct task *t)
{  // modify TODO
    // printf("master_spawn_to_id\n");
    int destid = get_affinity_id(t);
#ifdef TEMP_AFFI
    // Sanity check: the affinity computed now should match the one recorded
    // when the task was created.
    if (destid != t->bak_affinity_id) {
        printf("warning: destid has changed!!!!!rankid=%d,%d,%d,t=%p\n", my_mpi_rank, destid,
               t->bak_affinity_id, t);
        fflush(stdout);
    }
#endif
    //    double de=0;
    // printf("spawn to %d\n",destid);
    if (destid > -1) {
#ifdef MASTER
        // Master-affinity task: push to the master scheduler's own queue
        // (or to the emperor queue / g-server when EMP_MASTER is enabled).
        if (destid == total_num_threads) {
            generic_scheduler *that_scheduler = &schedulers[destid];
#ifdef LOCAL_MULTI_PRIORI
            push(&(that_scheduler->master_private_queue[t->priority_id]), t);
#else
#ifdef EMP_MASTER
#ifdef EMP_CONCURRENT_Q
            EQ_queue_master_push(&global_cent_queue, t, 1);
#else
            EQ_main_push(t, STATUS_PUSH_MS);//
#endif
#else
            push(&(that_scheduler->master_private_queue), t);
#endif
#endif
        } else {
#endif
#ifdef TEMP_DEBUG
            printf("spawn_%p_to_%d,\n", t, t->affinity_id);
            fflush(stdout);
#endif
#if (defined(_SERIAL_QUEUE) && defined(MASTER)) || (defined(_CIRCULAR_QUEUE) && defined(MASTER))
            //           printf("error\n");
            //           fflush(stdout);
            int tmp, k;
            struct task *temp_t;
#ifdef TARG_SW5
#ifdef ACEMESH_PROFILING_SUCC
            unsigned long spawn_start = rpcc();
#endif
            // Hand the task to slave `destid` through a one-slot mailbox:
            // spin until the slot is free, store the task pointer, then raise
            // the flag. The `memb` barriers order the store of the task
            // pointer before the flag becomes visible to the slave.
#ifdef LOCAL_FLAG_M2S
            while ((tmp = h2ldm(master_flag, destid, cgid)) != 0)
                ;
            h2ldm(from_master, destid, cgid) = t;
            asm volatile("memb\n");
            h2ldm(master_flag, destid, cgid) = 1;
            asm volatile("memb\n");
#else
            while (master_flag[destid] != 0)
                ;
            from_master[destid] = t;
            asm volatile("memb\n");
            master_flag[destid] = 1;
            asm volatile("memb\n");
#endif
#ifdef ACEMESH_PROFILING_SUCC
            all_spawn_slave_task_time[64] += (rpcc() - spawn_start);
            // printf("sche---%ld,",all_spawn_slave_task_time[64]);
#endif
#else
#ifdef TEMP_M2L
#if defined(TARG_SW9) || defined(SEG_BUFF)
            spawn_to_buff_succ(t);
#endif
#endif
#endif
            //           temp_t=h2ldm(from_master,destid,cgid);
            // printf("rank:%d,modify:%ld,%ld\n",my_mpi_rank,t,temp_t);
            //           pre_affinity_id=destid;

#else

// #ifdef USE_COMPOSITE_TASK
#if defined(SEG_BUFF)
        printf("don't need");
        fflush(stdout);
        spawn_to_buff_succ(t);
#else
        generic_scheduler *that_scheduler = &schedulers[destid];
#ifdef USE_PRIORITY_QUEUE
        // Route by priority: 0 -> normal private queue, >0 -> priority queue.
        int temp_pri_id = get_priority_id(t);
        if (!temp_pri_id)
            push(&(that_scheduler->private_queue), t);
        else if (temp_pri_id > 0)
            push(&(that_scheduler->priority_queue), t);
        else
            assert(0);
#else
#ifdef EMP_QUEUE
#ifdef EMP_MASTER
        // Buffer the ready successor; when the buffer fills, wait for the
        // slave side to release the m2s mailbox, flush the buffer, retry the
        // push, and signal the slave (BUFF_STATUS_SL).
        if (master_buff_push(&ready_succ_buff, t)) {
            ;
        } else {  // ready_succ_buff FULL
            // printf("ready to consume ready_succ_buff\n");
            // fflush(stdout);
            // while (h2ldm(m2s_flag, 0, cgid) != BUFF_STATUS_MS) {
            while (m2s_flag != BUFF_STATUS_MS) {
                /*if (local_succ_count == 0) {
                    printf("ready succ buff full, SL handling\n");
                    fflush(stdout);
                }
                else {
                    printf("local_succ_count != 0\n");
                    fflush(stdout);
                    assert(0);
                }*/
                ;
            }
            master_buff_consume();
            asm volatile("memb\n");
            // printf("consumed ready_succ_buff, %d elements\n", m2s_pending.idx);
            // fflush(stdout);
            master_buff_push(&ready_succ_buff, t);
            // printf("pushed to ready_succ_buff\n");
            // fflush(stdout);
            // h2ldm(m2s_flag, 0, cgid) = BUFF_STATUS_SL;
            m2s_flag = BUFF_STATUS_SL;
            asm volatile("memb\n");
            // printf("change m2s_flag to SL\n");
            // fflush(stdout);
        }
#else
#ifdef EMP_CONCURRENT_Q
        EQ_queue_master_push(&global_cent_queue, t);
#else
        EQ_main_push(t);  // send to g-server
#endif
#endif
#else
        push(&(that_scheduler->private_queue), t);
#endif
#endif
#endif

#endif
#ifdef MASTER
        }
#endif
    } else {
        // A task must always have a valid (non-negative) affinity here.
        assert(0);
    }
}

#ifdef MPI_SURPPORT

// MPI path: spawn a task onto this scheduler's master private queue
// (per-priority queue when LOCAL_MULTI_PRIORI is enabled).
void mpi_local_spawn(generic_scheduler *my_scheduler, struct task *first)
{
#ifdef LOCAL_MULTI_PRIORI
    push(&(my_scheduler->master_private_queue[first->priority_id]), first);
#else
    push(&(my_scheduler->master_private_queue), first);
#endif
#ifdef DEBUG
    printf("rank%d, mpi_local_spawn\n", my_mpi_rank);
#endif
}
// Park a blocking (communication) task on this scheduler's blocking queue.
// With MTEST_LIGHT, the task is wrapped in an mtest_task that captures the
// pending MPI request handles/kinds of the current thread; the thread-local
// handle slots are then cleared so they are not reused by the next task.
void suspend_spawn(generic_scheduler *my_scheduler, struct task *first)
{
#ifdef MTEST_LIGHT
    struct mtest_task *mt = NULL;

#ifdef MEMORY_POOL
#ifdef USE_SIMD
#ifdef TARG_SW5
    mt = (struct mtest_task *)acemesh_myalloc_aligned_32(sizeof(struct mtest_task));
#elif TARG_SW9
    mt = (struct mtest_task *)acemesh_myalloc_aligned_64(sizeof(struct mtest_task));
#endif
#else
    mt = (struct mtest_task *)acemesh_myalloc_aligned_16(sizeof(struct mtest_task));
#endif
#else
    mt = (struct mtest_task *)malloc(sizeof(struct mtest_task));
#endif
    // Snapshot the outstanding communication state for this task.
    mt->comm_task = first;
    mt->next = NULL;
    mt->comm_handle1 = mtest_handle1;
    mt->comm_handle2 = mtest_handle2;
    mt->comm_kind1 = mtest_kind1;
    mt->comm_kind2 = mtest_kind2;
    // printf("h1=%0x, h2=%0x myrank=%d\n", mtest_handle1, mtest_handle2, my_mpi_rank);
    mtest_handle1 = NULL;
    // Bug fix: previously this line repeated "mtest_handle1 = NULL;",
    // leaving mtest_handle2 stale for the next blocking task.
    mtest_handle2 = NULL;
    q_push_mtest(&(my_scheduler->mtest_blocking_queue), mt);
#else
    q_push(&(my_scheduler->blocking_queue), first);
#endif
#ifdef DEBUG
    printf("rank:%d, blocking task push to blocking_queue\n", my_mpi_rank);
#endif
}
// Route a blocking MPI task to the scheduler that owns the blocking queue:
// the master scheduler (index total_num_threads) for the distributed
// scheduler build, otherwise the dedicated MS_SCHED scheduler.
void mpi_spawn(struct task *first)
{
#ifdef DISTRIBUTED_SCHEDULER
    int sched_idx = total_num_threads;
#else
    int sched_idx = MS_SCHED;
#endif
    suspend_spawn(&schedulers[sched_idx], first);
}

#endif

// called only when the task dag graph is init-spawn,
// used to spawn tasks without any precessors(not concurrent)
void init_spawn_sche(struct task *t)
{
    int my_id = t->affinity_id;
    //printf("init spawn to %d,%p\n",my_id,t);
    if (my_id > -1) {
        /*#ifdef MPI_SURPPORT
                printf("task type:%d\n",get_task_type((aceMesh_task*)t));
                if (get_task_type((aceMesh_task*)t)==BLOCKING_TASK)
                {
                    generic_scheduler* my_scheduler = &schedulers[total_num_threads];
                    printf(" blocking sche_num:%d\n",total_num_threads);
                    //push(&(that_scheduler->private_queue),t);
                    suspend_spawn(my_scheduler,t);
                }
                else
                {
        #endif*/
        generic_scheduler *my_scheduler = &schedulers[my_id];
#ifdef DEBUG
        printf("rank%d, init spawn sche num:%d\n", my_mpi_rank, my_id);
#endif
#ifdef MASTER
        if (my_id == total_num_threads) {
            printf("master_push_master_task\n");
            local_spawn_master(my_scheduler, t);
        } else {
#endif
            local_spawn(my_scheduler, t);
#ifdef MASTER
        }
#endif
        /*#ifdef MPI_SURPPORT
                }
        #endif*/
    }
}

// Initialize the runtime's shared state and launch the worker threads on
// the slave cores via athread. Order matters: the scheduler array (SW9:
// uncached memory), emperor-queue pointers, master<->slave flag arrays and
// profiling buffers must all be set up before the workers start running
// worker_kernel_func, and athread_init() must precede the cgid query.
void create_worker_thread(int n_threads)
{
#ifdef THREAD_TRACE_LEVEL_1
    trace_time = rpcc() - begin_time;
    trace_print(trace_time, '0', RTS_thread_init);
#endif

    int i;
#ifndef LOCAL_FLAG
    is_run = 0;
    // mpi_is_run = 0;
#endif
    // total_num_threads=64;
    // sche_num_threads=n_threads;
#ifdef TARG_SW9
    // SW9: schedulers live in uncached memory so slave cores see a
    // coherent view; zero the whole array before any worker starts.
    schedulers =
        (struct generic_scheduler *)libc_uncached_malloc(sizeof(struct generic_scheduler) * 65);
    memset(schedulers, 0, sizeof(struct generic_scheduler) * 65);
#ifdef DEBUG
#ifdef DISTRIBUTED_SCHEDULER
    for (i = 0; i < n_threads; i++) {
        printf("generic scheduler %d = %p\n", i, &schedulers[i]);
    }
#endif
#endif
#endif

#ifdef EMP_QUEUE
#if defined(EMP_MASTER) && defined(EMP_CONCURRENT_Q)
    // EQ_master_queue master_queue;
    my_master_queue = &master_queue;
#endif
#if defined(EMP_CONCURRENT_Q)
    // struct EQ_queue global_cent_queue;
    my_global_cent_queue = &global_cent_queue;
#endif
#endif

#ifdef ACEMESH_PROFILING_INST
    penv_slave1_inst_init();
    for (i = 0; i < n_threads; i++) {
        inst_perf[i] = 0;
    }
#endif
#if (defined(_SERIAL_QUEUE) && defined(MASTER)) || (defined(_CIRCULAR_QUEUE) && defined(MASTER))
#ifndef LOCAL_FLAG_M2S
    // Reset the per-slave master->slave mailboxes before workers spin on them.
    for (i = 0; i < 64; i++) {
#ifdef SUCC_BATCH
        m2s_flag[i] = 1;
#else
        master_flag[i] = 0;
        from_master[i] = NULL;
#endif
    }
#endif
#endif
#ifdef ACEMESH_SCHEDULER_PROFILING
    // Per-thread profiling accumulators, freed in close_worker_thread().
    all_threads_sum_vert_times = (long *)malloc(sizeof(long) * total_num_threads);
    all_threads_reuse_times = (long *)malloc(sizeof(long) * total_num_threads);
    all_threads_maxReuseChain = (int *)malloc(sizeof(int) * total_num_threads);
    all_nfull = (long *)malloc(sizeof(long) * total_num_threads);

    memset(all_threads_sum_vert_times, 0, sizeof(long) * total_num_threads);
    memset(all_threads_reuse_times, 0, sizeof(long) * total_num_threads);
    memset(all_threads_maxReuseChain, 0, sizeof(int) * total_num_threads);
    memset(all_nfull, 0, sizeof(long) * total_num_threads);
    /*for(i = 0; i < total_num_threads; ++i)
    {
        //printf(",  %lld", all_threads_sum_vert_times[i]);
        printf(",  %ld", all_nfull[i]);
        if (i%8==7) printf("\n");
    }*/
#endif
    athread_init();
#ifdef TARG_SW5
    cgid = sys_m_cgid();
#elif TARG_SW9
    cgid = current_array_id();
#endif
#ifdef THREAD_TRACE
    diff = rpcc() - begin_time;
#endif
#ifdef _SPAWN
    athread_spawn(worker_kernel_func, 0);
#else
    for (i = 0; i < total_num_threads; ++i) {
        athread_create(i, worker_kernel_func, 0);
#ifdef DEBUG
        printf("create worker thread:%d\n", i);
#endif
    }
#endif
    /*#ifdef MPI_THREAD
        create_mpi_thread();
    #endif*/

#ifdef THREAD_TRACE_LEVEL_1
    trace_time = rpcc() - begin_time;
    trace_print(trace_time, '0', RTS_event_end);
#endif
#ifdef SLAVE_CONSTRUCT_HELEPR
    // printf("prepare to test slave_init_done\n");
    // fflush(stdout);

    // i = 0;
    // while (i != total_num_threads) {
    //     for (i = 0; i < total_num_threads; i++) {
    //         if (my_h2ldm(helper_init_done, i, cgid,int) == 0) {
    //             break;
    //         }
    //     }
    // }

    // printf("slave_init_done all ok\n");
    // fflush(stdout);
    // assert(0);
#endif
}

// Shut down all worker threads and, when profiling is enabled, print the
// collected statistics (pure exec times, task counts, register-transfer
// counters, etc.) and release the profiling buffers allocated in
// create_worker_thread().
void close_worker_thread()
{
    int i;  // removed unused locals j, sum
#if defined(TARG_SW9) && defined(EMP_QUEUE)
    // Tell every slave-side emperor queue that the master is closing.
    for (i = 0; i < total_num_threads; i++) {
        my_h2ldm(emp_close_master[i], i, cgid, int) = 1;
    }
#endif
#if defined(TARG_SW9) && defined(CONCURRENT_CONSTRUCT_GRAPH) && defined(SPEIO)
    my_h2ldm(con_graph, total_num_threads - 1, cgid, int) = 0;
#endif
    // Raise the shutdown flag(s) the workers poll.
#ifdef LOCAL_FLAG
    for (i = 0; i < total_num_threads; ++i) {
#ifdef DEBUG
        printf("close_thread:%d\n", i);
#endif
#ifdef TARG_SW5
        h2ldm(close_all_threads, i, cgid) = 1;
#elif TARG_SW9
        my_h2ldm(close_all_threads_addr[i], i, cgid, int) = 1;
#endif
    }
#else
    close_all_threads = 1;
#endif
#ifdef _SPAWN
    athread_join();
    athread_halt();
#else
    for (i = 0; i < total_num_threads; ++i) {
        athread_wait(i);
        athread_end(i);
    }
#endif
#ifdef DEBUG
    printf("close all worker_thread\n");
    fflush(stdout);
#endif

    if (my_mpi_rank <= 0) {
#ifdef ACEMESH_PROFILING_CLASSIFY
        printf("\nprofiling classify dag exec time \n");
        for (i = 0; i < MAX_PROF_RECORD; i++) {
            if (classify_exec_time[i] > 0.0)  // was "(classify_exec_time[i] - 0) > 0.0"
                printf("\tdag_%d_exec_time=%lf\n", i, classify_exec_time[i]);
        }
        printf("\n");
#endif

#ifdef ACEMESH_SCHEDULER_PROFILING
#ifdef MEMORY_POOL
        printf("memory pool realloc time: %lu ,memory requirment: %d MB\n", mempool_alloc_block_num,
               UNITSIZE / 1024 / 1024 * mempool_alloc_block_num);
#endif
        printf("max_num_neighbor=%d\n", max_num_neigh);
        for (i = 0; i < total_num_threads; i++) {
            max_num_successor = mymax(max_num_successor, num_successor[i]);
        }
        printf("max_num_successor=%d\n", max_num_successor);
#endif
    }

#ifdef ACEMESH_SCHEDULER_PROFILING

    if (my_mpi_rank <= 0) {
#ifdef MASTER
        printf("rank:%d, master no_MPI pure time:%6.3lf\n", my_mpi_rank, blocking_pure_exec_time);
#ifdef SEG_BUFF
        printf("rank:%d,master_seg_sche_copy_time=%lf\n", my_mpi_rank, master_seg_sche_time);
        printf("rank:%d,master_seg_sche_h2ldm_time=%lf\n", my_mpi_rank, master_seg_sche_time2);
#endif
#endif

        // Sum / min / max accumulators over the per-thread profiling arrays.
        double sum_pure_time = 0.0;
        double max_pure_time = 0.0;
        double min_pure_time = 99999.0;

        double sum_pure_task_compute_time = 0.0;
        double max_pure_task_compute_time = 0.0;
        double min_pure_task_compute_time = 99999.0;

        double sum_pure_task_dma_time = 0.0;
        double max_pure_task_dma_time = 0.0;
        double min_pure_task_dma_time = 99999.0;
#ifdef ACEMESH_SCHEDULER_PROFILING
        print_and_reset_reuse_statistics();
#endif

        printf("pure exec time");
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6.3lf", all_pure_exec_time[i]);
            fflush(stdout);
            sum_pure_time += all_pure_exec_time[i];
            max_pure_time =
                (max_pure_time < all_pure_exec_time[i]) ? all_pure_exec_time[i] : max_pure_time;
            min_pure_time =
                (min_pure_time > all_pure_exec_time[i]) ? all_pure_exec_time[i] : min_pure_time;
            if (i % 16 == 15) printf("\n");

            sum_pure_task_compute_time += all_pure_task_compute_time[i];
            max_pure_task_compute_time =
                (max_pure_task_compute_time < all_pure_task_compute_time[i])
                    ? all_pure_task_compute_time[i]
                    : max_pure_task_compute_time;
            min_pure_task_compute_time =
                (min_pure_task_compute_time > all_pure_task_compute_time[i])
                    ? all_pure_task_compute_time[i]
                    : min_pure_task_compute_time;
            sum_pure_task_dma_time += all_pure_task_dma_time[i];
            max_pure_task_dma_time = (max_pure_task_dma_time < all_pure_task_dma_time[i])
                                         ? all_pure_task_dma_time[i]
                                         : max_pure_task_dma_time;
            min_pure_task_dma_time = (min_pure_task_dma_time > all_pure_task_dma_time[i])
                                         ? all_pure_task_dma_time[i]
                                         : min_pure_task_dma_time;
        }
        printf(", %6.3lf", all_pure_exec_time[total_num_threads]);
        printf("\n");

#ifdef EMP_QUEUE
        // One core per column is dedicated to the emperor queue and does not
        // execute tasks; exclude those from the averaging denominator.
        int real_sche_nt = sche_num_threads - sche_num_threads / PE_NCOL;
#else
        int real_sche_nt = sche_num_threads;
#endif

        printf(
            "rank: %d, aver_pure_task_compute_time: %6.3lf ,max_pure_task_compute_time: %6.3lf "
            ",min_pure_task_compute_time: %6.3lf \n",
            my_mpi_rank, sum_pure_task_compute_time / real_sche_nt, max_pure_task_compute_time,
            min_pure_task_compute_time);
        printf(
            "rank: %d, aver_pure_task_dma_time: %6.3lf ,max_pure_task_dma_time: %6.3lf "
            ",min_pure_task_dma_time: %6.3lf \n",
            my_mpi_rank, sum_pure_task_dma_time / real_sche_nt, max_pure_task_dma_time,
            min_pure_task_dma_time);
        printf(
            "rank: %d, aver_pure_time: %6.3lf ,max_pure_time: %6.3lf ,min_pure_time: %6.3lf "
            ",DLB_pure=%6.3lf \n",
            my_mpi_rank, sum_pure_time / real_sche_nt, max_pure_time, min_pure_time,
            sum_pure_time / real_sche_nt / max_pure_time);
        sum_pure_time = 0.0;
        max_pure_time = 0.0;
        min_pure_time = 99999.0;
        printf("task pure exec time");
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6.3lf", all_task_pure_exec_time[i]);
            sum_pure_time += all_task_pure_exec_time[i];
            max_pure_time = (max_pure_time < all_task_pure_exec_time[i])
                                ? all_task_pure_exec_time[i]
                                : max_pure_time;
            min_pure_time = (min_pure_time > all_task_pure_exec_time[i])
                                ? all_task_pure_exec_time[i]
                                : min_pure_time;
            if (i % 16 == 15) printf("\n");
        }
        printf(", %6.3lf", all_task_pure_exec_time[total_num_threads]);
        printf("\n");
        printf(
            "rank: %d, aver_task_pure_time: %6.3lf ,max_task_pure_time: %6.3lf "
            ",min_task_pure_time: %6.3lf \n",
            my_mpi_rank, sum_pure_time / real_sche_nt, max_pure_time, min_pure_time);

        long total_ntasks = 0;
        long max_ntask = 0;
        long min_ntask = 999999;
        long aver_ntask = 0;

#if defined(ACEMESH_GRAPH_BUILD_PROFILING) && defined(FINE_PROF)
        double sum_pure_add_time = 0.0;
        double max_pure_add_time = 0.0;
        double min_pure_add_time = 99999.0;
        double sum_pure_reg_time = 0.0;
        double max_pure_reg_time = 0.0;
        double min_pure_reg_time = 99999.0;

        printf("pure sub addsucc time");
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6.3lf", all_pure_addsucc_time[i]);
            fflush(stdout);
            sum_pure_add_time += all_pure_addsucc_time[i];
            max_pure_add_time =
                (max_pure_add_time < all_pure_addsucc_time[i]) ? all_pure_addsucc_time[i] : max_pure_add_time;
            min_pure_add_time =
                (min_pure_add_time > all_pure_addsucc_time[i]) ? all_pure_addsucc_time[i] : min_pure_add_time;
            if (i % 16 == 15) printf("\n");
        }
        // Bug fix: the tail element previously printed came from
        // all_task_pure_exec_time instead of all_pure_addsucc_time.
        printf(", %6.3lf", all_pure_addsucc_time[total_num_threads]);
        printf("\n");
        printf(
            "rank: %d,total_task_pure_addsucc_time: %6.3lf ,aver_task_pure_addsucc_time: %6.3lf ,max_task_pure_addsucc_time: %6.3lf "
            ",min_task_pure_addsucc_time: %6.3lf \n",
            my_mpi_rank, sum_pure_add_time, sum_pure_add_time / real_sche_nt, max_pure_add_time, min_pure_add_time);
        printf("\n");

        printf("pure sub register_access time");
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6.3lf", all_pure_register_time[i]);
            fflush(stdout);
            sum_pure_reg_time += all_pure_register_time[i];
            max_pure_reg_time =
                (max_pure_reg_time < all_pure_register_time[i]) ? all_pure_register_time[i] : max_pure_reg_time;
            // Bug fix: the false branch of the min-fold wrongly kept
            // min_pure_add_time, corrupting the register-time minimum.
            min_pure_reg_time =
                (min_pure_reg_time > all_pure_register_time[i]) ? all_pure_register_time[i] : min_pure_reg_time;
            if (i % 16 == 15) printf("\n");
        }
        // Bug fix: tail element taken from the correct array (was
        // all_task_pure_exec_time).
        printf(", %6.3lf", all_pure_register_time[total_num_threads]);
        printf("\n");
        printf(
            "rank: %d,total_task_pure_reg_time: %6.3lf ,aver_task_pure_reg_time: %6.3lf ,max_task_pure_reg_time: %6.3lf "
            ",min_task_pure_reg_time: %6.3lf \n",
            my_mpi_rank, sum_pure_reg_time, sum_pure_reg_time / real_sche_nt, max_pure_reg_time, min_pure_reg_time);
        printf("\n");

#endif

        printf("pure task num");
        for (i = 0; i <= total_num_threads; ++i) {
            printf(" %ld", all_pure_task_num[i]);
            total_ntasks += all_pure_task_num[i];  // condition "i <= total_num_threads" was always true
            if (i < real_sche_nt) {
                max_ntask = (max_ntask < all_pure_task_num[i] ? all_pure_task_num[i] : max_ntask);
                min_ntask = (min_ntask > all_pure_task_num[i] ? all_pure_task_num[i] : min_ntask);
            }
            if (i % 16 == 15) printf("\n");
        }
        // NOTE(review): index total_num_threads+1 — confirm all_pure_task_num
        // has at least total_num_threads+2 entries.
        printf(" %ld", all_pure_task_num[total_num_threads + 1]);
        printf("\n");
        aver_ntask = (total_ntasks - all_pure_task_num[total_num_threads]) / real_sche_nt;
        printf(
            "total_task_num=%ld, total_edge=%ld, edges/ntasks=%6.3lf,\n DLB_ntask=%6.3lf,%ld,%ld, "
            "ratio of vert=%lf\n",
            total_ntasks, total_nedges, (double)total_nedges / total_ntasks_register,
            (double)aver_ntask / max_ntask, aver_ntask, max_ntask,
            (double)main_sum_vert_times / total_ntasks);

#ifdef ACEMESH_PROFILING_INST
        unsigned long total_inst_perf = 0;
        for (i = 0; i < total_num_threads; i++) {
            total_inst_perf += inst_perf[i];
        }
        printf(
            "aver_per_task inst= %lu\n",
            total_inst_perf / ((unsigned long)total_ntasks - (unsigned long)all_pure_task_num[64]));
#endif
#ifdef ACEMESH_PROFILING_SUCC
        printf("slave_task_spawn_time:\n");
        for (i = 0; i <= total_num_threads; ++i) {
            printf(", %6.3lf", (double)all_spawn_slave_task_time[i] / SFREQ);
            if (i % 16 == 15) printf("\n");
        }
        printf("\nslave_task_trans_to_others_time:\n");
        for (i = 0; i <= total_num_threads; ++i) {
            printf(", %6.3lf", (double)all_spawn_slave_trans1_time[i] / SFREQ);
            if (i % 16 == 15) printf("\n");
        }
        printf("\nslave_task_trans2_time:\n");
        for (i = 0; i <= total_num_threads; ++i) {
            printf(", %6.3lf", (double)all_spawn_slave_trans2_time[i] / SFREQ);
            if (i % 16 == 15) printf("\n");
        }
#ifdef MASTER
        printf("\nall_spawn_master_task_time:\n");
        for (i = 0; i <= total_num_threads; ++i) {
            printf(", %6.3lf", (double)all_spawn_master_task_time[i] / SFREQ);
            if (i % 16 == 15) printf("\n");
        }
#endif

#ifdef _SERIAL_QUEUE
        printf("\npure_reg_put_time:\n");
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6.3lf", (double)all_reg_put_time[i] / SFREQ);
            if (i % 16 == 15) printf("\n");
        }
        printf("\n");
        printf("\nall_total_put_times:\n");
        unsigned int total_reg_put_time = 0;
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6u", all_total_reg_put_times[i]);
            total_reg_put_time += all_total_reg_put_times[i];
            if (i % 16 == 15) printf("\n");
        }
        printf("\n");
        printf("\nall_put_full_times:\n");
        unsigned int total_reg_put_full_time = 0;
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6u", all_reg_put_full_times[i]);
            total_reg_put_full_time += all_reg_put_full_times[i];
            if (i % 16 == 15) printf("\n");
        }
        printf("\n");
        if (total_reg_put_time)
            printf("total_put_time= %u, put_full_time= %u, ratio=%lf\n", total_reg_put_time,
                   total_reg_put_full_time, (double)total_reg_put_full_time / total_reg_put_time);
        else
            // Bug fix: %lf requires a double; the literal was the int 0 (UB).
            printf("total_put_time= %u, put_full_time= %u, ratio=%lf\n", total_reg_put_time,
                   total_reg_put_full_time, 0.0);

        printf("\n");
        printf("\ntotal_succ_reg_own_times:\n");
        unsigned int total_succ_reg_own_times = 0;
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6u", all_succ_reg_own_times[i]);
            total_succ_reg_own_times += all_succ_reg_own_times[i];
            if (i % 16 == 15) printf("\n");
        }

        printf("\n");
        printf("\ntotal_succ_reg_direct_times:\n");
        unsigned int total_succ_reg_direct_times = 0;
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6u", all_succ_reg_direct_times[i]);
            total_succ_reg_direct_times += all_succ_reg_direct_times[i];
            if (i % 16 == 15) printf("\n");
        }

        printf("\n");
        printf("\ntotal_succ_reg_transfer_times:\n");
        unsigned int total_succ_reg_transfer_times = 0;
        for (i = 0; i < total_num_threads; ++i) {
            printf(", %6u", all_succ_reg_transfer_times[i]);
            total_succ_reg_transfer_times += all_succ_reg_transfer_times[i];
            if (i % 16 == 15) printf("\n");
        }
        printf("\n");
        unsigned int temp_total_times =
            total_succ_reg_own_times + total_succ_reg_direct_times + total_succ_reg_transfer_times;
        // Bug fix (both branches): the counters are unsigned int, so the
        // format must be %u, not %lu (mismatched specifier is UB).
        if (temp_total_times) {
            printf(
                "succ_reg_own_times= %u, succ_reg_direct_times= %u, succ_reg_transfer_times= "
                "%u, total=%u, %u\n",
                total_succ_reg_own_times, total_succ_reg_direct_times,
                total_succ_reg_transfer_times, temp_total_times,
                total_succ_reg_direct_times + total_succ_reg_transfer_times * 2);
            printf("ratio of own= %6.3lf , ratio of direct= %6.3lf , ratio of transfer= %6.3lf\n",
                   (double)total_succ_reg_own_times / temp_total_times,
                   (double)total_succ_reg_direct_times / temp_total_times,
                   (double)total_succ_reg_transfer_times / temp_total_times);
        } else {
            printf(
                "succ_reg_own_times= %u, succ_reg_direct_times= %u, succ_reg_transfer_times= "
                "%u, total=%u, %u\n",
                total_succ_reg_own_times, total_succ_reg_direct_times,
                total_succ_reg_transfer_times, temp_total_times,
                total_succ_reg_direct_times + total_succ_reg_transfer_times * 2);
            printf("ratio of own= %6.3lf , ratio of direct= %6.3lf , ratio of transfer= %6.3lf\n",
                   (double)0, (double)0, (double)0);
        }
#endif
#endif
    }

#ifdef _LDMQ
    printf("\nnfull:");
    long int max_nfull = 0;
    for (i = 0; i < total_num_threads; ++i) {
        max_nfull = (max_nfull < all_nfull[i] ? all_nfull[i] : max_nfull);
        total_ldm_num_fulls += all_nfull[i];
    }
    printf("\n");
    printf("total_ldm_num_fulls: %ld ,max_nfull=%ld\n", total_ldm_num_fulls, max_nfull);
#endif

    // Release the profiling buffers allocated in create_worker_thread().
    free(all_threads_sum_vert_times);
    free(all_threads_maxReuseChain);
    free(all_threads_reuse_times);
    free(all_nfull);
    all_threads_sum_vert_times = NULL;
    all_threads_maxReuseChain = NULL;
    all_threads_reuse_times = NULL;
    all_nfull = NULL;
#endif

#ifdef DEBUG
    printf("out of close_thread\n");
#endif
}

/*#ifdef MPI_THREAD
int create_mpi_thread()
{
    int ierror;
    ierror = pthread_create(&ntid,NULL,&mpi_worker_kernel_func,NULL);
    printf("create pthread_thread_create\n");
    if(ierror!=0)
    {
        printf("cannot create thread!\n");
        return -1;
    }
}
#endif*/

#ifdef ACEMESH_PARA
// Aggregate the per-thread parallelism samples (npar) into npar_sum, compute
// the average parallelism (fpar), count idle intervals (nidle, and those
// spent waiting on communication), dump npar_sum to ./output/para_<rank>.csv,
// and print summary statistics for this rank.
void statistics_npar_and_nidle()
{
    int i, j, sum;
    // Bug fix: the old buffers (char filename[20], char b[5] + strcat)
    // overflowed for ranks >= 10 ("./output/para_" is 14 chars; adding a
    // 2-digit rank, ".csv" and the NUL already needs 21 bytes).
    char filename[64];
    snprintf(filename, sizeof filename, "./output/para_%d.csv", my_mpi_rank);
    gid3 = mymin(gid3, MAXT);
    // Sum parallelism over all threads (plus the master row) per interval.
    for (i = 0; i < gid3; i++) {
        sum = 0;
        for (j = 0; j < total_num_threads; j++) {
            sum += npar[j][i];
        }
        npar_sum[i] = sum += npar[total_num_threads][i];
    }

    // Last interval with any activity.
    for (i = gid3; i >= 0; i--) {
        if (npar_sum[i]) {
            last_interval = i;
            break;
        }
    }
    sum = 0;
    for (i = 0; i < last_interval; i++) {
        sum += npar_sum[i];
    }
    fpar = (float)sum / (last_interval + 1.0);
    nidle = 0;
    nidle_wait_comm = 0;
    int ub = mymin(last_interval, MAXT);
    for (i = 0; i <= ub; i++) {
        if (npar_sum[i] == 0) {
            nidle++;
            if (waitcomm[i]) nidle_wait_comm++;
        }
    }

    // Bug fix: fopen was unchecked (fprintf to NULL is UB) and the stream
    // was never closed (descriptor leak; buffered data could be lost).
    FILE *fp = fopen(filename, "w");
    if (fp != NULL) {
        for (i = 0; i < ub; i++) {
            fprintf(fp, "%d\n", npar_sum[i]);
        }
        fclose(fp);
    } else {
        printf("rank: %d, cannot open %s\n", my_mpi_rank, filename);
    }
    printf("rank: %d, aver_parallelism: %lf\n", my_mpi_rank, fpar);
    printf("rank: %d, num_nidle: %d, last_interval: %d, nidle_wcomm: %d\n", my_mpi_rank, nidle,
           last_interval, nidle_wait_comm);
    printf("rank: %d, ratio_nidle: %f\n", my_mpi_rank, (float)nidle / last_interval);
}

#endif
