#include <sys/types.h> 
#include <sys/stat.h> 
#include <unistd.h>

#include <cassert>
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
#include "aceMesh_runtime.h"
#include "task_dag_graph.h"
#include "aceMesh_composite_task.h"
#if defined(DYNAMIC_SCHEDULER)
#include "aceMesh_scheduler_init.h"
#elif defined(ACEMESH_SCHEDULER)
#include "aceMesh_scheduler_init_v2.h"
#include "scheduler.h"
#endif

#ifdef MEMORY_POOL
extern "C" void FreePool();
#endif

extern "C" {
void print_and_reset_reuse_statistics();  
void close_worker_thread();
#ifdef ACEMESH_PARA
void statistics_npar_and_nidle();
#endif

#ifdef  NO_PARALLEL
#include "task_dag_graph.h"
using namespace AceMesh_runtime;
#else
#endif


#ifdef DATA_RACE_TRACE
#include "DataRaceTrace.h"
#endif

#define MAX_AM_THREADS 512
using std::string;
string CompileTime("2024-09-03 10:53:00");

char aceMesh_outdir[10];
int my_mpi_rank=-1;
#ifdef _BLACKROW
int black_topo;
#endif

#ifdef TEMP_NESTED
//maximum number of nested task-graph levels supported
#define NESTED_STACKSIZE 10
task_dag_graph task_graphs[NESTED_STACKSIZE];
int cur_nested_lev=0;
#else
task_dag_graph task_graph;
#endif


#include <iostream>

// Print a separator banner to stdout. Only MPI rank 0 — or a non-MPI
// run, where my_mpi_rank keeps its initial value of -1 — emits output;
// every other rank stays silent.
void echo_versionString()
{
    if (my_mpi_rank < 1) {
        std::cout << "----------------------------------------------" << std::endl;
    }
}
#ifdef __ACEMESH_THREAD_GROUP
#include "splitter.h"
// the first param is the number of dimensions, the following are the chunks on each dimension.
splitter cur_splitter(3, 1, 1, 1);
#endif

/**************************************************************/
/* we accept runtime environments as                          */
/* ACEMESH_NUM_THREADS, ACEMESH_THREAD_GROUP_SIZE             */
/* ACEMESH_CORE_LIST,   ACEMESH_SPLIT_CHUNKS                  */
/*environment settings have lower priority than in-program settings  */
/* apply environment variable settings,                       */
/* if they are not covered by the application.                */
/**************************************************************/

/* Read the AceMesh runtime environment variables into the output
 * parameters:
 *   ACEMESH_NUM_THREADS       -> total_threads  ((unsigned)-1 if unset)
 *   ACEMESH_THREAD_GROUP_SIZE -> group_size     (-1 if unset)
 *   ACEMESH_CORE_LIST         -> vec_core_ids   (left empty if unset)
 *   ACEMESH_SPLIT_CHUNKS      -> cur_splitter   (thread-group builds only)
 * Core list syntax: "[a,b,c-d,...]" where "c-d" expands to the inclusive
 * range c..d.  Exits the process on malformed input.
 * NOTE(review): the core-list scan assumes a terminating ']'; a missing
 * bracket walks past the end of the string — confirm callers sanitize. */
void get_env_settings(unsigned int & total_threads, int & group_size,  std::vector<int> &vec_core_ids)

{
    char* p_acemesh_num_threads;
    char* p_acemesh_thread_group_size;
    char* p_acemesh_core_list;

    p_acemesh_num_threads = getenv("ACEMESH_NUM_THREADS");
    if(p_acemesh_num_threads != NULL)
    {
        /* parse into a signed temporary first: comparing the unsigned
         * output parameter against 0 could never detect negatives */
        int n_threads = atoi(p_acemesh_num_threads);
        if(n_threads < 0)
        {
           std::cerr << "wrong number of thread group: " << n_threads << std::endl;
           exit(1);
        }
        total_threads = (unsigned int)n_threads;
    }else
	  total_threads = (unsigned int)-1;  /* sentinel: "not set" */
    p_acemesh_thread_group_size = getenv("ACEMESH_THREAD_GROUP_SIZE");
    if(p_acemesh_thread_group_size != NULL)
    {
        group_size = atoi(p_acemesh_thread_group_size);
        if(group_size < 0)
        {
            std::cerr << "wrong group size: " << group_size << std::endl;
            exit(1);
        }
	}else
	  group_size=-1;  /* sentinel: "not set" */

    p_acemesh_core_list = getenv("ACEMESH_CORE_LIST");
    if(p_acemesh_core_list != NULL)
    {
        int i, first, last;
        bool is_range = false;
        if(*p_acemesh_core_list != '[')
        {
            std::cerr << "invalid ACEMESH_CORE_LIST argument." << std::endl;
            exit(1);
        }
        while(*p_acemesh_core_list != ']')
        {
            ++p_acemesh_core_list;
            i = atoi(p_acemesh_core_list);
            if(is_range)
            {
                /* previous token was "first-": append first+1 .. last */
                last = i;
                assert(first < last);
                for (int ii = first + 1; ii <= last; ++ii)
                {
                    vec_core_ids.push_back(ii);
                }
                is_range = false;
            }
            else
            {
                vec_core_ids.push_back(i);
            }

            /* skip to the next separator (',', '-', or closing ']') */
            while(*p_acemesh_core_list != ',' && *p_acemesh_core_list != ']' && *p_acemesh_core_list != '-')
                ++p_acemesh_core_list;

            if(*p_acemesh_core_list == '-')
            {
                is_range = true;
                first = i;
            }
        }

        if(vec_core_ids.size() > MAX_AM_THREADS)
        {
            std::cout << "core list larger than "<< MAX_AM_THREADS << " is not currently supported." << std::endl;
            exit(1);
        }
        /* only meaningful when ACEMESH_NUM_THREADS was actually set; the
         * old unconditional assert aborted on the (unsigned)-1 sentinel */
        if(total_threads != (unsigned int)-1)
            assert(vec_core_ids.size()>=total_threads);

    }
    
#ifdef __ACEMESH_THREAD_GROUP
    char* p_acemesh_split_chunks;
    p_acemesh_split_chunks = getenv("ACEMESH_SPLIT_CHUNKS");
    std::vector<size_t> dim_chunks;
    if(p_acemesh_split_chunks != NULL)
    {
        /* syntax: "(c1,c2,...)" — one chunk count per dimension */
        int i;
        if(*p_acemesh_split_chunks != '(')
        {
            std::cerr << "invalid ACEMESH_SPLIT_CHUNKS argument." << std::endl;
            return;
        }
        while(*p_acemesh_split_chunks != ')')
        {
            ++p_acemesh_split_chunks;
            //assuming single digit is enough for describing splits ,by lchen
            i = atoi(p_acemesh_split_chunks);
            dim_chunks.push_back(i);
            while(*p_acemesh_split_chunks != ',' && *p_acemesh_split_chunks != ')')
            ++p_acemesh_split_chunks;
        }
        
        cur_splitter.set_dim_chunks(dim_chunks);
        int total;
        if(group_size!=-1 && (total=cur_splitter.get_total_chunks()) != group_size)
        {
            std::cerr << "splitter dimension chunks,"<<total<<" does not match thread group size"<< group_size << std::endl;
            exit(1);
        }
        std::cout << "environment setting on split_chunks(ncuts, ... ,ncuts):" ;
        std::cout << " " << dim_chunks[0];
        for(size_t i=1;i< dim_chunks.size();i++)
        std::cout << ", " <<dim_chunks[i];
        std::cout << std::endl;
    }
#endif
}


/* Print the compile-time configuration of this library build (scheduler
 * flavor, graph-building strategy, queueing policy, feature switches) to
 * stdout.  Purely informational; only MPI rank 0 — or a non-MPI run,
 * where my_mpi_rank stays -1 — prints.  Every branch below is selected
 * at compile time by the build's preprocessor flags. */
void echo_configurations()
{
  if(my_mpi_rank>=1) 
	return;
  std::cout << "/*********************************************/" <<std::endl;
  //std::cout << "DAG_HOME:    "<<"/dulm/DAG_HOME" << std::endl;
  std::cout<<"the Compile (Build) Time of this Lib is "<<CompileTime<<std::endl;
  std::cout << "Current Configurations of the Scheduler:       " <<std::endl;

/*basic scheduler*/
  std::cout << "BASIC SCHEDULER            :";
#if defined(DYNAMIC_SCHEDULER)
  std::cout << " DYNAMIC " << std::endl;
#elif defined(ACEMESH_SCHEDULER)
  std::cout << " ACEMESH_OWN " <<std::endl;

  /* how task dependences are discovered while the graph is built */
  std::string details;
  std::cout << "GRAPH_BUILDING             :";
#ifdef  LIGHT_BUILD_GRAPH
  details=" light_dep; ";
#elif  LIGHT_BUILD_GRAPH1 
  details=" light_dep1; ";
#elif  LIGHT_BUILD_GRAPH2 
  details=" light_dep2; ";
#else
  details=" basic-taskgraph, ";
#endif
#ifdef SCAN_NEED_SPAWN
  details+=" entry_scan_hash; "; 
#endif
#if defined( NO_END_TASK_LIST )
  details+=" exit_scan_hash; ";
#endif
#ifdef IMMEDIA_UPDATE
  details+=" immedia_update; ";
#endif

  std::cout << details << std::endl;


  /* which ready-task container / synchronization scheme the own
   * scheduler was built with */
  std::cout << "DETAILED STRATEGY          :";

  details="";
#ifdef USE_TBB_QUEUE
  details+="tbb_queue,";
#endif

//#ifdef USE_DEQUE
//  details+="deque,";
//#endif
//#ifdef USE_MY_OPT_STRATEGY
//  details+="my_opt,";
//#endif
#ifdef USE_PRIORITY_QUEUE
  details+="priority_queue,";
#endif

#ifdef USE_STACK
  details+=" stack(";

#ifdef _SERIAL_QUEUE
    details+="Hq_dyn ";
    #ifdef _LDMQ
    details+=" + ldmq";
    #endif
    #ifdef EMP_RANDOM_STEALING
    details+=", + inform stealing";
    #endif
    #if defined( ASYN_INSERT_Q )
      #if defined( ASYN_INSERT_REMOTE_Q ) 
    details+=", + asyn R-spawn";   
      #elif defined (ASYN_INSERT_LOCAL_Q)
    details+=", + asyn L-spawn";
      #elif defined (DELAYED_SEND)
    details+=", + delayed-spawn";  
      #endif
      #ifdef EMP_D_RANDOM_STEALING
    details+=", + M-rdm-stealing";
      #endif   
    #endif 
#elif defined(_CIRCULAR_QUEUE)
    details+="Hq_circular";
    #ifdef _LDMQ
    details+=" + ldmq";
    #endif
    #ifdef _BULKTRANS
    details+=" + bulktrans";
    #endif

#elif EMP_QUEUE
    details+="EMP_QUEUE";
    #ifdef MULTI_PUSH
        details+=", Multi-push";
    #endif

    #ifdef MULTI_POP
        details+=", Multi-pop";
    #endif

    #ifdef LDM_QUEUE
        details+=", ldm-queue";
    #elif HIER_QUEUE_LIFO
        details+=", hier-queue";
    #elif EMP_CONCURRENT_Q
        details+=", mem-con-queue";
    #else
        details+=", mem-queue";
    #endif

    #ifdef EMP_MASTER
        details+=", master";
    #endif

    #ifdef EMP_POP_REC
        details+=", pop-fail-record";
    #endif
#else
 details+="Mqueue, ";
    #if defined(mutex_lock) && defined(MUTEX_BASIC)
        details+="Sync=Mutex lock(basic),";
    #elif defined(mutex_lock)
        details+="Sync=Mutex lock(shm),";
    #elif defined(MLOCK)
        details+="Sync=mlock,";
    #else
        details+="Sync=CAS,";
    #endif

    #if defined(CENTRAL_FIFO)
        details+="central-fifo";
    #elif defined(DISTRIBUTED_SCHEDULER)
        details+="distributed-lifo";
    #else
        details+="central-lifo";
    #endif
    
    #ifdef CONCUR_Q_RANDOM_STEALING
        details+=", random-stealing";
    #endif
#endif
#ifdef LOCAL_SEG
 details+="+local_flag";
#elif MMEM_SEG
 details+="+mmem_flag";
#elif TWIN_SEG
 details+="+twins_flag";
#endif

//#ifdef _LDMQ
// details+="+ ldmq";
//#endif
//
//#ifdef _BULKTRANS
// details+="+ bulktrans";
//#endif

#ifdef _RETURN_VERTICAL
  details+=" + ret_vert";
#endif
 details+=" )";
#endif

//shared queue
#ifdef USE_SHARED_QUEUE
  details+=":shared_queue";
#endif
#ifdef USE_SHARED_STACK
  details+=":shared_stack";
#endif
#ifdef USE_STEALING
  details+=":stealing";
#endif
 std::cout<< details <<std::endl;
#endif

/* simple ON/OFF feature switches from here on */
   std::cout << "SPAWN                      :";
#ifdef _SPAWN
   std::cout << " spawn";
#else
  std::cout << " create";
#endif
  std:: cout << std::endl;

        std::cout << "MASTER_SURPPORT            :";
#if defined( MASTER )
        std::cout << " ON" ;
#else
        std::cout << " OFF" ;
#endif
        std::cout << std::endl;
  
    std::cout << "MPI_SURPPORT               :";
#if defined( MPI_SURPPORT )
    std::cout << " ON" ;
#else
    std::cout << " OFF" ;
#endif
    std::cout << std::endl;

    std::cout << "NESTED_SURPPORT            :";
#if defined( USE_COMPOSITE_TASK )
    std::cout << " ON" ;
#else
    std::cout << " OFF" ;
#endif
    std::cout << std::endl;

/* thread-to-core mapping policy */
   std::cout << "MAP                        :";
#ifdef _MAP_COLM
   std::cout << " COL Major";
#elif defined(_MAP_ROWS)
   std::cout << " ROW Spread";
#else
   std::cout << " ROW Major";
#endif
   std::cout << std::endl;

/**/
   std::cout << "BLACKNODE_TOPO             :";
#ifdef _BLACKROW
   std::cout << " blackrow";
#elif defined(_REDBLACK)
   std::cout << " redblack";
#else
   std::cout << " OFF";
#endif
   std::cout <<std::endl;

// /*if enable ldm queue when using serial queue*/
//  std::cout << "_LDMQ                      :";
//#ifdef _LDMQ
//  std::cout << " ON";
//#else
//  std::cout << " OFF";
//#endif
//  std::cout <<std::endl;

std::cout << "MEMORY_POOL                :";
#if defined( MEMORY_POOL )
    std::cout << " ON" ;
#else
    std::cout << " OFF" ;
#endif
    std::cout << std::endl;

std::cout << "AUTO_AFFINITY              :";
#if defined( ALL_CYCLIC )
    std::cout << " ALL_CYCLIC" ;
#else
    std::cout << " AUTO" ;
#endif
#if defined( DOUBLE_FUNC )
    std::cout << " , DOUBLE_FUNC" ;
#endif
    std::cout << std::endl;

std::cout << "DOUBLE_AFFINITY            :";
#if defined( DOUBLE_FUNC )
    std::cout << " ON" ;
#else
    std::cout << " OFF" ;
#endif
    std::cout << std::endl;


std::cout << "SPECIFY_END_TASKS          :";
#if defined( SPECIFY_END_TASKS )
    std::cout << " ON" ;
#else
    std::cout << " OFF" ;
#endif
    std::cout << std::endl;

std::cout << "USE_SIMD                   :";
#if defined( USE_SIMD )
    std::cout << " ON" ;
#else
    std::cout << " OFF" ;
#endif
    std::cout << std::endl;

/* task-dependence-graph representation */
std::cout << "TDG construct              :";
#ifdef FAT_TDG 
    details = " fatTDG,";
    #ifdef LIST_WITH_READHEAD
    details += " DA_DAG,";
    #else
    details += " DA_link,";
    #endif
    #ifdef NEST_UNIQUE_TUPLES
    details += " with unique";
    #endif
    std::cout << details << std::endl;
#else
    #ifdef HYPERGRAPH
    std::cout << " HYPERGRAPH " << std::endl;
    #else
    std::cout << " TASKGRAPH(thinTDG) " << std::endl;
    #endif
#endif

/*debug and profiling*/
  std::cout << "SCHEDULER_PROFILING        :";
#if defined( ACEMESH_SCHEDULER_PROFILING )
  std::cout << " ON " ;
#else
  std::cout << " OFF " ;
#endif
  std::cout << std::endl;

  /*Evaluate parallelism  */
	std::cout << "SCHEDULER_PROFILING_NPARA  :";
#if defined( ACEMESH_PARA )
	std::cout << " ON " ;
#else
	std::cout << " OFF " ;
#endif
	std::cout << std::endl;


  std::cout << "DEBUG_GRAPH                :";
#if defined( DEBUG_GRAPH )
  std::cout << " ON " ;
#else
  std::cout << " OFF " ;
#endif
  std::cout << std::endl;
  
  std::cout << "VERBOSE_OUT                :";
#if defined(VERB)
  std::cout << " ON " ;
#else
  std::cout << " OFF " ;
#endif
  std::cout << std::endl;

  std::cout << "REUSE_GRAPH                :";
#if defined(REUSE_GRAPH)
  std::cout << " ON " ;
#else
  std::cout << " OFF " ;
#endif
  std::cout << std::endl;

  std::cout << "CONCURRENT_CONSTRUCT_GRAPH :";
#if defined(CONCURRENT_CONSTRUCT_GRAPH)
  std::cout << " ON " ;
  #ifdef FAKE_CONCURRENT_GRAPH
  std::cout << ", fake concurrent";
  #endif
#else
  std::cout << " OFF " ;
#endif
  std::cout << std::endl;

  std::cout << "ACEMESH_LOG                :";
#if defined(ACEMESH_LOG)
  std::cout << " ON " ;
#else
  std::cout << " OFF " ;
#endif
  std::cout << std::endl;

if(0)
{ //actually these switches are of no use 
#if defined(ACEMESH_SCHEDULER)
/*if enable hierarchical execution*/
  std::cout <<"HIER_EXEC                  :";
#ifdef __ACEMESH_THREAD_GROUP
  std::cout << " ON (At runtime_init)";
#else
  std::cout << " OFF";
#endif
  std::cout <<std::endl;

  std::cout <<"CPU_BIND                   :";
#ifdef CPUBIND
  std::cout << " ON "<< std::endl;
#else
  std::cout << " OFF "<<std::endl;
#endif


  /*if enable parallel register*/
  std::cout <<"PAR_REGISTER               :";
#ifdef FOR_PARALLEL
  std::cout << " ON";
#else
  std::cout << " OFF";
#endif
  std::cout <<std::endl;
#endif
/*graph partitioning*/
  std::cout << "DAG_PARTITION              :" ;
#if defined(SUPPORT_PARTITION)
  std::cout << " SUPPORT " ;
#elif defined(AUTO_PARTITION) 
  std::cout << " AUTO " ;
#else
  std::cout << " NONE " ;
#endif
  std::cout << std::endl;
} //if 0, no use currently  
std::cout << "/*********************************************/" <<std::endl;

  return;
}


#ifdef __ACEMESH_THREAD_GROUP
/* Configure the global splitter with `dim` chunk counts taken from the
 * variadic arguments (each must be passed as a size_t).  A dim of 0 is
 * a no-op.  Prints the resulting configuration.
 * Fix: the old code returned between va_start and va_end when dim==0,
 * which is undefined behavior — the check now happens first. */
void set_splitter_dim_chunks( size_t dim, ... )
{ 
    if (!dim) return;  /* nothing to read: don't start varargs at all */

    std::vector<size_t> dim_chunks;
    dim_chunks.reserve(dim);
    va_list list;
    va_start(list, dim);
    for (size_t i = 0; i < dim; ++i)
    {
       dim_chunks.push_back(va_arg(list, size_t));
    }
    va_end(list);

    cur_splitter.set_dim_chunks(dim_chunks);
    std::cout << "Set split_chunks(ncuts, ... , ncuts) to:" ;
    std::cout << " " << dim_chunks[0];
    for(size_t i=1; i< dim_chunks.size(); i++)
        std::cout << ", " << dim_chunks[i];
    std::cout << std::endl;
}

/* Common initialization path for the thread-group build: derive the
 * number of groups from total_threads/group_size (rounded UP), init the
 * task graph, optionally bind cores, and echo the configuration.
 * Fix: `total_threads/group_size` was evaluated in integer arithmetic
 * before being passed to ceil(), so the result always rounded down;
 * ceiling division is now done in integer math directly. */
Error_Code apply_runtime_init(int total_threads, int group_size, int core_ids[])
{
    int n_groups = (total_threads + group_size - 1) / group_size;  /* ceil */
    std::cout<<"total_threads:"<<total_threads << ", group size :" << group_size <<std::endl;

#ifdef  CPUBIND
    std::cout<<"processor ids: ";
    for (int i=0;i<total_threads; i++)
    std::cout<<" ,"<<core_ids[i];
    std::cout<<std::endl;
#endif
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].init(n_groups);
#else
    task_graph.init(n_groups);
#endif
#ifdef  CPUBIND
    //init.thread_bind(core_ids, n_groups, group_size);
#else
    /* NOTE(review): `init` is declared in a scheduler header, not in
     * this file — presumably the scheduler-init singleton. */
    init.init_thread_num(total_threads);
#endif
    echo_configurations(); 
    return ACEMESH_OK;
}
/* Init overload: explicit thread count, group size, and core id list.
 * Environment variables are still read (for the core-list/splitter side
 * effects) but the explicit arguments win.
 * Fix: the env outputs were `int` locals bound to get_env_settings'
 * `unsigned int&` parameter (ill-formed); the unused ceil computation
 * was also dropped. */
Error_Code aceMesh_runtime_init(int total_threads, int group_size, int core_ids[])
{ 
    unsigned int env_threads;   /* env results are intentionally ignored */
    int env_group;
    std::vector<int> core_vec; 
    assert(group_size>0 && total_threads>0);
    echo_versionString();	
    get_env_settings(env_threads, env_group, core_vec);
    apply_runtime_init(total_threads, group_size, core_ids);	
    return ACEMESH_OK;
}

/* Build a fixed-size core-id table from core_vec (or the identity
 * mapping 0..total_threads-1 when core_vec is empty) and forward to
 * apply_runtime_init.  Guards the MAX_AM_THREADS-sized table against
 * overflow, and uses size_t for the vector loop (the old `int i <
 * size()` comparison was signed/unsigned mismatched). */
Error_Code to_runtime_init(int total_threads, int group_size, std::vector<int> &core_vec)
{
    int core_ids[MAX_AM_THREADS]={0};
    echo_versionString();	
    assert(total_threads <= MAX_AM_THREADS);
    if(core_vec.size()==0){ //default processor binding
      for(int i=0; i<total_threads;i++)
        core_ids[i]=i;
    }
    else {
       assert(core_vec.size() <= (size_t)MAX_AM_THREADS);
       for (size_t i = 0; i < core_vec.size(); ++i)
         core_ids[i] = core_vec[i];
    }
    apply_runtime_init(total_threads, group_size, core_ids);
    return ACEMESH_OK;
}

/* Init overload: explicit thread count and group size; core binding
 * comes from ACEMESH_CORE_LIST (or defaults inside to_runtime_init).
 * Fix: env outputs were `int` bound to an `unsigned int&` parameter
 * (ill-formed); the unused core_ids array was removed. */
Error_Code aceMesh_runtime_init(int total_threads, int group_size)
{
    unsigned int env_threads; int env_group; 
    std::vector<int> core_vec;
    assert(total_threads>0 && group_size>0);
    echo_versionString();	
    get_env_settings(env_threads, env_group, core_vec);
    to_runtime_init(total_threads,group_size, core_vec);
    return ACEMESH_OK;
}

/* Init overload: explicit thread count; group size comes from
 * ACEMESH_THREAD_GROUP_SIZE, defaulting to 1 when unset.
 * Fix: the env thread-count output was an `int` bound to the
 * `unsigned int&` parameter of get_env_settings (ill-formed). */
Error_Code aceMesh_runtime_init(int total_threads) 
{
    unsigned int env_threads; int group_size; 
    std::vector<int> core_vec;
    echo_versionString();	 
    get_env_settings(env_threads, group_size, core_vec);
    if (group_size==-1){
       std::cout <<"use default group_size=1"<<std::endl;
       group_size=1;
    }
    to_runtime_init(total_threads,group_size, core_vec);
    return ACEMESH_OK;
}

/* Init overload: explicit thread count and core-id list; group size
 * comes from ACEMESH_THREAD_GROUP_SIZE, defaulting to 1 when unset.
 * Fix: the env thread-count output was an `int` bound to the
 * `unsigned int&` parameter of get_env_settings (ill-formed). */
Error_Code aceMesh_runtime_init(int total_threads,int core_ids[])
{
    unsigned int env_threads; int group_size;
    std::vector<int> fake_vec;  /* env core list ignored: caller supplied one */
    echo_versionString();	 
    get_env_settings(env_threads, group_size, fake_vec);
    if (group_size==-1){
       std::cout <<"use default group_size=1"<<std::endl;
       group_size=1;
    }
    apply_runtime_init(total_threads,group_size, core_ids);
    return ACEMESH_OK;

}

/* Init overload: everything comes from the environment; both thread
 * count and group size default to 1 when unset.
 * Fix: the thread-count local was an `int` bound to get_env_settings'
 * `unsigned int&` parameter (ill-formed); the unset sentinel is now
 * detected with the same `(int)x==-1` idiom used elsewhere in this
 * file (see aceMesh_runtime_init_zeroPara). */
Error_Code aceMesh_runtime_init()
{
    unsigned int total_threads; int group_size;
    std::vector<int> core_vec;
    echo_versionString();	 
    get_env_settings(total_threads, group_size, core_vec);
    if ((int)total_threads==-1){
       std::cout <<"use default num_threads=1"<<std::endl;
       total_threads=1;
    }
    if (group_size==-1){
       std::cout <<"use default group_size=1"<<std::endl;
       group_size=1;
    }
    to_runtime_init((int)total_threads,group_size, core_vec);
    return ACEMESH_OK;
}
#else

#define PRINT_PROC_IDS(core_ids,total_threads) \
	std::cout<<"processor_ids: ";\
	for (int i=0;i<total_threads; i++)\
	  std::cout<<" ,"<<core_ids[i];\
	std::cout<<std::endl;

extern int sche_num_threads;
extern int total_num_threads;

/* Initialize the runtime with `thread_num` scheduler threads, deriving
 * the total worker-thread count from the hardware topology macros
 * (PE_NCOL/PE_NROW — defined in a scheduler header, not visible here)
 * and the mapping/blackrow build flags, then start the workers.
 * Writes the globals sche_num_threads and total_num_threads. */
Error_Code aceMesh_runtime_init_TNum(int thread_num)
{
    //int sche_thread_num=0;
    unsigned int fake1;int fake2,ii;
    //int core_ids[MAX_AM_THREADS]={0};
    std::vector<int> core_vec;
    echo_versionString();
    sche_num_threads=thread_num;
    /* more threads requested than one row of PEs: widen the pool
     * according to the mapping policy */
    if (thread_num > PE_NCOL)
    {
#ifdef _BLACKROW

#ifdef _MAP_ROWS
        total_num_threads = thread_num*2-PE_NCOL;
        black_topo=1;
#else
        total_num_threads = thread_num+PE_NCOL;
        black_topo=total_num_threads/PE_NCOL/2;
#endif
        if(my_mpi_rank<1) 
          std::cout<<"BLACK_ROW :"<<black_topo<<std::endl;

#else

#ifdef _MAP_ROWS
        total_num_threads = thread_num*2-PE_NCOL;
#elif _MAP_COLM
        total_num_threads=64;
#else
        total_num_threads = thread_num;
#endif

#endif
    }
    else
    {
        total_num_threads=thread_num;
#ifdef _BLACKROW
         /* PE_NROW+1 marks "no black row in use" */
         black_topo=PE_NROW+1;
         if(my_mpi_rank<1) 
           std::cout<<"BLACK_ROW :"<<"NO_black"<<std::endl;
#endif
    }

/*#ifdef _BLACKROW
    if (thread_num>PE_NCOL)
    {
        sche_thread_num=thread_num+PE_NCOL;
#if defined(_MAP_ROWS)
        black_topo=1;
#else
        black_topo=sche_thread_num/PE_NCOL/2;
#endif
        //black_topo=1;
        std::cout<<"BLACK_ROW :"<<black_topo<<std::endl;
    }
    else
    {
        sche_thread_num=thread_num;
        black_topo=PE_NROW+1;
        std::cout<<"BLACK_ROW :"<<"NO_black"<<std::endl;        
    }
#endif

#if defined(_MAP_ROWS)
    if (thread_num>PE_NCOL)
        sche_thread_num=thread_num*2-PE_NCOL;
#elif _MAP_COLM
        sche_thread_num=thread_num;
#endif*/

    get_env_settings(fake1, fake2, core_vec);
    /* default binding: identity core ids 0..total_num_threads-1 */
    if(core_vec.size()==0)
      for(ii=0;ii<total_num_threads;ii++)
         core_vec.push_back(ii);
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].init(total_num_threads, aceMesh_outdir);
#else
    task_graph.init(total_num_threads, aceMesh_outdir);
#endif
    //init_trace_ctl(aceMesh_outdir); 
#ifdef CPUBIND
    int core_ids[MAX_AM_THREADS]={0};
    assert(total_num_threads<=core_vec.size());
    for (int i = 0; i < core_vec.size(); ++i)
       core_ids[i] = core_vec[i];
    PRINT_PROC_IDS(core_ids, total_num_threads)
	//init.thread_bind(core_ids, thread_num);
#else
    //init.init_thread_num(thread_num);
    create_worker_thread(total_num_threads);	
#endif
    if(my_mpi_rank<1) 
    {
        std::cout<<"sche_num_threads:"<<thread_num<<std::endl;
        std::cout<<"total_thread_num:"<<total_num_threads<<std::endl;
    }
    echo_configurations();
    return ACEMESH_OK;
}

/* Initialize the runtime with an explicit thread count and processor
 * list: init the task graph, then either print the binding (CPUBIND
 * builds) or start the worker threads. */
Error_Code aceMesh_runtime_init(int thread_num, int processors[])
{
    echo_versionString();	
    std::cout<<"thread num :"<<thread_num<<std::endl;
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].init(thread_num, aceMesh_outdir); 
#else
    task_graph.init(thread_num, aceMesh_outdir); 
#endif
#ifdef CPUBIND
    PRINT_PROC_IDS(processors,thread_num)
#else 	
    create_worker_thread(thread_num);	
#endif 
    echo_configurations();
    return ACEMESH_OK;
}

/* Initialize the runtime entirely from environment variables; the
 * thread count defaults to 1 when ACEMESH_NUM_THREADS is unset.
 * Fix: the default-core-id fill loop previously ran BEFORE the unset
 * check, so an unset env var left thread_num == (unsigned)-1 and the
 * loop attempted ~4 billion push_backs; the check now comes first. */
Error_Code aceMesh_runtime_init_zeroPara()
{
    unsigned int thread_num,ii;int  fake2; 
    std::vector<int> core_vec;
    echo_versionString();	
    get_env_settings(thread_num, fake2, core_vec);
    if ((int)thread_num==-1){
       std::cout <<"use default thread_num=1"<<std::endl;
       thread_num=1;
    }
    else
       std::cout<<"thread num :"<<thread_num<<std::endl;
    /* default binding: identity core ids 0..thread_num-1 */
    if(core_vec.size()==0)
       for(ii=0;ii< thread_num;ii++)
            core_vec.push_back(ii);
    //TODO:nested
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].init(thread_num, aceMesh_outdir);
#else
    task_graph.init(thread_num, aceMesh_outdir);
#endif
#ifdef CPUBIND
    int core_ids[MAX_AM_THREADS]={0};
    assert(thread_num<=core_vec.size());
    for (size_t i = 0; i < core_vec.size(); ++i)
       core_ids[i] = core_vec[i];
    PRINT_PROC_IDS(core_ids, thread_num)
    //init.thread_bind(core_ids, thread_num);
#else
    //init.init_thread_num(thread_num);
#endif

    echo_configurations();
    return ACEMESH_OK;
}
#endif 

#ifdef REUSE_GRAPH
/* Begin (or re-enter) the reusable DAG identified by dagNo, forwarding
 * the iteration parameter arrays (n1 ints, n2 doubles) to the current
 * task graph.  REUSE_GRAPH builds only. */
Error_Code dag_start(int dagNo, int *int_vec, int n1, double *float_vec, int n2)
{
#ifdef TEMP_NESTED
  return task_graphs[cur_nested_lev].dag_start(dagNo, int_vec, n1, float_vec, n2);
#else
  return task_graph.dag_start(dagNo, int_vec, n1, float_vec, n2);
#endif
}
#endif


/* Open a split-task (loop) region with a descriptive label.  The label
 * is only consumed by DEBUG_GRAPH builds; taskptr selects a composite
 * task's private graph, NULL means the global graph. */
Error_Code begin_split_task_charPara(char* loop_info, void *taskptr)
{
#ifdef TEMP_NESTED
#ifdef DEBUG_GRAPH
    task_graphs[cur_nested_lev].begin_split_task(loop_info);
#else
    task_graphs[cur_nested_lev].begin_split_task();
#endif
#else
    if (taskptr) {
#ifdef DEBUG_GRAPH
        ((aceMesh_composite_task *)taskptr)->graph.begin_split_task(loop_info);
#else
        ((aceMesh_composite_task *)taskptr)->graph.begin_split_task();
#endif
    } else {
#ifdef DEBUG_GRAPH
        task_graph.begin_split_task(loop_info);
#else
        task_graph.begin_split_task();
#endif
    }
#endif
    return ACEMESH_OK;
}

/* Open a split-task (loop) region without a label; DEBUG_GRAPH builds
 * record the placeholder name "loop no name".  taskptr selects a
 * composite task's private graph, NULL means the global graph. */
Error_Code begin_split_task_zeroPara(void *taskptr)
{
#ifdef TEMP_NESTED
#ifdef DEBUG_GRAPH
    task_graphs[cur_nested_lev].begin_split_task("loop no name");
#else
    task_graphs[cur_nested_lev].begin_split_task();
#endif
#else
    if (taskptr) {
#ifdef DEBUG_GRAPH
        ((aceMesh_composite_task *)taskptr)->graph.begin_split_task("loop no name");
#else
        ((aceMesh_composite_task *)taskptr)->graph.begin_split_task();
#endif
    } else {
#ifdef DEBUG_GRAPH
        task_graph.begin_split_task("loop no name");
#else
        task_graph.begin_split_task();
#endif
    }
#endif
    return ACEMESH_OK;
}

/* Close the split-task region opened by one of the begin_split_task_*
 * entry points, on the same graph it was opened on. */
Error_Code end_split_task_zeroPara(void *taskptr)
{
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].end_split_task();
#else
    if (taskptr) {
        ((aceMesh_composite_task *)taskptr)->graph.end_split_task();
    } else {
        task_graph.end_split_task();
    }
#endif
    return ACEMESH_OK;
}

#ifdef NO_PARALLEL
std::vector<addr_tuple> addrs;

#else
typedef tbb::enumerable_thread_specific< std::vector<addr_tuple > >  Addr_TLS_ty;
Addr_TLS_ty addrs_tls;
#endif


/* Finish registering task t: hand the address tuples accumulated by
 * register_task_datainfo to the current task graph, then clear the
 * accumulator.  Serial builds use the file-global `addrs` vector;
 * parallel builds use the per-thread TLS vector. */
Error_Code register_task_end(aceMesh_task* t)
{
  //now we have all the addrs.
  //sort(addrs.begin(), addrs.end(), )
#ifdef NO_PARALLEL
#ifdef TEMP_NESTED
  task_graphs[cur_nested_lev].register_task(t,addrs);
#else
  task_graph.register_task(t,addrs);
#endif
  addrs.clear();
#else
#ifdef TEMP_NESTED
  task_graphs[cur_nested_lev].register_task(t,addrs_tls.local());
#else
  task_graph.register_task(t,addrs_tls.local());
#endif
  addrs_tls.local().clear();
  //TODO:  
  //std::cerr<<"sorry that register_task_end has not been realized under parallel_register mode."<<std::endl;
  //exit(1);
#endif
  return ACEMESH_OK;
}
/* Accumulate one data-access descriptor for task t into the pending
 * address list (flushed to the graph by register_task_end).  A
 * SHADE_AND_UNSHADE access is split into a SHADE and an UNSHADE tuple
 * sharing the same address.
 * NOTE(review): the loop pushes the SAME (ad,area,nei,ty,ne) tuple n
 * times — presumably callers pass n==1; confirm before relying on n>1. */
Error_Code register_task_datainfo(aceMesh_task* t, int n, double* ad,int area,int nei,int ty,int ne) //(aceMesh_task* t, int n, ...)
{
#ifndef NO_PARALLEL
   /* parallel builds accumulate into the calling thread's TLS vector */
   Addr_TLS_ty::reference addrs=addrs_tls.local();
    //TODO: not work yet, should use thread local storage. by lchen
	//std::cerr<<"sorry that register_task_datainfo has not been realized under parallel_register mode."<<std::endl;
    //exit(1);
#endif

#ifdef DATA_RACE_TRACE
    __AddTask2Trace(t);
#endif

    addr_tuple tmp;
    addr_tuple tmp2;
    for (int i = 0; i < n; i++) 
    {   
       tmp.addr = (void*)ad;//va_arg(args, void*);   
#ifdef DATA_RACE_TRACE
        __AddData2Trace("", (double*)tmp.addr);
#endif

        int area_type =area; //va_arg(args, int);
        if(area_type == NORMAL || area_type == SHADE || area_type == UNSHADE)
        {   
            tmp.area_type = area_type;
            tmp.neighbor = nei;//va_arg(args, int);
            tmp.type = ty;//va_arg(args, int);
            tmp.neighbor_type = ne;//va_arg(args, int)
            addrs.push_back(tmp);
    
        }   
        else if(area_type == SHADE_AND_UNSHADE)
        {   
            /* split into two tuples on the same address */
            tmp.area_type = SHADE;
            tmp.neighbor = nei;//va_arg(args, int)
            tmp.type = ty;//va_arg(args, int);
            tmp.neighbor_type =ne; //va_arg(args, int);
            addrs.push_back(tmp);
    
            tmp2.addr = tmp.addr;
            tmp2.area_type = UNSHADE;
            tmp2.neighbor =nei; //va_arg(args, int);
            tmp2.type =ty; //va_arg(args, int);
            tmp2.neighbor_type = ne;//va_arg(args, int);
            addrs.push_back(tmp2);
        }   
        else
        {
            assert(0);  /* unknown area type: programming error */
        }
    }
    return ACEMESH_OK;
}

/* Register task t together with its n data accesses, given as variadic
 * tuples (void* addr, int area, int neighbor, int type, int
 * neighbor_type).  SHADE_AND_UNSHADE accesses are split into a SHADE
 * and an UNSHADE tuple on the same address.  The accumulated tuples
 * are handed to the current task graph and the accumulator is cleared.
 * Fix: removed a stray per-address debug print of area_type that the
 * sibling registration paths (do_register, register_task_datainfo)
 * do not emit. */
Error_Code register_task(aceMesh_task* t, int n, ...)
{
#ifndef NO_PARALLEL
    /* parallel builds accumulate into the calling thread's TLS vector */
    Addr_TLS_ty::reference  addrs=addrs_tls.local();
#endif

#ifdef DATA_RACE_TRACE
    __AddTask2Trace(t);
#endif
	//get addrs; 
    va_list args;           
    va_start(args,n);    
    addr_tuple tmp;
    addr_tuple tmp2;
    for (int i = 0; i < n; i++) 
    {  
       tmp.addr = va_arg(args, void*);   
#ifdef DATA_RACE_TRACE
        __AddData2Trace("", (double*)tmp.addr);
#endif

       int area_type = va_arg(args, int);
       if(area_type == NORMAL || area_type == SHADE || area_type == UNSHADE)
       {
            tmp.area_type = area_type;
            tmp.neighbor = va_arg(args, int);
            tmp.type = va_arg(args, int);
            tmp.neighbor_type = va_arg(args, int);
            addrs.push_back(tmp);
        } 
        else if(area_type == SHADE_AND_UNSHADE)
        {
            /* split into two tuples on the same address */
            tmp.area_type = SHADE;
            tmp.neighbor = va_arg(args, int);
            tmp.type = va_arg(args, int);
            tmp.neighbor_type = va_arg(args, int);
            addrs.push_back(tmp);
		    
            tmp2.addr = tmp.addr;
            tmp2.area_type = UNSHADE;
            tmp2.neighbor = va_arg(args, int);
            tmp2.type = va_arg(args, int);
            tmp2.neighbor_type = va_arg(args, int);
            addrs.push_back(tmp2);
        }
        else
        {
            assert(0);  /* unknown area type: programming error */
        }
    }
    va_end(args);
//    sort(addrs.begin(), addrs.end(), );
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].register_task(t,addrs);
#else
    task_graph.register_task(t,addrs);
#endif
#ifdef NO_PARALLEL
    addrs.clear();
#else
    addrs_tls.local().clear();
#endif
    return ACEMESH_OK;
}

/* Execute the current task graph and block until all tasks finish.
 * taskptr selects a composite task's private graph (NULL = global
 * graph); profiling-classify builds pass classify id 0. */
void spawn_and_wait_zeroPara(void *taskptr)
{
    //printf("====taskptr: %x\n", taskptr);
#ifdef TEMP_NESTED
#ifdef ACEMESH_PROFILING_CLASSIFY
    task_graphs[cur_nested_lev].spawn_and_wait(0);
#else
    task_graphs[cur_nested_lev].spawn_and_wait();
#endif
#else

#ifdef ACEMESH_PROFILING_CLASSIFY
    if (taskptr) {
        ((aceMesh_composite_task *)taskptr)->graph.spawn_and_wait(0);
    } else {
        task_graph.spawn_and_wait(0);
    }
#else
    if (taskptr) {
        ((aceMesh_composite_task *)taskptr)->graph.spawn_and_wait();
    } else {
        task_graph.spawn_and_wait();
    }
#endif
#endif
    // #ifdef ACEMESH_SCHEDULER_PROFILING
    // print_and_reset_reuse_statistics();
    // #endif
}
/* Execute the current task graph and block until all tasks finish.
 * print_graph is a debug-print flag in DEBUG_GRAPH builds and a
 * classify id in ACEMESH_PROFILING_CLASSIFY builds; taskptr selects a
 * composite task's private graph (NULL = global graph).
 * Fix: the TEMP_NESTED + DEBUG_GRAPH branch referenced `task_graph`,
 * which does not exist in TEMP_NESTED builds (only the task_graphs[]
 * array is declared) and ignored the current nesting level. */
void spawn_and_wait(int print_graph, void *taskptr)
{
    //printf("====taskptr: %x,cur_lev=%d,%d\n", taskptr,cur_nested_lev,my_mpi_rank);
    //printf("====taskptr: %x,%d\n", taskptr,my_mpi_rank);
#ifdef TEMP_NESTED

#ifdef ACEMESH_PROFILING_CLASSIFY
    // for classify statistics of task graph exec time
    // print_graph means classify_id
    task_graphs[cur_nested_lev].spawn_and_wait(print_graph);
#else

#ifdef DEBUG_GRAPH
    task_graphs[cur_nested_lev].spawn_and_wait(print_graph);
#else
    task_graphs[cur_nested_lev].spawn_and_wait();
#endif

#endif

#else

#ifdef ACEMESH_PROFILING_CLASSIFY
    // for classify statistics of task graph exec time
    // print_graph means classify_id
    if (taskptr) {
        ((aceMesh_composite_task *)taskptr)->graph.spawn_and_wait(print_graph);
    } else {
        task_graph.spawn_and_wait(print_graph);
    }
#else
    if (taskptr) {
#ifdef DEBUG_GRAPH
        ((aceMesh_composite_task *)taskptr)->graph.spawn_and_wait(print_graph);
#else
        ((aceMesh_composite_task *)taskptr)->graph.spawn_and_wait();
#endif
    } else {
#ifdef DEBUG_GRAPH
        task_graph.spawn_and_wait(print_graph);
#else
        task_graph.spawn_and_wait();
#endif
    }
#endif

#endif
//#ifdef ACEMESH_SCHEDULER_PROFILING
//    print_and_reset_reuse_statistics();
//#endif
}

/* Launch the current task graph WITHOUT waiting for completion.  Only
 * valid from inside a nested/composite context: the plain builds exit
 * with an error when called on the outermost graph.
 * NOTE(review): the FAT_TDG branch uses `task_graph`, which is not
 * declared in TEMP_NESTED builds — presumably FAT_TDG and TEMP_NESTED
 * are never enabled together; confirm in the build system. */
void spawn_and_goout(void *taskptr)
{
#ifdef FAT_TDG
// printf("runtime.cpp");
    task_graph.spawn_and_goout();
#else
#ifdef TEMP_NESTED
    //printf("spawn_and_out====taskptr: %x,cur_lev=%d,%d\n", taskptr,cur_nested_lev,my_mpi_rank);
    if (cur_nested_lev) {
        task_graphs[cur_nested_lev].spawn_and_goout();
    } else {
        std::cout << "already in outer task graph" << std::endl;
        exit(1);
    }
#else
    //printf("spawn_and_out====taskptr: %x,%d\n", taskptr,my_mpi_rank);
    if (taskptr) {
        ((aceMesh_composite_task *)taskptr)->graph.spawn_and_goout();
    } else {
        std::cout << "already in outer task graph" << std::endl;
        exit(1);
    }
#endif
#endif
}

#ifdef CONCURRENT_CONSTRUCT_GRAPH
/* Block until every task in the (concurrently constructed) global
 * graph has finished; DEBUG_GRAPH builds also close the spawn log.
 * CONCURRENT_CONSTRUCT_GRAPH builds only. */
void wait_for_all_task()
{
    task_graph.wait_for_all_task();

//#ifdef ACEMESH_SCHEDULER_PROFILING
//    aceMesh_task::print_and_reset_reuse_statistics();
//#endif

#ifdef DEBUG_GRAPH
    end_file_spawn();
#endif
}
#endif

#if defined(AUTO_PARTITION) || defined(SUPPORT_PARTITION)
void spawn_and_wait_sep()
{
#ifdef TEMP_NESTED
    task_graph.spawn_and_wait_with_separation();
#else
    task_graphs[cur_nested_lev].spawn_and_wait_with_separation();
#endif

// #ifdef ACEMESH_SCHEDULER_PROFILING
    // print_and_reset_reuse_statistics();
// #endif

}
#endif


/* Record this process's MPI rank in the global my_mpi_rank (consumed
 * by the echo_* and init routines to silence non-zero ranks).  In
 * DEBUG_GRAPH_ builds, also (re)create a per-rank output directory and
 * remember its name in aceMesh_outdir.
 * Fix: the old sprintf targets (char[4]/char[10]) overflowed for ranks
 * of 4+ digits — buffers are sized for any int and writes now use
 * bounds-checked snprintf (which truncates instead of overflowing). */
void aceMesh_MPI_rank(int my_rank)
{
    my_mpi_rank=my_rank;
#ifdef DEBUG_GRAPH_
    if(my_mpi_rank>=0)     
    { /*make a new directory! for ouput files*/
      char  outd[16],mkdircmd[16],rmdircmd[16];
      int status;
      snprintf(outd, sizeof(outd), "%d" ,my_mpi_rank  ); 
      snprintf(mkdircmd, sizeof(mkdircmd), "%s",outd);
      snprintf(rmdircmd, sizeof(rmdircmd), "%s",outd);
      
      /* best-effort: remove a stale directory, then create a fresh one */
      status = rmdir(rmdircmd);      
      status = mkdir(mkdircmd, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);      
      (void)status;
      snprintf(aceMesh_outdir, sizeof(aceMesh_outdir), "%d" ,my_mpi_rank  );	  
    }
    else {
      aceMesh_outdir[0] = '\0';  /* no rank: write outputs to cwd */
    }
      
#endif

   return;
}

/* Register p_task with n data accesses pulled from *p_args (same tuple
 * layout as register_task: addr, area, neighbor, type, neighbor_type).
 * Unlike register_task, parallel builds use a plain local vector here
 * because the tuples are consumed immediately below.  In NO_PARALLEL
 * builds `addrs` is the file-global vector, cleared on entry. */
void do_register(aceMesh_task* p_task, int n, va_list* p_args)
{

#ifndef NO_PARALLEL
    std::vector<addr_tuple> addrs;
#endif

#ifdef DATA_RACE_TRACE
    __AddTask2Trace(p_task);
#endif
    addrs.clear();
    //get addrs;
   
    addr_tuple tmp;
    addr_tuple tmp2;
    for (int i = 0; i < n; i++) 
    {  
        tmp.addr = va_arg(*p_args, void*);
#ifdef DATA_RACE_TRACE
        __AddData2Trace("", (double*)tmp.addr);
#endif

        int area_type = va_arg(*p_args, int);
        if(area_type == NORMAL || area_type == SHADE || area_type == UNSHADE)
        {
            tmp.area_type = area_type;
            tmp.neighbor = va_arg(*p_args, int);
            tmp.type = va_arg(*p_args, int);
            tmp.neighbor_type = va_arg(*p_args, int);
            addrs.push_back(tmp);
        } 
        else if(area_type == SHADE_AND_UNSHADE)
        {
            /* split into two tuples on the same address */
            tmp.area_type = SHADE;
            tmp.neighbor = va_arg(*p_args, int);
            tmp.type = va_arg(*p_args, int);
            tmp.neighbor_type = va_arg(*p_args, int);
            addrs.push_back(tmp);
            
            tmp2.addr = tmp.addr;
            tmp2.area_type = UNSHADE;
            tmp2.neighbor = va_arg(*p_args, int);
            tmp2.type = va_arg(*p_args, int);
            tmp2.neighbor_type = va_arg(*p_args, int);
            addrs.push_back(tmp2);
        }
        else
        {
            assert(0);  /* unknown area type: programming error */
        }
    }

    //sort(addrs.begin(), addrs.end(), )
#ifdef TEMP_NESTED
    task_graphs[cur_nested_lev].register_task(p_task, addrs);
#else
    task_graph.register_task(p_task, addrs);
#endif
}



/* Tear down the runtime: release the memory pool (if built with one),
 * stop the worker threads, and dump parallelism statistics when
 * ACEMESH_PARA profiling is enabled.
 * NOTE(review): the two printfs bracket close_worker_thread for debug
 * visibility — presumably meant to be guarded by the commented-out
 * #ifdef DEBUG; confirm before shipping a quiet build. */
Error_Code aceMesh_runtime_shutdown()
{
#ifdef MEMORY_POOL
    FreePool();
#endif
printf("before close_worker_thread\n");
#if defined(ACEMESH_SCHEDULER)
    close_worker_thread();
#endif
//#ifdef DEBUG
printf("end close_worker_thread\n");
//#endif
#ifdef ACEMESH_PARA
    statistics_npar_and_nidle();
#endif
//#ifdef TEMP_NESTED
//task_graphs[cur_nested_lev].free_tasks();
//#else
//task_graph.free_tasks();
//#endif
    return ACEMESH_OK;
}

/* Variant of aceMesh_runtime_shutdown kept for API compatibility.
 * proc_id is currently unused; the body performs the same pool
 * release, worker shutdown, and statistics dump (without the debug
 * printfs of the no-argument variant). */
Error_Code aceMesh_runtime_shutdown_with_proc_id(int proc_id)
{
#ifdef MEMORY_POOL
        FreePool();
#endif

#if defined(ACEMESH_SCHEDULER)
    close_worker_thread();
#endif

#ifdef ACEMESH_PARA
    statistics_npar_and_nidle();
#endif

    return ACEMESH_OK;
}

/* Placeholder for graph-partition separation hints.  The forwarding
 * call is commented out, so this is currently a no-op even in
 * SUPPORT_PARTITION builds; sep_datas is accepted but ignored. */
void set_separations(std::map<void*, int>& sep_datas)
{
#ifdef SUPPORT_PARTITION
    //task_graph.set_separations(sep_datas);
#endif
}

// Opaque user payload and current partition id, exposed through the
// trivial accessors below (no synchronization: callers must serialize
// access themselves).
void* save_data;
int part_id = -1;   // -1 means "no partition selected"

/* Return the pointer stored by aceMesh_set_data (NULL if never set). */
void* aceMesh_get_data()
{
    return save_data;
}

/* Stash an opaque user pointer for later retrieval; ownership stays
 * with the caller. */
void aceMesh_set_data(void* data)
{
    save_data = data;
}

/* Return the current partition id (-1 when unset). */
int get_part_id()
{
    return part_id;
}

/* Set the current partition id. */
void set_part_id(int p_id)
{
    part_id = p_id;
}
}
