/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
   (C) 2001 by Argonne National Laboratory.
       See COPYRIGHT in top-level directory.
*/
#ifdef MPI_BUILD_PROFILING
#undef MPI_BUILD_PROFILING
#endif
#include "mpe_wrappers_conf.h"
#include "mpi.h"
#include "mpe_log.h"


/* AIX requires this to be the first thing in the file.  */
#ifndef __GNUC__
# if HAVE_ALLOCA_H
#  include <alloca.h>
# else
#  ifdef _AIX
 #pragma alloca
#  else
#   ifndef alloca /* predefined by HP cc +Olibcalls */
char *alloca ();
#   endif
#  endif
# endif
#else
# if defined( HAVE_ALLOCA_H )
#  include <alloca.h>
# endif
#endif

#if defined( STDC_HEADERS ) || defined( HAVE_STDLIB_H )
#include <stdlib.h>
#endif
#if defined( STDC_HEADERS ) || defined( HAVE_STDIO_H )
#include <stdio.h>
#endif
#if defined( STDC_HEADERS ) || defined( HAVE_STRING_H )
#include <string.h>
#endif

#ifdef HAVE_STDARG_H
/* Needed for va_start/end in MPI_Pcontrol */
#include <stdarg.h>
#endif

/* Enable memory tracing.  This requires MPICH's mpid/util/tr2.c codes */
#if defined(MPIR_MEMDEBUG)
#define malloc(a)    MPID_trmalloc((unsigned)(a),__LINE__,__FILE__)
#define free(a)      MPID_trfree(a,__LINE__,__FILE__)
#endif

/* 
   This is a large file and may exceed some compilers' ability to handle it.
   In that case, compile without optimization.
 */

/*
 * To give better control over the log file generated by these routines,
 * the user can switch on an off individual states as well as entire classes
 * of routines.  By default, only the communication routines, pack, and
 * unpack are logged.  This is a change from the previous implementation.
 *
 * MPI_Init checks for logging control options and environment variables,
 * and MPI_Pcontrol allows control over logging (allowing the user to
 * turn logging on and off).  Note that some routines are ALWAYS logged;
 * principally, these are the communicator construction routines (needed to
 * avoid using the "context_id" which may not exist in some MPI
 * implementations).
 *
 * kind_mask is used ONLY when setting (or clearing) is_active by class.
 *
 * An additional feature is the RECV_IDLE state; this is the state between
 * when a recv is posted and when it is able to complete (basically the
 * time for an MPI_PROBE to succeed).
 *
 * Additional issues:
 * Must log all communicator creation events, even if logging turned off
 * (needed to match up communicator/contexts if a context_id not available).
 * We might choose to allow an MPIR_Comm_id(MPI_Comm) routine in
 * context/comm_util.c to simplify the implementation.
 *
 * We need begin-state and end-state routines, because we (a) might want
 * to use alternate logging routines (just link with a different logging
 * library to get runtime animation) and (b) some logging libraries might
 * want it that way (state-based instead of event-based).
 *
 * In request completion routines (e.g., wait, test), we need to shadow
 * the requests because a completed non-persistent request is freed and
 * the input value set to zero (a previous version of this code did not
 * do this).
 *
 * Note that some sends/recvs can be to/from MPI_PROC_NULL.  Must decide
 * whether no log event should be generated or not (can upshot/nupshot
 * handle MPI_PROC_NULL partners?  Should they?)
 * Previous code was uneven about that.
 *
 * Previous code was also uneven about placement of log_send/recv and
 * particularly on the size logged (some logged count, some size in bytes).
 * This still needs work.
 *
 * An alternate version of this could use macros (and require recompiling).
 */
/* Bookkeeping record for one loggable MPI state: a begin/end event pair
   plus per-state activation and usage counters. */
typedef struct {
    int  stateID;      /* CLOG state ID */
    int  start_evtID;  /* CLOG Event ID for the beginning event */
    int  final_evtID;  /* CLOG event ID for the ending event */
    int  n_calls;      /* Number of times this state used * 2 */
    int  is_active;    /* Allows each state to be selectively switched off */
    int  kind_mask;    /* Indicates kind of state (message, environment) */
    char *name;        /* Pointer to name */
    char *color;       /* Color */
    char *format;      /* printf-style format string */
} MPE_State;

/* Bookkeeping record for one loggable solo (non-paired) event. */
typedef struct {
    int  eventID;      /* CLOG event ID */
    int  n_calls;      /* Number of times this event used */
    int  is_active;    /* Allows each event to be selectively switched off */
    int  kind_mask;    /* Indicates kind of event (message, environment) */
    char *name;        /* Pointer to name */
    char *color;       /* Color */
} MPE_Event;

/* Kind_mask values */
#define MPE_KIND_MSG 0x1
#define MPE_KIND_TOPO 0x2
#define MPE_KIND_COLL 0x4
#define MPE_KIND_DATATYPE 0x8
#define MPE_KIND_ENV 0x10
#define MPE_KIND_COMM_INFO 0x20
#define MPE_KIND_COMM 0x40
#define MPE_KIND_ATTR 0x80
#define MPE_KIND_GROUP 0x100
#define MPE_KIND_MSG_INIT 0x200
#define MPE_KIND_FILE 0x400
#define MPE_KIND_RMA 0x800
#define MPE_KIND_SPAWN 0x1000
#define MPE_KIND_INTERNAL 0x10000000

/*
   Because MPE internal states exist whose state IDs are higher than
   any of the MPI states' IDs, MPE_MAX_KNOWN_STATES needs to be defined
   large enough to cover them all.
*/
#define MPE_MAX_KNOWN_STATES 300

#define MPE_MAX_KNOWN_EVENTS 2

void MPE_Init_states_events( void );
void MPE_Init_mpi_core( void );
void MPE_Init_internal_logging( void );

#ifdef HAVE_MPI_IO
void MPE_Init_mpi_io( void );
#endif

#ifdef HAVE_MPI_RMA
void MPE_Init_mpi_rma( void );
#endif

#ifdef HAVE_MPI_SPAWN
void MPE_Init_mpi_spawn( void );
#endif

/* define known events' ID, i.e. index to the corresponding event in events[] */
#define MPE_COMM_INIT_ID 0
#define MPE_COMM_FINALIZE_ID 1

/* define known states' ID, i.e. index to the corresponding state in states[] */
#define MPE_ALLGATHER_ID 0
#define MPE_ALLGATHERV_ID 1
#define MPE_ALLREDUCE_ID 2
#define MPE_ALLTOALL_ID 3
#define MPE_ALLTOALLV_ID 4
#define MPE_BARRIER_ID 5
#define MPE_BCAST_ID 6
#define MPE_GATHER_ID 7
#define MPE_GATHERV_ID 8
#define MPE_OP_CREATE_ID 9
#define MPE_OP_FREE_ID 10
#define MPE_REDUCE_SCATTER_ID 11
#define MPE_REDUCE_ID 12
#define MPE_SCAN_ID 13
#define MPE_SCATTER_ID 14
#define MPE_SCATTERV_ID 15
#define MPE_ATTR_DELETE_ID 16
#define MPE_ATTR_GET_ID 17
#define MPE_ATTR_PUT_ID 18
#define MPE_COMM_COMPARE_ID 19
#define MPE_COMM_CREATE_ID 20
#define MPE_COMM_DUP_ID 21
#define MPE_COMM_FREE_ID 22
#define MPE_COMM_GROUP_ID 23
#define MPE_COMM_RANK_ID 24
#define MPE_COMM_REMOTE_GROUP_ID 25
#define MPE_COMM_REMOTE_SIZE_ID 26
#define MPE_COMM_SIZE_ID 27
#define MPE_COMM_SPLIT_ID 28
#define MPE_COMM_TEST_INTER_ID 29
#define MPE_GROUP_COMPARE_ID 30
#define MPE_GROUP_DIFFERENCE_ID 31
#define MPE_GROUP_EXCL_ID 32
#define MPE_GROUP_FREE_ID 33
#define MPE_GROUP_INCL_ID 34
#define MPE_GROUP_INTERSECTION_ID 35
#define MPE_GROUP_RANK_ID 36
#define MPE_GROUP_RANGE_EXCL_ID 37
#define MPE_GROUP_RANGE_INCL_ID 38
#define MPE_GROUP_SIZE_ID 39
#define MPE_GROUP_TRANSLATE_RANKS_ID 40
#define MPE_GROUP_UNION_ID 41
#define MPE_INTERCOMM_CREATE_ID 42
#define MPE_INTERCOMM_MERGE_ID 43
#define MPE_KEYVAL_CREATE_ID 44
#define MPE_KEYVAL_FREE_ID 45
#define MPE_ABORT_ID 46
#define MPE_ERROR_CLASS_ID 47
#define MPE_ERRHANDLER_CREATE_ID 48
#define MPE_ERRHANDLER_FREE_ID 49
#define MPE_ERRHANDLER_GET_ID 50
#define MPE_ERROR_STRING_ID 51
#define MPE_ERRHANDLER_SET_ID 52
#define MPE_GET_PROCESSOR_NAME_ID 53
#define MPE_INITIALIZED_ID 54
#define MPE_WTICK_ID 55
#define MPE_WTIME_ID 56
#define MPE_ADDRESS_ID 57
#define MPE_BSEND_ID 58
#define MPE_BSEND_INIT_ID 59
#define MPE_BUFFER_ATTACH_ID 60
#define MPE_BUFFER_DETACH_ID 61
#define MPE_CANCEL_ID 62
#define MPE_REQUEST_FREE_ID 63
#define MPE_RECV_INIT_ID 64
#define MPE_SEND_INIT_ID 65
#define MPE_GET_ELEMENTS_ID 66
#define MPE_GET_COUNT_ID 67
#define MPE_IBSEND_ID 68
#define MPE_IPROBE_ID 69
#define MPE_IRECV_ID 70
#define MPE_IRSEND_ID 71
#define MPE_ISEND_ID 72
#define MPE_ISSEND_ID 73
#define MPE_PACK_ID 74
#define MPE_PACK_SIZE_ID 75
#define MPE_PROBE_ID 76
#define MPE_RECV_ID 77
#define MPE_RSEND_ID 78
#define MPE_RSEND_INIT_ID 79
#define MPE_SEND_ID 80
#define MPE_SENDRECV_ID 81
#define MPE_SENDRECV_REPLACE_ID 82
#define MPE_SSEND_ID 83
#define MPE_SSEND_INIT_ID 84
#define MPE_START_ID 85
#define MPE_STARTALL_ID 86
#define MPE_TEST_ID 87
#define MPE_TESTALL_ID 88
#define MPE_TESTANY_ID 89
#define MPE_TEST_CANCELLED_ID 90
#define MPE_TESTSOME_ID 91
#define MPE_TYPE_COMMIT_ID 92
#define MPE_TYPE_CONTIGUOUS_ID 93
#define MPE_TYPE_EXTENT_ID 94
#define MPE_TYPE_FREE_ID 95
#define MPE_TYPE_HINDEXED_ID 96
#define MPE_TYPE_HVECTOR_ID 97
#define MPE_TYPE_INDEXED_ID 98
#define MPE_TYPE_LB_ID 99
#define MPE_TYPE_SIZE_ID 100
#define MPE_TYPE_STRUCT_ID 101
#define MPE_TYPE_UB_ID 102
#define MPE_TYPE_VECTOR_ID 103
#define MPE_UNPACK_ID 104
#define MPE_WAIT_ID 105
#define MPE_WAITALL_ID 106
#define MPE_WAITANY_ID 107
#define MPE_WAITSOME_ID 108
#define MPE_CART_COORDS_ID 109
#define MPE_CART_CREATE_ID 110
#define MPE_CART_GET_ID 111
#define MPE_CART_MAP_ID 112
#define MPE_CART_SHIFT_ID 113
#define MPE_CARTDIM_GET_ID 114
#define MPE_DIMS_CREATE_ID 115
#define MPE_GRAPH_CREATE_ID 116
#define MPE_GRAPH_GET_ID 117
#define MPE_GRAPH_MAP_ID 118
#define MPE_GRAPH_NEIGHBORS_ID 119
#define MPE_GRAPH_NEIGHBORS_COUNT_ID 120
#define MPE_GRAPHDIMS_GET_ID 121
#define MPE_TOPO_TEST_ID 122
#define MPE_RECV_IDLE_ID 123
#define MPE_CART_RANK_ID 124
#define MPE_CART_SUB_ID 125

/*
   Be sure NO MPE internal states are overlapped with ANY of MPI states.
   Also, CLOG's internal states, which are defined in clog_record.h,
   should NOT overlap with the MPE internal states either.

       250 <= MPE's internal stateID < 280
   and 280 <= CLOG's internal stateID < MPE_MAX_KNOWN_STATES

   This is done so the MPE internal stateIDs/eventIDs are included in
   the clog2TOslog2's predefined MPI uninitialized states.
*/

#define MPE_ISEND_WAITED_ID 250
#define MPE_IRECV_WAITED_ID 251

#include "mpe_requests.h"

#include "mpe_log_thread.h"

/* define global known states and events */
static MPE_State states[MPE_MAX_KNOWN_STATES];
static MPE_Event events[MPE_MAX_KNOWN_EVENTS];

/*
   Global trace control
   is_mpilog_on : a boolean flag indicates if MPI user level profiling is on.
   IS_MPELOG_ON : a boolean flag indicates if internal MPE profiling is on.
                  This allows MPE to turn off logging for safe PMPI calls.
   IS_MPELOG_ON = is_mpelog_on in a single thread program.
   IS_MPELOG_ON = thdstm->is_log_on in multiple threads situation.
   So IS_MPELOG_ON is defined by mpe_log_thread.h.
*/
static int is_mpilog_on = 0;
#if !defined( HAVE_PTHREAD_IN_MPI )
static int is_mpelog_on = 0;
#endif


/*  LOGFILENAME_LEN == CLOG_PATH_STRLEN  */
#define LOGFILENAME_STRLEN  256
static request_list *requests_head_0, *requests_tail_0, *requests_avail_0=0;
static int procid_0;
static char logFileName_0[LOGFILENAME_STRLEN];

/* This is used for the multiple-completion test/wait functions */
#define MPE_MAX_REQUESTS 1024
static MPI_Request req[MPE_MAX_REQUESTS];

/* Function prototypes for MPI_Request processing */
void MPE_Req_add_send( MPI_Request, MPI_Datatype, int,
                       int, int, const CLOG_CommIDs_t*, int );
void MPE_Req_add_recv( MPI_Request, MPI_Datatype, int,
                       int, int, const CLOG_CommIDs_t*, int );
void MPE_Req_cancel( MPI_Request );
void MPE_Req_remove( MPI_Request );
void MPE_Req_start( MPI_Request, MPE_State *, int, int );
void MPE_Req_wait_test( MPI_Request, MPI_Status *, char *, MPE_State *,
                        int, int );

/*
   Temporary MPE log definitions (eventually will replace with more
   permanent changes)
   Note that these include a communicator as well as the state (pointer
   to predefined state structure).  Use MPE_COMM_NULL for no communicator
*/
#define MPE_COMM_NULL  MPI_COMM_WORLD

/*
   To use these, declare
     register MPE_State *state;
   and call around routine
*/
/*
   For GNU CC with warnings turned on, we should use a macro that 
   declares this as register MPE_State *state = 0; when error checking 
   is on, just to suppress unnecessary warnings
*/
/*
   is_thisfn_logged is a logging switch that is local within 
   each profiled MPI function that MPE_LOG_SWITCH_DECL is used,
   i.e. on top of the program stack.  is_thisfn_logged is true
   when all other log switches are true, so it summarizes other
   switches (for performance reason).  When a profiled function
   is executed, is_thisfn_logged is default to false.
*/
#define MPE_LOG_SWITCH_DECL \
    register       int              is_thisfn_logged = 0;
#define MPE_LOG_STATE_DECL \
    register       MPE_State       *state   = 0; \
    register const CLOG_CommIDs_t  *commIDs = 0; \
    MPE_LOG_SWITCH_DECL
#define MPE_LOG_COMM_DECL \
    register       MPE_Event       *solo_event  = 0; \
    register const CLOG_CommIDs_t  *new_commIDs = 0;
#define MPE_LOG_SOLO_EVENT_DECL \
    register       MPE_Event       *solo_event  = 0;
#define MPE_LOG_BYTEBUF_DECL \
                   MPE_LOG_BYTES    bytebuf = {0};  \
                   int              bytebuf_pos = 0;

extern MPEU_DLL_SPEC CLOG_CommSet_t  *CLOG_CommSet;

/*
   All following macros have "comm" as argument, but none of them except
   MPE_LOG_STATE_BEGIN needs comm argument.  Instead all of them need
   commIDs as an argument.  "comm" are used in all macros to indicate
   the macro body needs reference of comm, i.e commIDs.  The goal is that
   the functions that invoke these macros will look clearer and more consistent.
*/
#define MPE_LOG_STATE_BEGIN(comm,name) \
    if (is_mpilog_on && IS_MPELOG_ON) { \
        state = &states[name]; \
        if (state->is_active) { \
            commIDs = CLOG_CommSet_get_IDs( CLOG_CommSet, comm ); \
            MPE_Log_commIDs_event( commIDs, THREADID, \
                                   state->start_evtID, NULL ); \
            is_thisfn_logged = 1; \
        } \
    }
/*    if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_STATE_END(comm,bytebuffer) \
    if (is_thisfn_logged) { \
        MPE_Log_commIDs_event( commIDs, THREADID, \
                               state->final_evtID, bytebuffer ); \
        state->n_calls += 2; \
    }

/*    if (is_mpilog_on && IS_MPELOG_ON) { \ */
#define MPE_LOG_SOLO_EVENT(commIDs,thdID,name) \
    if (is_thisfn_logged) { \
        solo_event = &events[name]; \
        if (solo_event->is_active) { \
            MPE_Log_commIDs_event( commIDs, thdID, \
                                   solo_event->eventID, NULL ); \
            solo_event->n_calls += 1; \
        } \
    }

/*    if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_COMM_SEND(comm,receiver,tag,size) \
    if (is_thisfn_logged) { \
        MPE_Log_commIDs_send( commIDs, THREADID, receiver, tag, size ); \
    }
/*    if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_COMM_RECV(comm,sender,tag,size) \
    if (is_thisfn_logged) { \
        MPE_Log_commIDs_receive( commIDs, THREADID, sender, tag, size ); \
    }

#define MPE_REQ_ADD_SEND(request,datatype,count,dest,tag,comm,is_persistent) \
    if (dest != MPI_PROC_NULL) { \
        MPE_Req_add_send( request, datatype, count, \
                          dest, tag, commIDs, is_persistent ); \
    }
#define MPE_REQ_ADD_RECV(request,datatype,count,source,tag,comm,is_persistent) \
    if (source != MPI_PROC_NULL) { \
       MPE_Req_add_recv( request, datatype, count, \
                         source, tag, commIDs, is_persistent ); \
    }

#define MPE_REQ_START(request) \
    MPE_Req_start( request, state, THREADID, IS_MPELOG_ON );
#define MPE_REQ_WAIT_TEST(request,status,note) \
    MPE_Req_wait_test( request, status, note, state, THREADID, IS_MPELOG_ON );

/*
   MPE_LOG_COMM_CHECK is needed in MPI_Comm_free() and MPI_Intercomm_create()
   where commIDs could be NULL if users disable logging with MPI_Pcontrol(0).
   But these 2 MPI functions need commIDs to do some bookkeeping operations.
*/
#define MPE_LOG_COMM_CHECK(comm) \
    if (!commIDs) \
        commIDs = CLOG_CommSet_get_IDs( CLOG_CommSet, comm ); \

#define MPE_LOG_COMMFREE(new_comm,comm_etype) \
    if ( new_comm == MPI_COMM_NULL ) { \
        MPE_Log_commIDs_nullcomm( commIDs, THREADID, comm_etype ); \
        MPE_LOG_SOLO_EVENT( commIDs, THREADID, MPE_COMM_FINALIZE_ID ) \
    }

/*
   Update commIDs after CLOG_CommSet_add_intracomm() which may have invoked
   realloc() on CLOG_CommSet's table[] of commIDs, because invocation
   of realloc() may invalidate all commIDs handed out by CLOG_CommSet.

   Communicator creation/destruction needs to be tracked even when
   user turns off logging through MPI_Pcontrol(0), otherwise subsequent
   logging of MPI calls that use the communicator after MPI_Pcontrol(1)
   would fail during logging.  CLOG_CommSet_add_intracomm() is
   needed to avoid logging failure.  But MPE_Log_commIDs_intracomm()
   is needed so enough information is written to clog2 file so clog2TOslog2
   won't fail.  This means MPI_Pcontrol(0) of the communicator creation
   function will not log the MPI communicator creation state but CLOG2
   buffer(disk) is still updated/modified with the communicator creation info.
*/
/*    if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_INTRACOMM(comm,new_comm,comm_etype) \
    if ( new_comm != MPI_COMM_NULL ) { \
        IS_MPELOG_ON = 0; \
        new_commIDs = CLOG_CommSet_add_intracomm( CLOG_CommSet, \
                                                  new_comm ); \
        IS_MPELOG_ON = 1; \
        commIDs = CLOG_CommSet_get_IDs( CLOG_CommSet, comm ); \
        MPE_Log_commIDs_intracomm( commIDs, THREADID, \
                                   comm_etype, new_commIDs ); \
        MPE_LOG_SOLO_EVENT( new_commIDs, THREADID, MPE_COMM_INIT_ID ) \
    }

/*
   Update commIDs after CLOG_CommSet_add_intercomm() which may have invoked
   realloc() on CLOG_CommSet's table[] of commIDs, because invocation
   of realloc() may invalidate all commIDs handed out by CLOG_CommSet.

   Communicator creation/destruction needs to be tracked even when
   user turns off logging through MPI_Pcontrol(0), otherwise subsequent
   logging of MPI calls that use the communicator after MPI_Pcontrol(1)
   would fail during logging.  CLOG_CommSet_add_intercomm() is
   needed to avoid logging failure.  But MPE_Log_commIDs_intercomm()
   is needed so enough information is written to clog2 file so clog2TOslog2
   won't fail.  This means MPI_Pcontrol(0) of the communicator creation
   function will not log the MPI communicator creation state but CLOG2
   buffer(disk) is still updated/modified with the communicator creation info.
*/
/*    if (is_mpilog_on && IS_MPELOG_ON && state->is_active) { \ */
#define MPE_LOG_INTERCOMM(comm,new_comm,comm_etype) \
    if ( new_comm != MPI_COMM_NULL ) { \
        IS_MPELOG_ON = 0; \
        new_commIDs = CLOG_CommSet_add_intercomm( CLOG_CommSet, \
                                                  new_comm, commIDs ); \
        IS_MPELOG_ON = 1; \
        commIDs = CLOG_CommSet_get_IDs( CLOG_CommSet, comm ); \
        MPE_Log_commIDs_intercomm( commIDs, THREADID, \
                                   comm_etype, new_commIDs ); \
        MPE_LOG_SOLO_EVENT( new_commIDs, THREADID, MPE_COMM_INIT_ID ) \
    }

#define MPE_LOG_ON \
    if (is_thisfn_logged) IS_MPELOG_ON = 1;

#define MPE_LOG_OFF \
    if (is_thisfn_logged) IS_MPELOG_ON = 0;

/* Service routines for managing requests .... */
/*
   If there are large numbers of requests, we should probably use a better
   search structure, such as a hash table or tree
*/
/*
   Record a newly posted send request in the shadow request list so that
   its completion (in wait/test) can later be matched and logged.
   Computes the message size in bytes from the datatype and count.
   If the list-node allocation fails, the request is silently not tracked.
*/
void MPE_Req_add_send( MPI_Request           request,
                       MPI_Datatype          datatype,
                       int                   count,
                       int                   dest,
                       int                   tag,
                       const CLOG_CommIDs_t *commIDs,
                       int                   is_persistent )
{
    request_list *entry;
    int           type_size;

    rq_alloc( requests_avail_0, entry );
    if (!entry)
        return;   /* could not get a list node; skip tracking this request */

    PMPI_Type_size( datatype, &type_size );
    entry->request       = request;
    entry->commIDs       = commIDs;
    entry->status        = RQ_SEND;
    entry->size          = count * type_size;
    entry->tag           = tag;
    entry->mate          = dest;
    entry->is_persistent = is_persistent;
    entry->next          = 0;
    rq_add( requests_head_0, requests_tail_0, entry );
}

/*
   Record a newly posted receive request in the shadow request list so
   that its completion (in wait/test) can later be matched and logged.
   Note: datatype and count are accepted for signature symmetry with
   MPE_Req_add_send but are not stored; the actual received size comes
   from the MPI_Status at completion time.
   If the list-node allocation fails, the request is silently not tracked.
*/
void MPE_Req_add_recv( MPI_Request           request,
                       MPI_Datatype          datatype,
                       int                   count,
                       int                   source,
                       int                   tag,
                       const CLOG_CommIDs_t *commIDs,
                       int                   is_persistent )
{
    request_list *entry;

    /*
       We could pre-allocate request_list members, or allocate in
       blocks.  Do this if we see this is a bottleneck.
    */
    rq_alloc( requests_avail_0, entry );
    if (!entry)
        return;   /* could not get a list node; skip tracking this request */

    entry->request       = request;
    entry->commIDs       = commIDs;
    entry->status        = RQ_RECV;
    entry->is_persistent = is_persistent;
    entry->next          = 0;
    rq_add( requests_head_0, requests_tail_0, entry );
}

/* Flag a tracked request as cancelled; the wait/test path later checks
   this flag (via PMPI_Test_cancelled) before logging completion. */
void MPE_Req_cancel( MPI_Request request )
{
    request_list *entry;

    rq_find( requests_head_0, request, entry );
    if (entry != 0)
        entry->status |= RQ_CANCEL;
}

/* Remove a tracked request from the shadow list, returning its node to
   the free list for reuse. */
void MPE_Req_remove( MPI_Request request )
{
    rq_remove( requests_head_0, requests_tail_0, requests_avail_0, request );
}

/* Persistent sends and receives are handled with this routine (called by
   start or startall) */
/*
   Log the start of a persistent send/receive (called by MPI_Start or
   MPI_Startall).  For a tracked send request to a real partner, the
   ISEND_WAITED state and the matching send arrow are logged here;
   receives are logged at completion time in MPE_Req_wait_test.
   Unknown requests are silently ignored.
*/
void MPE_Req_start( MPI_Request  request,
                    MPE_State   *state,
                    int          thdID,
                    int          is_logging_on )
{
    request_list *rq;
    MPE_State    *istate;

    /* look for request (linear scan of the shadow list) */
    rq = requests_head_0;
    while (rq && (rq->request != request)) {
        rq   = rq->next;
    }

    if (!rq) {
#ifdef PRINT_PROBLEMS
        /* Bug fix: the previous code printed 'note', a variable that does
           not exist in this function's scope, which broke compilation
           whenever PRINT_PROBLEMS was defined. */
        fprintf( stderr, __FILE__":MPE_Req_start(), Request not found.\n" );
#endif
       return;                /* request not found */
    }

    /* Only sends to a real partner are logged at start time. */
    if ((rq->status & RQ_SEND) && rq->mate != MPI_PROC_NULL) {
        if (is_mpilog_on && is_logging_on && state->is_active) {
            istate  = &states[MPE_ISEND_WAITED_ID];
            if (istate->is_active) {
                MPE_Log_commIDs_event( rq->commIDs, thdID,
                                       istate->start_evtID, NULL );
                MPE_Log_commIDs_send( rq->commIDs, thdID,
                                      rq->mate, rq->tag, rq->size );
                MPE_Log_commIDs_event( rq->commIDs, thdID,
                                       istate->final_evtID, NULL );
                istate->n_calls += 2;
            }
            else {
                /* ISEND_WAITED state disabled: still log the send arrow. */
                MPE_Log_commIDs_send( rq->commIDs, thdID,
                                      rq->mate, rq->tag, rq->size );
            }
        }
    }
}

/*
   Handle completion of a request inside a wait/test routine.
   Looks up the request in the shadow list; if it is a receive from a
   real partner (and was not cancelled), logs the IRECV_WAITED state and
   the receive arrow using the size/source/tag from 'status'.
   Non-persistent requests are removed from the shadow list afterwards.

   request       - the (shadow copy of the) completed request
   status        - the MPI_Status returned by the wait/test call
   note          - caller name, used only in PRINT_PROBLEMS diagnostics
   state         - the MPE state of the calling wait/test wrapper
   thdID         - logging thread ID
   is_logging_on - current MPE logging switch
*/
void MPE_Req_wait_test( MPI_Request  request,
                        MPI_Status  *status,
                        char        *note,
                        MPE_State   *state,
                        int          thdID,
                        int          is_logging_on )
{
    request_list *rq, *last;
    int           flag, size;
    MPE_State    *istate;

    /* look for request (linear scan, remembering the predecessor so the
       node can be unlinked below) */
    rq = requests_head_0;
    last = 0;
    while (rq && (rq->request != request)) {
        last = rq;
        rq   = rq->next;
    }

    if (!rq) {
#ifdef PRINT_PROBLEMS
        fprintf( stderr, __FILE__":MPE_Req_wait_test(), "
                         "Request not found in '%s'.\n", note );
        fflush( stderr );
#endif
        return;                /* request not found */
    }

#ifdef HAVE_MPI_STATUS_IGNORE
    /* Without a real status we cannot recover source/tag/size. */
    if (status == MPI_STATUS_IGNORE) {
        /* Bug fix: corrected "proess" typo and added missing newline. */
        fprintf( stderr, __FILE__":MPE_Req_wait_test() cannot process "
                         "incoming MPI_Status, MPI_STATUS_IGNORE\n" );
        fflush( stderr );
        return;
    }
#endif

    if (status->MPI_TAG != MPI_ANY_TAG || (rq->status & RQ_SEND) ) {
        /* if the request was not invalid */

        if (rq->status & RQ_CANCEL) {
            PMPI_Test_cancelled( status, &flag );
            if (flag) return;    /* the request has been cancelled */
        }

        /*
           Receives conclude at the END of Wait/Test.
           Sends start at the beginning.
        */    
        if ((rq->status & RQ_RECV) && (status->MPI_SOURCE != MPI_PROC_NULL)) {
            PMPI_Get_count( status, MPI_BYTE, &size );
            if (is_mpilog_on && is_logging_on && state->is_active) {
                istate  = &states[MPE_IRECV_WAITED_ID];
                if (istate->is_active) {
                    MPE_Log_commIDs_event( rq->commIDs, thdID,
                                           istate->start_evtID, NULL );
                    MPE_Log_commIDs_receive( rq->commIDs, thdID,
                                             status->MPI_SOURCE,
                                             status->MPI_TAG, size );
                    MPE_Log_commIDs_event( rq->commIDs, thdID,
                                           istate->final_evtID, NULL );
                    istate->n_calls += 2;
                }
                else {
                    /* IRECV_WAITED state disabled: still log the arrow. */
                    MPE_Log_commIDs_receive( rq->commIDs, thdID,
                                             status->MPI_SOURCE,
                                             status->MPI_TAG, size );
                }
            }
        }
    }

    /* Since the request has already been found, removing it */
    if (!rq->is_persistent) {
        rq_remove_at( requests_head_0, requests_tail_0, requests_avail_0, 
                      rq, last );
    }
}



void MPE_Init_states_events( void )
{
    MPE_State  *state;
    MPE_Event  *event;
    int         allow_mask;
    int         idx;

    /* Initialize all internal events */
    for ( idx = 0; idx < MPE_MAX_KNOWN_EVENTS; idx++ ) {
        event               = &events[idx];
        event->eventID      = MPE_Log_get_known_solo_eventID();
        event->n_calls      = 0;
        event->is_active    = 0;
        event->name         = NULL;
        event->kind_mask    = 0;
        event->color        = "white";
    }

    /* Initialize all internal states */
    for ( idx = 0; idx < MPE_MAX_KNOWN_STATES; idx++ ) {
        state               = &states[idx];
        state->stateID      = MPE_Log_get_known_stateID();
        state->start_evtID  = MPE_Log_get_known_eventID();
        state->final_evtID  = MPE_Log_get_known_eventID();
        state->n_calls      = 0;
        state->is_active    = 0;
        state->name         = NULL;
        state->kind_mask    = 0;
        state->color        = "white";
        state->format       = NULL;
    }

    /* Should check environment and command-line for changes to allow_mask */

    /* By default, log only message-passing (pt-to-pt and collective) */
    allow_mask  = MPE_KIND_MSG | MPE_KIND_MSG_INIT | MPE_KIND_COLL ;
    allow_mask |= MPE_KIND_COMM | MPE_KIND_COMM_INFO;
    allow_mask |= MPE_KIND_TOPO;
    MPE_Init_mpi_core();

#ifdef HAVE_MPI_IO
    allow_mask |= MPE_KIND_FILE;
    MPE_Init_mpi_io();
#endif

#ifdef HAVE_MPI_RMA
    allow_mask |= MPE_KIND_RMA;
    MPE_Init_mpi_rma();
#endif

#ifdef HAVE_MPI_SPAWN
    allow_mask |= MPE_KIND_SPAWN;
    MPE_Init_mpi_spawn();
#endif

    /* The internal flag is always ON */
    allow_mask |= MPE_KIND_INTERNAL;
    MPE_Init_internal_logging();

    /* Activate the basic states */
    for ( idx = 0; idx < MPE_MAX_KNOWN_STATES; idx++ ) {
        if ( (states[idx].kind_mask & allow_mask) != 0 )
            states[idx].is_active = 1;
    }

    /* Activate the basic events */
    for ( idx = 0; idx < MPE_MAX_KNOWN_EVENTS; idx++ ) {
        if ( (events[idx].kind_mask & allow_mask) != 0 )
            events[idx].is_active = 1;
    }
}

void MPE_Init_mpi_core( void )
{
    MPE_State  *state;

    /* We COULD read these definitions from a file, but accessing the file
       in PARALLEL can be a problem and even if one process accessed it and
       broadcast, we'd still have to find the file.  Is this a problem?
       (We have to WRITE the file, after all).

       We only need to load the name and kind_mask.  is_active is derived
       from kind_mask and allowed mask.
     */
    state = &states[MPE_ALLGATHER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Allgather";
    state->color = "purple3";
    state->format = NULL;

    state = &states[MPE_ALLGATHERV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Allgatherv";
    state->color = "purple3";
    state->format = NULL;

    state = &states[MPE_ALLREDUCE_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Allreduce";
    state->color = "purple";
    state->format = NULL;

    state = &states[MPE_ALLTOALL_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Alltoall";
    state->color = "DarkViolet";
    state->format = "send_msg_sz/p=%d, recv_msg_sz/p=%d.";

    state = &states[MPE_ALLTOALLV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Alltoallv";
    state->color = "DarkViolet";
    state->format = "send_msg_sz/p=%d, recv_msg_sz/p=%d.";

    state = &states[MPE_BARRIER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Barrier";
    state->color = "yellow";
    state->format = NULL;

    state = &states[MPE_BCAST_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Bcast";
    state->color = "cyan";
    state->format = NULL;

    state = &states[MPE_GATHER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Gather";
    state->format = NULL;

    state = &states[MPE_GATHERV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Gatherv";
    state->format = NULL;

    state = &states[MPE_OP_CREATE_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Op_create";
    state->format = NULL;

    state = &states[MPE_OP_FREE_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Op_free";
    state->format = NULL;

    state = &states[MPE_REDUCE_SCATTER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Reduce_scatter";
    state->format = NULL;

    state = &states[MPE_REDUCE_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Reduce";
    state->color = "MediumPurple";
    state->format = NULL;

    state = &states[MPE_SCAN_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Scan";
    state->format = NULL;

    state = &states[MPE_SCATTER_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Scatter";
    state->color = "orchid";
    state->format = NULL;

    state = &states[MPE_SCATTERV_ID];
    state->kind_mask = MPE_KIND_COLL;
    state->name = "MPI_Scatterv";
    state->color = "orchid";
    state->format = NULL;

    state = &states[MPE_ATTR_DELETE_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name = "MPI_Attr_delete";
    state->format = NULL;

    state = &states[MPE_ATTR_GET_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name = "MPI_Attr_get";
    state->format = NULL;

    state = &states[MPE_ATTR_PUT_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name = "MPI_Attr_put";
    state->format = NULL;

    state = &states[MPE_COMM_COMPARE_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_compare";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_COMM_CREATE_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name = "MPI_Comm_create";
    state->color = "DarkOliveGreen1";
    state->format = NULL;

    state = &states[MPE_COMM_DUP_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name = "MPI_Comm_dup";
    state->color = "OliveDrab1";
    state->format = NULL;

    state = &states[MPE_COMM_FREE_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name = "MPI_Comm_free";
    state->color = "LightSeaGreen";
    state->format = NULL;

    state = &states[MPE_COMM_GROUP_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_group";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_COMM_RANK_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_rank";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_COMM_REMOTE_GROUP_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_remote_group";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_COMM_REMOTE_SIZE_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_remote_size";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_COMM_SIZE_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_size";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_COMM_SPLIT_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name = "MPI_Comm_split";
    state->color = "DarkOliveGreen2";
    state->format = NULL;

    state = &states[MPE_COMM_TEST_INTER_ID];
    state->kind_mask = MPE_KIND_COMM_INFO;
    state->name = "MPI_Comm_test_inter";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GROUP_COMPARE_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_compare";
    state->format = NULL;

    state = &states[MPE_GROUP_DIFFERENCE_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_difference";
    state->format = NULL;

    state = &states[MPE_GROUP_EXCL_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_excl";
    state->format = NULL;

    state = &states[MPE_GROUP_FREE_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_free";
    state->format = NULL;

    state = &states[MPE_GROUP_INCL_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_incl";
    state->format = NULL;

    state = &states[MPE_GROUP_INTERSECTION_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_intersection";
    state->format = NULL;

    state = &states[MPE_GROUP_RANK_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_rank";
    state->format = NULL;

    state = &states[MPE_GROUP_RANGE_EXCL_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_range_excl";
    state->format = NULL;

    state = &states[MPE_GROUP_RANGE_INCL_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_range_incl";
    state->format = NULL;

    state = &states[MPE_GROUP_SIZE_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_size";
    state->format = NULL;

    state = &states[MPE_GROUP_TRANSLATE_RANKS_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_translate_ranks";
    state->format = NULL;

    state = &states[MPE_GROUP_UNION_ID];
    state->kind_mask = MPE_KIND_GROUP;
    state->name = "MPI_Group_union";
    state->format = NULL;

    state = &states[MPE_INTERCOMM_CREATE_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name = "MPI_Intercomm_create";
    state->color = "DarkOliveGreen4";
    state->format = NULL;

    state = &states[MPE_INTERCOMM_MERGE_ID];
    state->kind_mask = MPE_KIND_COMM;
    state->name = "MPI_Intercomm_merge";
    state->color = "DarkOliveGreen3";
    state->format = NULL;

    state = &states[MPE_KEYVAL_CREATE_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name = "MPI_Keyval_create";
    state->format = NULL;

    state = &states[MPE_KEYVAL_FREE_ID];
    state->kind_mask = MPE_KIND_ATTR;
    state->name = "MPI_Keyval_free";
    state->format = NULL;

    state = &states[MPE_ABORT_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Abort";
    state->format = NULL;

    state = &states[MPE_ERROR_CLASS_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Error_class";
    state->format = NULL;

    state = &states[MPE_ERRHANDLER_CREATE_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Errhandler_create";
    state->format = NULL;

    state = &states[MPE_ERRHANDLER_FREE_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Errhandler_free";
    state->format = NULL;

    state = &states[MPE_ERRHANDLER_GET_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Errhandler_get";
    state->format = NULL;

    state = &states[MPE_ERROR_STRING_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Error_string";
    state->format = NULL;

    state = &states[MPE_ERRHANDLER_SET_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Errhandler_set";
    state->format = NULL;

    state = &states[MPE_GET_PROCESSOR_NAME_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Get_processor_name";
    state->format = NULL;

    state = &states[MPE_INITIALIZED_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Initialized";
    state->format = NULL;

    state = &states[MPE_WTICK_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Wtick";
    state->format = NULL;

    state = &states[MPE_WTIME_ID];
    state->kind_mask = MPE_KIND_ENV;
    state->name = "MPI_Wtime";
    state->format = NULL;

    state = &states[MPE_ADDRESS_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Address";
    state->format = NULL;

    state = &states[MPE_BSEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Bsend";
    state->color = "SlateBlue";
    state->format = NULL;

    state = &states[MPE_BSEND_INIT_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Bsend_init";
    state->format = NULL;

    state = &states[MPE_BUFFER_ATTACH_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Buffer_attach";
    state->format = NULL;

    state = &states[MPE_BUFFER_DETACH_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Buffer_detach";
    state->format = NULL;

    state = &states[MPE_CANCEL_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Cancel";
    state->format = NULL;

    state = &states[MPE_REQUEST_FREE_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Request_free";
    state->format = NULL;

    state = &states[MPE_RECV_INIT_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Recv_init";
    state->format = NULL;

    state = &states[MPE_SEND_INIT_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Send_init";
    state->format = NULL;

    state = &states[MPE_GET_ELEMENTS_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Get_elements";
    state->format = NULL;

    state = &states[MPE_GET_COUNT_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Get_count";
    state->format = NULL;

    state = &states[MPE_IBSEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Ibsend";
    state->format = NULL;

    state = &states[MPE_IPROBE_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Iprobe";
    state->color = "LavenderBlush";
    state->format = NULL;

    state = &states[MPE_IRECV_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Irecv";
    state->color = "PaleGreen";
    state->format = NULL;

    state = &states[MPE_IRSEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Irsend";
    state->color = "LightSkyBlue";
    state->format = NULL;

    state = &states[MPE_ISEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Isend";
    state->color = "SkyBlue";
    state->format = NULL;

    state = &states[MPE_ISSEND_ID];
    state->kind_mask = MPE_ISSEND_ID;
    state->name = "MPI_Issend";
    state->color = "LightSteelBlue";
    state->format = NULL;

    state = &states[MPE_PACK_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Pack";
    state->format = NULL;

    state = &states[MPE_PACK_SIZE_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Pack_size";
    state->format = NULL;

    state = &states[MPE_PROBE_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Probe";
    state->color = "lavender";
    state->format = NULL;

    state = &states[MPE_RECV_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Recv";
    state->color = "green";
    state->format = NULL;

    state = &states[MPE_RSEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Rsend";
    state->color = "DeepSkyBlue";
    state->format = NULL;

    state = &states[MPE_RSEND_INIT_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Rsend_init";
    state->format = NULL;

    state = &states[MPE_SEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Send";
    state->color = "blue";
    state->format = NULL;

    state = &states[MPE_SENDRECV_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Sendrecv";
    state->color = "SeaGreen";
    state->format = NULL;

    state = &states[MPE_SENDRECV_REPLACE_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Sendrecv_replace";
    state->color = "SeaGreen1";
    state->format = NULL;

    state = &states[MPE_SSEND_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Ssend";
    state->color = "DeepSkyBlue";
    state->format = NULL;

    state = &states[MPE_SSEND_INIT_ID];
    state->kind_mask = MPE_KIND_MSG_INIT;
    state->name = "MPI_Ssend_init";
    state->format = NULL;

    state = &states[MPE_START_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Start";
    state->format = NULL;

    state = &states[MPE_STARTALL_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Startall";
    state->format = NULL;

    state = &states[MPE_TEST_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Test";
    state->color = "orange";
    state->format = NULL;

    state = &states[MPE_TESTALL_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Testall";
    state->color = "orange1";
    state->format = NULL;

    state = &states[MPE_TESTANY_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Testany";
    state->color = "orange3";
    state->format = NULL;

    state = &states[MPE_TEST_CANCELLED_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Test_cancelled";
    state->format = NULL;

    state = &states[MPE_TESTSOME_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Testsome";
    state->color = "orange4";
    state->format = NULL;

    state = &states[MPE_TYPE_COMMIT_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_commit";
    state->format = NULL;

    state = &states[MPE_TYPE_CONTIGUOUS_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_contiguous";
    state->format = NULL;

    state = &states[MPE_TYPE_EXTENT_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_extent";
    state->format = NULL;

    state = &states[MPE_TYPE_FREE_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_free";
    state->format = NULL;

    state = &states[MPE_TYPE_HINDEXED_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_hindexed";
    state->format = NULL;

    state = &states[MPE_TYPE_INDEXED_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_indexed";
    state->format = NULL;

    state = &states[MPE_TYPE_HVECTOR_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_hvector";
    state->format = NULL;

    state = &states[MPE_TYPE_LB_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_lb";
    state->format = NULL;

    state = &states[MPE_TYPE_SIZE_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_size";
    state->format = NULL;

    state = &states[MPE_TYPE_STRUCT_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_struct";
    state->format = NULL;

    state = &states[MPE_TYPE_UB_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_ub";
    state->format = NULL;

    state = &states[MPE_TYPE_VECTOR_ID];
    state->kind_mask = MPE_KIND_DATATYPE;
    state->name = "MPI_Type_vector";
    state->format = NULL;

    state = &states[MPE_UNPACK_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Unpack";
    state->format = NULL;

    state = &states[MPE_WAIT_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Wait";
    state->color = "red";
    state->format = NULL;

    state = &states[MPE_WAITALL_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Waitall";
    state->color = "OrangeRed";
    state->format = NULL;

    state = &states[MPE_WAITANY_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Waitany";
    state->color = "coral";
    state->format = NULL;

    state = &states[MPE_WAITSOME_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Waitsome";
    state->color = "IndianRed";
    state->format = NULL;

    state = &states[MPE_CART_COORDS_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_coords";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_CART_CREATE_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_create";
    state->color="DarkOliveGreen1";
    state->format = NULL;

    state = &states[MPE_CART_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_CART_MAP_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_map";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_CART_RANK_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_rank";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_CART_SHIFT_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_shift";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_CART_SUB_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cart_sub";
    state->color ="DarkOliveGreen2";
    state->format = NULL;

    state = &states[MPE_CARTDIM_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Cartdim_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_DIMS_CREATE_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Dims_create";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_CREATE_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_create";
    state->color="DarkOliveGreen3";
    state->format = NULL;

    state = &states[MPE_GRAPH_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_MAP_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_map";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_NEIGHBORS_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_neighbors";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPH_NEIGHBORS_COUNT_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graph_neighbors_count";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_GRAPHDIMS_GET_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Graphdims_get";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_TOPO_TEST_ID];
    state->kind_mask = MPE_KIND_TOPO;
    state->name = "MPI_Topo_test";
    state->color = "white";
    state->format = NULL;

    state = &states[MPE_RECV_IDLE_ID];
    state->kind_mask = MPE_KIND_MSG;
    state->name = "MPI_Recv_idle";
    state->color ="SeaGreen1";
    state->format = NULL;
}

void MPE_Init_internal_logging( void )
{
    MPE_State  *state;
    MPE_Event  *event;

    /* Register the MPE-internal states (these are MPE bookkeeping
       intervals, not MPI calls). */
    state = &states[MPE_ISEND_WAITED_ID];
    state->kind_mask = MPE_KIND_INTERNAL;
    state->name = "MPE_Isend_waited";
    state->color="magenta";
    state->format = NULL;

    state = &states[MPE_IRECV_WAITED_ID];
    state->kind_mask = MPE_KIND_INTERNAL;
    state->name = "MPE_Irecv_waited";
    state->color="DarkOrange";
    state->format = NULL;

    /* Register the MPE-internal events.  Events carry no format field
       (see the events[] initialization); the previous version contained
       stray "state->format = NULL;" copy/paste lines here that wrote
       through the last state pointer instead of the event - removed. */
    event = &events[MPE_COMM_INIT_ID];
    event->kind_mask = MPE_KIND_INTERNAL;
    event->name = "MPE_Comm_init";
    event->color = "red";

    event = &events[MPE_COMM_FINALIZE_ID];
    event->kind_mask = MPE_KIND_INTERNAL;
    event->name = "MPE_Comm_finalize";
    event->color = "orange";
}

/*
 * Here begin the individual wrapper routines.  We may eventually want to
 * break them up, at least by class (there is no need to load the
 * MPI_CART/GRAPH routines if the application doesn't use them).
 */


int   MPI_Allgather( sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm )
void * sendbuf;
int sendcount;
MPI_Datatype sendtype;
void * recvbuf;
int recvcount;
MPI_Datatype recvtype;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Allgather - profiling wrapper for MPI_Allgather.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_ALLGATHER_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Allgather( sendbuf, sendcount, sendtype,
                          recvbuf, recvcount, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int   MPI_Allgatherv( sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm )
void * sendbuf;
int sendcount;
MPI_Datatype sendtype;
void * recvbuf;
int * recvcounts;
int * displs;
MPI_Datatype recvtype;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Allgatherv - profiling wrapper for MPI_Allgatherv.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_ALLGATHERV_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Allgatherv( sendbuf, sendcount, sendtype,
                           recvbuf, recvcounts, displs, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int   MPI_Allreduce( sendbuf, recvbuf, count, datatype, op, comm )
void * sendbuf;
void * recvbuf;
int count;
MPI_Datatype datatype;
MPI_Op op;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Allreduce - profiling wrapper for MPI_Allreduce.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_ALLREDUCE_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Allreduce( sendbuf, recvbuf, count, datatype, op, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int  MPI_Alltoall( sendbuf, sendcnt, sendtype, recvbuf, recvcnt, recvtype, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int recvcnt;
MPI_Datatype recvtype;
MPI_Comm comm;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

  MPE_LOG_BYTEBUF_DECL
  int  comm_size, type_sz, msg_sz;

/*
    MPI_Alltoall - prototyping replacement for MPI_Alltoall
    Log the beginning and ending of the time spent in MPI_Alltoall calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLTOALL_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Alltoall( sendbuf, sendcnt, sendtype,
                             recvbuf, recvcnt, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
     PMPI_Comm_size( comm, &comm_size );
     bytebuf_pos = 0;

     PMPI_Type_size( sendtype, &type_sz );
     msg_sz = comm_size * sendcnt * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );

     PMPI_Type_size( recvtype, &type_sz );
     msg_sz = comm_size * recvcnt * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );
  MPE_LOG_STATE_END(comm,bytebuf)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Alltoallv( sendbuf, sendcnts, sdispls, sendtype, recvbuf, recvcnts, rdispls, recvtype, comm )
void * sendbuf;
int * sendcnts;
int * sdispls;
MPI_Datatype sendtype;
void * recvbuf;
int * recvcnts;
int * rdispls;
MPI_Datatype recvtype;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

  MPE_LOG_BYTEBUF_DECL
  int  idx, comm_size, type_sz, msg_sz;

/*
    MPI_Alltoallv - prototyping replacement for MPI_Alltoallv
    Log the beginning and ending of the time spent in MPI_Alltoallv calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ALLTOALLV_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Alltoallv( sendbuf, sendcnts, sdispls, sendtype,
                              recvbuf, recvcnts, rdispls, recvtype, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
     PMPI_Comm_size( comm, &comm_size );
     bytebuf_pos = 0;

     PMPI_Type_size( sendtype, &type_sz );
     msg_sz = 0;
     for ( idx = 0; idx < comm_size; idx++ )
         msg_sz += sendcnts[idx] * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );

     PMPI_Type_size( recvtype, &type_sz );
     msg_sz = 0;
     for ( idx = 0; idx < comm_size; idx++ )
         msg_sz += recvcnts[idx] * type_sz;
     MPE_Log_pack( bytebuf, &bytebuf_pos, 'd', 1, &msg_sz );
  MPE_LOG_STATE_END(comm,bytebuf)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Barrier( comm )
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Barrier - profiling wrapper for MPI_Barrier.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_BARRIER_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Barrier( comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int MPI_Bcast( buffer, count, datatype, root, comm )
void * buffer;
int count;
MPI_Datatype datatype;
int root;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Bcast - profiling wrapper for MPI_Bcast.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_BCAST_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Bcast( buffer, count, datatype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int MPI_Gather( sendbuf, sendcnt, sendtype, recvbuf, recvcount, recvtype, root, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int recvcount;
MPI_Datatype recvtype;
int root;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Gather - profiling wrapper for MPI_Gather.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_GATHER_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Gather( sendbuf, sendcnt, sendtype,
                       recvbuf, recvcount, recvtype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int MPI_Gatherv( sendbuf, sendcnt, sendtype, recvbuf, recvcnts, displs, recvtype, root, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int * recvcnts;
int * displs;
MPI_Datatype recvtype;
int root;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Gatherv - profiling wrapper for MPI_Gatherv.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_GATHERV_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Gatherv( sendbuf, sendcnt, sendtype,
                        recvbuf, recvcnts, displs, recvtype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int  MPI_Op_create( function, commute, op )
MPI_User_function * function;
int commute;
MPI_Op * op;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Op_create - profiling wrapper for MPI_Op_create.
       Records the interval spent inside the underlying PMPI call.
       No communicator is involved, so the null communicator is logged.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_OP_CREATE_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Op_create( function, commute, op );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int  MPI_Op_free( op )
MPI_Op * op;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Op_free - profiling wrapper for MPI_Op_free.
       Records the interval spent inside the underlying PMPI call.
       No communicator is involved, so the null communicator is logged.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_OP_FREE_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Op_free( op );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int   MPI_Reduce_scatter( sendbuf, recvbuf, recvcnts, datatype, op, comm )
void * sendbuf;
void * recvbuf;
int * recvcnts;
MPI_Datatype datatype;
MPI_Op op;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Reduce_scatter - profiling wrapper for MPI_Reduce_scatter.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_REDUCE_SCATTER_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Reduce_scatter( sendbuf, recvbuf, recvcnts,
                               datatype, op, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int   MPI_Reduce( sendbuf, recvbuf, count, datatype, op, root, comm )
void * sendbuf;
void * recvbuf;
int count;
MPI_Datatype datatype;
MPI_Op op;
int root;
MPI_Comm comm;
{
    int err;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    /*
       MPI_Reduce - profiling wrapper for MPI_Reduce.
       Records the interval spent inside the underlying PMPI call.
     */
    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_REDUCE_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    err = PMPI_Reduce( sendbuf, recvbuf, count, datatype, op, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return err;
}

int   MPI_Scan( sendbuf, recvbuf, count, datatype, op, comm )
void * sendbuf;
void * recvbuf;
int count;
MPI_Datatype datatype;
MPI_Op op;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Scan - profiling wrapper for MPI_Scan.
    Logs the begin and end of the time spent in the call (state
    MPE_SCAN_ID, attributed to 'comm'); the real work is delegated to
    PMPI_Scan.  When MAKE_SAFE_PMPI_CALL is defined, logging is
    suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SCAN_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Scan( sendbuf, recvbuf, count, datatype, op, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Scatter( sendbuf, sendcnt, sendtype, recvbuf, recvcnt, recvtype, root, comm )
void * sendbuf;
int sendcnt;
MPI_Datatype sendtype;
void * recvbuf;
int recvcnt;
MPI_Datatype recvtype;
int root;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Scatter - profiling wrapper for MPI_Scatter.
    Logs the begin and end of the time spent in the call (state
    MPE_SCATTER_ID, attributed to 'comm'); the real work is delegated
    to PMPI_Scatter.  When MAKE_SAFE_PMPI_CALL is defined, logging is
    suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SCATTER_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Scatter( sendbuf, sendcnt, sendtype,
                            recvbuf, recvcnt, recvtype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Scatterv( sendbuf, sendcnts, displs, sendtype,
                    recvbuf, recvcnt, recvtype, root, comm )
void * sendbuf;
int * sendcnts;
int * displs;
MPI_Datatype sendtype;
void * recvbuf;
int recvcnt;
MPI_Datatype recvtype;
int root;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Scatterv - profiling wrapper for MPI_Scatterv.
    Logs the begin and end of the time spent in the call (state
    MPE_SCATTERV_ID, attributed to 'comm'); the real work is delegated
    to PMPI_Scatterv.  When MAKE_SAFE_PMPI_CALL is defined, logging is
    suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SCATTERV_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Scatterv( sendbuf, sendcnts, displs, sendtype,
                             recvbuf, recvcnt, recvtype, root, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Attr_delete( comm, keyval )
MPI_Comm comm;
int keyval;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Attr_delete - profiling wrapper for MPI_Attr_delete.
    Logs the begin and end of the time spent in the call (state
    MPE_ATTR_DELETE_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Attr_delete.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ATTR_DELETE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Attr_delete( comm, keyval );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Attr_get( comm, keyval, attr_value, flag )
MPI_Comm comm;
int keyval;
void * attr_value;
int * flag;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Attr_get - profiling wrapper for MPI_Attr_get.
    Logs the begin and end of the time spent in the call (state
    MPE_ATTR_GET_ID, attributed to 'comm'); the real work is delegated
    to PMPI_Attr_get.  When MAKE_SAFE_PMPI_CALL is defined, logging is
    suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ATTR_GET_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Attr_get( comm, keyval, attr_value, flag );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Attr_put( comm, keyval, attr_value )
MPI_Comm comm;
int keyval;
void * attr_value;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Attr_put - profiling wrapper for MPI_Attr_put.
    Logs the begin and end of the time spent in the call (state
    MPE_ATTR_PUT_ID, attributed to 'comm'); the real work is delegated
    to PMPI_Attr_put.  When MAKE_SAFE_PMPI_CALL is defined, logging is
    suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ATTR_PUT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Attr_put( comm, keyval, attr_value );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_compare( comm1, comm2, result )
MPI_Comm comm1;
MPI_Comm comm2;
int * result;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_compare - profiling wrapper for MPI_Comm_compare.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_COMPARE_ID); the real work is delegated to
    PMPI_Comm_compare.  The state is attributed to MPE_COMM_NULL
    rather than to either of the two communicator arguments.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_COMM_COMPARE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_compare( comm1, comm2, result );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_create( comm, group, comm_out )
MPI_Comm comm;
MPI_Group group;
MPI_Comm * comm_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_create - profiling wrapper for MPI_Comm_create.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_CREATE_ID); the real work is delegated to
    PMPI_Comm_create.  After the PMPI call, the newly created
    intracommunicator '*comm_out' is registered with the logger via
    MPE_LOG_INTRACOMM so later events on it can be attributed.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_CREATE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_create( comm, group, comm_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTRACOMM(comm,*comm_out,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_dup( comm, comm_out )
MPI_Comm comm;
MPI_Comm * comm_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_dup - profiling wrapper for MPI_Comm_dup.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_DUP_ID); the real work is delegated to PMPI_Comm_dup.
    After the PMPI call, the duplicated communicator '*comm_out' is
    registered with the logger via MPE_LOG_INTRACOMM.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_DUP_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_dup( comm, comm_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTRACOMM(comm,*comm_out,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_free( comm )
MPI_Comm * comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_SOLO_EVENT_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_free - prototyping replacement for MPI_Comm_free
    Log the beginning and ending of the time spent in MPI_Comm_free calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(*comm,MPE_COMM_FREE_ID)
  MPE_LOG_COMM_CHECK(*comm)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_free( comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_COMMFREE(*comm,CLOG_COMM_FREE)

  MPE_LOG_STATE_END(*comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_group( comm, group )
MPI_Comm comm;
MPI_Group * group;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_group - profiling wrapper for MPI_Comm_group.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_GROUP_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Comm_group.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_GROUP_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_group( comm, group );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_rank( comm, rank )
MPI_Comm comm;
int * rank;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_rank - profiling wrapper for MPI_Comm_rank.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_RANK_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Comm_rank.  When MAKE_SAFE_PMPI_CALL is defined,
    logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_RANK_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_rank( comm, rank );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_remote_group( comm, group )
MPI_Comm comm;
MPI_Group * group;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_remote_group - profiling wrapper for MPI_Comm_remote_group.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_REMOTE_GROUP_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Comm_remote_group.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_REMOTE_GROUP_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_remote_group( comm, group );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_remote_size( comm, size )
MPI_Comm comm;
int * size;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_remote_size - profiling wrapper for MPI_Comm_remote_size.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_REMOTE_SIZE_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Comm_remote_size.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_REMOTE_SIZE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_remote_size( comm, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_size( comm, size )
MPI_Comm comm;
int * size;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_size - profiling wrapper for MPI_Comm_size.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_SIZE_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Comm_size.  When MAKE_SAFE_PMPI_CALL is defined,
    logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_SIZE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_size( comm, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_split( comm, color, key, comm_out )
MPI_Comm comm;
int color;
int key;
MPI_Comm * comm_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_split - profiling wrapper for MPI_Comm_split.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_SPLIT_ID); the real work is delegated to PMPI_Comm_split.
    After the PMPI call, the newly created intracommunicator
    '*comm_out' is registered with the logger via MPE_LOG_INTRACOMM.
    When MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around
    the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_SPLIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_split( comm, color, key, comm_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTRACOMM(comm,*comm_out,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Comm_test_inter( comm, flag )
MPI_Comm comm;
int * flag;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Comm_test_inter - profiling wrapper for MPI_Comm_test_inter.
    Logs the begin and end of the time spent in the call (state
    MPE_COMM_TEST_INTER_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Comm_test_inter.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_COMM_TEST_INTER_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Comm_test_inter( comm, flag );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_compare( group1, group2, result )
MPI_Group group1;
MPI_Group group2;
int * result;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_compare - profiling wrapper for MPI_Group_compare.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_COMPARE_ID); the real work is delegated to
    PMPI_Group_compare.  Group calls have no communicator, so the
    state is attributed to MPE_COMM_NULL.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_COMPARE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_compare( group1, group2, result );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_difference( group1, group2, group_out )
MPI_Group group1;
MPI_Group group2;
MPI_Group * group_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_difference - profiling wrapper for MPI_Group_difference.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_DIFFERENCE_ID, attributed to MPE_COMM_NULL); the real
    work is delegated to PMPI_Group_difference.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_DIFFERENCE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_difference( group1, group2, group_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_excl( group, n, ranks, newgroup )
MPI_Group group;
int n;
int * ranks;
MPI_Group * newgroup;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_excl - profiling wrapper for MPI_Group_excl.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_EXCL_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Group_excl.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_EXCL_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_excl( group, n, ranks, newgroup );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_free( group )
MPI_Group * group;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_free - profiling wrapper for MPI_Group_free.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_FREE_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Group_free.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_FREE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_free( group );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_incl( group, n, ranks, group_out )
MPI_Group group;
int n;
int * ranks;
MPI_Group * group_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_incl - profiling wrapper for MPI_Group_incl.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_INCL_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Group_incl.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_INCL_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_incl( group, n, ranks, group_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_intersection( group1, group2, group_out )
MPI_Group group1;
MPI_Group group2;
MPI_Group * group_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_intersection - profiling wrapper for MPI_Group_intersection.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_INTERSECTION_ID, attributed to MPE_COMM_NULL); the real
    work is delegated to PMPI_Group_intersection.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_INTERSECTION_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_intersection( group1, group2, group_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_rank( group, rank )
MPI_Group group;
int * rank;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_rank - profiling wrapper for MPI_Group_rank.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_RANK_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Group_rank.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_RANK_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_rank( group, rank );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_range_excl( group, n, ranges, newgroup )
MPI_Group group;
int n;
int ranges[][3];
MPI_Group * newgroup;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_range_excl - profiling wrapper for MPI_Group_range_excl.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_RANGE_EXCL_ID, attributed to MPE_COMM_NULL); the real
    work is delegated to PMPI_Group_range_excl.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_RANGE_EXCL_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_range_excl( group, n, ranges, newgroup );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_range_incl( group, n, ranges, newgroup )
MPI_Group group;
int n;
int ranges[][3];
MPI_Group * newgroup;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_range_incl - profiling wrapper for MPI_Group_range_incl.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_RANGE_INCL_ID, attributed to MPE_COMM_NULL); the real
    work is delegated to PMPI_Group_range_incl.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_RANGE_INCL_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_range_incl( group, n, ranges, newgroup );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_size( group, size )
MPI_Group group;
int * size;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_size - profiling wrapper for MPI_Group_size.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_SIZE_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Group_size.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_SIZE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_size( group, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_translate_ranks( group_a, n, ranks_a, group_b, ranks_b )
MPI_Group group_a;
int n;
int * ranks_a;
MPI_Group group_b;
int * ranks_b;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_translate_ranks - profiling wrapper for
    MPI_Group_translate_ranks.  Logs the begin and end of the time
    spent in the call (state MPE_GROUP_TRANSLATE_RANKS_ID, attributed
    to MPE_COMM_NULL); the real work is delegated to
    PMPI_Group_translate_ranks.  When MAKE_SAFE_PMPI_CALL is defined,
    logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_TRANSLATE_RANKS_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_translate_ranks( group_a, n, ranks_a,
                                          group_b, ranks_b );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Group_union( group1, group2, group_out )
MPI_Group group1;
MPI_Group group2;
MPI_Group * group_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Group_union - profiling wrapper for MPI_Group_union.
    Logs the begin and end of the time spent in the call (state
    MPE_GROUP_UNION_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Group_union.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GROUP_UNION_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Group_union( group1, group2, group_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Intercomm_create( local_comm, local_leader, peer_comm, remote_leader, tag, comm_out )
MPI_Comm local_comm;
int local_leader;
MPI_Comm peer_comm;
int remote_leader;
int tag;
MPI_Comm * comm_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Intercomm_create - profiling wrapper for MPI_Intercomm_create.
    Logs the begin and end of the time spent in the call (state
    MPE_INTERCOMM_CREATE_ID, attributed to 'local_comm'); the real
    work is delegated to PMPI_Intercomm_create.  After the PMPI call,
    the new intercommunicator '*comm_out' is registered with the
    logger via MPE_LOG_INTERCOMM.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(local_comm,MPE_INTERCOMM_CREATE_ID)
  MPE_LOG_COMM_CHECK(local_comm)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Intercomm_create( local_comm, local_leader,
                                     peer_comm, remote_leader,
                                     tag, comm_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTERCOMM(local_comm,*comm_out,CLOG_COMM_INTER_CREATE)

  MPE_LOG_STATE_END(local_comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Intercomm_merge( comm, high, comm_out )
MPI_Comm comm;
int high;
MPI_Comm * comm_out;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Intercomm_merge - profiling wrapper for MPI_Intercomm_merge.
    Logs the begin and end of the time spent in the call (state
    MPE_INTERCOMM_MERGE_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Intercomm_merge.  After the PMPI call, the
    merged intracommunicator '*comm_out' is registered with the logger
    via MPE_LOG_INTRACOMM.  When MAKE_SAFE_PMPI_CALL is defined,
    logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_INTERCOMM_MERGE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Intercomm_merge( comm, high, comm_out );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTRACOMM(comm,*comm_out,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Keyval_create( copy_fn, delete_fn, keyval, extra_state )
MPI_Copy_function * copy_fn;
MPI_Delete_function * delete_fn;
int * keyval;
void * extra_state;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Keyval_create - profiling wrapper for MPI_Keyval_create.
    Logs the begin and end of the time spent in the call (state
    MPE_KEYVAL_CREATE_ID, attributed to MPE_COMM_NULL); the real work
    is delegated to PMPI_Keyval_create.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_KEYVAL_CREATE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Keyval_create( copy_fn, delete_fn, keyval, extra_state );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Keyval_free( keyval )
int * keyval;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Keyval_free - profiling wrapper for MPI_Keyval_free.
    Logs the begin and end of the time spent in the call (state
    MPE_KEYVAL_FREE_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Keyval_free.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_KEYVAL_FREE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Keyval_free( keyval );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Abort( comm, errorcode )
MPI_Comm comm;
int errorcode;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Abort - profiling wrapper for MPI_Abort.
    Logs the begin of the time spent in the call (state MPE_ABORT_ID,
    attributed to 'comm') before delegating to PMPI_Abort.  Since
    PMPI_Abort is expected to terminate the job, the state-end logging
    below is normally never reached.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ABORT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Abort( comm, errorcode );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Pretty implausible... */
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Error_class( errorcode, errorclass )
int errorcode;
int * errorclass;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Error_class - profiling wrapper for MPI_Error_class.
    Logs the begin and end of the time spent in the call (state
    MPE_ERROR_CLASS_ID, attributed to MPE_COMM_NULL); the real work is
    delegated to PMPI_Error_class.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_ERROR_CLASS_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Error_class( errorcode, errorclass );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Errhandler_create( function, errhandler )
MPI_Handler_function * function;
MPI_Errhandler * errhandler;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Errhandler_create - profiling wrapper for MPI_Errhandler_create.
    Logs the begin and end of the time spent in the call (state
    MPE_ERRHANDLER_CREATE_ID, attributed to MPE_COMM_NULL); the real
    work is delegated to PMPI_Errhandler_create.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_ERRHANDLER_CREATE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Errhandler_create( function, errhandler );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Errhandler_free( errhandler )
MPI_Errhandler * errhandler;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Errhandler_free - profiling wrapper for MPI_Errhandler_free.
    Logs the begin and end of the time spent in the call (state
    MPE_ERRHANDLER_FREE_ID, attributed to MPE_COMM_NULL); the real
    work is delegated to PMPI_Errhandler_free.  When
    MAKE_SAFE_PMPI_CALL is defined, logging is suppressed around the
    PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_ERRHANDLER_FREE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Errhandler_free( errhandler );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Errhandler_get( comm, errhandler )
MPI_Comm comm;
MPI_Errhandler * errhandler;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Errhandler_get - profiling wrapper for MPI_Errhandler_get.
    Logs the begin and end of the time spent in the call (state
    MPE_ERRHANDLER_GET_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Errhandler_get.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ERRHANDLER_GET_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Errhandler_get( comm, errhandler );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Error_string( errorcode, string, resultlen )
int errorcode;
char * string;
int * resultlen;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Error_string - profiling wrapper for MPI_Error_string.
    Logs the begin and end of the time spent in the call (state
    MPE_ERROR_STRING_ID, attributed to MPE_COMM_NULL); the real work
    is delegated to PMPI_Error_string.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_ERROR_STRING_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Error_string( errorcode, string, resultlen );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Errhandler_set( comm, errhandler )
MPI_Comm comm;
MPI_Errhandler errhandler;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Errhandler_set - profiling wrapper for MPI_Errhandler_set.
    Logs the begin and end of the time spent in the call (state
    MPE_ERRHANDLER_SET_ID, attributed to 'comm'); the real work is
    delegated to PMPI_Errhandler_set.  When MAKE_SAFE_PMPI_CALL is
    defined, logging is suppressed around the PMPI call.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ERRHANDLER_SET_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Errhandler_set( comm, errhandler );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

#define MAKE_PROCNAME_FILE 1

/* Copy of CLOG_Util_getenvbool() in log_mpi_util.c */
int MPE_Util_getenvbool( char *env_var, int default_value );

int  MPI_Finalize( )
{
    MPE_State       *state;
    MPE_Event       *event;
    int              state_count[MPE_MAX_KNOWN_STATES];
    int              state_total[MPE_MAX_KNOWN_STATES];
    /* BUGFIX: these two arrays are filled and reduced over
       MPE_MAX_KNOWN_EVENTS entries below, so they must be sized by the
       event count, not the state count (buffer overrun otherwise
       whenever MPE_MAX_KNOWN_EVENTS > MPE_MAX_KNOWN_STATES). */
    int              event_count[MPE_MAX_KNOWN_EVENTS];
    int              event_total[MPE_MAX_KNOWN_EVENTS];
    int              returnVal, idx;

#if defined( MAKE_PROCNAME_FILE )
    MPI_Status       status;
    FILE            *procname_file = NULL;
    char             procname_file_str[LOGFILENAME_STRLEN] = {0};
    char             processor_name[MPI_MAX_PROCESSOR_NAME] = {0};
    int              namelen;
    int              world_size;
    int              isOK2procname, isGO4procname;
#endif

    MPE_LOG_SWITCH_DECL
    MPE_LOG_SOLO_EVENT_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Finalize - prototyping replacement for MPI_Finalize
    Merge and write the CLOG logfile, optionally write a rank-to-processor
    name file, then call PMPI_Finalize.
*/

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK

    is_thisfn_logged  = 1;
    MPE_LOG_SOLO_EVENT( CLOG_CommSet->IDs4world, THREADID, MPE_COMM_FINALIZE_ID )

    /*
       To guard again erroneous implementation of PMPI_Finalize which
       make MPI_ calls, e.g. BG/L, from calling MPE_Log_events
       i.e. writing to the CLOG's stream when it is already closed in
       MPE_Finish_log(), turn the trace off explicitly.
    */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    /* set the total number of state calls by any processor */
    for ( idx = 0; idx < MPE_MAX_KNOWN_STATES; idx++ )
        state_count[idx] = states[idx].n_calls;
    PMPI_Reduce( state_count, state_total, MPE_MAX_KNOWN_STATES,
                 MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );

    /* set the total number of event calls by any processor */
    for ( idx = 0; idx < MPE_MAX_KNOWN_EVENTS; idx++ )
        event_count[idx] = events[idx].n_calls;
    PMPI_Reduce( event_count, event_total, MPE_MAX_KNOWN_EVENTS,
                 MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );

    if ( procid_0 == 0 ) {
        fprintf( stderr, "Writing logfile....\n" );
        /* Describe only the states/events that some process actually used */
        for ( idx = 0; idx < MPE_MAX_KNOWN_STATES; idx++ ) {
            if (state_total[idx] > 0) {
                state  = &states[idx];
                MPE_Describe_known_state( CLOG_CommSet->IDs4world, THREADID,
                                          state->stateID,
                                          state->start_evtID,
                                          state->final_evtID, 
                                          state->name, state->color,
                                          state->format );
            }
        }
        for ( idx = 0; idx < MPE_MAX_KNOWN_EVENTS; idx++ ) {
            if (event_total[idx] > 0) {
                event  = &events[idx];
                MPE_Describe_known_event( CLOG_CommSet->IDs4world, THREADID,
                                          event->eventID,
                                          event->name, event->color,
                                          NULL );
            }
        }
    }
    MPE_LOG_THREAD_UNLOCK

    MPE_Finish_log( logFileName_0 );
    if ( procid_0 == 0 ) {
        fprintf( stderr, "Finished writing logfile %s.\n",
                 MPE_Log_merged_logfilename() );
        fflush( stderr );
    }

    MPE_LOG_THREAD_LOCK
    /* Recover all of the allocated requests */
    rq_end( requests_avail_0 );
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_PROCNAME_FILE )
    /* assuming MPE_LOG_RANK2PROCNAME is false if not defined */
    isGO4procname = MPE_Util_getenvbool( "MPE_LOG_RANK2PROCNAME", 0 );
    /* Let everyone in MPI_COMM_WORLD know what root has */
    PMPI_Bcast( &isGO4procname, 1, MPI_INT, 0, MPI_COMM_WORLD );

    if ( isGO4procname ) {
#define PROCNAME_TAG 1099
        PMPI_Barrier( MPI_COMM_WORLD );
        /* Initialize the flag to create a procname file to false */
        isOK2procname = 0;
        if ( procid_0 == 0 ) {
            /* BUGFIX: build "<merged logfile>.pnm" safely.  Reserve room
               for the 4-char ".pnm" suffix plus the NUL, and terminate
               explicitly since strncpy() does not guarantee it. */
            strncpy( procname_file_str, MPE_Log_merged_logfilename(),
                     LOGFILENAME_STRLEN - 5 );
            procname_file_str[LOGFILENAME_STRLEN - 5] = '\0';
            strcat( procname_file_str, ".pnm" );
            procname_file = fopen( procname_file_str, "w" );
            if ( procname_file != NULL ) {
                /* If fopen() returns OK, set flag to true */
                isOK2procname = 1;
                fprintf( stderr, "Writing MPI_processor_name file....\n" );
                fflush( stderr );
            }
            else {
                fprintf( stderr, "Failed to open %s!\n", procname_file_str );
                fflush( stderr );
            }
        }
        /* If the procname file is created OK, let everybody know */
        PMPI_Bcast( &isOK2procname, 1, MPI_INT, 0, MPI_COMM_WORLD );

        if ( isOK2procname ) {
            PMPI_Get_processor_name( processor_name, &namelen );
            PMPI_Comm_size( MPI_COMM_WORLD, &world_size );
            if ( procid_0 == 0 ) {
                /* Rank 0 writes its own name, then collects the others */
                idx = procid_0;
                fprintf( procname_file, "Rank %d : %s\n", idx, processor_name );
                for ( idx = 1; idx < world_size; idx++ ) {
                    /* Fixed-size receive: name arrives NUL-terminated in a
                       MPI_MAX_PROCESSOR_NAME buffer, so no length message
                       is needed. */
                    PMPI_Recv( processor_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR,
                               idx, PROCNAME_TAG, MPI_COMM_WORLD, &status );
                    fprintf( procname_file, "Rank %d : %s\n",
                             idx, processor_name );
                }
                fflush( procname_file );
                fclose( procname_file );
                fprintf( stderr, "Finished writing processor name file %s.\n",
                         procname_file_str );
                fflush( stderr );
            }
            else {
                PMPI_Send( processor_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR,
                           0, PROCNAME_TAG, MPI_COMM_WORLD );
            }
        }
        PMPI_Barrier( MPI_COMM_WORLD );
    }  /* endof if ( isGO4procname ) */
#endif

    returnVal = PMPI_Finalize();

    return returnVal;
}

int  MPI_Get_processor_name( name, resultlen )
char * name;
int * resultlen;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Get_processor_name - prototyping replacement for MPI_Get_processor_name
    Log the beginning and ending of the time spent in MPI_Get_processor_name calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GET_PROCESSOR_NAME_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Get_processor_name( name, resultlen );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

/*
int is_mpe_f2c = 0;
int MPE_Init_mpi( int *argc, char ***argv );
int MPE_Init_mpi( int *argc, char ***argv )
{
    if ( is_mpe_f2c ) {
        MPI_Fint ierr;
        printf( "calling fortran pmpi_init_()\n" );
        pmpi_init_( &ierr );
        return (int) ierr;
    }
    else
        return PMPI_Init( argc, argv );
}
*/

/*
 * Replacement for MPI_Init.  Initializes logging and sets up basic
 * state definitions, including default color/pattern values
 */
int  MPI_Init( argc, argv )
int     *argc;
char  ***argv;
{
    int   returnVal;

    MPE_LOG_SWITCH_DECL
    MPE_LOG_SOLO_EVENT_DECL
    MPE_LOG_THREADSTM_DECL

    /* Initialize the THREADSTM to validate MPE_LOG_{ON/OFF} and THREADID. */
    MPE_LOG_THREAD_INIT
    MPE_LOG_THREADSTM_GET

    /* Keep logging off until MPE_Init_log() has set up the CLOG stream */
#if defined( MAKE_SAFE_PMPI_CALL )
    is_thisfn_logged  = 1;
    MPE_LOG_OFF
#endif

    /*
    returnVal = MPE_Init_mpi( argc, argv );
    */
    returnVal = PMPI_Init( argc, argv );

    MPE_Init_log();
    PMPI_Comm_rank( MPI_COMM_WORLD, &procid_0 );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Initialize the _selected_ MPI and MPE logging internal states */
    MPE_Init_states_events();

#ifdef HAVE___ARGV
    if ( argv == NULL )
        argv = &__argv;
#endif

    /*  Set default logfilename  */  
    /* NOTE(review): assumes (*argv)[0] fits in logFileName_0 --
       confirm the buffer is large enough for the executable path. */
    if ( argv != NULL )
        sprintf( logFileName_0, "%s", (*argv)[0] );
    else
        sprintf( logFileName_0, "Unknown" );

    rq_init( requests_avail_0 );
    is_mpilog_on = 1;
    IS_MPELOG_ON = 1;

    MPE_LOG_SOLO_EVENT( CLOG_CommSet->IDs4world, THREADID, MPE_COMM_INIT_ID )
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

#if defined( HAVE_MPI_INIT_THREAD )
int  MPI_Init_thread( argc, argv, required, provided )
int    *argc;
char ***argv;
int     required;
int    *provided;
{
    int   returnVal;

    MPE_LOG_SWITCH_DECL
    MPE_LOG_SOLO_EVENT_DECL
    MPE_LOG_THREADSTM_DECL

    /* Initialize the THREADSTM to validate MPE_LOG_{ON/OFF} and THREADID. */
    MPE_LOG_THREAD_INIT
    MPE_LOG_THREADSTM_GET

    /* Keep logging off until MPE_Init_log() has set up the CLOG stream */
#if defined( MAKE_SAFE_PMPI_CALL )
    is_thisfn_logged  = 1;
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Init_thread( argc, argv, required, provided );

    MPE_Init_log();
    PMPI_Comm_rank( MPI_COMM_WORLD, &procid_0 );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Initialize the _selected_ MPI and MPE logging internal states */
    MPE_Init_states_events();

#ifdef HAVE___ARGV
    if ( argv == NULL )
        argv = &__argv;
#endif

    /*  Set default logfilename  */
    /* NOTE(review): assumes (*argv)[0] fits in logFileName_0 --
       confirm the buffer is large enough for the executable path. */
    if ( argv != NULL )
        sprintf( logFileName_0, "%s", (*argv)[0] );
    else
        sprintf( logFileName_0, "Unknown" );

    rq_init( requests_avail_0 );
    is_mpilog_on = 1;
    IS_MPELOG_ON = 1;

    MPE_LOG_SOLO_EVENT( CLOG_CommSet->IDs4world, THREADID, MPE_COMM_INIT_ID )
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}
#endif


/*
    MPI_Initialized - prototyping replacement for MPI_Initialized
    Log the beginning and ending of the time spent in MPI_Initialized calls.
*/
/*
int  MPI_Initialized( flag )
int * flag;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_INITIALIZED_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Initialized( flag );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}
*/

#ifdef FOO
/*
   Use the regular routines for these.  Note that the state logging needs
   MPI_Wtime; make sure that it uses PMPI_Wtime if you use these
*/
double  MPI_Wtick(  )
{
  double  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Wtick - prototyping replacement for MPI_Wtick
    Log the beginning and ending of the time spent in MPI_Wtick calls.
    Compiled only when FOO is defined (normally disabled); see the
    warning above about the state logging's own use of MPI_Wtime.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_WTICK_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Wtick(  );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

double  MPI_Wtime(  )
{
  double  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Wtime - prototyping replacement for MPI_Wtime
    Log the beginning and ending of the time spent in MPI_Wtime calls.
    Compiled only when FOO is defined (normally disabled); see the
    warning above about the state logging's own use of MPI_Wtime.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_WTIME_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Wtime(  );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}
#endif

int  MPI_Address( location, address )
void * location;
MPI_Aint * address;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Address - prototyping replacement for MPI_Address
    Log the beginning and ending of the time spent in MPI_Address calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_ADDRESS_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Address( location, address );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Bsend( buf, count, datatype, dest, tag, comm )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
{
  int  returnVal;
  int  size;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Bsend - prototyping replacement for MPI_Bsend
    Log the beginning and ending of the time spent in MPI_Bsend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_BSEND_ID)

  /* Record the send event with the message size in bytes */
  PMPI_Type_size( datatype, &size );
  MPE_LOG_COMM_SEND( comm, dest, tag, count * size )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Bsend( buf, count, datatype, dest, tag, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Bsend_init( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Bsend_init - prototyping replacement for MPI_Bsend_init
    Log the beginning and ending of the time spent in MPI_Bsend_init calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_BSEND_INIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Bsend_init( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Note not started yet ... */
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 1 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Buffer_attach( buffer, size )
void * buffer;
int size;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Buffer_attach - prototyping replacement for MPI_Buffer_attach
    Log the beginning and ending of the time spent in MPI_Buffer_attach calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_BUFFER_ATTACH_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Buffer_attach( buffer, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Buffer_detach( buffer, size )
void * buffer;
int * size;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Buffer_detach - prototyping replacement for MPI_Buffer_detach
    Log the beginning and ending of the time spent in MPI_Buffer_detach calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_BUFFER_DETACH_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Buffer_detach( buffer, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Cancel( request )
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Cancel - prototyping replacement for MPI_Cancel
    Log the beginning and ending of the time spent in MPI_Cancel calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_CANCEL_ID)

  /* Mark the tracked request as cancelled before the PMPI call */
  MPE_Req_cancel( *request );
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Cancel( request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Request_free( request )
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Request_free - prototyping replacement for MPI_Request_free
    Log the beginning and ending of the time spent in MPI_Request_free calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_REQUEST_FREE_ID)

  /* Drop the request from MPE's tracking table before it is freed */
  MPE_Req_remove( *request );
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Request_free( request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Recv_init( buf, count, datatype, source, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int source;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Recv_init - prototyping replacement for MPI_Recv_init
    Log the beginning and ending of the time spent in MPI_Recv_init calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_RECV_INIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Recv_init( buf, count, datatype, source, tag,
                              comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Track the request only on success; *request is undefined on failure */
  if (returnVal == MPI_SUCCESS) {
      /* Not started yet ... */
      MPE_REQ_ADD_RECV( *request, datatype, count, source, tag, comm, 1 );
  }

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Send_init( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Send_init - prototyping replacement for MPI_Send_init
    Log the beginning and ending of the time spent in MPI_Send_init calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SEND_INIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Send_init( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Note not started yet ... */
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 1 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Get_elements( status, datatype, elements )
MPI_Status * status;
MPI_Datatype datatype;
int * elements;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Get_elements - prototyping replacement for MPI_Get_elements
    Log the beginning and ending of the time spent in MPI_Get_elements calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GET_ELEMENTS_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Get_elements( status, datatype, elements );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Get_count( status, datatype, count )
MPI_Status * status;
MPI_Datatype datatype;
int * count;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Get_count - prototyping replacement for MPI_Get_count
    Log the beginning and ending of the time spent in MPI_Get_count calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_GET_COUNT_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Get_count( status, datatype, count );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Ibsend( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Ibsend - prototyping replacement for MPI_Ibsend
    Log the beginning and ending of the time spent in MPI_Ibsend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_IBSEND_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Ibsend( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 0 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Iprobe( source, tag, comm, flag, status )
int source;
int tag;
MPI_Comm comm;
int * flag;
MPI_Status * status;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

#ifdef HAVE_MPI_STATUS_IGNORE
  /* Substitute a local status so the PROC_NULL repair below can write it */
  MPI_Status    tmp_status;
  if (status == MPI_STATUS_IGNORE)
      status = &tmp_status;
#endif

/*
    MPI_Iprobe - prototyping replacement for MPI_Iprobe
    Log the beginning and ending of the time spent in MPI_Iprobe calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_IPROBE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Iprobe( source, tag, comm, flag, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

#ifdef HAVE_MPI_STATUS_BROKEN_ON_PROC_NULL
  if (status && source == MPI_PROC_NULL) {
      status->MPI_SOURCE = MPI_PROC_NULL;
      status->MPI_TAG    = MPI_ANY_TAG;
#ifdef HAVE_MPI_STATUS_SET_ELEMENTS
      /* BUGFIX: probe has no datatype argument, so the original reference
         to "datatype" did not compile.  A zero-element count is
         datatype-independent, so MPI_CHAR serves; use the PMPI entry
         point to avoid re-entering the profiling wrappers. */
      PMPI_Status_set_elements( status, MPI_CHAR, 0 );
#endif
  }
#endif

  return returnVal;
}

int  MPI_Irecv( buf, count, datatype, source, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int source;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Irecv - prototyping replacement for MPI_Irecv
    Log the beginning and ending of the time spent in MPI_Irecv calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_IRECV_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Irecv( buf, count, datatype, source, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Track the request only on success; *request is undefined on failure */
  if (returnVal == MPI_SUCCESS) {
      MPE_REQ_ADD_RECV( *request, datatype, count, source, tag, comm, 0 )
  }

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Irsend( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Irsend - prototyping replacement for MPI_Irsend
    Log the beginning and ending of the time spent in MPI_Irsend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_IRSEND_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Irsend( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 0 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Isend( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  int  size;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Isend - prototyping replacement for MPI_Isend
    Log the beginning and ending of the time spent in MPI_Isend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ISEND_ID)

  PMPI_Type_size( datatype, &size );
  MPE_LOG_COMM_SEND( comm, dest, tag, size * count )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Isend( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 0 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Issend( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  int  size;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Issend - prototyping replacement for MPI_Issend
    Log the beginning and ending of the time spent in MPI_Issend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_ISSEND_ID)

  PMPI_Type_size( datatype, &size );
  MPE_LOG_COMM_SEND( comm, dest, tag, size * count )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Issend( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 0 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Pack( inbuf, incount, type, outbuf, outcount, position, comm )
void * inbuf;
int incount;
MPI_Datatype type;
void * outbuf;
int outcount;
int * position;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Pack - prototyping replacement for MPI_Pack
    Log the beginning and ending of the time spent in MPI_Pack calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_PACK_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Pack( inbuf, incount, type, outbuf, outcount,
                         position, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Pack_size( incount, datatype, comm, size )
int incount;
MPI_Datatype datatype;
MPI_Comm comm;
int * size;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Pack_size - prototyping replacement for MPI_Pack_size
    Log the beginning and ending of the time spent in MPI_Pack_size calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_PACK_SIZE_ID)
  MPE_LOG_THREAD_UNLOCK

  /* Suppress MPE logging across the PMPI call in case the underlying
     implementation itself calls MPI_ wrappers (see note in MPI_Finalize). */
#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Pack_size( incount, datatype, comm, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Probe( source, tag, comm, status )
int source;
int tag;
MPI_Comm comm;
MPI_Status * status;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

#ifdef HAVE_MPI_STATUS_IGNORE
  /* Substitute a local status so the PROC_NULL repair below can write it */
  MPI_Status    tmp_status;
  if (status == MPI_STATUS_IGNORE)
      status = &tmp_status;
#endif

/*
    MPI_Probe - prototyping replacement for MPI_Probe
    Log the beginning and ending of the time spent in MPI_Probe calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_PROBE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Probe( source, tag, comm, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

#ifdef HAVE_MPI_STATUS_BROKEN_ON_PROC_NULL
  if (status && source == MPI_PROC_NULL) {
      status->MPI_SOURCE = MPI_PROC_NULL;
      status->MPI_TAG    = MPI_ANY_TAG;
#ifdef HAVE_MPI_STATUS_SET_ELEMENTS
      /* BUGFIX: probe has no datatype argument, so the original reference
         to "datatype" did not compile.  A zero-element count is
         datatype-independent, so MPI_CHAR serves; use the PMPI entry
         point to avoid re-entering the profiling wrappers. */
      PMPI_Status_set_elements( status, MPI_CHAR, 0 );
#endif
  }
#endif

  return returnVal;
}

int  MPI_Recv( buf, count, datatype, source, tag, comm, status )
void * buf;
int count;
MPI_Datatype datatype;
int source;
int tag;
MPI_Comm comm;
MPI_Status * status;
{
  int  returnVal, acount;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

#ifdef HAVE_MPI_STATUS_IGNORE
  /* Substitute a local status: the logging below needs MPI_SOURCE/MPI_TAG */
  MPI_Status    tmp_status;
  if (status == MPI_STATUS_IGNORE)
      status = &tmp_status;
#endif

/*
    MPI_Recv - prototyping replacement for MPI_Recv
    Log the beginning and ending of the time spent in MPI_Recv calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_RECV_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Recv( buf, count, datatype, source, tag, comm, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

#ifdef HAVE_MPI_STATUS_BROKEN_ON_PROC_NULL
  /* Some implementations do not fill in status for MPI_PROC_NULL; repair it */
  if (status && source == MPI_PROC_NULL) {
      status->MPI_SOURCE = MPI_PROC_NULL;
      status->MPI_TAG    = MPI_ANY_TAG;
#ifdef HAVE_MPI_STATUS_SET_ELEMENTS
      PMPI_Status_set_elements( status, datatype, 0 );
#endif
  }
#endif

  MPE_LOG_THREAD_LOCK
  /* Log the receive event with the actual received size in bytes */
  if (returnVal == MPI_SUCCESS) {
      PMPI_Get_count( status, MPI_BYTE, &acount );
      MPE_LOG_COMM_RECV( comm, status->MPI_SOURCE, status->MPI_TAG, acount )
  }

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Rsend( buf, count, datatype, dest, tag, comm )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
{
  int  returnVal;
  int  size;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Rsend - prototyping replacement for MPI_Rsend
    Log the beginning and ending of the time spent in MPI_Rsend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_RSEND_ID)

  /* Record the send event with the message size in bytes */
  PMPI_Type_size( datatype, &size );
  MPE_LOG_COMM_SEND( comm, dest, tag, count * size )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Rsend( buf, count, datatype, dest, tag, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Rsend_init( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Rsend_init - prototyping replacement for MPI_Rsend_init
    Log the beginning and ending of the time spent in MPI_Rsend_init calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_RSEND_INIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Rsend_init( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Note not started yet ... */
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 1 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Send( buf, count, datatype, dest, tag, comm )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
{
  int  returnVal;
  int  size;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Send - prototyping replacement for MPI_Send
    Log the beginning and ending of the time spent in MPI_Send calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SEND_ID)

  /* Record the send event with the message size in bytes */
  PMPI_Type_size( datatype, &size );
  MPE_LOG_COMM_SEND( comm, dest, tag, size * count )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Send( buf, count, datatype, dest, tag, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Sendrecv( sendbuf, sendcount, sendtype, dest, sendtag, 
                   recvbuf, recvcount, recvtype, source, recvtag,
                   comm, status )
void * sendbuf;
int sendcount;
MPI_Datatype sendtype;
int dest;
int sendtag;
void * recvbuf;
int recvcount;
MPI_Datatype recvtype;
int source;
int recvtag;
MPI_Comm comm;
MPI_Status * status;
{
  int  returnVal;
  int  acount, sendsize;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

#ifdef HAVE_MPI_STATUS_IGNORE
  /* Substitute a local status so the receive side can always be logged */
  MPI_Status    tmp_status;
  if (status == MPI_STATUS_IGNORE)
      status = &tmp_status;
#endif

/*
    MPI_Sendrecv - prototyping replacement for MPI_Sendrecv
    Log the beginning and ending of the time spent in MPI_Sendrecv calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SENDRECV_ID)

      /* Log the send half with the outgoing message size in bytes */
      PMPI_Type_size( sendtype, &sendsize );
      MPE_LOG_COMM_SEND( comm, dest, sendtag, sendcount * sendsize )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Sendrecv( sendbuf, sendcount, sendtype, dest, sendtag, 
                             recvbuf, recvcount, recvtype, source, recvtag, 
                             comm, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

#ifdef HAVE_MPI_STATUS_BROKEN_ON_PROC_NULL
  /* Repair the status fields that broken implementations leave unset
     after a receive from MPI_PROC_NULL */
  if (status && source == MPI_PROC_NULL) {
      status->MPI_SOURCE = MPI_PROC_NULL;
      status->MPI_TAG    = MPI_ANY_TAG;
#ifdef HAVE_MPI_STATUS_SET_ELEMENTS
      /* Bug fix: this function has no "datatype" argument; the receive
         half of a Sendrecv is described by recvtype. */
      PMPI_Status_set_elements( status, recvtype, 0 );
#endif
  }
#endif

  MPE_LOG_THREAD_LOCK
      /* Log the receive half using the actual byte count from the status */
      PMPI_Get_count( status, MPI_BYTE, &acount );
      MPE_LOG_COMM_RECV( comm, status->MPI_SOURCE, status->MPI_TAG, acount )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Sendrecv_replace( buf, count, datatype, dest, sendtag, source, 
                           recvtag, comm, status )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int sendtag;
int source;
int recvtag;
MPI_Comm comm;
MPI_Status * status;
{
  int  returnVal;
  int  acount, sendsize;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

#ifdef HAVE_MPI_STATUS_IGNORE
  /* Substitute a local status so the receive side can always be logged */
  MPI_Status    tmp_status;
  if (status == MPI_STATUS_IGNORE)
      status = &tmp_status;
#endif

/*
    MPI_Sendrecv_replace - prototyping replacement for MPI_Sendrecv_replace
    Log the beginning and ending of the time spent in MPI_Sendrecv_replace calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SENDRECV_REPLACE_ID)

      /* Log the send half with the outgoing message size in bytes */
      PMPI_Type_size( datatype, &sendsize );
      MPE_LOG_COMM_SEND( comm, dest, sendtag, count * sendsize )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Sendrecv_replace( buf, count, datatype, dest, 
                                     sendtag, source, recvtag, comm, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

#ifdef HAVE_MPI_STATUS_BROKEN_ON_PROC_NULL
  /* Repair the status fields that broken implementations leave unset
     after a receive from MPI_PROC_NULL */
  if (status && source == MPI_PROC_NULL) {
      status->MPI_SOURCE = MPI_PROC_NULL;
      status->MPI_TAG    = MPI_ANY_TAG;
#ifdef HAVE_MPI_STATUS_SET_ELEMENTS
      PMPI_Status_set_elements( status, datatype, 0 );
#endif
  }
#endif

  MPE_LOG_THREAD_LOCK
      /* Log the receive half using the actual byte count from the status */
      PMPI_Get_count( status, MPI_BYTE, &acount );
      MPE_LOG_COMM_RECV( comm, status->MPI_SOURCE, status->MPI_TAG, acount )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Ssend( buf, count, datatype, dest, tag, comm )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
{
  int  returnVal, size;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Ssend - prototyping replacement for MPI_Ssend
    Log the beginning and ending of the time spent in MPI_Ssend calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SSEND_ID)

  /* Record the send event with the message size in bytes */
  PMPI_Type_size( datatype, &size );
  MPE_LOG_COMM_SEND( comm, dest, tag, count * size )
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Ssend( buf, count, datatype, dest, tag, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Ssend_init( buf, count, datatype, dest, tag, comm, request )
void * buf;
int count;
MPI_Datatype datatype;
int dest;
int tag;
MPI_Comm comm;
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Ssend_init - prototyping replacement for MPI_Ssend_init
    Log the beginning and ending of the time spent in MPI_Ssend_init calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_SSEND_INIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Ssend_init( buf, count, datatype, dest, tag, comm, request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Register the persistent send request so MPI_Start/Wait can log it */
  MPE_REQ_ADD_SEND( *request, datatype, count, dest, tag, comm, 1 )

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Start( request )
MPI_Request * request;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Start - prototyping replacement for MPI_Start
    Log the beginning and ending of the time spent in MPI_Start calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_START_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Start( request );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Mark the previously registered persistent request as started */
  MPE_REQ_START( *request )

  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Startall( count, array_of_requests )
int count;
MPI_Request * array_of_requests;
{
  int  returnVal;
  int  i;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Startall - prototyping replacement for MPI_Startall
    Log the beginning and ending of the time spent in MPI_Startall calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_STARTALL_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Startall( count, array_of_requests );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Mark each previously registered persistent request as started */
  for (i=0; i<count; i++)
      MPE_REQ_START( array_of_requests[i] )

  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Test( request, flag, status )
MPI_Request * request;
int * flag;
MPI_Status * status;
{
    int   returnVal;
    /* Save the handle: PMPI_Test may overwrite *request with
       MPI_REQUEST_NULL on completion, but logging needs the original */
    MPI_Request lreq = *request;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Test - prototyping replacement for MPI_Test
    Log the beginning and ending of the time spent in MPI_Test calls.
*/

#if defined( HAVE_MPI_STATUS_IGNORE )
    /* Substitute a local status so completion can always be logged */
    MPI_Status   tmp_status;
    if ( status == MPI_STATUS_IGNORE )
        status = &tmp_status;
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TEST_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Test( request, flag, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Only log completion when the request actually finished */
    if (*flag) 
        MPE_REQ_WAIT_TEST( lreq, status, "MPI_Test" )

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

int  MPI_Testall( count, array_of_requests, flag, array_of_statuses )
int count;
MPI_Request * array_of_requests;
int * flag;
MPI_Status * array_of_statuses;
{
    int  returnVal;
    int  i;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Testall - prototyping replacement for MPI_Testall
    Log the beginning and ending of the time spent in MPI_Testall calls.
*/
#if defined( HAVE_MPI_STATUSES_IGNORE )
    /* Substitute a real status array so completions can be logged;
       prefer alloca when available, otherwise malloc/free */
    int  is_malloced = 0;
    if ( array_of_statuses == MPI_STATUSES_IGNORE ) {
        MPE_LOG_THREAD_LOCK
#if ! defined( HAVE_ALLOCA )
        array_of_statuses = (MPI_Status *) malloc( count * sizeof(MPI_Status) );
        is_malloced = 1;
#else
        array_of_statuses = (MPI_Status *) alloca( count * sizeof(MPI_Status) );
        is_malloced = 0;
#endif
        MPE_LOG_THREAD_UNLOCK
    }
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TESTALL_ID)

    if (count > MPE_MAX_REQUESTS) {
        fprintf( stderr, __FILE__":MPI_Testall() - "
                         "Array Index Out of Bound Exception !"
                         "\t""count(%d) > MPE_MAX_REQUESTS(%d)\n",
                         count, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    /* Save the handles: PMPI_Testall replaces completed requests with
       MPI_REQUEST_NULL, but logging needs the originals afterwards */
    if (count <= MPE_MAX_REQUESTS) {
        for (i=0; i<count; i++) 
            req[i] = array_of_requests[i];
    }
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Testall( count, array_of_requests, flag,
                              array_of_statuses );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Statuses are only defined when all requests completed (*flag set) */
    if (*flag && count <= MPE_MAX_REQUESTS) {
        for (i=0; i < count; i++) {
            MPE_REQ_WAIT_TEST( req[i], &array_of_statuses[i], "MPI_Testall" )
        }
    }

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

#if defined( HAVE_MPI_STATUSES_IGNORE ) && ! defined( HAVE_ALLOCA )
    if ( is_malloced == 1 )
        free( array_of_statuses );
#endif

    return returnVal;
}

int  MPI_Testany( count, array_of_requests, index, flag, status )
int count;
MPI_Request * array_of_requests;
int * index;
int * flag;
MPI_Status * status;
{
    int  returnVal;
    int i;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Testany - prototyping replacement for MPI_Testany
    Log the beginning and ending of the time spent in MPI_Testany calls.
*/

#if defined( HAVE_MPI_STATUS_IGNORE )
    /* Substitute a local status so completion can always be logged */
    MPI_Status   tmp_status;
    if ( status == MPI_STATUS_IGNORE )
        status = &tmp_status;
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TESTANY_ID)

    if (count > MPE_MAX_REQUESTS) {
        fprintf( stderr, __FILE__":MPI_Testany() - "
                         "Array Index Out of Bound Exception !"
                         "\t""count(%d) > MPE_MAX_REQUESTS(%d)\n",
                         count, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    /* Save the handles: PMPI_Testany replaces the completed request with
       MPI_REQUEST_NULL, but logging needs the original afterwards */
    if (count <= MPE_MAX_REQUESTS) {
        for (i=0; i<count; i++) 
            req[i] = array_of_requests[i];
    }
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Testany( count, array_of_requests, index, flag, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Bug fix: when *flag is true but no request in the array was active,
       MPI sets *index to MPI_UNDEFINED (a negative value); indexing req[]
       with it would read out of bounds.  Require a valid index as well. */
    if (*flag && *index >= 0 && *index < count && count <= MPE_MAX_REQUESTS) 
        MPE_REQ_WAIT_TEST( req[*index], status, "MPI_Testany" )

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

int  MPI_Test_cancelled( status, flag )
MPI_Status * status;
int * flag;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Test_cancelled - prototyping replacement for MPI_Test_cancelled
    Log the beginning and ending of the time spent in MPI_Test_cancelled calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TEST_CANCELLED_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Test_cancelled( status, flag );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Testsome( incount, array_of_requests, outcount, 
                   array_of_indices, array_of_statuses )
int incount;
MPI_Request * array_of_requests;
int * outcount;
int * array_of_indices;
MPI_Status * array_of_statuses;
{
    int  returnVal;
    int  i;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Testsome - prototyping replacement for MPI_Testsome
    Log the beginning and ending of the time spent in MPI_Testsome calls.
*/

#if defined( HAVE_MPI_STATUSES_IGNORE )
    /* Substitute a real status array so completions can be logged;
       prefer alloca when available, otherwise malloc/free */
    int  is_malloced = 0;
    if ( array_of_statuses == MPI_STATUSES_IGNORE ) {
        MPE_LOG_THREAD_LOCK
#if ! defined( HAVE_ALLOCA )
        array_of_statuses = (MPI_Status *) malloc( incount
                                                 * sizeof(MPI_Status) );
        is_malloced = 1;
#else
        array_of_statuses = (MPI_Status *) alloca( incount
                                                 * sizeof(MPI_Status) );
        is_malloced = 0;
#endif
        MPE_LOG_THREAD_UNLOCK
    }
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TESTSOME_ID)

    if (incount > MPE_MAX_REQUESTS) {
        fprintf( stderr, __FILE__":MPI_Testsome() - "
                         "Array Index Out of Bound Exception !"
                         "\t""incount(%d) > MPE_MAX_REQUESTS(%d)\n",
                         incount, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    /* Save the handles: PMPI_Testsome replaces completed requests with
       MPI_REQUEST_NULL, but logging needs the originals afterwards */
    if (incount <= MPE_MAX_REQUESTS) {
        for (i=0; i<incount; i++) 
            req[i] = array_of_requests[i];
    }
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Testsome( incount, array_of_requests, outcount, 
                               array_of_indices, array_of_statuses );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Note: if *outcount is MPI_UNDEFINED (negative), the loop is skipped */
    if (incount <= MPE_MAX_REQUESTS) {
        for (i=0; i < *outcount; i++) {
             MPE_REQ_WAIT_TEST( req[array_of_indices[i]], &array_of_statuses[i], "MPI_Testsome" )
        }
    }

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

#if defined( HAVE_MPI_STATUSES_IGNORE ) && ! defined( HAVE_ALLOCA )
    if ( is_malloced == 1 )
        free( array_of_statuses );
#endif

    return returnVal;
}

int   MPI_Type_commit( datatype )
MPI_Datatype * datatype;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_commit - prototyping replacement for MPI_Type_commit
    Log the beginning and ending of the time spent in MPI_Type_commit calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_COMMIT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_commit( datatype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_contiguous( count, old_type, newtype )
int count;
MPI_Datatype old_type;
MPI_Datatype * newtype;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_contiguous - prototyping replacement for MPI_Type_contiguous
    Log the beginning and ending of the time spent in MPI_Type_contiguous calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_CONTIGUOUS_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_contiguous( count, old_type, newtype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_extent( datatype, extent )
MPI_Datatype datatype;
MPI_Aint * extent;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_extent - prototyping replacement for MPI_Type_extent
    Log the beginning and ending of the time spent in MPI_Type_extent calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_EXTENT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_extent( datatype, extent );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Type_free( datatype )
MPI_Datatype * datatype;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_free - prototyping replacement for MPI_Type_free
    Log the beginning and ending of the time spent in MPI_Type_free calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_FREE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_free( datatype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_hindexed( count, blocklens, indices, old_type, newtype )
int count;
int * blocklens;
MPI_Aint * indices;
MPI_Datatype old_type;
MPI_Datatype * newtype;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_hindexed - prototyping replacement for MPI_Type_hindexed
    Log the beginning and ending of the time spent in MPI_Type_hindexed calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_HINDEXED_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_hindexed( count, blocklens, indices,
                                  old_type, newtype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_hvector( count, blocklen, stride, old_type, newtype )
int count;
int blocklen;
MPI_Aint stride;
MPI_Datatype old_type;
MPI_Datatype * newtype;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_hvector - prototyping replacement for MPI_Type_hvector
    Log the beginning and ending of the time spent in MPI_Type_hvector calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_HVECTOR_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_hvector( count, blocklen, stride, old_type, newtype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_indexed( count, blocklens, indices, old_type, newtype )
int count;
int * blocklens;
int * indices;
MPI_Datatype old_type;
MPI_Datatype * newtype;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_indexed - prototyping replacement for MPI_Type_indexed
    Log the beginning and ending of the time spent in MPI_Type_indexed calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_INDEXED_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_indexed( count, blocklens, indices, old_type, newtype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Type_lb( datatype, displacement )
MPI_Datatype datatype;
MPI_Aint * displacement;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_lb - prototyping replacement for MPI_Type_lb
    Log the beginning and ending of the time spent in MPI_Type_lb calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_LB_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_lb( datatype, displacement );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Type_size( datatype, size )
MPI_Datatype datatype;
int          * size;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_size - prototyping replacement for MPI_Type_size
    Log the beginning and ending of the time spent in MPI_Type_size calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_SIZE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_size( datatype, size );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_struct( count, blocklens, indices, old_types, newtype )
int count;
int * blocklens;
MPI_Aint * indices;
MPI_Datatype * old_types;
MPI_Datatype * newtype;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_struct - prototyping replacement for MPI_Type_struct
    Log the beginning and ending of the time spent in MPI_Type_struct calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_STRUCT_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_struct( count, blocklens, indices, old_types, newtype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Type_ub( datatype, displacement )
MPI_Datatype datatype;
MPI_Aint * displacement;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_ub - prototyping replacement for MPI_Type_ub
    Log the beginning and ending of the time spent in MPI_Type_ub calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_UB_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_ub( datatype, displacement );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int  MPI_Type_vector( count, blocklen, stride, old_type, newtype )
int count;
int blocklen;
int stride;
MPI_Datatype old_type;
MPI_Datatype * newtype;
{
  int  returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Type_vector - prototyping replacement for MPI_Type_vector
    Log the beginning and ending of the time spent in MPI_Type_vector calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_TYPE_VECTOR_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Type_vector( count, blocklen, stride, old_type, newtype );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Unpack( inbuf, insize, position, outbuf, outcount, type, comm )
void * inbuf;
int insize;
int * position;
void * outbuf;
int outcount;
MPI_Datatype type;
MPI_Comm comm;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Unpack - prototyping replacement for MPI_Unpack
    Log the beginning and ending of the time spent in MPI_Unpack calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_UNPACK_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Unpack( inbuf, insize, position,
                           outbuf, outcount, type, comm );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Wait( request, status )
MPI_Request * request;
MPI_Status * status;
{
    int          returnVal;
    /* Save the handle: PMPI_Wait overwrites *request with
       MPI_REQUEST_NULL, but logging needs the original */
    MPI_Request  lreq = *request;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Wait - prototyping replacement for MPI_Wait
    Log the beginning and ending of the time spent in MPI_Wait calls.
*/

#if defined( HAVE_MPI_STATUS_IGNORE )
    /* Substitute a local status so completion can always be logged */
    MPI_Status   tmp_status;
    if ( status == MPI_STATUS_IGNORE )
        status = &tmp_status;
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_WAIT_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Wait( request, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_REQ_WAIT_TEST( lreq, status, "MPI_Wait" )

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

int  MPI_Waitall( count, array_of_requests, array_of_statuses )
int count;
MPI_Request * array_of_requests;
MPI_Status * array_of_statuses;
{
    int  returnVal;
    int  i;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Waitall - prototyping replacement for MPI_Waitall
    Log the beginning and ending of the time spent in MPI_Waitall calls.
*/

#if defined( HAVE_MPI_STATUSES_IGNORE )
    /* Substitute a real status array so completions can be logged;
       prefer alloca when available, otherwise malloc/free */
    int  is_malloced = 0;
    if ( array_of_statuses == MPI_STATUSES_IGNORE ) {
        MPE_LOG_THREAD_LOCK
#if ! defined( HAVE_ALLOCA )
        array_of_statuses = (MPI_Status *) malloc( count * sizeof(MPI_Status) );
        is_malloced = 1;
#else
        array_of_statuses = (MPI_Status *) alloca( count * sizeof(MPI_Status) );
        is_malloced = 0;
#endif
        MPE_LOG_THREAD_UNLOCK
    }
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_WAITALL_ID)

    if (count > MPE_MAX_REQUESTS) {
        fprintf( stderr, __FILE__":MPI_Waitall() - "
                         "Array Index Out of Bound Exception !"
                         "\t""count(%d) > MPE_MAX_REQUESTS(%d)\n",
                         count, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    /* Save the handles: PMPI_Waitall replaces completed requests with
       MPI_REQUEST_NULL, but logging needs the originals afterwards */
    if (count <= MPE_MAX_REQUESTS) {
        for (i=0; i<count; i++) 
            req[i] = array_of_requests[i];
    }
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Waitall( count, array_of_requests, array_of_statuses );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    if (count <= MPE_MAX_REQUESTS) {
        for (i=0; i < count; i++) {
            MPE_REQ_WAIT_TEST( req[i], &array_of_statuses[i], "MPI_Waitall" )
        }
    }

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

#if defined( HAVE_MPI_STATUSES_IGNORE ) && ! defined( HAVE_ALLOCA )
    if ( is_malloced == 1 )
        free( array_of_statuses );
#endif

    return returnVal;
}

int  MPI_Waitany( count, array_of_requests, index, status )
int count;
MPI_Request * array_of_requests;
int * index;
MPI_Status * status;
{
    int  returnVal;
    int  i;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Waitany - prototyping replacement for MPI_Waitany
    Log the beginning and ending of the time spent in MPI_Waitany calls.
*/

#if defined( HAVE_MPI_STATUS_IGNORE )
    /* Substitute a local status so completion can always be logged */
    MPI_Status   tmp_status;
    if ( status == MPI_STATUS_IGNORE )
        status = &tmp_status;
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_WAITANY_ID)

    if (count > MPE_MAX_REQUESTS) {
        fprintf( stderr, __FILE__":MPI_Waitany() - "
                         "Array Index Out of Bound Exception !"
                         "\t""count(%d) > MPE_MAX_REQUESTS(%d)\n",
                         count, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    /* Save the handles: PMPI_Waitany replaces the completed request with
       MPI_REQUEST_NULL, but logging needs the original afterwards */
    if (count <= MPE_MAX_REQUESTS) {
        for (i=0; i<count; i++) 
            req[i] = array_of_requests[i];
    }
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Waitany( count, array_of_requests, index, status );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Bug fix: the old test (*index <= MPE_MAX_REQUESTS) was off by one,
       accepted MPI_UNDEFINED (a negative *index, set when no request in
       the array was active), and ignored whether req[] was ever filled.
       A loggable completion requires 0 <= *index < count and that the
       handles were saved above (count <= MPE_MAX_REQUESTS). */
    if (*index == MPI_UNDEFINED) {
        /* No active request completed; nothing to log. */
    }
    else if (*index >= 0 && *index < count && count <= MPE_MAX_REQUESTS) {
        MPE_REQ_WAIT_TEST( req[*index], status, "MPI_Waitany" )
    }
    else {
        fprintf( stderr, __FILE__":MPI_Waitany() - "
                         "Array Index Out of Bound Exception !"
                         "\t""*index(%d) > MPE_MAX_REQUESTS(%d)\n",
                         *index, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

int  MPI_Waitsome( incount, array_of_requests, outcount,
                   array_of_indices, array_of_statuses )
int incount;
MPI_Request * array_of_requests;
int * outcount;
int * array_of_indices;
MPI_Status * array_of_statuses;
{
    int  returnVal;
    int  i;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

/*
    MPI_Waitsome - prototyping replacement for MPI_Waitsome
    Log the beginning and ending of the time spent in MPI_Waitsome calls.
*/

#if defined( HAVE_MPI_STATUSES_IGNORE )
    /* Substitute a real status array so completions can be logged;
       prefer alloca when available, otherwise malloc/free */
    int  is_malloced = 0;
    if ( array_of_statuses == MPI_STATUSES_IGNORE ) {
        MPE_LOG_THREAD_LOCK
#if ! defined( HAVE_ALLOCA )
        array_of_statuses = (MPI_Status *) malloc( incount
                                                 * sizeof(MPI_Status) );
        is_malloced = 1;
#else
        array_of_statuses = (MPI_Status *) alloca( incount
                                                 * sizeof(MPI_Status) );
        is_malloced = 0;
#endif
        MPE_LOG_THREAD_UNLOCK
    }
#endif

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_WAITSOME_ID)

    if (incount > MPE_MAX_REQUESTS) {
        fprintf( stderr, __FILE__":MPI_Waitsome() - "
                         "Array Index Out of Bound Exception !"
                         "\t""incount(%d) > MPE_MAX_REQUESTS(%d)\n",
                         incount, MPE_MAX_REQUESTS );
        fflush( stderr );
    }

    /* Save the handles: PMPI_Waitsome replaces completed requests with
       MPI_REQUEST_NULL, but logging needs the originals afterwards */
    if (incount <= MPE_MAX_REQUESTS) {
        for (i=0; i<incount; i++) 
            req[i] = array_of_requests[i];
    }
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

    returnVal = PMPI_Waitsome( incount, array_of_requests, outcount, 
                               array_of_indices, array_of_statuses );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    /* Note: if *outcount is MPI_UNDEFINED (negative), the loop is skipped */
    if (incount <= MPE_MAX_REQUESTS) {
        for (i=0; i < *outcount; i++) {
            MPE_REQ_WAIT_TEST( req[array_of_indices[i]], &array_of_statuses[i], "MPI_Waitsome" )
        }
    }

    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

#if defined( HAVE_MPI_STATUSES_IGNORE ) && ! defined( HAVE_ALLOCA )
    if ( is_malloced == 1 )
        free( array_of_statuses );
#endif

    return returnVal;
}

int   MPI_Cart_coords( comm, rank, maxdims, coords )
MPI_Comm comm;
int rank;
int maxdims;
int * coords;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Cart_coords - prototyping replacement for MPI_Cart_coords
    Log the beginning and ending of the time spent in MPI_Cart_coords calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_CART_COORDS_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Cart_coords( comm, rank, maxdims, coords );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON     /* re-enable logging after the real call returns */
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

int   MPI_Cart_create( comm_old, ndims, dims, periods, reorder, comm_cart )
MPI_Comm comm_old;
int ndims;
int * dims;
int * periods;
int reorder;
MPI_Comm * comm_cart;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Cart_create - prototyping replacement for MPI_Cart_create
    Log the beginning and ending of the time spent in MPI_Cart_create calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm_old,MPE_CART_CREATE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF    /* suppress logging of MPI calls made inside PMPI */
#endif

  returnVal = PMPI_Cart_create( comm_old, ndims, dims, periods, reorder,
                                comm_cart );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  /* Register the newly created intracommunicator with the logger */
  MPE_LOG_INTRACOMM(comm_old,*comm_cart,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm_old,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

/*
    MPI_Cart_get - prototyping replacement for MPI_Cart_get.
    Logs the beginning and ending of the time spent in MPI_Cart_get calls.
*/
int MPI_Cart_get( MPI_Comm comm, int maxdims, int *dims, int *periods,
                  int *coords )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_CART_GET_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Cart_get( comm, maxdims, dims, periods, coords );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Cart_map - prototyping replacement for MPI_Cart_map.
    Logs the beginning and ending of the time spent in MPI_Cart_map calls.
*/
int MPI_Cart_map( MPI_Comm comm_old, int ndims, int *dims, int *periods,
                  int *newrank )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm_old,MPE_CART_MAP_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Cart_map( comm_old, ndims, dims, periods, newrank );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm_old,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Cart_rank - prototyping replacement for MPI_Cart_rank.
    Logs the beginning and ending of the time spent in MPI_Cart_rank calls.
*/
int MPI_Cart_rank( MPI_Comm comm, int *coords, int *rank )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_CART_RANK_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Cart_rank( comm, coords, rank );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Cart_shift - prototyping replacement for MPI_Cart_shift.
    Logs the beginning and ending of the time spent in MPI_Cart_shift calls.
*/
int MPI_Cart_shift( MPI_Comm comm, int direction, int displ, int *source,
                    int *dest )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_CART_SHIFT_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Cart_shift( comm, direction, displ, source, dest );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

int   MPI_Cart_sub( comm, remain_dims, comm_new )
MPI_Comm comm;
int * remain_dims;
MPI_Comm * comm_new;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Cart_sub - prototyping replacement for MPI_Cart_sub
    Log the beginning and ending of the time spent in MPI_Cart_sub calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm,MPE_CART_SUB_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Cart_sub( comm, remain_dims, comm_new );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTRACOMM(comm,*comm_new,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

/*
    MPI_Cartdim_get - prototyping replacement for MPI_Cartdim_get.
    Logs the beginning and ending of the time spent in MPI_Cartdim_get calls.
*/
int MPI_Cartdim_get( MPI_Comm comm, int *ndims )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_CARTDIM_GET_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Cartdim_get( comm, ndims );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Dims_create - prototyping replacement for MPI_Dims_create.
    Logs the beginning and ending of the time spent in MPI_Dims_create calls.
    No communicator is involved, so the state is logged against
    MPE_COMM_NULL.
*/
int MPI_Dims_create( int nnodes, int ndims, int *dims )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(MPE_COMM_NULL,MPE_DIMS_CREATE_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Dims_create( nnodes, ndims, dims );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(MPE_COMM_NULL,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

int   MPI_Graph_create( comm_old, nnodes, index, edges, reorder, comm_graph )
MPI_Comm comm_old;
int nnodes;
int * index;
int * edges;
int reorder;
MPI_Comm * comm_graph;
{
  int   returnVal;
  MPE_LOG_STATE_DECL
  MPE_LOG_COMM_DECL
  MPE_LOG_THREADSTM_DECL

/*
    MPI_Graph_create - prototyping replacement for MPI_Graph_create
    Log the beginning and ending of the time spent in MPI_Graph_create calls.
*/

  MPE_LOG_THREADSTM_GET
  MPE_LOG_THREAD_LOCK
  MPE_LOG_STATE_BEGIN(comm_old,MPE_GRAPH_CREATE_ID)
  MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

  returnVal = PMPI_Graph_create( comm_old, nnodes, index, edges, reorder,
                                 comm_graph );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

  MPE_LOG_THREAD_LOCK
  MPE_LOG_INTRACOMM(comm_old,*comm_graph,CLOG_COMM_INTRA_CREATE)

  MPE_LOG_STATE_END(comm_old,NULL)
  MPE_LOG_THREAD_UNLOCK

  return returnVal;
}

/*
    MPI_Graph_get - prototyping replacement for MPI_Graph_get.
    Logs the beginning and ending of the time spent in MPI_Graph_get calls.
*/
int MPI_Graph_get( MPI_Comm comm, int maxindex, int maxedges, int *index,
                   int *edges )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_GRAPH_GET_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Graph_get( comm, maxindex, maxedges, index, edges );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Graph_map - prototyping replacement for MPI_Graph_map.
    Logs the beginning and ending of the time spent in MPI_Graph_map calls.
*/
int MPI_Graph_map( MPI_Comm comm_old, int nnodes, int *index, int *edges,
                   int *newrank )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm_old,MPE_GRAPH_MAP_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Graph_map( comm_old, nnodes, index, edges, newrank );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm_old,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Graph_neighbors - prototyping replacement for MPI_Graph_neighbors.
    Logs the beginning and ending of the time spent in MPI_Graph_neighbors
    calls.
*/
int MPI_Graph_neighbors( MPI_Comm comm, int rank, int maxneighbors,
                         int *neighbors )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_GRAPH_NEIGHBORS_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Graph_neighbors( comm, rank, maxneighbors, neighbors );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Graph_neighbors_count - prototyping replacement for
    MPI_Graph_neighbors_count.  Logs the beginning and ending of the time
    spent in MPI_Graph_neighbors_count calls.
*/
int MPI_Graph_neighbors_count( MPI_Comm comm, int rank, int *nneighbors )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_GRAPH_NEIGHBORS_COUNT_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Graph_neighbors_count( comm, rank, nneighbors );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Graphdims_get - prototyping replacement for MPI_Graphdims_get.
    Logs the beginning and ending of the time spent in MPI_Graphdims_get
    calls.
*/
int MPI_Graphdims_get( MPI_Comm comm, int *nnodes, int *nedges )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_GRAPHDIMS_GET_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Graphdims_get( comm, nnodes, nedges );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Topo_test - prototyping replacement for MPI_Topo_test.
    Logs the beginning and ending of the time spent in MPI_Topo_test calls.
*/
int MPI_Topo_test( MPI_Comm comm, int *top_type )
{
    int returnVal;
    MPE_LOG_STATE_DECL
    MPE_LOG_THREADSTM_DECL

    MPE_LOG_THREADSTM_GET
    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_BEGIN(comm,MPE_TOPO_TEST_ID)
    MPE_LOG_THREAD_UNLOCK

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_OFF
#endif

    returnVal = PMPI_Topo_test( comm, top_type );

#if defined( MAKE_SAFE_PMPI_CALL )
    MPE_LOG_ON
#endif

    MPE_LOG_THREAD_LOCK
    MPE_LOG_STATE_END(comm,NULL)
    MPE_LOG_THREAD_UNLOCK

    return returnVal;
}

/*
    MPI_Pcontrol - profiling control.
    level = 1 turns on tracing, level = 0 turns it off; the value is stored
    directly in is_mpilog_on.

    Still to do: in some cases, must log communicator operations even if
    logging is off.
*/
int MPI_Pcontrol( const int level, ... )
{
    is_mpilog_on = level;
#ifdef HAVE_STDARG_H
    /* Some compilers are unhappy if routines with stdargs (...) don't
       include va_start/end */
    {
        va_list ap;
        va_start( ap, level );
        va_end( ap );
    }
#endif
    return MPI_SUCCESS;
}

#ifdef HAVE_MPI_IO
#include "log_mpi_io.c"
#endif

#ifdef HAVE_MPI_RMA
#include "log_mpi_rma.c"
#endif

#ifdef HAVE_MPI_SPAWN
#include "log_mpi_spawn.c"
#endif
