#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifdef ARCH_MIC
#   include <immintrin.h>
#   include <zmmintrin.h>
#endif
#ifdef OMP_BARRIER
#   include <omp.h>
#endif
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <sys/times.h>
#include "barrier.h"

/* Build-configuration identification strings.  Exactly one macro from each
   group below is expected to be defined; the selected branch supplies the
   string literal that completes the initializer.  With none (or more than
   one) defined the translation unit fails to compile. */
const char ARCH_STR [ ] =
#ifdef ARCH_MIC
    "mic";
#endif
#ifdef ARCH_X86_64
    "x86_64";
#endif
#ifdef ARCH_ARMV7L
    "armv7l";
#endif

/* Benchmark flavour being built (sanity check / pure barrier / load
   imbalance). */
const char BENCH_STR [ ] =
#ifdef SANITY_BENCHMARK
    "sanity";
#endif
#ifdef PURE_BENCHMARK
    "pure";
#endif
#ifdef LDIMBL_BENCHMARK
    "ldimbl";
#endif

/* Barrier implementation selected for this build. */
const char * BARRIER_STR =
#ifdef PTHREAD_BARRIER
    "pthread";
#endif
#ifdef SR_BARRIER
    "sr";
#endif
#ifdef CTRGS_BARRIER
    "ctrgs";
#endif
#ifdef CTRLS_BARRIER
    "ctrls";
#endif
#ifdef SNZI_BARRIER
    "snzi";
#endif
#ifdef DSMN_BARRIER
    "dsmn";
#endif
#ifdef STNGS_BARRIER
    "stngs";
#endif
#ifdef STNLS_BARRIER
    "stnls";
#endif
#ifdef DTNGS_BARRIER
    "dtngs";
#endif
#ifdef DTNLS_BARRIER
    "dtnls";
#endif
#ifdef OMP_BARRIER
    "omp";
#endif

/* NOTE(review): ITP_BARRIER sits apart from the group above; if it were
   defined together with another *_BARRIER macro the initializer would get
   two literals and fail to compile -- confirm the build system keeps these
   mutually exclusive. */
#ifdef ITP_BARRIER
    "itp_anonymous";
#endif

/* Busy-wait (spinning) policy used inside the barriers. */
const char SPINNING_STR [ ] =
#ifdef SPIN_SPINNING
    "spin";
#endif
#ifdef HWYIELD_SPINNING
    "hwyield";
#endif
#ifdef PTYIELD_SPINNING
    "ptyield";
#endif
#ifdef PAUSE_SPINNING
    "pause";
#endif
#ifdef WFE_SPINNING
    "wfe";
#endif

/* Host name reported with the results; assigned elsewhere in the file. */
static const char * HOSTNAME_STR;

/* Experiment identifier reported with the results; assigned elsewhere. */
static const char * EXP_ID_STR;

/* Presumably enables interpolation of tree-barrier radix values between
   measured points -- TODO confirm against the code that reads it. */
static bool INTERPOLATE_RADIX;

/* One barrier object per benchmark barrier instance, per implementation. */
#if defined( PTHREAD_BARRIER) || defined( ITP_BARRIER)
static pthread_barrier_t bar_pthreadBarrier [ BARRIERS_MAX_NUM ];
#endif

#ifdef SR_BARRIER
static sr_barrier_t bar_srBarrier [ BARRIERS_MAX_NUM ];
#endif

#ifdef TREE_BARRIER
static tree_barrier_t bar_treeBarrier [ BARRIERS_MAX_NUM ];
#endif

#ifdef SNZI_BARRIER
static snzi_barrier_t bar_snziBarrier [ BARRIERS_MAX_NUM ];
#endif

#ifdef DSMN_BARRIER
static dsmn_barrier_t bar_dsmnBarrier [ BARRIERS_MAX_NUM ];
#endif

#ifdef ITP_BARRIER
/* NOTE(review): not static, unlike the other barrier arrays -- confirm
   whether external linkage is actually required. */
BARRIER * bar_itpBarrier[ BARRIERS_MAX_NUM ] ;
#endif

/**
 * Online cpu set
 */
static cpu_set_t bar_onlineCpuSet;

/**
 * Array for sanity testing of barriers semantics
 */
static int bar_TestArray [ THREADS_MAX_NUM ];

/**
 * Assertion helper: checks the condition unless assertions were compiled
 * out with NASSERT.
 */
static inline void
bar_Assert( int i)
{
#ifdef NASSERT
    (void) i;
#else
    assert( i);
#endif
}

/**
 * Report an unrecoverable internal error (source location of the caller)
 * on stderr and terminate the process with exit code 1.  Never returns.
 *
 * Fix: fileName is now const-qualified -- every call site passes the
 * __FILE__ string literal, which must never be written through.
 */
static inline void
bar_InternalError( const char * fileName, 
                   unsigned lineNo)
{
    fprintf( stderr, "Internal error %s:%u !\n", fileName, lineNo);
    exit( 1);
}

#ifdef ARCH_X86_FAMILY
/**
 * Execute CPUID with EAX = in_eax and return EAX/EBX/ECX/EDX through the
 * out-parameters.
 *
 * Fix: the original listed the pointer variables themselves as asm outputs
 * ("=a"( eax) etc.), so results were written into local pointer copies and
 * never reached the caller; the outputs now dereference the pointers.  The
 * manual pushq/popq %rbx dance (with a 64-bit movq into a 32-bit operand)
 * is replaced by the "=b" constraint, letting the compiler manage RBX.
 * NOTE(review): ECX is not set on input, so sub-leaf-sensitive CPUID
 * leaves get whatever ECX holds -- unchanged from the original contract.
 */
static void
x86_Cpuid( int in_eax, int * eax, int * ebx, int * ecx, int * edx )
{
  asm volatile( "cpuid\n\t"
                : "=a"( *eax), "=b"( *ebx), "=c"( *ecx), "=d"( *edx)
                : "a"( in_eax)
                : "cc" );
}
#endif

/**
 * Return the current x86 privilege ring: the low bits of the CS selector
 * masked with X86_RINGS_MASK (0 == kernel, 3 == user).
 * On ARM (or any other architecture) this aborts via bar_InternalError( ),
 * and no value is returned on that path.
 */
static inline int
sys_GetPrivilegeLevel( )
{

#   ifdef ARCH_ARM_FAMILY
    bar_InternalError( __FILE__, __LINE__);
#   else
#       ifdef ARCH_X86_FAMILY
    {
        short int csReg;
        short int ringMask = X86_RINGS_MASK;

        asm volatile( " mov %%cs, %0\n\t"
                      : "=r"( csReg)
                      );

        return csReg & ringMask;
    }
#       else
    bar_InternalError( __FILE__, __LINE__);
#       endif
#   endif
}

#ifdef ARCH_STORE_NR_NGO
#   ifdef ARCH_MIC
/* MIC "no-read, non-globally-ordered" stores: each call broadcasts the
   scalar into a full 512-bit vector and writes it with storenrngo, so the
   target line is written without a read-for-ownership and without global
   ordering.  NOTE(review): every call writes 64 bytes -- addr is presumably
   cache-line sized and aligned; confirm at the call sites. */
static inline void
store_nr_ngo_int( void * addr,
                  int data)
{
    __m512i siVec = _mm512_set1_epi32( data);

    _mm512_storenrngo_ps( addr, _mm512_castsi512_ps( siVec));
}

/* bool variant: widens the flag to int and reuses the int store. */
static inline void
store_nr_ngo_bool( void * addr,
                   bool data)
{
    store_nr_ngo_int( addr, data);
}

/* Widest-machine-atom variant.
   NOTE(review): the assertion compares a byte count (sizeof) against a
   constant named ..._IN_BITS -- confirm the units match. */
static inline void
store_nr_ngo_int_max_ma( void * addr, 
                         int_max_ma_t data)
{
    __m512i siVec = _mm512_set1_epi64( (__int64)data);

    bar_Assert( sizeof( __int64) <= HW_MAX_MA_SIZE_IN_BITS);
    _mm512_storenrngo_ps( addr, _mm512_castsi512_ps( siVec));
}
#   endif /* ARCH_MIC */
#endif /* ARCH_STORE_NR_NGO */

#ifdef ARCH_STORE_NR
#   ifdef ARCH_MIC
/* MIC "no-read" stores: like the NGO variants above but using storenr,
   which keeps global store ordering while still avoiding the
   read-for-ownership of the target line. */
static inline void
store_nr_int( void * addr,
              int data)
{
    __m512i siVec = _mm512_set1_epi32( data);

    _mm512_storenr_ps( addr, _mm512_castsi512_ps( siVec));
}

/* bool variant: widens the flag to int and reuses the int store. */
static inline void
store_nr_bool( void * addr,
               bool data)
{
    store_nr_int( addr, data);
}

/* Widest-machine-atom variant.
   NOTE(review): as above, sizeof (bytes) is compared against an ..._IN_BITS
   constant -- confirm the units. */
static inline void
store_nr_int_max_ma( void * addr, 
                     int_max_ma_t data)
{
    __m512i siVec = _mm512_set1_epi64( (__int64)data);

    bar_Assert( sizeof( __int64) <= HW_MAX_MA_SIZE_IN_BITS);
    _mm512_storenr_ps( addr, _mm512_castsi512_ps( siVec));
}
#   endif /* ARCH_MIC */
#endif /* ARCH_STORE_NR */

/**
 * Full memory fence: "dmb" on the ARM family, "mfence" on non-MIC x86.
 * On MIC no fence instruction is emitted at all -- presumably relying on
 * that core's memory ordering; TODO confirm.  Any other architecture
 * aborts via bar_InternalError( ).
 */
inline static void
memory_barrier( )
{
#   ifdef ARCH_ARM_FAMILY
        asm volatile ( "dmb" : : : "memory");
#   else
#       if defined( ARCH_X86_FAMILY) && !defined( ARCH_MIC)
        asm volatile ( "mfence" : : : "memory");
#       else
#           if !defined( ARCH_MIC)
            bar_InternalError( __FILE__, __LINE__);
#           endif
#       endif
#   endif
}

#ifdef YIELD_SPINNING
/**
 * One backoff step for the yield-based spinning policies.
 *
 * HWYIELD_SPINNING uses the hardware hint: "yield" on ARM; on x86 the code
 * emits "hlt" -- NOTE(review): hlt is a privileged instruction and faults
 * in user mode, so this presumably only works when the benchmark runs in
 * ring 0 (cf. sys_GetPrivilegeLevel( )); confirm this is intentional.
 * PTYIELD_SPINNING yields the calling thread to the OS scheduler.
 */
inline static void
spinning_thread_yield( )
{
#ifdef HWYIELD_SPINNING
#   ifdef ARCH_ARM_FAMILY
    asm volatile ( "yield\n\t");
#   else
#       ifdef ARCH_X86_FAMILY
    asm volatile ( "hlt\n\t");
#       else
    bar_InternalError( __FILE__, __LINE__);
#       endif
#   endif
#endif /* HWYIELD_SPINNING */
#ifdef PTYIELD_SPINNING
    pthread_yield( );
#endif /* PTYIELD_SPINNING */
}
#endif

#ifdef PAUSE_SPINNING
/**
 * One backoff step for the pause-based spinning policy: "pause" on x86.
 * NOTE(review): on ARM this emits "wfi", which is privileged on most
 * profiles and sleeps until an interrupt -- confirm the intended execution
 * mode, as with the "hlt" case in spinning_thread_yield( ).
 */
inline static void
spinning_pause( )
{
#   ifdef ARCH_ARM_FAMILY
    asm volatile ( "wfi\n\t");
#   else
#       ifdef ARCH_X86_FAMILY
    asm volatile ( "pause\n\t");
#       else
    bar_InternalError( __FILE__, __LINE__);
#       endif
#   endif
}
#endif /* PAUSE_SPINNING */

#ifdef WFE_SPINNING
/**
 * Arm the wait-for-event mechanism on the given address before checking
 * the spin condition.  On ARM nothing is needed (WFE uses the per-core
 * event register).  On x86 with MONITOR/MWAIT support this executes
 * MONITOR on mem with extensions (ECX) and hints (EDX) both zero, so a
 * later MWAIT wakes when the monitored line is written.
 */
inline static void
spinning_thread_wfe_init( void * mem)
{
#   ifdef ARCH_ARM_FAMILY
#   else
#       if defined( ARCH_X86_FAMILY) && defined( ARCH_X86_MONITOR_MWAIT)
    {
        asm volatile( " mov  %0, %%rax\n\t"
                      " mov  $0x0, %%rcx\n\t"
                      " mov  $0x0, %%rdx\n\t"
                      " monitor %%rax, %%rcx, %%rdx\n\t"
                      :
                      : "r"( mem)
                      : "%rax", "%rcx", "%rdx");
    }
#       else
    bar_InternalError( __FILE__, __LINE__);
#       endif
#   endif
}

/**
 * Block until an event: "wfe" on ARM; MWAIT on x86 after a matching
 * MONITOR in spinning_thread_wfe_init( ).
 *
 * Fix: the original x86 path used "movl 0, %%eax" / "movl 0, %%ecx",
 * which are loads from absolute address 0 (an immediate needs the "$"
 * prefix, cf. the MONITOR setup above) and would fault at the first
 * MWAIT.  EAX (hints) and ECX (extensions) are now cleared with
 * immediates before MWAIT.
 */
inline static void
spinning_thread_wfe_wait( )
{
#   ifdef ARCH_ARM_FAMILY
    asm volatile ( "wfe\n");
#   else
#       if defined( ARCH_X86_FAMILY) && defined( ARCH_X86_MONITOR_MWAIT)
    asm volatile( " movl  $0, %%eax\n\t"
                  " movl  $0, %%ecx\n\t"
                  " mwait\n"
                  :
                  :
                  : "%eax", "%ecx");
#       else
    bar_InternalError( __FILE__, __LINE__);
#       endif
#   endif
}

/**
 * Wake threads blocked in spinning_thread_wfe_wait( ): "sev" on ARM.
 * On x86 with MONITOR/MWAIT nothing is emitted -- the store to the
 * monitored line itself wakes the waiters.
 */
inline static void
spinning_thread_wfe_send( )
{
#   ifdef ARCH_ARM_FAMILY
    asm volatile ( "sev\n");
#   else
#       if defined( ARCH_X86_FAMILY) && defined( ARCH_X86_MONITOR_MWAIT)
#       else
    bar_InternalError( __FILE__, __LINE__);
#       endif
#   endif
}
#endif

/**
 * Load-linked: read *x and open an exclusive monitor on it ("ldrex" on
 * ARM), to be paired with store_conditional( ).  On configurations
 * without LL/SC support this aborts via bar_InternalError( ) and the
 * trailing return 0 is never meaningfully reached.
 */
inline static int
load_linked( volatile int * x)
{
    int val = 0;
#ifdef ARCH_LL_SC
#   ifdef ARCH_ARM_FAMILY

    asm volatile ("ldrex %0, [%1]\n\t"
       : "=r"( val)
       : "r"( x)
       : "memory");

    return val;
#   else /* ARCH_ARM_FAMILY */
    bar_InternalError( __FILE__, __LINE__);

    return val;
#   endif /* !ARCH_ARM_FAMILY */
#else /* ARCH_LL_SC */
    bar_InternalError( __FILE__, __LINE__);

    return val;
#endif /* !ARCH_LL_SC */
}

/**
 * Store-conditional: write newVal to *x only if the exclusive monitor
 * opened by load_linked( ) is still held ("strex" on ARM).  Returns TRUE
 * on success (strex writes 0 on success, hence the negation).
 * NOTE(review): the "r0" clobber looks spurious -- the asm template does
 * not reference r0; confirm before removing.
 */
inline static bool
store_conditional( volatile int * x, int newVal)
{
#ifdef ARCH_LL_SC
#   ifdef ARCH_ARM_FAMILY
    int res;

    asm volatile ("strex %0, %1, [%2]\n\t"
       : "=r"( res)
       : "r"( newVal), "r"( x)
       : "memory", "r0" );
    
    return !res;
#   else /* ARCH_ARM_FAMILY */
    bar_InternalError( __FILE__, __LINE__);

    return RETURN_FAIL;
#   endif /* !ARCH_ARM_FAMILY */
#else /* ARCH_LL_SC */
    bar_InternalError( __FILE__, __LINE__);

    return RETURN_FAIL;
#endif /* !ARCH_LL_SC */
}

/**
 * Atomically replace *x with newVal iff *x currently equals oldVal.
 * Returns TRUE on success, FALSE when the comparison failed.  Implemented
 * with lock cmpxchgl on x86, or emulated with an LL/SC loop; other
 * configurations abort via bar_InternalError( ).
 *
 * Fix: under ARCH_CAS on a non-x86 architecture the original fell off the
 * end of a non-void function (undefined behavior); that configuration now
 * aborts explicitly, matching the other unsupported paths.
 */
inline static bool
compare_and_swap( volatile int * x, int oldVal, int newVal)
{
#ifdef ARCH_CAS
#   ifdef ARCH_X86_FAMILY
    int res;

    __asm__ __volatile__ (
        "  lock\n\t"
        "  cmpxchgl %2,%1\n\t"
        "  sete %%al\n\t"
        "  movzbl %%al, %0\n\t"
        : "=q" (res), "=m" (*x)
        : "r" (newVal), "m" (*x), "a" (oldVal)
        : "memory");

    return (bool)res;
#   else /* ARCH_X86_FAMILY */
    bar_InternalError( __FILE__, __LINE__);

    return RETURN_FAIL;
#   endif /* !ARCH_X86_FAMILY */
#else /* ARCH_CAS */
#   ifdef ARCH_LL_SC
    {
        bool noSucc;
        
        do 
        {
            /* The LL/SC pair retries only on monitor loss; a genuine
               value mismatch returns FALSE immediately. */
            if ( load_linked( x) != oldVal )
            {
                return FALSE;
            } 
            noSucc = store_conditional( x, newVal);
        } while ( !noSucc );

        return TRUE;
    }
#   else /* ARCH_LL_SC */
    bar_InternalError( __FILE__, __LINE__);

    return RETURN_FAIL;
#   endif /* !ARCH_LL_SC */
#endif /* !ARCH_CAS */
}

/**
 * Atomically add inc to *variable and return the PREVIOUS value
 * (fetch-and-add).  Uses lock xaddl on x86, an LL/SC loop, or a CAS loop;
 * other configurations abort via bar_InternalError( ).
 *
 * Fixes: the CAS fallback computed the update but never returned the
 * fetched value, and the unsupported-architecture paths fell off the end
 * of a non-void function (undefined behavior); all paths now return
 * explicitly.
 */
inline static int
fetch_and_add( volatile int * variable, 
               int inc)
{
#ifdef ARCH_FETCH_AND_ADD
#   ifdef ARCH_X86_FAMILY
    asm volatile( "lock; xaddl %0, %1;\n\t"
                  :"=r" (inc)                   /* Output: previous value */
                  :"m" (*variable), "0" (inc)  /* Input */
                  :"memory" );
    return inc;
#   else
    bar_InternalError( __FILE__, __LINE__);

    return 0; /* not reached: bar_InternalError( ) exits */
#   endif
#else /* ARCH_FETCH_AND_ADD */
#   ifdef ARCH_LL_SC
    {
        bool noSucc;
        int val;
        
        do 
        {
            val = load_linked( variable);
            noSucc = store_conditional( variable, val + inc);
        } while ( !noSucc );

        return val;
    }
#   else /* ARCH_LL_SC */
#       ifdef ARCH_CAS
        {
            bool succ = FALSE;
            int val = 0;
            
            do 
            {
                val = *variable;
                succ = compare_and_swap( variable, val, val + inc);
            } while ( !succ );

            return val;
        }
#       else
        bar_InternalError( __FILE__, __LINE__);

        return 0; /* not reached: bar_InternalError( ) exits */
#       endif
#   endif /* !ARCH_LL_SC */
#endif /* !ARCH_FETCH_AND_ADD */
}

#ifdef SR_BARRIER
/**
 * Prepare a sense-reversing barrier for threadsNum participants: the
 * arrival counter starts at threadsNum and the shared sense at 0.
 * The dummy parameter keeps the signature uniform with the other
 * barrier init functions.
 */
static void
sr_barrier_init( sr_barrier_t * sr_barrier,
                 void * dummy,
                 int threadsNum,
                 int barrierId)
{
    sr_barrier->barrierId = barrierId;
    sr_barrier->threadsNum = threadsNum;
    sr_barrier->count = threadsNum;
    sr_barrier->sense = 0;
}

/**
 * Publish a new sense flag, preferring the architecture's no-read store
 * variants when available.
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
static inline void
sr_barrier_set_sense( volatile bool * addr,
                      bool sense)
{
#if defined( ARCH_STORE_NR_NGO)
    store_nr_ngo_bool( (void *) addr, sense);
#elif defined( ARCH_STORE_NR)
    store_nr_bool( (void *) addr, sense);
#else
    (* addr) = sense;
#endif
}

/**
 * Reset the barrier's arrival counter for the next episode.
 *
 * Fix: the address parameter was declared volatile bool *, although the
 * only caller passes the address of the int counter field; on the plain
 * store path an assignment through a bool lvalue would collapse the
 * thread count to 0/1.  The parameter is now volatile int *, matching
 * tree_barrier_set_count( ).
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
static inline void
sr_barrier_set_count( volatile int * addr,
                      int count)
{
#ifdef ARCH_STORE_NR_NGO
    store_nr_ngo_int( (void *) addr, count);
#else
#   ifdef ARCH_STORE_NR
    store_nr_int( (void *) addr, count);
#   else
    (* addr) = count;
#   endif 
#endif
}


/**
 * Sense-reversing centralized barrier episode.  Each arriving thread
 * decrements the shared counter; the last one (currCount == 1) resets the
 * counter and flips the shared sense to release the others, who spin
 * until the shared sense matches their thread-local sense.  Every thread
 * then inverts its thread-local sense for the next episode.
 */
static inline void
sr_barrier_wait( sr_barrier_t * sr_barrier,
                 tls_Data_t * tlsData)
{
    int * senseP = & tlsData->sense[ sr_barrier->barrierId ].data;
    int currCount = fetch_and_add( & (sr_barrier->count), -1);
    int currSense = *senseP;

    if ( currCount == 1 )
    {
        /* Last arriver: reset the count before releasing the waiters. */
        sr_barrier_set_count( & (sr_barrier->count), sr_barrier->threadsNum);
        sr_barrier_set_sense( & (sr_barrier->sense), currSense);
#ifdef WFE_SPINNING
        spinning_thread_wfe_send( );
#endif
    } else
    {
#ifdef WFE_SPINNING
        if ( currSense != sr_barrier->sense )
        {
            spinning_thread_wfe_init( (void *) & sr_barrier->sense);
        }
#endif
        while ( currSense != sr_barrier->sense ) 
        {
#ifdef YIELD_SPINNING
            spinning_thread_yield( );
#endif
#ifdef PAUSE_SPINNING
            spinning_pause( );
#endif
#ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
#endif
        };
    }
    sr_barrier_set_sense( senseP, !currSense);
}
#endif /* SR_BARRIER */

#ifdef TREE_BARRIER
#   ifdef TRNM_BARRIER
static int trnm_PartIdMap [ THREADS_MAX_NUM ] [ CEIL_LOG2_THREADS_MAX_NUM ];

/**
 * Specialized (see assertions) power function
 */
/**
 * Specialized (see assertions) power function: raises val to the given
 * non-negative power by repeated multiplication.  Valid only for the
 * barrier-tree parameter ranges enforced by the assertions.
 */
static inline int
math_pow( int val, int power)
{
    int result;

    bar_Assert( (val > 1) && (val <= MA_GRANULARITY));
    bar_Assert( (power >= 0) && (power <= CEIL_LOG2_THREADS_MAX_NUM));
    for ( result = 1; power > 0; power-- )
    {
        result *= val;
    }

    return result;
}

/**
 * Precompute each thread's tournament slot per tree level: entry [i][j]
 * is digit j of thread id i written in base radix -- the child slot the
 * thread occupies at tier j.  With MCS_TREE_VARIATION the radix is
 * widened by one and the digit shifted down by one, so the designated
 * winner gets slot -1 (presumably arriving in place rather than through
 * a child slot -- TODO confirm against the wait path).
 */
static void
trnm_InitPartIdMap( int threadsNum, int radix, int depth)
{
    int i, j;

#   ifdef MCS_TREE_VARIATION
    radix++;
#   endif
    for ( i = 0; i < threadsNum; i++ )
    {
        for ( j = 0; j < depth; j++ )
        {
            int radixInPower = math_pow( radix, j);
            int partId = (i / radixInPower) % radix;
            
#   ifdef MCS_TREE_VARIATION
            partId--;
#   endif
            trnm_PartIdMap [ i ] [ j ] = partId;
        }
    }
}
#   endif /* TRNM_BARRIER */

/**
 * Take the next internal node from the barrier's inode pool, reset the
 * fields used by the compiled configuration (local sense, combining
 * count, tournament bit-vectors), and link it to its parent (NULL for
 * the root).
 */
static void
tree_inode_init( tree_barrier_t * tree_barrier,
                 tree_node_t ** child,
                 tree_node_t * parent)
{
    *child  = & tree_barrier->inodes [ tree_barrier->inodesNum++ ];
#ifdef T_LOCAL_SENSE
    (*child)->sense = FALSE;
#endif
#ifdef COMBINED_BARRIER
    /* Counts are raised as children are actually attached in
       tree_barrier_build_tree( ). */
    (*child)->count = 0;
    (*child)->threadsNum = 0;
#endif
#ifdef TRNM_BARRIER
    (*child)->trnmDataInit.full = TRNM_FALSE;
#   ifdef TRNM_STAT_WIN
    (*child)->trnmDataCurr.full = TRNM_FALSE;
#   endif
#   ifdef TRNM_DYNM_WIN
    (*child)->trnmDataCurr [ PARITY_ODD ].full = TRNM_FALSE;
    (*child)->trnmDataCurr [ PARITY_EVEN ].full = TRNM_FALSE;
#   endif
#endif
    (*child)->parent = parent;
}

/**
 * Recursively build the barrier tree.  At depth 0 the parent becomes a
 * leaf (one per thread, while leaves are still needed); otherwise a new
 * internal node is created and up to radix subtrees are attached.  The
 * COMBINED count and the TRNM bit-vectors are initialized to expect
 * exactly the children actually attached.  With MCS_TREE_VARIATION the
 * winner slot (TRNM_STAT_WIN_ID) is skipped -- the winner presumably
 * arrives in place rather than through a child slot; TODO confirm.
 */
static void
tree_barrier_build_tree( tree_barrier_t * tree_barrier,
                         tree_node_t * parent,
                         int depth)
{
    int i;
    tree_node_t * child;

    if ( depth == 0 )
    {
        if ( tree_barrier->leavesNum < tree_barrier->threadsNum )
        {
            tree_barrier->leaves [ tree_barrier->leavesNum++ ] = parent;
        }
    } else
    {
        tree_inode_init( tree_barrier, & child, parent);
        for (
#   ifdef MCS_TREE_VARIATION
              i = TRNM_STAT_WIN_ID;
#   else
              i = 0;
#   endif
              i < tree_barrier->radix;
              i++ )
        {
            if ( tree_barrier->leavesNum < tree_barrier->threadsNum )
            {
                tree_barrier_build_tree( tree_barrier, child, depth - 1);
#ifdef COMBINED_BARRIER
                child->count++;
                child->threadsNum++;
#endif
#ifdef TRNM_BARRIER
                child->tier = depth - 1;
#   ifdef TRNM_STAT_WIN
#       ifdef MCS_TREE_VARIATION
                if ( i != TRNM_STAT_WIN_ID )
                {
                    child->trnmDataInit.part [ i ] = TRNM_TRUE;
                    child->trnmDataCurr.part [ i ] = TRNM_TRUE;
                }
#       else /* MCS_TREE_VARIATION */
                child->trnmDataInit.part [ i ] = (i == TRNM_STAT_WIN_ID) ? TRNM_FALSE : TRNM_TRUE;
                child->trnmDataCurr.part [ i ] = (i == TRNM_STAT_WIN_ID) ? TRNM_FALSE : TRNM_TRUE;
#       endif /* !MCS_TREE_VARIATION */
#   endif
#   ifdef TRNM_DYNM_WIN
                child->trnmDataInit.part [ i ] = TRNM_TRUE;
                child->trnmDataCurr [ PARITY_ODD ].part [ i ] = TRNM_TRUE;
                child->trnmDataCurr [ PARITY_EVEN ].part [ i ] = TRNM_TRUE;
#   endif
#endif 
            }
        }
    }
}

/**
 * Initialize a radix-ary tree barrier for barrier_count threads: compute
 * the tree depth as ceil(log_radix(barrier_count)) (minimum 1), build
 * the node tree, and (for tournament variants) precompute the per-thread
 * slot map.  The dummy parameter keeps the signature uniform with the
 * other barrier init functions.
 *
 * Fix: removed the unused locals nodes_count and i.
 */
static void
tree_barrier_init( tree_barrier_t * tree_barrier,
                   void * dummy,
                   int barrier_count,
                   int radix,
                   int barrierId)
{
    int depth = 0;

    bar_Assert( barrier_count > 0 && radix > 1);
    tree_barrier->radix = radix;
#ifdef T_GLOBAL_SENSE
    tree_barrier->sense = FALSE;
#endif
    tree_barrier->leavesNum = 0;
    tree_barrier->inodesNum = 0;
    tree_barrier->threadsNum = barrier_count;
    tree_barrier->barrierId = barrierId;
    
    /* A single thread still needs one level so it owns a leaf. */
    if ( barrier_count == 1 )
    {
        depth++;
    }
    while ( barrier_count > 1 )
    {
        depth++;
        barrier_count = (barrier_count + radix - 1) / radix;
    }
    tree_barrier_build_tree( tree_barrier, NULL, depth);
#ifdef TRNM_BARRIER
    trnm_InitPartIdMap( tree_barrier->threadsNum, radix, depth);
#endif
}

/**
 * Publish a new sense flag for the tree barrier, using the no-read store
 * variants when available.  The NGO path is deliberately excluded for the
 * tournament barrier with a global sense (see second FIXME).
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
/* FIXME: Analysis is needed for TRNM_BARRIER and T_GLOBAL_SENSE corner case. */
#if defined( ARCH_STORE_NR_NGO) && (!defined( TRNM_BARRIER) || !defined( T_GLOBAL_SENSE))
static inline void
tree_barrier_set_sense( volatile bool * addr,
                        bool sense)
{
    store_nr_ngo_bool( (void *) addr, sense);
}
#else
static inline void
tree_barrier_set_sense( volatile bool * addr,
                        bool sense)
{
#   ifdef ARCH_STORE_NR
    store_nr_bool( (void *) addr, sense);
#   else
    (* addr) = sense;
#   endif 
}
#endif

/**
 * Reset a node's arrival counter, preferring the architecture's no-read
 * store variants when available.
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
static inline void
tree_barrier_set_count( volatile int * addr,
                        int count)
{
#if defined( ARCH_STORE_NR_NGO)
    store_nr_ngo_int( (void *) addr, count);
#elif defined( ARCH_STORE_NR)
    store_nr_int( (void *) addr, count);
#else
    (* addr) = count;
#endif
}

/**
 * Reset a tournament node's full bit-vector, preferring the
 * architecture's no-read store variants when available.
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
static inline void
tree_barrier_set_full( int_max_ma_vol_t * addr,
                       int_max_ma_t full)
{
#if defined( ARCH_STORE_NR_NGO)
    store_nr_ngo_int_max_ma( (void *) addr, full);
#elif defined( ARCH_STORE_NR)
    store_nr_int_max_ma( (void *) addr, full);
#else
    (* addr) = full;
#endif
}

/**
 * Wait on one node of the combining/tournament tree barrier (recursing
 * toward the root).
 *
 * Combined variant: the last thread to decrement the node count is the
 * winner.  Tournament variants: the winner is either static
 * (TRNM_STAT_WIN, slot id TRNM_STAT_WIN_ID) or dynamic (TRNM_DYNM_WIN,
 * the thread that sees all part flags already cleared); losers clear
 * their part flag and spin.  The winner recurses to the parent node and
 * then releases the waiters by flipping the global or per-node sense.
 * Every thread finally flips its thread-local sense for the next episode.
 *
 * NOTE(review): the winner condition below juxtaposes the
 * COMBINED_BARRIER and TRNM_BARRIER expressions with no operator between
 * them -- these configurations are presumably mutually exclusive in the
 * build system; confirm.
 */
static inline void
tree_barrier_wait( tree_barrier_t * tree_barrier,
                   tls_Data_t * tlsData,
                   tree_node_t * node)
{
    int * senseP = & (tlsData->sense[ tree_barrier->barrierId ].data);
#ifdef COMBINED_BARRIER
    int currCount = fetch_and_add( & (node->count), -1);
#endif
    int currSense = *senseP;

#ifdef TRNM_BARRIER
    int partId = trnm_PartIdMap [ tlsData->threadId ] [ node->tier ];
#   ifdef TRNM_STAT_WIN
    bool isWinner = (partId == TRNM_STAT_WIN_ID);
    int_min_ma_vol_t * partP = & (node->trnmDataCurr.part [ partId ]);
    int_max_ma_vol_t * fullP = & (node->trnmDataCurr.full);
    if ( !isWinner )
    {
        (*partP) = TRNM_FALSE;
    }
#   endif
#   ifdef TRNM_DYNM_WIN
    int_min_ma_vol_t * partP = & (node->trnmDataCurr [ currSense ].part [ partId ]);
    int_max_ma_vol_t * fullP = & (node->trnmDataCurr [ currSense ].full);
    (*partP) = TRNM_FALSE;
#ifdef WFE_SPINNING
    spinning_thread_wfe_send( );
#endif
    /* In case when Intra-Processor Forwarding Is Allowed (as in X86 ISA 8.2.3.5) the load above
       and store below in if statement may lead to all threads going to busy-waiting. */
#ifdef INTRA_PROCESSOR_FORWARDING_ALLOWED
    memory_barrier( );
#endif
#   endif
#endif
    if ( 
#ifdef COMBINED_BARRIER
         currCount == 1
#endif
#ifdef TRNM_BARRIER
#   ifdef TRNM_STAT_WIN
         isWinner == TRUE
#   endif
#   ifdef TRNM_DYNM_WIN
         (*fullP) == TRNM_FALSE
#   endif
#endif
       )
    {
        /* Winner path. */
#ifdef TRNM_BARRIER
#   ifdef TRNM_STAT_WIN
        /* Static winner still waits for all loser part flags to clear. */
#       ifdef WFE_SPINNING
        if ( (*fullP) != TRNM_FALSE ) 
        {
            spinning_thread_wfe_init( (void *) fullP);
        }
#       endif
        while ( (*fullP) != TRNM_FALSE )
        {
#       ifdef YIELD_SPINNING
            spinning_thread_yield( );
#       endif
#       ifdef PAUSE_SPINNING
            spinning_pause( );
#       endif
#       ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
#       endif
        }
#   endif
#endif /* TRNM_BARRIER */
#ifdef T_GLOBAL_SENSE
#   ifdef COMBINED_BARRIER
        node->count = node->threadsNum;
#   endif
#   ifdef TRNM_BARRIER
        tree_barrier_set_full( fullP, node->trnmDataInit.full);
#   endif
#endif
        if ( node->parent )
        {
            tree_barrier_wait( tree_barrier, tlsData, node->parent);
        }
#ifdef T_GLOBAL_SENSE
        tree_barrier_set_sense( & (tree_barrier->sense), currSense);
#endif
#ifdef T_LOCAL_SENSE
#   ifdef COMBINED_BARRIER
        tree_barrier_set_count( & (node->count), node->threadsNum);
#   endif
#   ifdef TRNM_BARRIER
        tree_barrier_set_full( fullP, node->trnmDataInit.full);
#   endif
        tree_barrier_set_sense( & (node->sense), currSense);
#endif
#ifdef WFE_SPINNING
        spinning_thread_wfe_send( );
#endif
    } else
    {
        /* Loser path: spin on the global or per-node sense flag. */
#ifdef T_GLOBAL_SENSE
#   ifdef WFE_SPINNING
        if ( currSense != tree_barrier->sense ) 
        {
            spinning_thread_wfe_init( (void *) & tree_barrier->sense);
        }
#   endif
        while ( currSense != tree_barrier->sense ) 
#endif
#ifdef T_LOCAL_SENSE
#   ifdef WFE_SPINNING
        if ( currSense != node->sense ) 
        {
            spinning_thread_wfe_init( (void *) & node->sense);
        }
#   endif
        while ( currSense != node->sense ) 
#endif
        {
#ifdef YIELD_SPINNING
            spinning_thread_yield( );
#endif
#ifdef PAUSE_SPINNING
            spinning_pause( );
#endif
#ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
#endif
        };
#if defined( TRNM_BARRIER) && defined( TRNM_DYNM_WIN)
        tree_barrier_set_full( fullP, node->trnmDataInit.full);
#endif
    }
    tree_barrier_set_sense( senseP, !currSense);
}
#endif /* TREE_BARRIER */

#ifdef SNZI_BARRIER

static void snzi_node_arrive( snzi_node_t * node);
static void snzi_node_depart( snzi_node_t * node);

/**
 * Arrive at an intermediate SNZI node (presumably after Ellen et al.,
 * "SNZI: Scalable NonZero Indicators" -- TODO confirm).  Increment the
 * node's surplus count; on a zero-to-nonzero transition set the announce
 * bit (f.a) and version (f.v), propagate the arrival to the parent, then
 * collapse the announce state to a plain count of 1.  Arrivals propagated
 * to the parent but lost to a racing CAS are undone at the end.
 */
static void
snzi_inode_arrive( snzi_inode_t * inode)
{
    bool succArr = FALSE;
    int undoArr = 0;

    while ( !succArr )
    {
        snzi_x_t x;

        x.d = inode->X.d;
        if ( (x.f.c >= 1) && (x.f.a == 0) )
        {
            /* Fast path: node already nonzero, just bump the count. */
            snzi_x_t y;
                
            y.d = x.d;
            y.f.c++;
            if ( compare_and_swap( (int *) & (inode->X.d), x.d, y.d) )
            {
                succArr = TRUE;
            }
        } 
        if ( (x.f.c == 0) && (x.f.a == 0) )
        {
            /* Zero-to-nonzero: announce the transition first. */
            snzi_x_t y = x;

            y.f.a = 1;
            y.f.v++;
            if ( compare_and_swap( & (inode->X.d), x.d, y.d) )
            {
                succArr = TRUE;
                x.f.a = 1;
                x.f.v++;
            }
        }
        if ( x.f.a == 1 )
        {
            snzi_x_t y;

            snzi_node_arrive( (snzi_node_t *) inode->parent);
            y = x;
            y.f.a = 0;
            y.f.c = 1;
            if ( ! compare_and_swap( & (inode->X.d), x.d, y.d) )
            {
                /* Someone else finished the announce; our parent arrival
                   is surplus and must be undone below. */
                undoArr++;
            }
        }
        
    }
    while ( undoArr )
    {
        snzi_node_depart( (snzi_node_t *) inode->parent) ;
        undoArr--;
    }
}

/**
 * Depart from an intermediate SNZI node: decrement the surplus count with
 * a CAS retry loop; when this departure drops the count from 1 to 0 the
 * departure is propagated to the parent node.
 */
static void
snzi_inode_depart( snzi_inode_t * inode)
{
    for ( ; ; )
    {
        snzi_x_t x;
        snzi_x_t y;

        x.d = inode->X.d;
        y = x;
        y.f.c--;
        if ( compare_and_swap( & (inode->X.d), x.d, y.d) )
        {
            if ( x.f.c == 1 )
            {
                snzi_node_depart( inode->parent);
            }
            return;
        }
    }
}

/**
 * Arrive at the SNZI root: bump the count with a CAS loop; on a
 * zero-to-nonzero transition set the announce bit and bump the version,
 * then raise the indicator I and clear the announce bit (the final CAS
 * may legitimately fail if a racing departure already moved the state
 * on -- the version number protects against stale clears).
 */
static void
snzi_root_arrive( snzi_root_t * root)
{
    snzi_x_t y;

    y.d = 0;
    for ( ; ; )
    {
        snzi_x_t x;
        
        x.d = root->X.d;
        if ( x.f.c == 0 )
        {
            y.f.c = 1;
            y.f.a = 1;
            y.f.v = x.f.v + 1;
        } else
        {
            y.f.c = x.f.c + 1;
            y.f.a = x.f.a;
            y.f.v = x.f.v;
        }
        if ( compare_and_swap( & (root->X.d), x.d, y.d) )
            break;
    }
    if ( y.f.a )
    {
        snzi_x_t z;

        z.d = y.d;
        root->I = TRUE;
        z.f.a = FALSE;
        compare_and_swap( & (root->X.d), y.d, z.d);
    }
}

/**
 * Depart from the SNZI root: decrement the count with a CAS loop; the
 * departer that drops the count below 2 clears the indicator I -- unless
 * the version number shows a newer arrival epoch already took over, in
 * which case the clear is abandoned.  Uses LL/SC on I when available,
 * otherwise a CAS approximation.
 *
 * NOTE(review): the initial "x = root->X; y = x;" struct reads are
 * immediately overwritten through the .d members below; they only
 * pre-fill the unions -- confirm no torn-read issue is hidden here.
 */
static void
snzi_root_depart( snzi_root_t * root)
{
    for ( ; ; )
    {
        snzi_x_t x = root->X;
        snzi_x_t y = x;
        
        x.d = root->X.d;
        y.d = x.d;
       
        y.f.c = x.f.c - 1;
        y.f.a = FALSE;
        y.f.v = x.f.v;
        if ( compare_and_swap( & (root->X.d), x.d, y.d) )
        {
            if ( x.f.c >= 2 )
                return;
            for ( ; ; )
            {
#ifdef ARCH_LL_SC
                load_linked( & (root->I));
                if ( root->X.f.v != x.f.v)
                    return;
                if ( store_conditional( & (root->I), FALSE) )
                {
#    ifdef WFE_SPINNING
                    spinning_thread_wfe_send( );
#    endif
                    return;
                }
#else
                /* this code is CAS approximation of the code below */
                atomic_Data_t I;
               
                I = root->I;
                if ( root->X.f.v != x.f.v)
                    return;
                if ( compare_and_swap( & (root->I), I, FALSE) )
                {
#    ifdef WFE_SPINNING
                    spinning_thread_wfe_send( );
#    endif
                    return;
                }
#endif
            }
        }
    }
}

/**
 * Dispatch an arrival to the concrete node implementation, based on the
 * node's type tag (root or internal node).
 */
static void
snzi_node_arrive( snzi_node_t * node)
{
    if ( node->type != SNZI_NODE_TYPE_ROOT )
    {
        bar_Assert( node->type == SNZI_NODE_TYPE_INODE);
        snzi_inode_arrive( (snzi_inode_t *) node);
    } else
    {
        snzi_root_arrive( (snzi_root_t *) node);
    }
}

/**
 * Dispatch a departure to the concrete node implementation, based on the
 * node's type tag (root or internal node).
 */
static void
snzi_node_depart( snzi_node_t * node)
{
    if ( node->type != SNZI_NODE_TYPE_ROOT )
    {
        bar_Assert( node->type == SNZI_NODE_TYPE_INODE);
        snzi_inode_depart( (snzi_inode_t *) node);
    } else
    {
        snzi_root_depart( (snzi_root_t *) node);
    }
}

/**
 * Allocate the next node from the SNZI node pool: the very first node
 * becomes the root (also recorded in snzi_barrier->root); every later
 * node is an internal node linked to its parent.  All counters start
 * at zero.
 */
static void
snzi_inode_init( snzi_t * snzi_barrier,
                 snzi_node_t ** child,
                 snzi_node_t * parent)
{
    if ( snzi_barrier->inodesNum == 0 )
    {
        snzi_root_t * node;
        
        bar_Assert( parent == NULL);
        *child = & snzi_barrier->inodes [ snzi_barrier->inodesNum++ ];
        (*child)->type = SNZI_NODE_TYPE_ROOT;
        node = &((*child)->n.root);
        snzi_barrier->root = node;
        node->X.d = (atomic_Data_t)0;
        node->I = (atomic_Data_t)0;
    } else
    {
        snzi_inode_t * node;

        *child  = & snzi_barrier->inodes [ snzi_barrier->inodesNum++ ];
        (*child)->type = SNZI_NODE_TYPE_INODE;
        node = &((*child)->n.inode);
        node->X.d = (atomic_Data_t)0;
        node->parent = parent;
    }
}

/**
 * Recursively build one SNZI tree: at depth 0 the parent becomes a leaf
 * (one per thread, while leaves are still needed); otherwise a new node
 * is created and up to radix subtrees are attached beneath it.
 */
static void
snzi_barrier_build_tree( snzi_t * snzi,
                         snzi_node_t * parent,
                         int depth)
{
    int i;
    snzi_node_t * child;

    if ( depth == 0 )
    {
        if ( snzi->leavesNum < snzi->threadsNum )
        {
            snzi->leaves [ snzi->leavesNum++ ] = parent;
        }
    } else
    {
        snzi_inode_init( snzi, & child, parent);
        for ( i = 0; i < snzi->radix; i++ )
        {
            if ( snzi->leavesNum < snzi->threadsNum )
            {
                snzi_barrier_build_tree( snzi, child, depth - 1);
            }
        }
    }
}

/**
 * Initialize a SNZI barrier: build one SNZI tree per epoch slot
 * (PREV/CURR/NEXT), pre-arrive every thread on the CURR tree so the
 * first episode has a nonzero indicator to depart from, and store the
 * per-order tree pointers into each thread's TLS.
 *
 * Fix: removed the unused local nodes_count.
 */
static void
snzi_barrier_init( snzi_barrier_t * snzi_barrier,
                   tls_Data_t * snzi_barrier_data,
                   int barrier_count,
                   int radix,
                   int barrierId)
{
    int i;
    int threadsNum = barrier_count;
    int depth = 0;
    snzi_order_t order;

    snzi_barrier->barrierId = barrierId;
    bar_Assert( barrier_count > 0 && radix > 1);
    for ( order = SNZI_ORDER_PREV;
          order < SNZI_ORDER_NUM;
          order++ )
    {
        snzi_barrier->snzi [ order ].radix = radix;
        snzi_barrier->snzi [ order ].leavesNum = 0;
        snzi_barrier->snzi [ order ].inodesNum = 0;
        snzi_barrier->snzi [ order ].threadsNum = barrier_count;
    }
    /* depth = ceil(log_radix(barrier_count)), minimum 1. */
    if ( barrier_count == 1 )
    {
        depth++;
    }
    while ( barrier_count > 1 )
    {
        depth++;
        barrier_count = (barrier_count + radix - 1) / radix;
    }
    for ( order = SNZI_ORDER_PREV;
          order < SNZI_ORDER_NUM;
          order++ )
    {
        snzi_barrier_build_tree( & snzi_barrier->snzi [ order ], NULL, depth);
    }
    /* Pre-arrive all threads on the CURR tree for the first episode. */
    for ( i = 0;
          i < threadsNum;
          i++ )
    {
        snzi_node_arrive( snzi_barrier->snzi[ SNZI_ORDER_CURR ].leaves[ i ]);
    }
    for ( i = 0; i < threadsNum; i++ )
    {
        tls_Data_t * snzi_barrier_data_tls = & snzi_barrier_data [ i ];

        for ( order = SNZI_ORDER_PREV;
              order < SNZI_ORDER_NUM;
              order++ )
        {
            snzi_barrier_data_tls->snzi [ barrierId ][ order ] = & snzi_barrier->snzi [ order ];
        }
    }
}

/**
 * SNZI barrier episode: arrive on the NEXT tree, depart from the CURR
 * tree, then spin until the CURR root indicator clears (i.e. every
 * thread has departed).  Finally rotate the thread-local tree pointers
 * (CURR -> PREV, NEXT -> CURR, PREV -> NEXT) for the following episode.
 */
static inline void
snzi_barrier_wait( snzi_barrier_t * snzi_barrier,
                   tls_Data_t * snzi_barrier_tls_data)
{
    snzi_t * next_snzi;
    int barrierId = snzi_barrier->barrierId;

    snzi_node_arrive( snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_NEXT ]->leaves[ snzi_barrier_tls_data->threadId ]);
    snzi_node_depart( snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_CURR ]->leaves[ snzi_barrier_tls_data->threadId ]);
    for ( ; ; )
    {
#ifdef WFE_SPINNING
        if ( snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_CURR ]->root->I ) 
        {
            spinning_thread_wfe_init( (void *) & snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_CURR ]->root->I);
        }
#endif
        if ( !snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_CURR ]->root->I )
            break;
#ifdef YIELD_SPINNING
        spinning_thread_yield( );
#endif
#ifdef PAUSE_SPINNING
        spinning_pause( );
#endif
#ifdef WFE_SPINNING
        spinning_thread_wfe_wait( );
#endif
    }
    next_snzi = snzi_barrier_tls_data->snzi [ snzi_barrier->barrierId ] [ SNZI_ORDER_NEXT ];
    snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_NEXT ] = snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_PREV ];
    snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_PREV ] = snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_CURR ];
    snzi_barrier_tls_data->snzi [ barrierId ] [ SNZI_ORDER_CURR ] = next_snzi;
}
#endif /* SNZI_BARRIER */

#ifdef DSMN_BARRIER
/**
 * Round count for the dissemination barrier.
 * NOTE(review): despite the "Floor" in the name this returns
 * ceil(log2(threadsNum)) for threadsNum >= 2, and 1 (not 0) for
 * threadsNum == 1; the callers store it as ceilLog2ThreadsNum, so the
 * behavior is kept as-is.
 */
static int
math_log2FloorGZ( int threadsNum)
{
    int bits;

    bar_Assert( threadsNum > 0);
    for ( bits = 1; (1 << bits) < threadsNum; bits++ )
    {
        /* grow until 2^bits covers threadsNum */
    }

    return bits;
}

/**
 * Initialize a dissemination barrier (presumably after Hensgen et al. /
 * Mellor-Crummey & Scott -- TODO confirm).  In round l of an episode,
 * thread id signals partner (id + 2^l) mod threadsNum.  Two
 * parity-indexed flag sets alternate between consecutive episodes so
 * they cannot interfere; each thread starts with parity EVEN and
 * sense TRUE.
 */
static void
dsmn_barrier_init( dsmn_barrier_t * dsmn_barrier,
                   tls_Data_t * dsmn_barrier_tls_data,
                   int threadsNum,
                   int barrierId)
{
    int id;
    int l;
    int ceilLog2ThreadsNum = math_log2FloorGZ( threadsNum);

    dsmn_barrier->barrierId = barrierId;
    dsmn_barrier->ceilLog2ThreadsNum = ceilLog2ThreadsNum;
    for ( id = 0; id < threadsNum; id++ )
    {
        for ( l = 0; l < ceilLog2ThreadsNum; l++ )
        {
            parity_t p;
            int partnerId = (id + (1 << l)) % threadsNum;

            /* Each thread keeps a pointer straight to the partner's flag
               so the signalling store needs no index arithmetic. */
            for ( p = PARITY_EVEN; p < PARITY_NUM; p++ )
            {
                dsmn_barrier_tls_data [ id ].my_flags [ p ] [ l ] [ barrierId ].data = FALSE;
                dsmn_barrier_tls_data [ id ].partner_flags [ p ] [ l ] [ barrierId ] = 
                    & (dsmn_barrier_tls_data [ partnerId ].my_flags [ p ] [ l ] [ barrierId ]);
            }
        }
        dsmn_barrier_tls_data [ id ].parity [ barrierId ].data = PARITY_EVEN;
        dsmn_barrier_tls_data [ id ].sense [ barrierId ].data = TRUE;
    }
}

/**
 * Publish a boolean flag, preferring the architecture's no-read store
 * variants when available.
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
static inline void
dsmn_barrier_store_bool( volatile bool * addr,
                         bool data)
{
#if defined( ARCH_STORE_NR_NGO)
    store_nr_ngo_bool( (void *) addr, data);
#elif defined( ARCH_STORE_NR)
    store_nr_bool( (void *) addr, data);
#else
    (* addr) = data;
#endif
}

/**
 * Write the sense flag a partner thread is spinning on.  The generic
 * boolean-store dispatch is inlined here (identical to
 * dsmn_barrier_store_bool).
 */
/* FIXME: Rigorous proof is needed for NGO usage. */
static inline void
dsmn_barrier_store_sense( volatile bool * sense_addr, 
                          bool sense)
{
#if defined( ARCH_STORE_NR_NGO)
    store_nr_ngo_bool( (void *) sense_addr, sense);
#elif defined( ARCH_STORE_NR)
    store_nr_bool( (void *) sense_addr, sense);
#else
    (* sense_addr) = sense;
#endif
}

/* Update this thread's own thread-local sense for the next barrier
   episode (same store path as partner-flag stores). */
static inline void
dsmn_barrier_set_tls_sense( volatile bool * sense_addr,
                            bool sense)
{
    dsmn_barrier_store_bool( sense_addr, sense);
}

/* Update this thread's own thread-local parity for the next barrier
   episode.  Mirrors dsmn_barrier_set_tls_sense.
   BUG FIX: the original definition had no return type or storage class
   (an implicit-int function definition), which is invalid in C99 and
   later and inconsistent with every sibling helper; it is now declared
   `static inline void` like the others.
   NOTE(review): the caller passes the address of a parity_t field here
   through a volatile bool * -- only sizeof(bool) bytes are written;
   this matches the original behavior but is worth confirming. */
static inline void
dsmn_barrier_set_tls_parity( volatile bool * sense_addr,
                             bool parity )
{
    dsmn_barrier_store_bool( sense_addr, parity);
}

/* Dissemination barrier wait.
   In round l the thread sets the flag of its partner at distance 2^l and
   spins until its own flag for that round is set by the thread 2^l behind
   it.  After ceilLog2ThreadsNum rounds all threads have (transitively)
   heard from every other thread.  Parity alternates between episodes so
   that two consecutive barrier episodes use disjoint flag planes; the
   sense only flips after every second (odd-parity) episode. */
static inline void
dsmn_barrier_wait( dsmn_barrier_t * dsmn_barrier,
                   tls_Data_t * dsmn_barrier_tls_data)
{
    int l = 0;
    int barrierId = dsmn_barrier->barrierId;
    int ceilLog2ThreadsNum = dsmn_barrier->ceilLog2ThreadsNum;
    /* Thread-local episode state: current sense and parity plane. */
    bool s = dsmn_barrier_tls_data->sense [ barrierId ].data;
    parity_t p = dsmn_barrier_tls_data->parity [ barrierId ].data;

    for ( l = 0; l < ceilLog2ThreadsNum; l++ )
    {
        /* Signal the round-l partner, then wait for our own round-l flag. */
        dsmn_barrier_store_sense( & (dsmn_barrier_tls_data->partner_flags [ p ] [ l ] [ barrierId ]->data), s);
#ifdef WFE_SPINNING
        spinning_thread_wfe_send( );
#endif
#ifdef WFE_SPINNING
        /* Arm the wait-for-event monitor only if the flag is not already set. */
        if ( dsmn_barrier_tls_data->my_flags [ p ] [ l ] [ barrierId ].data != s )
        {
            spinning_thread_wfe_init( 
                (void *) & dsmn_barrier_tls_data->my_flags [ p ] [ l ] [ barrierId ]);
        }
#endif
        while ( dsmn_barrier_tls_data->my_flags [ p ] [ l ] [ barrierId ].data != s )
        {
#ifdef YIELD_SPINNING
            spinning_thread_yield( );
#endif
#ifdef PAUSE_SPINNING
            spinning_pause( );
#endif
#ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
#endif
        }
    }
    /* Flip sense after the odd-parity episode; toggle parity every episode
       (PARITY_ODD - p maps even<->odd). */
    if ( p == PARITY_ODD )
    {
        dsmn_barrier_set_tls_sense( & (dsmn_barrier_tls_data->sense [ barrierId ].data), !s);
    }
    dsmn_barrier_set_tls_parity( & (dsmn_barrier_tls_data->parity [ barrierId ].data), PARITY_ODD - p);
}
#endif /* DSMN_BARRIER */

/* Per-thread initialization for one barrier instance, performed before
   the timed loop.  For sense-based barriers the thread-local sense is
   seeded to the opposite of the barrier's global sense; for ITP builds
   a participant object is registered and all threads rendezvous on the
   auxiliary pthread barrier before proceeding. */
static inline void
bar_BarrierInit( int barrierId,
                 tls_Data_t * tlsData)
{
#ifdef SR_BARRIER
    tlsData->sense [ barrierId ].data = !bar_srBarrier [ barrierId ].sense;
#endif
#ifdef TREE_BARRIER
#   ifdef T_GLOBAL_SENSE
    tlsData->sense [ barrierId ].data = !bar_treeBarrier [ barrierId ].sense;
#   endif
#   ifdef T_LOCAL_SENSE
    /* Local-sense trees seed from the root inode's sense. */
    tlsData->sense [ barrierId ].data = !bar_treeBarrier [ barrierId ].inodes [ 0 ].sense;
#   endif
#endif
#ifdef ITP_BARRIER
    {
        /* ITP builds are compiled as C++: register this thread as a
           participant, then wait for all threads to finish registering. */
        tlsData->participant [ barrierId ] = new PARTICIPANT( bar_itpBarrier [ barrierId ]);
        pthread_barrier_wait( & bar_pthreadBarrier [ barrierId ]);
    }
#endif
}

/* Wait on barrier 'barrierId' using whichever implementation this build
   was compiled with.  During the reference stage (EXP_STAGE_REF) the wait
   is skipped entirely so the loop overhead can be measured and
   subtracted from the experiment stage. */
static inline void
bar_BarrierWait( int barrierId,
                 tls_Data_t * tlsData)
{
    exp_Stage_e expStage = tlsData->expInfo->expStage;
        
    if ( expStage == EXP_STAGE_REF )
        return;
#ifdef OMP_BARRIER
#pragma omp barrier
#endif
#ifdef PTHREAD_BARRIER
    pthread_barrier_wait( & bar_pthreadBarrier [ barrierId ]);
#endif
#ifdef SR_BARRIER
    sr_barrier_wait( & bar_srBarrier [ barrierId ], tlsData);
#endif
#ifdef TREE_BARRIER
    tree_barrier_wait( & bar_treeBarrier [ barrierId ], tlsData, 
                       bar_treeBarrier [ barrierId ].leaves [ tlsData->threadId ]);
#endif
#ifdef SNZI_BARRIER
    snzi_barrier_wait( & bar_snziBarrier [ barrierId ], tlsData);
#endif
#ifdef ITP_BARRIER
    tlsData->participant [ barrierId ]->barrier( );
#endif
#ifdef DSMN_BARRIER
    dsmn_barrier_wait( & bar_dsmnBarrier [ barrierId ], tlsData);
#endif
}

#ifdef LDIMBL_BENCHMARK
#define IMBALANCE_FACTOR 25
/* Burn a number of iterations proportional to the thread id so threads
   reach the barrier at staggered times (the volatile counter keeps the
   compiler from eliminating the empty delay loop). */
static inline void
test_LoadImbalance( int threadId)
{
    volatile int spin = 0;
    const int limit = IMBALANCE_FACTOR * threadId;

    while ( spin < limit )
    {
        spin = spin + 1;
    }
}
#endif /* LDIMBL_BENCHMARK */

#if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK)
/* Worker body for the pure (and load-imbalanced) benchmarks: wait on
   barrier 0 once per iteration from loBarNum to hiBarNum inclusive.
   In LDIMBL builds each iteration is preceded by a per-thread delay. */
static void *
test_barrier_pure( tls_Data_t * tlsData)
{
    int firstBar = tlsData->expInfo->loBarNum;
    int lastBar = tlsData->expInfo->hiBarNum;
#ifdef LDIMBL_BENCHMARK
    int myId = tlsData->threadId;
#endif
    int barNum;

    bar_BarrierInit( 0, tlsData);

    for ( barNum = firstBar; barNum <= lastBar; barNum++ )
    {
#ifdef LDIMBL_BENCHMARK
        test_LoadImbalance( myId);
#endif
        bar_BarrierWait( 0, tlsData);
    }

    return NULL;
}
#endif /* PURE_BENCHMARK */

#ifdef SANITY_BENCHMARK
/* Correctness check for the barrier implementation: threads cooperatively
   circulate counters through bar_TestArray.  Each of the threadsNum inner
   steps reads a rotating neighbor's slot (separated from the write-back by
   barriers 0 and 1), increments it, and stores it into this thread's slot.
   After a full rotation every slot must equal threadsNum; a mismatch during
   the experiment stage means the barrier let a thread run ahead.
   FIX: removed the unused local 'expStage' -- the stage is read directly
   from tlsData->expInfo where it is actually needed. */
static void *
test_barrier_sanity( tls_Data_t * tlsData)
{
    int loBarNum = tlsData->expInfo->loBarNum;
    int hiBarNum = tlsData->expInfo->hiBarNum;
    int threadsNum = tlsData->expInfo->curThreadsNum;
    /* Barriers consumed per rotation: 2 per step plus the final one. */
    int deltaBarNum = threadsNum * 2 + 1;
    int i = tlsData->threadId;
    int j;
    int k;

    bar_BarrierInit( 0, tlsData);
    bar_BarrierInit( 1, tlsData);
    bar_BarrierInit( 2, tlsData);

    for ( j = 0; j <= (hiBarNum - loBarNum); j = j + deltaBarNum )
    {

        bar_TestArray [ i ] = 0;

        for ( k = 0; k < threadsNum; k++ )
        {
            int t;

            /* Barrier 0: everyone has written; barrier 1: everyone has read. */
            bar_BarrierWait( 0, tlsData);
            t = bar_TestArray [ (i + j + k) % threadsNum ];
            bar_BarrierWait( 1, tlsData);
#ifndef NDEBUG
            printf( " [%d] -> [%d] val: %d\n", i, (i + j + k) % threadsNum, t);
#endif
            bar_TestArray [ i ] = t + 1;
        }
        bar_BarrierWait( 2, tlsData);
#ifndef NDEBUG
        printf( "res id: %d val: %d\n", i, bar_TestArray [ i ]);
#endif
        if ( bar_TestArray [ i ] != threadsNum )
        {
            /* Only fatal during the measured stage; the reference stage
               skips the actual barrier waits, so mismatches are expected. */
            if ( tlsData->expInfo->expStage == EXP_STAGE_EXP )
            {
                bar_InternalError( __FILE__, __LINE__);
            }
        }
    }

    return NULL;
}
#endif /* SANITY_BENCHMARK */

/* Initialize every barrier instance for the configured implementation
   before an experiment run.
   FIX: removed the unused 'helperPthreadBarrier' array that was declared
   under ITP_BARRIER but never referenced. */
static inline void
bar_BarriersInit( exp_Info_t * expInfo,
                  tls_Data_t * tlsData)
{
    int j;
#if defined( SNZI_BARRIER) || defined( TREE_BARRIER)
    int radix = expInfo->curRadixNum;
#endif
    int threadsNum = expInfo->curThreadsNum;

    for ( j = 0; j < BARRIERS_MAX_NUM; j++ )
    {
#ifdef PTHREAD_BARRIER
        pthread_barrier_init( & bar_pthreadBarrier [ j ], NULL, threadsNum);
#endif

#ifdef SR_BARRIER
        sr_barrier_init( & bar_srBarrier [ j ], NULL, threadsNum, j);
#endif

#ifdef TREE_BARRIER
        tree_barrier_init( & bar_treeBarrier [ j ], NULL, threadsNum, radix, j);
#endif

#ifdef SNZI_BARRIER
        snzi_barrier_init( & bar_snziBarrier [ j ], tlsData, threadsNum, radix, j);
#endif

#ifdef ITP_BARRIER
        /* ITP builds also need the auxiliary pthread barrier used by
           bar_BarrierInit to rendezvous after participant registration. */
        pthread_barrier_init( & bar_pthreadBarrier [ j ], NULL, threadsNum);
        bar_itpBarrier[ j ] = new BARRIER( threadsNum);
#endif
#ifdef DSMN_BARRIER
        dsmn_barrier_init( & bar_dsmnBarrier [ j ], tlsData, threadsNum, j);
#endif
    }
}

/* Give each worker's thread-local slot its thread id and a back-pointer
   to the shared experiment descriptor. */
static inline void
bar_TlsDataInit( exp_Info_t * expInfo,
                 tls_Data_t * tlsData)
{
    int id;

    for ( id = 0; id < expInfo->curThreadsNum; id++ )
    {
        tlsData [ id ].threadId = id;
        tlsData [ id ].expInfo = expInfo;
    }
}

/* Copy the process-wide online CPU set (cached by bar_SetOnlineCpuSet)
   into the caller's buffer. */
static inline void
sys_SetOnlineCpuSet( cpu_set_t * onlineCpuSet)
{
    * onlineCpuSet = bar_onlineCpuSet;
}

/* Assign worker threads to online CPUs in a strided order: CPU ids are
   visited in passes j = 0 .. CPU_MAP_PRIORITY_DELTA-1, taking every
   CPU_MAP_PRIORITY_DELTA-th online CPU per pass (presumably to spread
   threads across cores before doubling up on hardware threads -- confirm
   against the platform's CPU numbering).
   OMP builds: pins only the calling thread (when threadId == ompThreadNum)
   via the kmp affinity API.  Pthread builds: fills one pthread_attr_t per
   thread with its CPU mask for later pthread_create. */
static inline void
bar_SetThreadAffinityHelper( exp_Info_t * expInfo,
#ifdef OMP_BARRIER
                             int ompThreadNum
#else
                             pthread_attr_t * pthreadAttr
#endif
                             )
{
    int i, j;
    int threadId = 0;
    int threadsNum = expInfo->curThreadsNum;

    cpu_set_t onlineCpuSet;
    CPU_ZERO( &onlineCpuSet);
    sys_SetOnlineCpuSet( &onlineCpuSet);

    /* Outer while: keep cycling the passes until every thread has a CPU
       (threads wrap onto already-used CPUs when threadsNum > online CPUs). */
    while( threadId < threadsNum )
    {
        for ( j = 0; j < CPU_MAP_PRIORITY_DELTA; j++ )
        {
            for ( i = j; 
                  (i < sizeof( cpu_set_t) * BITS_IN_BYTE) && threadId < threadsNum;
                  i = i + CPU_MAP_PRIORITY_DELTA )
            {
#ifdef OMP_BARRIER
                {
                    if ( !CPU_ISSET( i, &onlineCpuSet) )
                        continue;
                    
                    /* Only the slot matching the calling OMP thread is applied. */
                    if ( threadId == ompThreadNum )
                    {
                        int ret;
                        kmp_affinity_mask_t mask;
                       
                        kmp_create_affinity_mask( &mask); 
                        kmp_set_affinity_mask_proc( i, & mask);
                        ret = kmp_set_affinity( & mask);
                        if ( ret )
                        {
                            bar_InternalError( __FILE__, __LINE__);
                        }
                    }
                }
#else
                {
                    int ret;
                    cpu_set_t currCpuSet;
                    
                    if ( !CPU_ISSET( i, &onlineCpuSet) )
                        continue;

                    CPU_ZERO( &currCpuSet);
                    CPU_SET( i, &currCpuSet);

                    ret = pthread_attr_init( & pthreadAttr[ threadId ]);
                    if ( ret )
                    {
                        bar_InternalError( __FILE__, __LINE__);
                    }
                    /* NOTE(review): return value of
                       pthread_attr_setaffinity_np is not checked here. */
                    pthread_attr_setaffinity_np( & pthreadAttr[ threadId ], sizeof( currCpuSet), &currCpuSet);
                }
#endif
                threadId++;
            }
        }
    }
}

#ifdef OMP_BARRIER
/* OMP build: pin the calling OpenMP thread according to the shared
   CPU-mapping policy. */
static inline void
bar_OmpSetThreadAffinity( exp_Info_t * expInfo,
                          int ompThreadNum)
{
    bar_SetThreadAffinityHelper( expInfo, ompThreadNum);
}
#else /* OMP_BARRIER */
/* Pthread build: initialize one pthread_attr_t per worker with its
   CPU affinity, for use by pthread_create. */
static inline void
bar_PthreadAttrsInit( exp_Info_t * expInfo,
                      pthread_attr_t * pthreadAttr)
{
    bar_SetThreadAffinityHelper( expInfo, pthreadAttr);
}
#endif /* !OMP_BARRIER */ 

/* Tear down barrier instances after a run.  Only pthread-based barriers
   need explicit destruction; the other implementations hold no OS
   resources here.  expInfo is currently unused but kept for interface
   symmetry with bar_BarriersInit. */
static inline void
bar_BarriersFini( exp_Info_t * expInfo)
{
    int i;

    for ( i = 0; i < BARRIERS_MAX_NUM; i++ )
    {
#if defined( PTHREAD_BARRIER) || defined( ITP_BARRIER)
        pthread_barrier_destroy( & bar_pthreadBarrier [ i ]);
#endif
    }
}

/* Destroy the per-thread attribute objects created by
   bar_PthreadAttrsInit. */
static inline void
bar_PthreadAttrsFini( exp_Info_t * expInfo,
                      pthread_attr_t * pthreadAttr)
{
    int idx;
    const int count = expInfo->curThreadsNum;

    for ( idx = 0; idx < count; idx++ )
    {
        int rc = pthread_attr_destroy( & pthreadAttr[ idx ]);

        bar_Assert( !rc);
    }
}

/* Join all worker threads created for the current experiment.
   BUG FIX: 'ret' was never assigned -- pthread_join's result was
   discarded, so bar_Assert( !ret) evaluated an uninitialized variable
   (undefined behavior).  The return code is now captured and checked. */
static inline void
bar_PthreadsFini( exp_Info_t * expInfo,
                 pthread_t * pthread)
{
    int i;
    int ret;
    int threadsNum = expInfo->curThreadsNum;

    for ( i = 0; i < threadsNum; i++ )
    {
        ret = pthread_join( pthread [ i ], NULL);
        bar_Assert( !ret);
    }
}

/* Launch threadsNum workers running testFunc over their tls slots.
   OMP builds run testFunc inside a parallel region; pthread builds create
   one pinned pthread per worker (joined later by bar_PthreadsFini).
   BUG FIX (OMP path): 'i' was declared at function scope and therefore
   shared by default inside the parallel region -- every thread wrote it
   concurrently (data race), so a thread could read another thread's id.
   The index is now declared inside the region, making it private.  The
   OMP path's unused 'ret' was dropped as well. */
static inline void
bar_CreateThreadsAndRunTest( tls_Data_t * tlsData,
                             exp_Info_t * expInfo,
#ifndef OMP_BARRIER
                             pthread_t * pthread,
                             pthread_attr_t * pthreadAttr,
#endif
                             void * (* testFunc)(tls_Data_t *) )
{
    int threadsNum = expInfo->curThreadsNum;

#ifdef OMP_BARRIER
#   pragma omp parallel num_threads( threadsNum)
    {
        /* Private per-thread index: each OMP thread pins itself and runs
           the test body on its own tls slot. */
        int i = omp_get_thread_num( );

        bar_OmpSetThreadAffinity( expInfo, i);
        testFunc( (void * __restrict__) & tlsData [ i ]);
    }
#else
    {
        int i;
        int ret;

        for ( i = 0; i < threadsNum; i++ )
        {
            ret = pthread_create( & pthread [ i ], 
                                  & pthreadAttr [ i ], 
                                  (void * (*)(void *)) testFunc, 
                                  (void * __restrict__) & tlsData [ i ]);
            bar_Assert( !ret);
        }
    }
#   ifndef NDEBUG
    printf( "Created number of pthreads: %i \n", threadsNum);
#   endif
#endif
}

/* Run one full benchmark pass for the current (threadsNum, radix, stage)
   configuration: set up tls data, barriers and thread attributes, launch
   the configured test body on all threads, then tear everything down.
   FIX: removed unused locals ('threadsNum', 'i', 'j', 'ret') that were
   declared but never referenced in any build configuration visible here. */
static void
bar_TestBarrier( exp_Info_t * expInfo)
{
#ifndef OMP_BARRIER
    pthread_t pthread [ THREADS_MAX_NUM ];
    pthread_attr_t pthreadAttr [ THREADS_MAX_NUM ];
#endif
    tls_Data_t tlsData [ THREADS_MAX_NUM ];

    bar_TlsDataInit( expInfo, tlsData);
    bar_BarriersInit( expInfo, tlsData);
#ifndef OMP_BARRIER
    bar_PthreadAttrsInit( expInfo, pthreadAttr);
#endif

#ifdef SANITY_BENCHMARK
    bar_CreateThreadsAndRunTest( tlsData, expInfo,
#   ifndef OMP_BARRIER
                                 pthread, pthreadAttr, 
#   endif
                                 & test_barrier_sanity);
#endif

#if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK)
    bar_CreateThreadsAndRunTest( tlsData, expInfo,
#   ifndef OMP_BARRIER
                                 pthread, pthreadAttr,
#   endif
                                 & test_barrier_pure);
#endif

#ifndef OMP_BARRIER
    /* Join the workers before destroying their attributes and barriers. */
    bar_PthreadsFini( expInfo, pthread);
    bar_PthreadAttrsFini( expInfo, pthreadAttr);
#endif
    bar_BarriersFini( expInfo);
}

/* Compile-time/startup sanity checks: the atomic data type must match the
   hardware atomic width, and the build must provide at least one atomic
   primitive usable by the selected barrier (SNZI needs LL/SC or CAS; the
   others can also work with fetch-and-add). */
static void
bar_CheckPreconditions( )
{
    bar_Assert( BITS_IN_BYTE * sizeof( atomic_Data_t) == HW_ATOMIC_DATA_SIZE_IN_BITS);
#if defined( SNZI_BARRIER)
#   if !defined( ARCH_LL_SC) && !defined( ARCH_CAS)
    bar_Assert( 0);
#   endif
#else /* SNZI_BARRIER */
    #if !defined( ARCH_LL_SC) && !defined( ARCH_CAS) && !defined( ARCH_FETCH_AND_ADD)
    bar_Assert( 0);
#   endif
#endif /* !SNZI_BARRIER */
}

#ifndef NDEBUG
/* Debug-only banner: announce which barrier implementation this binary
   was compiled with (selected by the build's -D flags). */
static void
bar_PrintExperimentInfo( exp_Info_t * expInfo)
{
    printf( "Number of logical cpus: %i \n", CPUS_NUM);
#ifdef PTHREAD_BARRIER
    printf( "Test pthread barrier...\n");
#endif
#ifdef SR_BARRIER
    printf( "Test sense reversing barrier...\n");
#endif
#ifdef TREE_BARRIER
#   ifdef T_GLOBAL_SENSE
    printf( "Test combining barrier with global sense...\n");
#   endif
#   ifdef T_LOCAL_SENSE
    printf( "Test combining barrier with local sense...\n");
#   endif
#endif
#ifdef SNZI_BARRIER
    printf( "Test snzi barrier...\n");
#endif
#ifdef DSMN_BARRIER
    printf( "Test dsmn barrier...\n");
#endif
#ifdef ITP_BARRIER
    printf( "Test itp barrier...\n");
#endif
}
#endif /* !NDEBUG */

/* Emit the CSV header row; column order must match bar_PrintTableLine
   and bar_PrintTableLines. */
static void
bar_PrintTableHeader( )
{
    static const char * columns [ ] =
    {
        "Hostname,",
        "Architecture,",
        "Experiment Number,",
        "Benchmark,",
        "Barrier,",
        "Radix,",
        "Spinning,",
        "Threads Number,",
        "Nanoseconds per Barrier\n"
    };
    size_t c;

    for ( c = 0; c < sizeof( columns) / sizeof( columns [ 0 ]); c++ )
    {
        printf( "%s", columns [ c ]);
    }
}

#ifdef DELAYED_PRINT
/* Record one result row into expInfo->tableLines for later printing
   (DELAYED_PRINT builds defer all output to the end of the run so that
   printf does not perturb timing). */
static void
bar_SaveTableLine( exp_Info_t * expInfo)
{
    int barriersNum;
    int radix;
    double timePerBarrier;
    
#if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK)
    barriersNum = expInfo->hiBarNum;
#endif
#ifdef SANITY_BENCHMARK
    /* Sanity runs consume barriers in whole rotations of (2n + 1);
       round the count up to a multiple of that. */
    barriersNum = ((expInfo->hiBarNum + expInfo->curThreadsNum * 2) / (expInfo->curThreadsNum * 2 + 1)) *
        (expInfo->curThreadsNum * 2 + 1);
#endif
    /* Per-barrier cost: (experiment time - reference loop time) / count. */
    timePerBarrier =
        (double) (expInfo->timer [ EXP_STAGE_EXP ].deltaTime - 
                  expInfo->timer [ EXP_STAGE_REF ].deltaTime) /
        (double) (barriersNum);

    /* Negative delta can only legitimately happen with a single thread
       (barrier waits are nearly free); clamp to zero in that case. */
    if ( expInfo->timer [ EXP_STAGE_EXP ].deltaTime < expInfo->timer [ EXP_STAGE_REF ].deltaTime )
    {
        bar_Assert( expInfo->curThreadsNum == 1);
        timePerBarrier = 0.0;
    }
    bar_Assert( expInfo->currTableLine < EXP_LINES_NUM);
#if defined( SNZI_BARRIER) || defined( TREE_BARRIER)
    radix = expInfo->curRadixNum;
#else
    radix = UNDEFINED_RADIX;
#endif
    expInfo->tableLines [ expInfo->currTableLine ].threadsNum = expInfo->curThreadsNum;
    expInfo->tableLines [ expInfo->currTableLine ].radix = radix;
    expInfo->tableLines [ expInfo->currTableLine ].timePerBarrier = timePerBarrier;
    expInfo->currTableLine++;
    
}

/* Print every row saved by bar_SaveTableLine, in CSV column order
   matching bar_PrintTableHeader. */
static void
bar_PrintTableLines( exp_Info_t * expInfo)
{
    int i = 0;
    
    for ( i = 0; i < expInfo->currTableLine; i++)
    {
        printf( "%s,", HOSTNAME_STR);
        printf( "%s,", ARCH_STR);
        printf( "%s,", EXP_ID_STR);
        printf( "%s,", BENCH_STR);
        printf( "%s,", BARRIER_STR);
        printf( "%d,", expInfo->tableLines [ i ].radix);
        printf( "%s,", SPINNING_STR);
        printf( "%d,", expInfo->tableLines [ i ].threadsNum);
        printf( "%4.2f\n", expInfo->tableLines [ i ].timePerBarrier);
    }
}
#endif /* DELAYED_PRINT */

#ifndef DELAYED_PRINT
/* Immediate-print variant of bar_SaveTableLine: compute the per-barrier
   overhead for the run just finished and print one CSV row right away.
   NOTE(review): unlike bar_SaveTableLine, this variant does not clamp a
   negative exp-ref delta to zero. */
static void
bar_PrintTableLine( exp_Info_t * expInfo)
{
    int barriersNum;
    double barOverhead;
    
#if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK)
    barriersNum = expInfo->hiBarNum;
#endif
#ifdef SANITY_BENCHMARK
    /* Sanity runs consume barriers in whole rotations of (2n + 1). */
    barriersNum = ((expInfo->hiBarNum + expInfo->curThreadsNum * 2) / (expInfo->curThreadsNum * 2 + 1)) *
        (expInfo->curThreadsNum * 2 + 1);
#endif

    /* Per-barrier cost: (experiment time - reference loop time) / count. */
    barOverhead = 
        (double) (expInfo->timer [ EXP_STAGE_EXP ].deltaTime - 
                  expInfo->timer [ EXP_STAGE_REF ].deltaTime) /
        (double) (barriersNum);

    printf( "%s,", HOSTNAME_STR);
    printf( "%s,", ARCH_STR);
    printf( "%s,", EXP_ID_STR);
    printf( "%s,", BENCH_STR);
    printf( "%s,", BARRIER_STR);
#if defined( SNZI_BARRIER) || defined( TREE_BARRIER)
    printf( "%d,", expInfo->curRadixNum);
#else
    printf( "%d,", UNDEFINED_RADIX);
#endif
    printf( "%s,", SPINNING_STR);
    printf( "%d,", expInfo->curThreadsNum);
    printf( "%4.2f\n", barOverhead);
}
#endif /* !DELAYED_PRINT */

/* Record the start timestamp, retrying until clock_gettime succeeds. */
static void
bar_StartTimer( exp_Timer_t * timer)
{
    int failed;

    do
    {
        failed = clock_gettime( timer->clockId, & timer->startTime);
    } while ( failed );
}

/* Record the stop timestamp (retrying on failure) and compute the
   elapsed time in nanoseconds into timer->deltaTime. */
static void
bar_StopTimer( exp_Timer_t * timer)
{
    long long int secDelta;
    long long int nsecDelta;

    while ( clock_gettime( timer->clockId, & timer->stopTime) )
    {
        ;
    }
    secDelta = (long long int)(timer->stopTime.tv_sec - timer->startTime.tv_sec);
    nsecDelta = (long long int)(timer->stopTime.tv_nsec - timer->startTime.tv_nsec);
    timer->deltaTime = secDelta * NANOSEC_IN_SEC + nsecDelta;
}

/* Pin the parent (coordinating) thread to the highest-numbered online CPU
   -- presumably to keep it off the low CPU ids where worker threads are
   packed first (see bar_SetThreadAffinityHelper).
   FIX: 'ret' was declared but pthread_setaffinity_np's result was ignored;
   a failure is now fatal, matching the file's error-handling style. */
static void
bar_SetParentThreadAffinity( )
{
    int i;

    cpu_set_t onlineCpuSet;
    CPU_ZERO( &onlineCpuSet);
    sys_SetOnlineCpuSet( &onlineCpuSet);

    /* Scan downwards from the last possible CPU id for an online CPU. */
    i = sizeof( cpu_set_t) * BITS_IN_BYTE - 1;
    for ( ; ;)
    {
        int ret;
        cpu_set_t currCpuSet;
        
        if ( !CPU_ISSET( i, &onlineCpuSet) )
        {
            if ( i == 0 )
            {
                /* No online CPU found at all: fatal. */
                bar_InternalError( __FILE__, __LINE__);
            }
            i--;
            continue;
        }
        CPU_ZERO( &currCpuSet);
        CPU_SET( i, &currCpuSet);
        ret = pthread_setaffinity_np( pthread_self( ), sizeof( cpu_set_t), &currCpuSet);
        if ( ret )
        {
            bar_InternalError( __FILE__, __LINE__);
        }
        break;
    }
}

/* Cache the set of CPUs this process may run on into bar_onlineCpuSet,
   used as an approximation of the online CPU set.
   FIX: a failing sched_getaffinity was silently ignored, which would
   leave the cached set empty and hang/abort the affinity code later;
   treat failure as fatal instead. */
static void
bar_SetOnlineCpuSet( )
{
    CPU_ZERO( & bar_onlineCpuSet);
    /* FIXME: Get online cpu set properly using lsproc or its code */
    if ( sched_getaffinity( 0, sizeof( cpu_set_t), & bar_onlineCpuSet) )
    {
        bar_InternalError( __FILE__, __LINE__);
    }
}

/* Fill in the experiment sweep ranges (repetitions, barrier counts,
   thread counts, tree/SNZI radices), set up the timers, and prepare the
   CPU set and parent-thread affinity before the main measurement loop. */
static void
bar_InitExperiment( exp_Info_t * expInfo)
{
    expInfo->loExpNum = 1;
    expInfo->hiExpNum = EXPERIMENTS_NUM;

    expInfo->loBarNum = 1;
    expInfo->hiBarNum = BARRIERS_NUM;

    expInfo->loThreadsNum = 1;
    expInfo->hiThreadsNum = THREADS_MAX_NUM;

#if defined( SNZI_BARRIER) || defined( TREE_BARRIER)
    /* Radix sweep only applies to tree-shaped barriers. */
    expInfo->loRadixNum = 2;
    expInfo->hiRadixNum = THREADS_MAX_NUM;
#endif

    expInfo->timer [ EXP_STAGE_REF ].clockId = EXP_CLOCK_ID;
    expInfo->timer [ EXP_STAGE_EXP ].clockId = EXP_CLOCK_ID;

#ifdef DELAYED_PRINT
    expInfo->currTableLine = 0;
#endif
    bar_SetOnlineCpuSet( );
#ifndef OMP_BARRIER
    /* OMP builds pin threads inside the parallel region instead. */
    bar_SetParentThreadAffinity( );
#endif
}

/* Report that the compiled barrier/spinning combination cannot run in
   this environment. */
static void
bar_PrintUnsupportedConfiguration( )
{
    fputs( "unsupported configuration!\n", stderr);
}

/* Return TRUE when the compile-time spinning method cannot run in the
   current execution environment, FALSE otherwise.
   BUG FIX (HWYIELD branch): the original returned TRUE unconditionally,
   rejecting the configuration even when running at ring 0 where HLT is
   usable; the return is now inside the privilege check, matching the
   structure of the WFE branch below.
   Also fixed the garbled "does not MONITOR/MWAIT" error message. */
static inline bool
bar_IsUnsupportedConfiguration( )
{
#if defined( ARCH_X86_FAMILY) && defined( HWYIELD_SPINNING)
    /* HLT is a privileged instruction: ring 0 only. */
    if ( sys_GetPrivilegeLevel( ) != X86_RING_0 )
    {
        fprintf( stderr, "HLT requires ring 0 access!\n");

        return TRUE;
    }
#endif
#if defined( ARCH_X86_FAMILY) && defined( WFE_SPINNING)
    {
        int in_eax = 1;
        int eax;
        int ebx;
        int ecx;
        int edx;
    
        /* CPUID leaf 1, ECX bit MONITOR: does the CPU support MONITOR/MWAIT? */
        x86_Cpuid( in_eax, & eax, & ebx, & ecx, & edx);
        if ( ecx & (1 << MONITOR_CPUID_ECX_BIT) )
        {
            if ( sys_GetPrivilegeLevel( ) != X86_RING_0 )
            {
                /* FIXME Too conservative deicision. Need to do more precise check.
                   X86 ISA 8.10.3 The instructions are conditionally available at levels greater than 0.
                   Use the following steps to detect the availability of MONITOR and MWAIT: ... */
                fprintf( stderr, "MONITOR/MWAIT requires RING 0 access!\n");

                return TRUE;
            }
        } else
        {
            fprintf( stderr, "Processor does not support MONITOR/MWAIT!\n");

            return TRUE;
        }
    }
#endif
    return FALSE;
}

/* Parse positional command-line arguments:
     argv[1]  hostname label for the CSV output (default "unknown-host"),
     argv[2]  experiment id string (default "0"),
     argv[3]  "yes"/"no" -- interpolate the radix sweep up to the thread
              count instead of the fixed maximum (default
              DEF_INTERPOLATE_RADIX); anything else is a fatal error,
     argv[4]  (ITP builds only) overrides the barrier name column. */
static void
bar_ReadArgs( int argc,
              const char * argv [ ])
{
    HOSTNAME_STR = (argc > 1) ? argv [ 1 ] : "unknown-host";
    EXP_ID_STR = (argc > 2) ? argv [ 2 ] : "0";

    if ( argc <= 3 )
    {
        INTERPOLATE_RADIX = DEF_INTERPOLATE_RADIX;
    } else if ( !strcmp( argv [ 3 ], "yes") )
    {
        INTERPOLATE_RADIX = TRUE;
    } else if ( !strcmp( argv [ 3 ], "no") )
    {
        INTERPOLATE_RADIX = FALSE;
    } else
    {
        /* Anything other than yes/no is a usage error. */
        bar_InternalError( __FILE__, __LINE__);
    }
#ifdef ITP_BARRIER
    if ( argc > 4 )
    {
        BARRIER_STR = argv [ 4 ];
    }
#endif
}

/* Shrink the number of timed barrier episodes for configurations that
   would otherwise take too long: busy-wait spinners with more threads
   than CPUs, ARM pause-spinning, and the comparatively slow pthread
   barrier. */
static void
bar_ThreadsCpuOverloadAdjustment( exp_Info_t * expInfo)
{
#if !defined( PTHREAD_BARRIER) && !defined( PTYIELD_SPINNING)
    /* Pure busy-waiting with oversubscribed CPUs progresses very slowly. */
    if ( expInfo->curThreadsNum > CPUS_NUM)
    {
        expInfo->hiBarNum = BUSY_WAIT_THREAD_CPU_OVERLOAD_BARRIERS_NUM;
    }
#endif
#if defined( ARCH_ARM_FAMILY) && defined( PAUSE_SPINNING)
    expInfo->hiBarNum = BUSY_WAIT_THREAD_CPU_OVERLOAD_BARRIERS_NUM;
#endif
#if defined( PTHREAD_BARRIER)
    expInfo->hiBarNum = BARRIERS_NUM / 10;
#endif
}

/* Benchmark driver.  Sweeps thread count (and, for tree-shaped barriers,
   radix), and for each configuration runs warm-up passes followed by
   hiExpNum measured repetitions of a reference stage (barrier waits
   skipped) and an experiment stage; each repetition emits or records one
   CSV row of the per-barrier overhead. */
int
main( int argc, 
      const char * argv [ ])
{
    exp_Info_t expInfo;
   
    bar_ReadArgs( argc, argv);
     
    if ( bar_IsUnsupportedConfiguration( ) )
    {
        return 0;
    }

    bar_CheckPreconditions( );
    bar_InitExperiment( & expInfo);
#ifndef NDEBUG
    bar_PrintExperimentInfo( & expInfo);
#endif
#ifndef DELAYED_PRINT
    bar_PrintTableHeader( );
#endif

    for ( expInfo.curThreadsNum = expInfo.loThreadsNum;
          expInfo.curThreadsNum <= expInfo.hiThreadsNum; 
          expInfo.curThreadsNum++ )
    {
        bar_ThreadsCpuOverloadAdjustment( &expInfo);
        /* Radix loop (tree/SNZI builds only): sweep radices up to either
           the thread count (interpolated) or the fixed maximum; the extra
           clause keeps a single radix-2 iteration for one thread.  In
           non-tree builds this preprocessor block disappears and the body
           below runs exactly once per thread count. */
#if defined( SNZI_BARRIER) || defined( TREE_BARRIER)
        for ( expInfo.curRadixNum = expInfo.loRadixNum;
#   if defined( SNZI_BARRIER) || defined( TREE_BARRIER)
              (
#   ifdef TRNM_BARRIER
              (expInfo.curRadixNum <= MA_GRANULARITY) &&
#   endif
              (expInfo.curRadixNum <= (INTERPOLATE_RADIX ? expInfo.curThreadsNum : expInfo.hiRadixNum))) ||
              ((expInfo.curThreadsNum == 1) && (expInfo.curRadixNum == 2));
#   endif
              expInfo.curRadixNum++ )
#endif
        {
            /* warming up */
            expInfo.expStage = EXP_STAGE_REF;
            bar_TestBarrier( & expInfo);
            
            expInfo.expStage = EXP_STAGE_EXP;
            bar_TestBarrier( & expInfo);

            for ( expInfo.curExpNum = expInfo.loExpNum;
                  expInfo.curExpNum <= expInfo.hiExpNum ;
                  expInfo.curExpNum++ )
            {
                /* Reference stage: identical run with barrier waits
                   skipped, to subtract loop overhead. */
                expInfo.expStage = EXP_STAGE_REF;
                bar_StartTimer( & expInfo.timer [ expInfo.expStage]);
                bar_TestBarrier( & expInfo);
                bar_StopTimer( & expInfo.timer [ expInfo.expStage]);

                expInfo.expStage = EXP_STAGE_EXP;
                bar_StartTimer( & expInfo.timer [ expInfo.expStage]);
                bar_TestBarrier( & expInfo);
                bar_StopTimer( & expInfo.timer [ expInfo.expStage]);

#ifdef DELAYED_PRINT
                bar_SaveTableLine( & expInfo);
#else
                bar_PrintTableLine( & expInfo);
#endif
            }
        }
    }
#ifdef DELAYED_PRINT
    bar_PrintTableHeader( );
    bar_PrintTableLines( & expInfo);
#endif

    return 0;
}

