#ifndef BARRIER_H
#define BARRIER_H


/**
 * Debug and assert switches.
 * NDEBUG disables standard <assert.h> assertions; NASSERT is a
 * project-local switch checked elsewhere in the code base.
 * NOTE(review): both are forced on unconditionally — consider #ifndef
 * guards so a debug build can re-enable assertions from the command line.
 */
#define NDEBUG
#define NASSERT

/* Boolean constants. NOTE(review): unguarded — may collide with other
 * headers that also define TRUE/FALSE. */
#define TRUE          1
#define FALSE         0

/* Return-code convention: success is 0, failure is non-zero. */
#define RETURN_OK     FALSE
#define RETURN_FAIL   TRUE

/**
 * Number of logical CPUs (overridable at build time).
 */
#ifndef CPUS_NUM
#   define CPUS_NUM 2
#endif


/**
 * Number of threads and experiments
 */
#ifndef THREADS_MAX_NUM
#   define THREADS_MAX_NUM CPUS_NUM
#endif
#ifndef CEIL_LOG2_THREADS_MAX_NUM
/* Rough approximation given another constraint that it should be greater than zero.
 * NOTE(review): using THREADS_MAX_NUM itself over-sizes the power-of-two
 * arrays below (1 << N) for N > 2 — override with the true ceil(log2) value. */
#   define CEIL_LOG2_THREADS_MAX_NUM THREADS_MAX_NUM
#endif
/* Barrier episodes executed per experiment. */
#ifndef BARRIERS_NUM
#   define BARRIERS_NUM 1000000
#endif

/* Repetitions of the whole experiment (for statistical stability). */
#ifndef EXPERIMENTS_NUM
#   define EXPERIMENTS_NUM 10
#endif

#ifndef CPU_MAP_PRIORITY_DELTA
#   define CPU_MAP_PRIORITY_DELTA 1
#endif

/* Maximum number of distinct barrier objects held simultaneously. */
#define BARRIERS_MAX_NUM 4

/**
 * 'clock_gettime' clock type (requires <time.h>); overridable at build time.
 */
#ifndef EXP_CLOCK_ID
#   define EXP_CLOCK_ID CLOCK_MONOTONIC
#endif

/* Map concrete target macros to an architecture family plus its
 * capability switches (non-temporal stores on MIC, MONITOR/MWAIT on x86-64). */
#ifdef ARCH_MIC
#   define ARCH_X86_FAMILY
#   define ARCH_STORE_NR
#   define ARCH_STORE_NR_NGO
#endif
#ifdef ARCH_X86_64
#   define ARCH_X86_FAMILY
#   define ARCH_X86_MONITOR_MWAIT
#endif
#ifdef ARCH_ARMV7L
#   define ARCH_ARM_FAMILY
#endif

/**
 * Architecture type — defaults to the x86 family when no target was given.
 */
#if !defined( ARCH_ARM_FAMILY) && !defined ( ARCH_X86_FAMILY)
/* #define ARCH_ARM_FAMILY */
#   define ARCH_X86_FAMILY
#endif

/**
 * Spinning amortization — pick exactly one busy-wait flavor;
 * plain spinning is the default when none was requested.
 */
#if !defined( SPIN_SPINNING) && !defined( HWYIELD_SPINNING) && !defined( PTYIELD_SPINNING) && \
    !defined( PAUSE_SPINNING) && !defined( WFE_SPINNING)
#define SPIN_SPINNING
/* #define HWYIELD_SPINNING */
/* #define PTYIELD_SPINNING */
/* #define PAUSE_SPINNING */
/* #define WFE_SPINNING */
#endif

/* Umbrella switch: both yield flavors share the yield-based code path. */
#if defined( PTYIELD_SPINNING) || defined( HWYIELD_SPINNING)
#   define YIELD_SPINNING
#endif

/* Barrier count used when threads outnumber CPUs (oversubscription test). */
#ifndef BUSY_WAIT_THREAD_CPU_OVERLOAD_BARRIERS_NUM
#define BUSY_WAIT_THREAD_CPU_OVERLOAD_BARRIERS_NUM (THREADS_MAX_NUM * 2 + 1)
#endif

/**
 * Coherency line size in bytes — used to pad/align shared fields so each
 * lives on its own cache line (false-sharing avoidance).
 */
#ifndef CCL_SIZE
#   define CCL_SIZE 128
#endif

/**
 * Maximum atomic data size in bits
 * FIXME: Maximum atomic data size should be defined more precisely depending on MP architecture 
 */
#define HW_ATOMIC_DATA_SIZE_IN_BITS 32

/**
 * Max memory access size in bits.
 */
#define HW_MAX_MA_SIZE_IN_BITS 32

/**
 * Min memory access size in bits.
 */
#define HW_MIN_MA_SIZE_IN_BITS 8

/**
 * Memory access granularity: how many minimum-size accesses fit in one
 * maximum-size access (4 with the defaults above).
 */
#define MA_GRANULARITY (HW_MAX_MA_SIZE_IN_BITS / HW_MIN_MA_SIZE_IN_BITS)

/**
 * Memory access min/max size volatile types.
 * Two macro levels are required so the HW_*_SIZE_IN_BITS argument is
 * expanded to its numeric value BEFORE token pasting builds intN_t.
 * NOTE(review): the generated intN_t typedefs rely on <stdint.h> being
 * included before this header — confirm against the including .c files.
 */
#define DEFINE_MAX_MA_VOLATILE_TYPE_H(max_ma_size) typedef volatile int ## max_ma_size ## _t int_max_ma_vol_t
#define DEFINE_MIN_MA_VOLATILE_TYPE_H(min_ma_size) typedef volatile int ## min_ma_size ## _t int_min_ma_vol_t
#define DEFINE_MAX_MA_VOLATILE_TYPE(max_ma_size) DEFINE_MAX_MA_VOLATILE_TYPE_H(max_ma_size)
#define DEFINE_MIN_MA_VOLATILE_TYPE(min_ma_size) DEFINE_MIN_MA_VOLATILE_TYPE_H(min_ma_size)
DEFINE_MAX_MA_VOLATILE_TYPE( HW_MAX_MA_SIZE_IN_BITS);
DEFINE_MIN_MA_VOLATILE_TYPE( HW_MIN_MA_SIZE_IN_BITS) ;

/**
 * Memory access min/max size types (non-volatile counterparts).
 */
#define DEFINE_MAX_MA_TYPE_H(max_ma_size) typedef int ## max_ma_size ## _t int_max_ma_t
#define DEFINE_MIN_MA_TYPE_H(min_ma_size) typedef int ## min_ma_size ## _t int_min_ma_t
#define DEFINE_MAX_MA_TYPE(max_ma_size) DEFINE_MAX_MA_TYPE_H(max_ma_size)
#define DEFINE_MIN_MA_TYPE(min_ma_size) DEFINE_MIN_MA_TYPE_H(min_ma_size)
DEFINE_MAX_MA_TYPE( HW_MAX_MA_SIZE_IN_BITS);
DEFINE_MIN_MA_TYPE( HW_MIN_MA_SIZE_IN_BITS) ;

/**
 * Bits in a byte
 */
#define BITS_IN_BYTE 8


/**
 * Atomic type — unit operated on by the hardware atomic primitives
 * (matches HW_ATOMIC_DATA_SIZE_IN_BITS = 32 on the supported targets).
 * FIXME: Atomic data type should be defined more precisely depending on MP architecture 
 */
typedef unsigned int atomic_Data_t;


/**
 * Barrier test type: calc (with some calculations to check correctness) or pure.
 * PURE_BENCHMARK is the default when none was requested.
 */
#if !defined( SANITY_BENCHMARK) && !defined( PURE_BENCHMARK) && !defined( LDIMBL_BENCHMARK)
/* #   define SANITY_BENCHMARK */
/* #   define LDIMBL_BENCHMARK */
#   define PURE_BENCHMARK
#endif

/**
 * Default value for INTERPOLATE_RADIX
 */
#if !defined( DEF_INTERPOLATE_RADIX)
#   define DEF_INTERPOLATE_RADIX TRUE
#endif



/**
 * Barrier type — pick exactly one implementation;
 * the sense-reversing (SR) barrier is the default when none was requested.
 */
#if !defined( PTHREAD_BARRIER) && !defined ( SR_BARRIER) &&\
    !defined( CTRGS_BARRIER) && !defined( CTRLS_BARRIER) &&\
    !defined( SNZI_BARRIER) && !defined( DSMN_BARRIER) &&\
    !defined( STNGS_BARRIER) && !defined( STNLS_BARRIER) &&\
    !defined( DTNGS_BARRIER) && !defined( DTNLS_BARRIER) &&\
    !defined( STRGS_BARRIER) && !defined( STRLS_BARRIER) &&\
    !defined( ITP_BARRIER) && !defined( OMP_BARRIER)
/* #   define PTHREAD_BARRIER */
#   define SR_BARRIER
/* #   define CTRGS_BARRIER */
/* #   define CTRLS_BARRIER */
/* #   define STRGS_BARRIER */
/* #   define STRLS_BARRIER */
/* #   define STNGS_BARRIER */
/* #   define STNLS_BARRIER */
/* #   define DTNGS_BARRIER */
/* #   define DTNLS_BARRIER */
/* #   define SNZI_BARRIER */
/* #   define DSMN_BARRIER */
/* #   define ITP_BARRIER */
/* #   define OMP_BARRIER */
#endif


/**
 * Different types of tree barriers where threads are assigned to leaves only,
 * with global or local sense. Each concrete *_BARRIER macro expands to a
 * family switch (COMBINED/TRNM), the common TREE_BARRIER switch, and a
 * sense-placement switch (T_GLOBAL_SENSE / T_LOCAL_SENSE).
 */
#ifdef CTRGS_BARRIER
#   define COMBINED_BARRIER
#   define TREE_BARRIER
#   define T_GLOBAL_SENSE
#endif
#ifdef CTRLS_BARRIER
#   define COMBINED_BARRIER
#   define TREE_BARRIER
#   define T_LOCAL_SENSE
#endif
#ifdef DTNGS_BARRIER
#   define TRNM_BARRIER
#   define TREE_BARRIER
#   define T_GLOBAL_SENSE
#endif
#ifdef DTNLS_BARRIER
#   define TRNM_BARRIER
#   define TREE_BARRIER
#   define T_LOCAL_SENSE
#endif
#ifdef STNGS_BARRIER
#   define TRNM_BARRIER
#   define TREE_BARRIER
#   define T_GLOBAL_SENSE
#endif
#ifdef STNLS_BARRIER
#   define TRNM_BARRIER
#   define TREE_BARRIER
#   define T_LOCAL_SENSE
#endif

/* Tournament-barrier window flavor: dynamic window for DTN*, static for STN*. */
#ifdef TRNM_BARRIER
#   if defined( DTNGS_BARRIER) || defined( DTNLS_BARRIER)
#       define TRNM_DYNM_WIN
#   endif
#   if defined( STNGS_BARRIER) || defined( STNLS_BARRIER)
#       define MCS_TREE_VARIATION
#       define TRNM_STAT_WIN
        /* NOTE(review): MCS_TREE_VARIATION is defined unconditionally just
         * above, so the #else branch below is dead code — looks like a
         * toggle that was left switched on; confirm intent. */
#       ifdef MCS_TREE_VARIATION
#           define TRNM_STAT_WIN_ID (-1)
#       else
#           define TRNM_STAT_WIN_ID (0)
#       endif
#   endif
#   define TRNM_TRUE TRUE
#   define TRNM_FALSE FALSE
#endif

/* Per-family atomic primitive availability: x86 offers CAS and
 * fetch-and-add; ARM offers load-linked/store-conditional. */
#ifdef ARCH_X86_FAMILY
#   define ARCH_CAS
#   define ARCH_FETCH_AND_ADD
#   define INTRA_PROCESSOR_FORWARDING_ALLOWED
#endif
#ifdef ARCH_ARM_FAMILY
#   define ARCH_LL_SC
#   define INTRA_PROCESSOR_FORWARDING_ALLOWED
#endif


#ifdef ARCH_X86_FAMILY
/* x86 privilege rings 0..3.
 * NOTE(review): `x86_RINGS_NUM` breaks the X86_ capitalization used by the
 * other enumerators — left as-is since callers may reference it. */
typedef enum x86_Ring_e
{
    X86_RING_0 = 0,
    X86_RING_1 = 1,
    X86_RING_2 = 2,
    X86_RING_3 = 3,
    x86_RINGS_NUM,
    X86_RINGS_MASK = 3
} x86_Ring_t;
#endif

/* CPUID leaf 1, ECX bit 3 reports MONITOR/MWAIT support. */
#define MONITOR_CPUID_IN_EAX 1
#define MONITOR_CPUID_ECX_BIT 3


#ifndef ITP_BARRIER
typedef unsigned int bool;
#endif

#define UNDEFINED_RADIX 1

/* Round parity used to alternate between two flag sets across barrier
 * episodes (even/odd rounds). */
typedef enum parity_e
{
    PARITY_EVEN,
    PARITY_ODD,
    PARITY_NUM
} parity_t;

/* Baseline implementation: the POSIX barrier (needs <pthread.h>). */
#ifdef PTHREAD_BARRIER
typedef pthread_barrier_t bar_Barrier_t;
#endif

/* Sense-reversing centralized barrier: threads decrement `count` and spin
 * on `sense` flipping. Each hot field sits on its own cache line. */
#ifdef SR_BARRIER
typedef struct sr_barrier_t
{
    volatile __attribute__ ( ( aligned( CCL_SIZE))) int sense;
    volatile __attribute__ ( ( aligned( CCL_SIZE))) int count;
    char padding [ CCL_SIZE - sizeof( int) ];
    int threadsNum;
    int barrierId;
} __attribute__ ( ( aligned( CCL_SIZE))) sr_barrier_t; 
#endif

/* Dissemination barrier descriptor: per-thread flags live in tls_Data_t;
 * only the round count (ceil(log2(threads))) and id are shared here. */
#ifdef DSMN_BARRIER
typedef struct dsmn_barrier_t
{
    int barrierId;
    int ceilLog2ThreadsNum;
} __attribute__ ( ( aligned( CCL_SIZE))) dsmn_barrier_t; 
#endif

#ifdef TREE_BARRIER
#ifdef TRNM_BARRIER
/* Tournament-barrier flag word: accessible as one full-width word or as
 * MA_GRANULARITY byte-size parts (one per child slot). */
typedef union trnm_Data_t
{
     int_min_ma_vol_t part [ MA_GRANULARITY ];
     int_max_ma_vol_t full;
} __attribute__ ( ( aligned( CCL_SIZE))) trnm_Data_t;
#endif
/* One node of the barrier tree; field set depends on the barrier flavor
 * selected above (local sense / combining counter / tournament flags). */
typedef struct tree_node_t
{
#ifdef T_LOCAL_SENSE
    volatile __attribute__ ( ( aligned( CCL_SIZE))) bool sense;
    char paddingS [ CCL_SIZE - sizeof( bool) ];
#endif
#ifdef COMBINED_BARRIER
    volatile __attribute__ ( ( aligned( CCL_SIZE))) int count;
    char paddingC [ CCL_SIZE - sizeof( int) ];
    int threadsNum;
#endif
#ifdef TRNM_BARRIER
#   ifdef TRNM_STAT_WIN
    volatile trnm_Data_t trnmDataCurr;
#   endif
#   ifdef TRNM_DYNM_WIN
    /* dynamic-window flavor keeps one flag word per round parity */
    volatile trnm_Data_t trnmDataCurr [ PARITY_NUM ];
#   endif
    trnm_Data_t trnmDataInit;
    /* numeration start from 0 and increases from leaves to root */
    int tier;
#endif
    struct tree_node_t * parent;
} __attribute__ ( ( aligned( CCL_SIZE))) tree_node_t;

/* Whole tree barrier: per-thread leaf pointers plus inner-node storage,
 * both sized for the worst case 2^CEIL_LOG2_THREADS_MAX_NUM threads. */
typedef struct tree_barrier_t
{
#ifdef T_GLOBAL_SENSE
    volatile __attribute__ ( ( aligned( CCL_SIZE))) bool sense;
    char padding [ CCL_SIZE - sizeof( bool) ];
#endif
    int radix;
    int leavesNum;
    int inodesNum;
    int threadsNum;
    struct tree_node_t * leaves [ 1 << CEIL_LOG2_THREADS_MAX_NUM ];
    struct tree_node_t inodes [ 1 << CEIL_LOG2_THREADS_MAX_NUM ];
    int barrierId;
} __attribute__ ( ( aligned( CCL_SIZE))) tree_barrier_t; 
#endif /* TREE_BARRIER */

#ifdef SNZI_BARRIER

/* SNZI (Scalable NonZero Indicator) barrier configuration.
 * The c/a/v bit-field widths below partition one atomic word:
 * counter, announce bit, and version tag. */
#define SNZI_NUM_PER_BARRIER 3
#define C_SIZE (HW_ATOMIC_DATA_SIZE_IN_BITS / 2 - 1)
#define A_SIZE 1
#define V_SIZE (HW_ATOMIC_DATA_SIZE_IN_BITS / 2)

struct snzi_node_t;

/* Node role within the SNZI tree. */
typedef enum snzi_node_type_e
{
    SNZI_NODE_TYPE_ROOT,
    SNZI_NODE_TYPE_INODE
} snzi_node_type_t;

/* Packed SNZI state word: bit-field view (f) and whole-word view (d) for
 * single atomic reads/writes of all three fields at once. */
typedef union snzi_x_t
{
    struct
    {
        volatile atomic_Data_t c : C_SIZE;
        volatile atomic_Data_t a : A_SIZE;
        volatile atomic_Data_t v : V_SIZE;
    } f;
    volatile atomic_Data_t d;
}  __attribute__ ( ( aligned( CCL_SIZE))) snzi_x_t;

/* Root node: state word plus the indicator flag I on its own cache line. */
typedef struct snzi_root_t
{
    volatile snzi_x_t X;
    volatile __attribute__ ( ( aligned( CCL_SIZE))) atomic_Data_t I;
    char padding [ CCL_SIZE - sizeof( atomic_Data_t) ];
}  __attribute__ ( ( aligned( CCL_SIZE))) snzi_root_t;

/* Inner node: state word plus an uplink toward the root. */
typedef struct snzi_inode_t
{
    volatile snzi_x_t X;
    struct snzi_node_t * parent;
}  __attribute__ ( ( aligned( CCL_SIZE))) snzi_inode_t;

/* Tagged union over the two node roles. */
typedef struct snzi_node_t
{
    union
    {
        snzi_inode_t inode;
        snzi_root_t root;
    } n;
    snzi_node_type_t type;
}  __attribute__ ( ( aligned( CCL_SIZE))) snzi_node_t; 

/* One SNZI tree: per-thread leaf pointers and inner-node storage, sized
 * for the worst case 2^CEIL_LOG2_THREADS_MAX_NUM threads. */
typedef struct snzi_t
{
    int radix;
    int leavesNum;
    int inodesNum;
    int threadsNum;
    struct snzi_root_t * root;
    struct snzi_node_t * leaves [ 1 << CEIL_LOG2_THREADS_MAX_NUM ];
    struct snzi_node_t inodes [ 1 << CEIL_LOG2_THREADS_MAX_NUM ];
} snzi_t;

/* Rotation slots: a barrier cycles through previous/current/next trees. */
typedef enum snzi_order_e
{
    SNZI_ORDER_PREV,
    SNZI_ORDER_CURR,
    SNZI_ORDER_NEXT,
    SNZI_ORDER_NUM
} snzi_order_t;

/* SNZI barrier = one tree per rotation slot (matches SNZI_NUM_PER_BARRIER). */
typedef struct snzi_barrier_t
{
    snzi_t snzi [ SNZI_ORDER_NUM ];
    int barrierId;
} __attribute__ ( ( aligned( CCL_SIZE))) snzi_barrier_t;
#endif /* SNZI_BARRIER */

#define NANOSEC_IN_SEC 1000000000

/**
 * Wall-clock timer info for one experiment stage, based on
 * clock_gettime (needs <time.h>); deltaTime is kept in nanoseconds.
 */
typedef struct exp_Timer_t
{
    clockid_t clockId;
    struct timespec startTime;
    struct timespec stopTime;
    unsigned long long int deltaTime;
} exp_Timer_t;

/**
 * CPU clock-cycle counter info for one experiment stage.
 * Mirrors exp_Timer_t but in raw cycle counts instead of timespec values.
 */
typedef struct exp_ClockCounter_t
{
    unsigned long long int startClock;  /* cycle count at stage start */
    unsigned long long int stopClock;   /* cycle count at stage end */
    unsigned long long int deltaClock;  /* stopClock - startClock */
} exp_ClockCounter_t;

/**
 * Experiment stage: reference (baseline) run vs. measured run.
 */
typedef enum exp_Stage_e
{
    EXP_STAGE_REF,
    EXP_STAGE_EXP,
    EXP_STAGE_NUM
} exp_Stage_e;


/* Buffer results in memory and print after the run, so printing does not
 * perturb the measurements. */
#define DELAYED_PRINT

#ifdef DELAYED_PRINT

/* FIXME Too rough approximation. */
#define EXP_LINES_NUM ((THREADS_MAX_NUM * THREADS_MAX_NUM * EXPERIMENTS_NUM))

/**
 * Experiment table line: one measured configuration and its result.
 */
typedef struct exp_TableLine_t
{
    int threadsNum;
    int radix;
    double timePerBarrier;
} exp_TableLine_t;
#endif

/**
 * Experiment info: the lo/hi/cur triples describe the sweep ranges over
 * experiment repetitions, thread counts, and (for tree-shaped barriers)
 * the tree radix; timers/counters are kept per stage.
 */
typedef struct exp_Info_t
{
    int loExpNum;
    int hiExpNum;
    int curExpNum;
    int loThreadsNum;
    int hiThreadsNum;
    int curThreadsNum;
#if defined ( SNZI_BARRIER) || defined ( TREE_BARRIER)
    int loRadixNum;
    int hiRadixNum;
    int curRadixNum;
#endif
    int loBarNum;
    int hiBarNum;
    exp_Stage_e expStage;
    exp_Timer_t timer [ EXP_STAGE_NUM ];
    exp_ClockCounter_t clockCounter [ EXP_STAGE_NUM ];
    
#ifdef DELAYED_PRINT
    /* next free slot in tableLines */
    int currTableLine;
    exp_TableLine_t tableLines [ EXP_LINES_NUM ];
#endif
} exp_Info_t;

/* Cache-line-sized wrapper around a per-thread sense flag.
 * NOTE(review): combining `packed` with `aligned` here is unusual — with
 * the 4-byte `bool` the layout is the same without `packed`; confirm the
 * attribute pair is intentional. */
typedef struct tls_Sense_t
{
    bool data;
    char padding [ CCL_SIZE - sizeof( bool) ];
} __attribute__ ( ( aligned( CCL_SIZE), packed)) tls_Sense_t;

/* Cache-line-sized wrapper around a per-thread round-parity value. */
typedef struct tls_Parity_t
{
    parity_t data;
    char padding [ CCL_SIZE - sizeof( parity_t) ];
} __attribute__ ( ( aligned( CCL_SIZE))) tls_Parity_t;

/**
 * Thread local data. Field set depends on the selected barrier type.
 * NOTE(review): if DSMN_BARRIER were ever combined with SR_BARRIER or
 * TREE_BARRIER, `sense` would be declared twice — the selection logic
 * above is assumed to keep the *_BARRIER macros mutually exclusive.
 */
typedef struct tls_Data_t
{
#ifdef DSMN_BARRIER
    /* dissemination rounds: own flags, and pointers into partners' flags */
    volatile tls_Sense_t my_flags [ PARITY_NUM ] [ CEIL_LOG2_THREADS_MAX_NUM ] [ BARRIERS_MAX_NUM ];
    volatile tls_Sense_t * partner_flags [ PARITY_NUM ] [ CEIL_LOG2_THREADS_MAX_NUM ] [ BARRIERS_MAX_NUM ];
    tls_Parity_t parity [ BARRIERS_MAX_NUM ];
    tls_Sense_t sense [ BARRIERS_MAX_NUM ];
#endif
#ifdef SNZI_BARRIER
    snzi_t * snzi [ BARRIERS_MAX_NUM ] [ SNZI_ORDER_NUM ];
#endif
#if defined( SR_BARRIER) || defined( TREE_BARRIER)
    tls_Sense_t sense [ BARRIERS_MAX_NUM ];
#endif
#ifdef ITP_BARRIER
    /* PARTICIPANT is declared by the ITP runtime headers */
    PARTICIPANT * participant [ BARRIERS_MAX_NUM ];
#endif
    int threadId;
    exp_Info_t * expInfo;
} __attribute__ ( ( aligned( CCL_SIZE))) tls_Data_t;
#endif /* !BARRIER_H */
