#ifndef _TASK_H
#define _TASK_H
#include <stddef.h>

// #include <tr1/unordered_map>

#include "hashtable.h"
#define NO_SET_AFFINITY -1
#define FOLLOW_AFFINITY -2
#define INIT_AFFINITY 0  //-3

#ifdef SEG_BUFF
#define SEG_MASTER 1
#endif

// ---- task type tags -----------------------------------------------------
typedef int task_type;
#define NOT_SET 0
#define STENCIL_TASK 1
#define NOAFFINITY_TASK 2
#define BLOCKING_TASK 3
#define COMPOSITE_TASK 4
#define COMPOSITE_END_TASK 5
#define BARRIER_TASK 6

// ---- address-chunk sizing -----------------------------------------------
#define ADDR_CHUNKINIT 54
#define ADDR_CHUNKMORE 5

// ---- hash table / buffer sizing -----------------------------------------
// Hash table size; earlier experiments tried 65536, 32768 and 8192.
#define SLAVE_TBL_SIZE 16384  // hash table size (65536/4)
#define COLLISION_BUF_SIZE 2048
// Per-slave buffer size; 1024/2048/3072/4096 were tried. For the 3d7p
// kernel on 4 threads, ~3048|4098 seemed best (note kept from original).
#define SLAVE_BUF_SIZE 1024
#define SLAVE_BUF_INITIAL_SIZE 128
#define NEED_SPAWN_SUBSIZE 4096  // capacity of a subgraph's need_spawn_tasks list
#define VECTOR_SUBSIZE   8192    // number of tasks per subgraph
// Fibonacci-hashing multiplier: 2^64 / golden ratio = 0x9E3779B97F4A7C15.
// Fix: the literal exceeds the range of every *signed* integer type, so an
// unsuffixed decimal constant is ill-formed in C99/C11; the ULL suffix makes
// it a well-defined unsigned 64-bit constant with the identical value.
#define HASH_MULT_VALUE 11400714819323198485ULL
#define SUBTOP_SIZE   500

#define BUF_OWNER_MASTER 0  // buffer currently owned by the master core
#define BUF_OWNER_SLAVE 1   // buffer currently owned by a slave core
#ifdef DUE_IDLE
#define HELPER_BUFFER_COUNT 16  // multiple buffers avoid blocking; master and slaves touch different ones
#else
#define HELPER_BUFFER_COUNT 2   // two buffers avoid blocking; master and slaves touch different ones
#endif
#ifdef SUPPORT_PARTITION
#define NO_SET_PART_ID -1
#endif

#ifdef ACEMESH_PARA
#define BUF_SIZE 2048
#define MAXT 10000000
#endif
//

// Lifecycle states of a task. Names suggest: ALLOCATED = created,
// READY = eligible to run, RUNNING = executing. A FREED state was
// considered but is commented out (TODO kept from the original).
enum task_state {
    ALLOCATED = 0,
    READY,
    RUNNING
    //    FREED
};
// Lifecycle states of a DataAccess record. Names suggest a progression
// INITED -> SATISFIED -> USING -> FINISHED -> DELETABLE; the exact
// transition rules live in the .c implementation (not visible here).
enum DA_state
{
    INITED = 0,
    SATISFIED,
    USING,
    FINISHED,
    DELETABLE
};
// Kind of access a task declares on a data address; __packed__ keeps the
// enum at its minimum storage size (matters because DataAccess stores the
// type as an int alongside other aligned fields).
typedef enum __attribute__((__packed__)) {
    NO_ACCESS_TYPE = 0,
    READ_ACCESS_TYPE,
    WRITE_ACCESS_TYPE,
    READWRITE_ACCESS_TYPE,
    CONCURRENT_ACCESS_TYPE,
    COMMUTATIVE_ACCESS_TYPE,
    REDUCTION_ACCESS_TYPE,
    WEAK_ACCESS
} DataAccessType;

// Entry of a task's "bottom map": records the most recently registered
// DataAccess for one data address (see TaskDataAccesses::_subaccessBottomMap).
// Fix: removed the dead, commented-out C++ constructor code and unused
// tuple_rw_access sketch — this is a C header and that code could never
// compile here; the struct layout is unchanged.
typedef struct BottomMapEntry {
    struct DataAccess *_access;  // latest registered access for the mapped address
} BottomMapEntry;
// Hash table entry: maps a data address to its DataAccess record.
// `succ` appears to chain entries on collision (overflow entries come from
// new_slave_table::col_items — confirm against the .c implementation).
typedef struct new_slave_table_item {  // hash table entry
    void * data_addr;                    // key: the data address
    struct DataAccess *data_access;      // value: access record for that address
    struct new_slave_table_item * succ;  // next entry in the collision chain
} new_slave_table_item;

// Fixed-size hash table used for dependency lookup: SLAVE_TBL_SIZE primary
// slots plus a COLLISION_BUF_SIZE pool of overflow entries. col_idx is
// presumably the next free index into col_items — TODO confirm in the .c file.
typedef struct new_slave_table {  // hash table
    int col_idx ;  // allocation cursor into col_items (assumed)
    struct new_slave_table_item items[SLAVE_TBL_SIZE];          // primary slots
    struct new_slave_table_item col_items[COLLISION_BUF_SIZE];  // collision overflow pool
    
} new_slave_table;

#ifdef DISTRUBUTE_HASH
// Distributed hash table: per-domain wrapper around a slave hash table.
// NOTE(review): the guard macro is spelled "DISTRUBUTE_HASH" (sic)
// consistently in this file (see TaskDataAccesses); keep the spelling so
// existing build flags still match.
// Fix: the original typedef was missing both the alias name and the
// terminating ';', which is a hard compile error whenever this macro is
// defined (the typedef would swallow the following struct declaration).
typedef struct domain_hash {
    int domain_id;                 // owning domain id
    struct new_slave_table *hash;  // hash table for this domain
} domain_hash;
#endif
// One declared access of a task on a data address. Records of this type are
// linked through _successor to form per-address chains from which task
// dependencies are derived. The aligned(32) attributes presumably separate
// the flag words onto distinct cache/DMA lines for the master/slave cores —
// TODO confirm against the target architecture.
struct DataAccess {
    // private:
    //! 8-byte fields
    //! The originator of the access
    struct task *_originator;

    //! Next task with an access matching this one
    struct DataAccess *_successor;
    int _type __attribute__((aligned(32)));         // access kind; presumably a DataAccessType value
    int _accessFlags __attribute__((aligned(32)));
    int deleted __attribute__((aligned(32)));
    int top __attribute__((aligned(32)));  // 0: False, 1: True
    int mutex_access __attribute__((aligned(32)));  // lock guarding successor processing
     int countlock __attribute__((aligned(32)));
    
};
// Per-task collection of declared data accesses.
struct TaskDataAccesses {
    //! This will handle the dependencies of nested tasks.
#ifdef DISTRUBUTE_HASH
    struct domain_hash *_subaccessBottomMap;
#else
    struct new_slave_table *_subaccessBottomMap;
#endif
    struct DataAccess *_accessArray;  // this task's access records
    void **_addressArray;             // presumably the addresses paired index-for-index with _accessArray — confirm in .c
    size_t _currentIndex;             // presumably the number of entries used so far — confirm in .c
};

// A schedulable task node in the dependency graph.
struct task {
    int ref_count_t;  // reference count (see slave_ref_count / store_ref_count)
    
    int affinity_id;  // placement hint: NO_SET_AFFINITY / FOLLOW_AFFINITY / INIT_AFFINITY or a core id (assumed)
#if defined(USE_PRIORITY_QUEUE) || defined(LOCAL_MULTI_PRIORI)
    int priority_id;  // scheduling priority (see get/set_priority_id)
#endif
#ifdef REUSE_GRAPH
    int backup_ref;    // saved ref count for graph reuse (store/restore_ref_count)
    int reused;        // see get/set_reused_flag
    int stored;        // see get/set_stored
    int is_base_task;
#endif
#ifdef TEMP_AFFI
    int bak_affinity_id;  // backup of affinity_id — presumably for temporary affinity overrides
#endif
    struct task *next;    // intrusive list link
    struct task *parent;  // spawning task, for nested tasking (see setparent/getparent)
    int _countdownToBeWokenUp __attribute__((aligned(32)));  // for nesting: counts all children
    int _predecessorCount __attribute__((aligned(32)));      // for data-access dependencies
    struct TaskDataAccesses _dataAccesses;
    // Each parent task needs to hold a need_spawn_subtasks list.
    // Need to get back to the task from TaskDataAccesses for instrumentation purposes
   
    int _endExecute;
#ifdef CONCURRENT_CONSTRUCT_GRAPH
    // Earlier design kept finished_lock/ref_lock/edge/state/pre here; only
    // the successor lock and the completion flag survived.
    int successor_lock __attribute__((aligned(32)));  // guards successor updates during concurrent graph construction
    int over __attribute__((aligned(32)));  // 0: False, 1: True
#endif
};
// ---- hash table construction (master / slave variants) ------------------
struct new_slave_table * init_new_slave_table();
struct new_slave_table *slave_init_new_slave_table();
// ---- master-core dependency API -----------------------------------------
// Parent/child bookkeeping for nested tasks.
void setparent(struct task *child_task, struct task *t);

struct task *getparent(struct task *t);
void initprecount(struct task *t);

// Initialize a DataAccess record for `originator` with the given type/flags.
void initialize(struct DataAccess *cur, int type, struct task *originator, int _accessFlags_, int top_);

// Allocate/reset the TaskDataAccesses of `t` for _size accesses.
void initTaskDataAccess(struct task *t, int _size);

struct TaskDataAccesses *getDataAccesses(struct task *t);
struct DataAccess *findAccess(struct task *t, void *address);
void setChild(struct DataAccess *parent, struct DataAccess *child);
void setSuccessor(struct DataAccess *pre, struct DataAccess *successor);
void set_unblock_Successor(struct DataAccess *pre, struct DataAccess *successor);
// Predecessor/child counters; dec_* presumably return the updated count — confirm in .c.
int predec_count(struct task *t);
int dec_predecessorCount(struct task *t);
int child_count(struct task *t);
int dec_childCount(struct task *t);
int inc_predecessorCount(struct task *t);

// ---- slave-core counterparts of the API above ---------------------------
// for slave
void slave_setparent(struct task *child_task, struct task *t);
struct task *slave_getparent(struct task *t);
void slave_initprecount(struct task *t);

void slave_initialize(struct DataAccess *cur, int type, struct task *originator, int _accessFlags_, int top_);

void slave_initTaskDataAccess(struct task *t, int _size);
struct TaskDataAccesses *slave_getDataAccesses(struct task *t);
struct DataAccess *slave_findAccess(struct task *t, void *address);
void slave_setChild(struct DataAccess *parent, struct DataAccess *child);
void slave_setSuccessor(struct DataAccess *pre, struct DataAccess *successor);
void slave_set_unblock_Successor(struct DataAccess *pre, struct DataAccess *successor);
int slave_predec_count(struct task *t);
int slave_dec_predecessorCount(struct task *t);
int slave_child_count(struct task *t);
int slave_dec_childCount(struct task *t);
int slave_inc_predecessorCount(struct task *t);

#ifdef MTEST_LIGHT
// Pairs up to two communication handles/kinds with the task that performs
// the communication — presumably for MPI_Test-style progress polling
// (NOTE(review): inferred from the names; confirm against the caller).
struct mtest_task {
    int *comm_handle1;
    int *comm_handle2;
    int comm_kind1;
    int comm_kind2;
    struct task *comm_task;

    struct mtest_task *next;  // intrusive list link
};
#endif
// ---- task construction and spawning -------------------------------------
// In-place default construction of an already-allocated task.
void task_constructor(struct task *self);
struct task task_constructor_id(int id);

void slave_task_constructor(struct task *self);

void slave_spawn(struct task *t);
void enqueue(struct task *t);

// Copy/derive affinity of `self` from `another_task`.
void adjust_affinity_id(struct task *self, struct task *another_task);
int slave_ref_count(struct task *self);

// ---- DAG reuse helpers (backup/restore of ref counts and flags) ---------
// for dag reuse
#ifdef REUSE_GRAPH
void store_ref_count(struct task *self);
void restore_ref_count(struct task *self);

void set_stored(struct task *self, int flag);
int get_stored(struct task *self);

void set_reused_flag(struct task *self, int flag);
int get_reused_flag(struct task *self);
#endif

#if defined(USE_PRIORITY_QUEUE) || defined(LOCAL_MULTI_PRIORI)
int get_priority_id(struct task *self);
void set_priority_id(struct task *self, int id);
#endif
int slave_is_stolen_task(struct task *self);

// A task with no payload beyond the base bookkeeping; a pointer to it may
// be used wherever a struct task * is expected (task_base is first).
struct empty_task {
    struct task task_base;
};

#endif
