#pragma once

#include <ddblk/apds/combine_int.h>
// #include <os.h>
#include <ucontext.h>
#include <errno.h>
#include <ddblk/apds/delegation/rcl/fqueue.h>
#include <unix/ddblk/futex.h>
#include <sys/mman.h>
#include <unix/coreaff.h>
#define RCL_MAX_SERVER			(1)

#define rcl_alloc(size)			aligned_alloc(HDEF_CACHE_LINE_BYTES, size)
#define rcl_free(ptr, size)		free(ptr)

#define RCL_PROT_NONE			0x0

/* futex() is already provided elsewhere, so the syscall wrapper below stays disabled */
// SAI long
// futex(int *uaddr, int futex_op, uint32_t val,
// 	const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3)
// {
// 	return syscall(SYS_futex, uaddr, futex_op, val,
// 	timeout, uaddr2, val3);
// }


/*
 * Best-effort change of a thread's static scheduling priority.
 * Failures (e.g. insufficient privileges for SCHED_FIFO) are
 * deliberately ignored, exactly as in the original logging stub.
 */
SAI void setprio(pthread_t thread_id, unsigned int prio)
{
	int rc = pthread_setschedprio(thread_id, prio);
	(void)rc; /* best effort: the kernel may refuse the change */
}

/* Per-native-thread pointer to its own descriptor; NULL in plain client
 * threads that never acted as servicing threads. */
static __thread struct native_thread *volatile me; /* (local) pointer to the native thread */

// #define rclprintf(x, args...) fprintf(stderr, args)
/* debug tracing hook — currently compiled out to nothing */
#define rclprintf(args, ...)	
// printf(__VA_ARGS__)

/*
 * Condition-variable wrapper used by the lock library.  Depending on
 * the backend, impl holds either a plain POSIX condition variable or
 * an opaque pointer (the RCL backend stores the fqueue of waiting
 * mini-threads there — see manager_thread's use of impl.data).
 */
typedef struct liblock_cond
{
	struct liblock_lib *lib;   /* owning lock-library instance */
	int has_attr;              /* non-zero when attr below is valid */
	pthread_condattr_t attr;   /* attributes captured at creation time */
	union
	{
		pthread_cond_t posix_cond; /* POSIX backend */
		void *data;                /* backend-specific state (waiter queue) */
	} impl;
} liblock_cond_t;

// #define local_val_compare_and_swap(type, ptr, x, y) __sync_
/*
 * timespec helper shortcuts
 */
/* period between two manager wake-ups: 50 ms */
static const struct timespec manager_timeout = {0, 50000000};
/* ts_lt(ts1, ts2): true iff *ts1 is strictly earlier than *ts2 */
#define ts_lt(ts1, ts2)			\
	(((ts1)->tv_sec < (ts2)->tv_sec) ||	\
	 (((ts1)->tv_sec == (ts2)->tv_sec) && ((ts1)->tv_nsec < (ts2)->tv_nsec)))

/* ts_le(ts1, ts2): true iff *ts1 is earlier than or equal to *ts2 */
#define ts_le(ts1, ts2)			\
	(((ts1)->tv_sec < (ts2)->tv_sec) ||	\
	 (((ts1)->tv_sec == (ts2)->tv_sec) && ((ts1)->tv_nsec <= (ts2)->tv_nsec)))

/*
 * ts_add(res, ts1, ts2): *res = *ts1 + *ts2, normalizing tv_nsec into
 * [0, 1e9).  Uses the integer constant 1000000000L instead of the
 * double literal 1e9 (tv_nsec is integral), and carries when the sum
 * reaches exactly one second — the old `> 1e9` test left
 * tv_nsec == 1000000000, which is not a valid timespec.
 */
#define ts_add(res, ts1, ts2)		\
	(				\
		{			\
			(res)->tv_sec = (ts1)->tv_sec + (ts2)->tv_sec;		\
			(res)->tv_nsec = (ts1)->tv_nsec + (ts2)->tv_nsec;	\
			if ((res)->tv_nsec >= 1000000000L)	\
			{					\
				(res)->tv_nsec -= 1000000000L;	\
				(res)->tv_sec++;		\
			}					\
		})

/*
 * ts_sub(res, ts1, ts2): *res = *ts1 - *ts2, borrowing one second when
 * the nanosecond field would underflow.  Assumes *ts1 >= *ts2.  The
 * borrow uses the integer constant 1000000000L rather than the double
 * literal 1e9, keeping the arithmetic purely integral.
 */
#define ts_sub(res, ts1, ts2)			\
	(					\
		{				\
			(res)->tv_sec = (ts1)->tv_sec - (ts2)->tv_sec;		\
			if ((ts1)->tv_nsec < (ts2)->tv_nsec)			\
			{							\
				(res)->tv_sec--;				\
				(res)->tv_nsec = 1000000000L + (ts1)->tv_nsec - (ts2)->tv_nsec;	\
			}							\
			else							\
				(res)->tv_nsec = (ts1)->tv_nsec - (ts2)->tv_nsec;	\
		})

/*
 * ts_gettimeofday(ts, tz): fill *ts from gettimeofday() (microsecond
 * resolution widened to nanoseconds) and yield gettimeofday()'s return
 * value.  Uses an integer multiplier (1000L) instead of the double
 * literal 1e3 to avoid a pointless long -> double -> long round trip.
 */
#define ts_gettimeofday(ts, tz)				\
	(						\
		{					\
			struct timeval tv;		\
			int r = gettimeofday(&tv, tz);	\
			(ts)->tv_sec = tv.tv_sec;	\
			(ts)->tv_nsec = tv.tv_usec * 1000L;	\
			r;				\
		})

/* Print "sec.nanoseconds"; nanoseconds must be zero-padded to 9 digits
 * (the old "%9.0ld" space-padded them, printing e.g. "1.       5"
 * instead of "1.000000005"). */
#define ts_print(ts) printf("%ld.%09ld", (ts)->tv_sec, (ts)->tv_nsec)

/* round n up to the next multiple of r (r must be a power of two) */
#define r_align(n, r) (((n) + (r)-1) & -(r))
#define cache_align(n) r_align(n, HDEF_CACHE_LINE_BYTES)
/* bytes of padding needed to extend a struct of size n to a full cache line */
#define pad_to_cache_line(n) (cache_align(n) - (n))

#ifndef MAX_THREADS
#define MAX_THREADS (256)
#endif

#ifndef PAGE_SIZE
#define PAGE_SIZE (4096)
#endif

/* SCHED_FIFO priorities of the server's helper threads (manager > servicing > backup) */
#define PRIO_BACKUP 2
#define PRIO_SERVICING 3
#define PRIO_MANAGER 4

// server state
#define SERVER_DOWN 0
#define SERVER_STOPPING 1
#define SERVER_STARTING 2
#define SERVER_UP 3

/* full ucontext stack for mini-threads, small stack for native servicing threads */
#define STACK_SIZE r_align(1024 * 1024, PAGE_SIZE)
#define MINI_STACK_SIZE r_align(64 * 1024, PAGE_SIZE)

/* acquire the server's state-transition mutex */
#define lock_state(server) (	\
	{	\
		/*lock_print(server, "state locking");*/	\
		pthread_mutex_lock(&(server)->lock_state);	\
		/*lock_print(server, "state locked");*/		\
	})

/* release the server's state-transition mutex */
#define unlock_state(server)	\
	do		\
	{		\
		/*lock_print(server, "state unlock");*/		\
		pthread_mutex_unlock(&(server)->lock_state);	\
			\
	} while (0)

/* Bundle of arguments for launching a bound thread (currently only
 * referenced by commented-out code in pthread_create_and_bind). */
struct start_routine
{
	void *(*start_routine)(void *); /* thread entry point */
	word_t core_id;                 /* target core for affinity */
	void *arg;                      /* argument forwarded to start_routine */
	const char *server_type;        /* label, e.g. "rcl" */
};

/*
 * Per-core RCL server descriptor.  Fields are grouped by access
 * pattern and each group is padded to a cache line to avoid false
 * sharing: server-private hot state, client-read-mostly state,
 * blocked-path private state, and cold shared state.
 */
typedef struct server_struct
{
	/* always used by the server in non blocked case, also private in this case */
	int volatile state;					/* state of the server (SERVER_DOWN..SERVER_UP) */
	int volatile alive;					/* true if native thread are able to make progress */
	int volatile timestamp;					/* current timestamp */
	int volatile nb_ready_and_servicing; 			/* number of threads and of mini threads that are pendings */
	char pad0[pad_to_cache_line(4 * sizeof(int))];

	/* always shared (in read) in non blocked case */
	word_t core_id;			  /* core where the server run (read by client) */
	struct _rclnode *requests; /* the request array (read by client) */
	word_t produced_id;        /* next request slot handed out to clients (fetch-and-add) */
	word_t consumed_id;        /* next request slot the server will drain */
	char pad1[pad_to_cache_line(2 * sizeof(void *))]; /* NOTE(review): group now holds 4 words but pad is sized for 2 — confirm */

	/* used in blocked case, private */
	struct fqueue *volatile mini_thread_all;  /* list of all active mini threads               */
	struct native_thread *all_threads;		  /* list all the threads */
	struct fqueue *volatile prepared_threads; /* list of prepared threads */
	int volatile nb_free_threads;			  /* number of servicing threads that are not executing critical sections */
	int wakeup;								  /* futex value */
	struct timespec volatile next_deadline;	  /* next timed wait deadline */
	char pad2[pad_to_cache_line(3 * sizeof(void *) + 2 * sizeof(int) + sizeof(struct timespec))];

	/* not intensive shared accesses */
	void (*volatile callback)();				  /* callback called when the server is ready to handle request */
	struct fqueue *volatile mini_thread_timed;	  /* sorted list of mini threads that have timeout */
	struct fqueue *volatile mini_thread_ready;	  /* list of active mini threads                   */
	struct fqueue *volatile mini_thread_prepared; /* list of sleeping mini threads                 */
	pthread_mutex_t lock_state;					  /* lock for state transition */
	pthread_cond_t cond_state;					  /* condition to wait on state transition */
	int volatile nb_attached_locks;				  /* number of locks attached to this server */

	char pad3[pad_to_cache_line(4 * sizeof(void *) + sizeof(int) + sizeof(pthread_mutex_t) + sizeof(pthread_cond_t))];
#ifdef BASIC_PROFILING_ON
	struct profiling profiling;
	char pad4[pad_to_cache_line(sizeof(struct profiling))];
#endif
} server_t;

/*
 * Cooperative mini-thread: a ucontext plus the bookkeeping links used
 * by the server's ready/timed/all lists.  A mini-thread runs
 * servicing_loop() on its own STACK_SIZE stack.
 */
struct mini_thread
{
	ucontext_t context;				  /* context of the mini thread */
	server_t *server;				  /* server of the mini thread, used for broadcast */
	int volatile is_timed;			  /* true if timed */
	struct timespec deadline;		  /* deadline, only used when the mini-thread is in a timed wait */
	liblock_cond_t *volatile wait_on; /* condition this mini-thread is queued on */
	int volatile wait_res;			  /* result of the wait (timeout or not) */
	struct fqueue ll_ready;           /* link for server->mini_thread_ready */
	struct fqueue ll_timed;           /* link for server->mini_thread_timed */
	struct fqueue ll_all;             /* link for server->mini_thread_all */
	void *stack;                      /* ucontext stack (first page is a guard page) */
};

/*
 * Kernel-level servicing thread owned by a server.  A native thread
 * hosts one mini-thread at a time and may park itself on the
 * is_servicing futex when spare threads exist.
 */
struct native_thread
{
	int volatile timestamp;					  /* has recently run (compared against server->timestamp) */
	struct mini_thread *volatile mini_thread; /* currently associated mini thread */
	server_t *server;						  /* server of the thread */
	pthread_t tid;							  /* thread id */
	int is_servicing;						  /* 1 while servicing; also the futex word used for parking */
	ucontext_t initial_context;				  /* initial context of the thread (exit target of servicing_loop) */
	void *stack;							  /* pointer to the stack */
	struct fqueue ll;						  /* link for server->prepared_threads */
	struct native_thread *all_next;			  /* next thread in server->all_threads */
};


SAI void launch_server(server_t *server, void (*callback)());

/*
 * RCL lock descriptor: the server that executes this lock's critical
 * sections plus a word used to serialize their execution.
 */
typedef struct
{
	server_t *lk_server;      /* server this lock is attached to */
	lkword_t volatile locked; /* 0 = free, 1 = a critical section is being executed */
} _rcl_t;

/*
 * One request slot of a server's request array.  APDSFUTURE_DECL is
 * expected to declare the flag/func/args fields used by __rcl_fetch()
 * and __rcl_wait(); each slot is cache-line aligned to avoid false
 * sharing between client threads.
 */
typedef struct _rclnode
{
	struct
	{
		APDSFUTURE_DECL
		_rcl_t *lock; /* lock this pending request belongs to */
	} ALIGN(HDEF_CACHE_LINE_BYTES);
} _rclnode_t;


/*
 * Allocate and initialize every server descriptor.  For each server,
 * one zeroed region holds the request array followed by the (page
 * aligned) server descriptor itself; the descriptor fields are then
 * set to their pre-launch defaults (state SERVER_DOWN, empty lists).
 */
static void rcl_construct_init(server_t **servers)
{
	for (word_t i = 0; i < RCL_MAX_SERVER; i++)
	{
		size_t requests_bytes = r_align(sizeof(_rclnode_t) * APDS_MAX_THREADS, PAGE_SIZE);
		size_t descr_bytes = r_align(sizeof(server_t), PAGE_SIZE);
		void *region = rcl_alloc(requests_bytes + descr_bytes);
		if (!region) fatal("servers alloc failed\n");
		/* zero the whole region: request flags and descriptor start clean */
		__memset(region, 0, requests_bytes + descr_bytes);

		/* the server descriptor lives right after its request array */
		server_t *sv = (server_t *)((uintptr_t)region + requests_bytes);
		servers[i] = sv;
		sv->core_id = (HDEF_LITTLECORE_OFFST + i + HDEF_LITTLECORE_NR) % HDEF_NATIVE_CPU_NR;

		sv->state = SERVER_DOWN;
		sv->nb_attached_locks = 0;
		sv->nb_free_threads = 0;
		sv->nb_ready_and_servicing = 0;
		sv->requests = (_rclnode_t *)region;

		sv->mini_thread_all = 0;
		sv->mini_thread_timed = 0;
		sv->mini_thread_ready = 0;
		sv->mini_thread_prepared = 0;

		sv->all_threads = 0;
		sv->prepared_threads = 0;

		sv->timestamp = 1;

		pthread_mutex_init(&sv->lock_state, 0);
		pthread_cond_init(&sv->cond_state, 0);
	}

#ifdef ADVANCED_PROFILING_ON
	if (PAPI_is_initialized() == PAPI_NOT_INITED &&
		PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
		fatal("PAPI_library_init");

	PAPI_thread_init((unsigned long (*)(void))pthread_self);
#endif
}

/* Instantiate the generic combining-lock API for the "rcl" backend —
 * presumably expands to the dispatch glue declared in combine_int.h
 * (TODO confirm against that header). */
DECL_COMBINE(rcl)

/*
 * Attach a freshly created lock to its serving core.  The lock starts
 * unlocked and the server's attached-lock counter is bumped atomically.
 */
SAI void __rcl_init(_rcl_t *s, server_t *rcl_server)
{
	s->locked = 0;
	s->lk_server = rcl_server;
	faa(&rcl_server->nb_attached_locks, 1);
}

/*
 * Detach a lock from its server (atomic decrement of the attached-lock
 * counter) and hand the lock's memory back to the caller, who owns it.
 */
SAI void *__rcl_destruct(_rcl_t *s)
{
	server_t *owner = s->lk_server;
	fas(&owner->nb_attached_locks, 1);
	return (void *)s;
}
#define RCL_FLAG_PENDING	(0x1UL)
/*
 * Reserve a request slot on the lock's server and publish a pending
 * critical section (function + arguments).  Returns the slot so the
 * caller can later wait for its completion in __rcl_wait().
 */
SAI _rclnode_t *__rcl_fetch(_rcl_t *s, apdsfunc_t *pfunc, apdsarg_t *args, apdstinfo_t *tinfo){
	PARAM_USED(tinfo);
	server_t *server = s->lk_server;
	/* claim a unique slot index; slots are reused modulo APDS_MAX_THREADS */
	word_t era = faa(&server->produced_id, 1);
	_rclnode_t *node = &server->requests[era % APDS_MAX_THREADS];
	__DEBUG_ASSERT(node->flag != RCL_FLAG_PENDING);
	apds_movargs(&node->args, args);
	node->func = pfunc;
	node->lock = s;
	/* release store: func/args/lock must be visible before the flag is set */
	atm_strel(&node->flag, RCL_FLAG_PENDING);
	// printf("era ok %lu\n", era % APDS_MAX_THREADS);
	return node;
}

/*
 * Wait until the request published in `node` by __rcl_fetch() has been
 * executed by the server, then return its result.
 *
 * NOTE(review): the nested-acquisition branch below begins with
 * abort(), so everything after it is dead code — presumably disabled
 * while debugging; confirm before re-enabling.
 */
SAI apdsret_t __rcl_wait(_rcl_t *s, _rclnode_t *node, apdstinfo_t *tinfo, INFUN(cmbn_wait, waitpolicy)){
	waitcnt_t cnt = -1;
	lkword_t flg_wait = RCL_FLAG_PENDING;
	PARAM_USED(s, tinfo);
	/* fprintf(stderr, "%lu\n", server->core_id); */
	/* guard against nested acquisition from the server core itself */
	server_t *server = s->lk_server;
	if (me && tinfo->cid == server->core_id)
	{
		abort();
		lkword_t lock = 0;
		while (!cas_acq(&s->locked, &lock, 1))
		{ 	/* one of my thread own the lock */
			me->timestamp = server->timestamp;
			cnt = waitpolicy->waitf(&node->flag, &flg_wait, cnt);
		}
		apdsret_t ret = apds_sync_single(node->func, &node->args);

		atm_strel(&s->locked, 0); /* I release the lock */
		return ret;
	}
	/* On first use, move this client thread's affinity away from the
	 * server core so it cannot starve the server. */
	static TLS_INITIAL unsigned int core_binded;
	if (!me && !core_binded){
		/* exclude the serving core from our affinity mask */
		cid_t server_core = (HDEF_LITTLECORE_OFFST + HDEF_NATIVE_CPU_NR - 1) % HDEF_NATIVE_CPU_NR;
		cpu_set_t __mycpuset; __memset(&__mycpuset, 0xff, sizeof(cpu_set_t));
		__CPU_CLR_S(server_core, sizeof(cpu_set_t), &__mycpuset);
		while (0 != try_commit_aff(&__mycpuset)){
		}
		core_binded = 1;
	}

	/* wait (per waitpolicy) until the server clears the pending flag */
	while (atm_ld(&node->flag) == RCL_FLAG_PENDING)
	{
		cnt = waitpolicy->waitf(&node->flag, &flg_wait, cnt);
	}
	/* read barrier: do not read the result before the flag is seen cleared */
	mp_rmb();

	return apds_args_ret(&node->args);
}

/*
 * Public constructor: carve an _rcl_t out of caller-provided memory
 * and attach it to a server (locks are distributed round-robin over
 * RCL_MAX_SERVER servers).  Returns NULL when `size` is too small.
 *
 * NOTE(review): the first caller creates and launches all servers, but
 * nothing stops a concurrent second caller from reading servers[]
 * before rcl_construct_init() completes — confirm lock creation is
 * serialized by the caller.
 */
PARAM_NONULL(1) SAI _rcl_t *_rcl_init(void *ptr, size_t size, INFUN(cmbn_node, nodemgr))
{
	PARAM_USED(nodemgr);
	static word_t rcl_nr = 0;
	static server_t *servers[RCL_MAX_SERVER];
	if (size < sizeof(_rcl_t)) return NULL;
	_rcl_t *s = (_rcl_t *)ptr;
	/* distribute locks evenly across servers */
	word_t cur_rclnr = faa(&rcl_nr, 1);
	if (cur_rclnr == 0){
		/* the first lock is responsible for creating the servers */
		rcl_construct_init(servers);
		for (word_t idx = 0; idx < RCL_MAX_SERVER; idx++){
			launch_server(servers[idx], NULL);
		}
	}
	__rcl_init(s, servers[cur_rclnr % RCL_MAX_SERVER]);
	return s;
}
/* Public destructor: forwards to __rcl_destruct; the node-manager
 * policy is part of the generic signature but unused by this backend. */
PARAM_NONULL(1) SAI void *_rcl_destruct(_rcl_t *s, INFUN(cmbn_node, nodemgr)){
	PARAM_USED(nodemgr);
	return __rcl_destruct(s);
}
/* Public fetch: thin forwarding wrapper around __rcl_fetch; the
 * node-manager policy parameter is unused by this backend. */
PARAM_NONULL(1, 2, 3) SAI _rclnode_t *_rcl_fetch(_rcl_t *s, apdsfunc_t *pfunc, apdsarg_t *args, apdstinfo_t *tinfo, INFUN(cmbn_node, nodemgr)){
	PARAM_USED(nodemgr);
	return __rcl_fetch(s, pfunc, args, tinfo);
}
/* Public wait: forwards to the core wait loop; the node-manager policy
 * is part of the generic combine signature but unused here. */
PARAM_NONULL(1, 2) SAI apdsret_t _rcl_wait(_rcl_t *s, _rclnode_t *node, apdstinfo_t *tinfo, INFUN(cmbn_wait, waitpolicy), INFUN(cmbn_node, nodemgr)){
	PARAM_USED(nodemgr);
	return __rcl_wait(s, node, tinfo, waitpolicy);
}


// static HOT ALWAYS_INLINE word_t rcl_wait(rcl_t *l, rclnode_t *req, apdstls_t *tls,  waitf_t * const pause){
// 	// server_t *server = l->lk_server;
// 	hword_t pcnt = 0;

	
// 	while (atm_ld(&req->func))
// 	{
// 		pcnt = pause(pcnt);
// 	}

// 	return apds_args_ret(&req->args);
// }

// SAI word_t rcl_operate(rcl_t *l, apdsfunc_t *func, apdsarg_t *args, apdstls_t *tls, waitf_t * const pause)
// {
// 	server_t *server = l->lk_server;
// 	// rclnode_t *req = rcl_getnode(l, tls);
// 	//rclprintf(server, "*** sending operation %p::%p for client %d - %p", pending, val, self.id, (void*)pthread_self());
// 	if (me && tls->cached_cid == server->core_id)
// 	{
// 		word_t res;
// 		// abort();
// 		// local_val_compare_and_swap(int, &impl->locked, 0, 1);
// 		word_t lock = 0;
// 		while (cas(&l->locked, &lock, 1))
// 		{ /* one of my thread own the lock */
// 			me->timestamp = server->timestamp;
// 			sched_yield(); /* give a chance to one of our thread to release the lock */
// 		}

//         res = apds_sync_single(func, args);

// 		atm_strel(&l->locked, 0); /* I release the lock */
// 		return res;
// 	}
// 	// 这里要传tid


// 	// req->lock = l;
//     // cac_movargs(&req->args, args);
// 	// atm_strel(&req->func, func);

// 	rclnode_t *req = rcl_future(l, func, args, tls);
// 	return rcl_wait(l, req, tls, pause);

// }

/*
 * mini-threads
 */
/* commut from in to out */
static void servicing_loop();
/* Ordering predicate for the timed list: earlier deadline sorts first. */
MAY_UNUSED static int mini_thread_lt(struct fqueue *ln, struct fqueue *rn)
{
	struct mini_thread *lhs = (struct mini_thread *)ln->content;
	struct mini_thread *rhs = (struct mini_thread *)rn->content;
	return ts_lt(&lhs->deadline, &rhs->deadline);
}

/*
 * Switch the current native thread from mini-thread `in` to `out`.
 * me->mini_thread is updated before the context switch so observers
 * always see the mini-thread that is (about to be) running.
 */
SAI void swap_mini_thread(struct mini_thread *in, struct mini_thread *out)
{
	//rclprintf(in->server, "switching from %p to %p", in, out);
	me->mini_thread = out;
	swapcontext(&in->context, &out->context);
}

/*
 * Allocate a mini-thread with a STACK_SIZE ucontext stack running
 * servicing_loop(), and register it in the server's mini_thread_all
 * list.  Returns NULL when the descriptor allocation fails.
 *
 * NOTE(review): a failed stack allocation aborts while a failed
 * descriptor allocation returns NULL (which callers do not check) —
 * the two failure paths are inconsistent; confirm intended policy.
 */
static struct mini_thread *allocate_mini_thread(server_t *server)
{
	struct mini_thread *res;

	res = rcl_alloc(sizeof(struct mini_thread));
	if (!res) return NULL;
	
	res->stack = rcl_alloc(STACK_SIZE);
	if (!res->stack) abort();
	/* make the lowest page a guard page (RCL_PROT_NONE == no access);
	 * the mprotect() result is ignored */
	mprotect(res->stack, PAGE_SIZE, RCL_PROT_NONE);

	rclprintf(server, "CREATE context %p with stack at %p and size %d", res, res->stack, STACK_SIZE);

	getcontext(&res->context);
	res->server = server;
	res->context.uc_link = 0; /* servicing_loop exits via setcontext, never by returning */
	res->context.uc_stack.ss_sp = res->stack;
	res->context.uc_stack.ss_size = STACK_SIZE;
	res->ll_ready.content = res;
	res->ll_timed.content = res;
	res->ll_all.content = res;

	makecontext(&res->context, (void (*)())servicing_loop, 0);

	fqueue_enqueue(&server->mini_thread_all, &res->ll_all);

	return res;
}

/*
 * Pop the next ready mini-thread, or return 0 when the ready list is
 * empty.  A successful pop decrements nb_ready_and_servicing, since
 * the mini-thread is about to run on this native thread.
 */
static struct mini_thread *get_ready_mini_thread(server_t *server)
{
	struct fqueue *node = fqueue_dequeue(&server->mini_thread_ready);

	if (!node)
		return 0;

	__sync_fetch_and_sub(&server->nb_ready_and_servicing, 1);
	return (struct mini_thread *)node->content;
}

/*
 * Obtain a runnable mini-thread, trying in order: the ready list, the
 * prepared (sleeping) list, and finally a fresh allocation.
 */
static struct mini_thread *get_or_allocate_mini_thread(server_t *server)
{
	struct mini_thread *ready = get_ready_mini_thread(server);

	rclprintf(server, "*** get or allocate mini thread: ready is %p", ready);
	if (ready)
		return ready;

	struct fqueue *prepared = fqueue_dequeue(&server->mini_thread_prepared);
	return prepared ? (struct mini_thread *)prepared->content
	                : allocate_mini_thread(server);
}

/*
 * fqueue_remove() callback: re-inject a mini-thread whose timed wait
 * expired (or whose condition was signalled).  It is first unlinked
 * from the server's timed list, then enqueued on the ready list, and
 * the ready/servicing counter is bumped.
 */
SAI void insert_in_ready_and_remove_from_timed(struct fqueue *node)
{
	struct mini_thread *mini_thread = (struct mini_thread *)node->content;
	server_t *server = mini_thread->server;
	rclprintf(server, "++++      reinjecting mini thread: %p", mini_thread);
	//ll_print("ready", &server->mini_thread_ready);
	fqueue_remove(&server->mini_thread_timed, &mini_thread->ll_timed, 0);
	fqueue_enqueue(&server->mini_thread_ready, &mini_thread->ll_ready);
	__sync_fetch_and_add(&server->nb_ready_and_servicing, 1);
	rclprintf(server, "++++      reinjecting mini thread: %p done", mini_thread);
	//ll_print("ready", &server->mini_thread_ready);
}

/*
 * Create a thread with the given (or default) attributes.  Despite the
 * name, core binding is currently disabled (core_id/server_type are
 * kept only for interface compatibility).
 *
 * Transient EAGAIN failures from pthread_create() are retried after a
 * sched_yield(); any other error is reported and returned.  The old
 * code retried *every* error unconditionally, spinning forever on
 * permanent failures such as EINVAL or EPERM.
 *
 * Returns 0 on success, otherwise the pthread_create() error code.
 */
SAI int pthread_create_and_bind(word_t core_id, const char *server_type, pthread_t *thread,
					const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	PARAM_USED(core_id, server_type);
	pthread_attr_t other_attr;
	int attr_owned = 0; /* true when we initialized other_attr ourselves */
	int res;

	if (attr){
		__memcpy(&other_attr, attr, sizeof(pthread_attr_t));
	}
	else {
		pthread_attr_init(&other_attr);
		attr_owned = 1;
	}

	for (;;)
	{
		res = pthread_create(thread, &other_attr, start_routine, arg);
		if (res == 0)
			break;
		fprintf(stderr, "error: %d\n", res);
		if (res != EAGAIN)
			break;     /* permanent error: give up instead of spinning */
		sched_yield(); /* transient resource shortage: back off, retry */
	}

	/* only destroy an attribute object we created (a bitwise copy of the
	 * caller's attr must not be destroyed twice) */
	if (attr_owned)
		pthread_attr_destroy(&other_attr);

	return res;
}

/*
 * Scheduling slow path of the servicing loop, taken when several
 * mini/native threads coexist.  Either (1) switch to the next ready
 * mini-thread, (2) park this native thread on its is_servicing futex
 * when spare free threads exist, or (3) spin/yield while all threads
 * are busy.  Returns the updated spin counter `time`.
 */
SAI int servicing_loop_slow_path(server_t *server, int time)
{
	struct mini_thread *next = get_ready_mini_thread(server), *cur;

	if (next)
	{
		cur = me->mini_thread;
		/* more than one ready mini threads, activate the next one and put the running one in the prepared list */
		rclprintf(server, "servicing-loop::elect mini-thread: %p (and %p goes to prepared)", next, cur);
		fqueue_enqueue(&server->mini_thread_prepared, &cur->ll_ready);
		//ll_print("prepared", &server->mini_thread_prepared);
		swap_mini_thread(cur, next);
		rclprintf(server, "servicing-loop::mini-thread: %p is up", me->mini_thread);
		time = 0;
	}
	else if (server->nb_free_threads > 0)
	{
		/* more than one free thread, put this thread in the sleeping state (not servicing anymore) */

		// if(local_val_compare_and_swap(int, &me->is_servicing, 1, 0)) {
		int t = 1;
		if (cas(&me->is_servicing, &t, 0))
		{
			//rclprintf(server, "servicing::gona sleep %p %p %p", me, &me->ll, me->ll.content);

			__sync_fetch_and_sub(&server->nb_ready_and_servicing, 1);

			fqueue_enqueue(&server->prepared_threads, &me->ll);

			//rclprintf(server, "servicing-loop::unactivating");
			/* sleep until ensure_at_least_one_free_thread() sets
			 * is_servicing back to 1 and wakes this futex */
			futex(&me->is_servicing, FUTEX_WAIT_PRIVATE, 0, 0, 0, 0);
			//rclprintf(server, "servicing-loop::activating");

			if (!me->is_servicing)
				fatal("inconsistent futex");

			// local_fetch_and_add(&server->nb_free_threads, -1);
			faa(&server->nb_free_threads, -1);
		}

		time = 0;
	}
	else
	{
		/* ok, I have strictly more than one servicing thread and no free threads, this one is free, activate the next */
		//static int z=0; if(!(++z % 200000)) rclprintf(server, "servicing-loop::yield processor");
		// local_fetch_and_add(&server->nb_free_threads, 1);
		faa(&server->nb_free_threads, 1);
		/* after ~1000 unproductive iterations, give the CPU away */
		if (time++ > 1000)
		{
			sched_yield(); /* all the threads are busy */
			time = 0;
		}
		// local_fetch_and_add(&server->nb_free_threads, -1);
		faa(&server->nb_free_threads, -1);
	}

	return time;
}

//
/*
 * Body of every mini-thread: repeatedly drain the pending requests of
 * the server's request array (executing each critical section under
 * its lock's `locked` word), then run the scheduling slow path when
 * other mini/native threads are ready.  Exits by jumping back to the
 * hosting native thread's initial context once the server leaves the
 * STARTING/UP states.
 *
 * NOTE(review): the plain printf() calls below look like debugging
 * leftovers on a hot path — consider routing them through rclprintf.
 * NOTE(review): consumed_id is read at the top of each iteration but
 * never advanced here — confirm who updates it.
 */
static void servicing_loop()
{
	server_t *server = me->server;
	// void (*callback)() = server->callback;

	// if (callback && __sync_val_compare_and_swap(&server->callback, callback, 0) == callback)
	// {
	// 	callback();
	// }

	int time = 0;

	// rclprintf(server, "::: start servicing loop %p", me->mini_thread);
	// fprintf(stderr, "::: start servicing loop %p\n", me->mini_thread);
	lkword_t t;
	do
	{
		/* advertise progress so the manager does not think we hung */
		me->timestamp = server->timestamp;
		server->alive = 1;

		_rclnode_t *request;
		_rclnode_t *last;
		lkword_t pending;
		extern cid_t __max_threads;
		// last = &server->requests[atm_ld(&__max_threads)];
		/* last = &server->requests[MAX_THREADS]; */
		// fprintf(stderr, "::: service max num %lu\n", __max_threads);
		// request=&server->requests[id_manager.first];
		/* snapshot the batch of requests published since last pass */
		word_t era = server->consumed_id;
		word_t endera = atm_ld(&server->produced_id);
		word_t help_cnt = 0;
		word_t sidx = era % APDS_MAX_THREADS;
		word_t eidx = endera % APDS_MAX_THREADS;
		// while (help_cnt < (endera - era)){
		if (endera - era >= (APDS_MAX_THREADS/2)) printf("diff %lu\n", endera - era);

		/* keep sweeping the [era, endera) window until every request
		 * in the batch has been executed */
		while (help_cnt < (endera - era)){
			word_t idx_offst = 0;
			// printf("start helping [%lu, %lu]\n", era, endera);
			
			request = &server->requests[sidx];
			last = &server->requests[eidx];
			do
			{
				APDS_PREFETCH(request + 1, 1, 0);
				pending = atm_ld(&request->flag);
				if (pending)
				{
					_rcl_t *lock = atm_ld(&request->lock);
					APDS_PREFETCH((word_t *)&lock->locked);
					t = 0;
					/* serialize critical sections of the same lock */
					if (cas_acq(&lock->locked, &t, 1))
					{
						// printf("helped %lu\n", era + idx_offst);
						apds_set_ret(&request->args, apds_sync_single(request->func, &request->args));

						/* release store: result visible before flag clears */
						atm_strel(&request->flag, 0);

						atm_st(&lock->locked, 0);
						help_cnt += 1;
					}
					else{
						printf("%lu Cas failed??\n", era + idx_offst);
					}
				}else{
					printf("%lu completed???\n", era+idx_offst);
				}
				idx_offst++;
				request = &server->requests[(era + idx_offst) % APDS_MAX_THREADS];
				__DEBUG_ASSERT(era + idx_offst <= endera);
			} while(request != last);
		}
		__DEBUG_ASSERT(help_cnt == endera - era);
		// request = &server->requests[sidx];
		// last = &server->requests[eidx];
		// word_t idx_offst = 0;
		// do
		// {
		// 	APDS_PREFETCH(request + 1, 1, 0);
		// 	pending = atm_ld(&request->flag);
		// 	if (pending)
		// 	{
		// 		_rcl_t *lock = atm_ld(&request->lock);
		// 		APDS_PREFETCH((word_t *)&lock->locked);
		// 		t = 0;
		// 		if (cas_acq(&lock->locked, &t, 1))
		// 		{
		// 			printf("helped %lu???\n", era + idx_offst);
		// 			apds_set_ret(&request->args, apds_sync_single(request->func, &request->args));

		// 			atm_strel(&request->flag, 0);

		// 			atm_st(&lock->locked, 0);
		// 			// help_cnt += 1;
		// 		}
		// 	}else{
		// 		// printf("%lu completed???\n", era+idx_offst);
		// 	}
		// 	idx_offst++;
		// 	request = &server->requests[(era + idx_offst) % APDS_MAX_THREADS];
		// } while(request != last);
		// }
		// printf("batch over, consumed to %lu\n", endera);
		
		//{ static int n=0; if(!(++n % 500000)) rclprintf(server, "servicing loop is running"); }

		/* other mini/native threads exist: run the scheduling slow path */
		if (server->nb_ready_and_servicing > 1)
		{
			time = servicing_loop_slow_path(server, time);
		}

	} while (server->state >= SERVER_STARTING);

	/* server is stopping: return to the native thread's entry context */
	setcontext(&me->initial_context);
}

// #define BIND_CORE
// body executed by each native servicing thread
/*
 * Entry point of a native servicing thread: adopt a mini-thread, pin
 * this thread's affinity, then jump into the mini-thread's context.
 * Control only comes back (via initial_context) when the server shuts
 * down.
 */
static void *servicing_thread(void *arg)
{
	struct native_thread *native_thread = arg;
	// ASSUME(native_thread);
	server_t *server = native_thread->server;

	rclprintf(server, "start: servicing thread %d", gettid());
	

	// fprintf(stderr, "service\n");
// #ifdef BIND_CORE
// 	__bind_mycore(server->core_id);
// #endif
	me = native_thread;
	me->mini_thread = get_or_allocate_mini_thread(server);

	// why does this need to be a local decrement? (review note kept from original)
	// local_fetch_and_add(&server->nb_free_threads, -1);
	faa(&server->nb_free_threads, -1);
	// server->nb_free_threads -= 1;
	////* cmb(); */

	/* bind to (little-core base - 1) % core count, i.e. a big core */
	/* exclude the little cores */
/* 	cpu_set_t mycpuset;
	__memset(&mycpuset, 0xff, sizeof(cpu_set_t));
	for (word_t idx = 0; idx < HDEF_LITTLECORE_NR; idx++){
		cid_t cid = (HDEF_LITTLECORE_OFFST + idx + HDEF_NATIVE_CPU_NR - 1) % HDEF_NATIVE_CPU_NR;
		__CPU_CLR_S(cid, sizeof(cpu_set_t), &mycpuset);
	}
	while (0 != try_commit_aff(&mycpuset)){ } */
	BIND_CORE( (HDEF_LITTLECORE_OFFST  + HDEF_NATIVE_CPU_NR - 1) % HDEF_NATIVE_CPU_NR);
	/* exclude the little cores */
	// if (HDEF_)
	/* core 7 was verified to always work */
	// BIND_CORE(7);

	if (server->state == SERVER_UP)
	{

		// liblock_on_server_thread_start("rcl", self.id);
		/* save the exit target, then enter the mini-thread; the second
		 * state check guards against a shutdown racing the getcontext */
		getcontext(&me->initial_context);
		if (server->state == SERVER_UP)
			setcontext(&me->mini_thread->context);

		// liblock_on_server_thread_end("rcl", self.id);
	}

	__sync_fetch_and_sub(&server->nb_ready_and_servicing, 1);

	//rclprintf(server, "::: quitting serviving-loop %p", pthread_self());

	return 0;
}

/*
 * Guarantee that at least one free servicing thread exists for
 * `server`.  If none is free, either reactivate a parked thread from
 * prepared_threads (futex wake on is_servicing) or create a brand new
 * native thread running servicing_thread() with SCHED_FIFO priority
 * PRIO_SERVICING.
 *
 * Fixes over the original: the stack allocation is now checked before
 * mprotect() (a NULL stack previously went straight into mprotect and
 * the new thread), and the locally initialized pthread attribute is
 * destroyed after use.
 */
static void ensure_at_least_one_free_thread(server_t *server)
{
	rclprintf(server, "ensure at least");
	if (server->nb_free_threads < 1)
	{
		/* ouch, no more free thread, creates or activates a new one */

		if (server->state >= SERVER_STARTING)
		{
			rclprintf(server, "no more free threads %d", server->nb_free_threads);
			struct fqueue *node = fqueue_dequeue(&server->prepared_threads);
			struct native_thread *elected;

			if (node)
			{
				elected = node->content;
				rclprintf(server, "REACTIVATING servicing thread %p", elected);
			}
			else
			{
				elected = rcl_alloc(sizeof(struct native_thread));
				if (!elected) fatal("elected failed\n");
				elected->ll.content = elected;

				elected->stack = rcl_alloc(MINI_STACK_SIZE);
				if (!elected->stack) fatal("elected stack alloc failed\n");
				/* first page becomes a guard page (RCL_PROT_NONE == PROT_NONE) */
				mprotect(elected->stack, PAGE_SIZE, RCL_PROT_NONE);

				elected->server = server;

				elected->all_next = server->all_threads;
				server->all_threads = elected;

				rclprintf(server, "CREATE a new servicing thread %p with stack at %p (%p %p)", elected, elected->stack, &elected->ll, elected->ll.content);
			}

			elected->timestamp = server->timestamp - 1;
			faa(&server->nb_free_threads, 1);
			faa(&server->nb_ready_and_servicing, 1);
			elected->is_servicing = 1;

			if (node)
				/* parked thread: wake its FUTEX_WAIT on is_servicing */
				futex(&elected->is_servicing, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
			else
			{
				struct sched_param param;
				pthread_attr_t attr;

				param.sched_priority = PRIO_SERVICING;
				pthread_attr_init(&attr);

				// ! setting PTHREAD_EXPLICIT_SCHED may fail without privileges
				// pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
				pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
				pthread_attr_setschedparam(&attr, &param);

				rclprintf(server, "launching the new servicing thread %p", elected);
				pthread_create_and_bind(server->core_id, "rcl", &elected->tid, &attr, servicing_thread, elected);
				/* the attribute object is no longer needed once the thread exists */
				pthread_attr_destroy(&attr);
			}
		}
	}
	//rclprintf(server, "ensure done");
}

/*
 * Wake the manager thread: set the futex word first, then FUTEX_WAKE.
 * The store precedes the wake so the manager's FUTEX_WAIT on
 * wakeup==0 cannot miss the transition.
 */
SAI void wakeup_manager(server_t *server)
{
	server->wakeup = 1;
	futex(&server->wakeup, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
}
// backup thread
MAY_UNUSED static void *backup_thread(void *arg)
{
	server_t *server = (server_t *)arg;

	//rclprintf(server, "start: backup");
	while (server->state >= SERVER_STARTING)
	{
		//rclprintf(server, "+++ backup thread is running");
		server->alive = 0;
		wakeup_manager(server);
	}
	//rclprintf(server, "+++ quitting backup thread %p", pthread_self());

	return 0;
}

// server manager thread
/*
 * Server manager thread: brings the server up (creating the first
 * servicing thread, publishing SERVER_UP), then supervises it — when
 * the liveness flag stays cleared it bumps thread priorities so a
 * stuck SCHED_FIFO servicing thread gets preempted, and it expires
 * timed waits of mini-threads.  On shutdown it unparks and joins every
 * native thread, frees all mini-threads, and publishes SERVER_DOWN.
 *
 * NOTE(review): pthread_setschedparam() returns an error NUMBER, not
 * -1, so the `== -1` check below never fires — failures are silently
 * ignored.  Also, the #ifdef BACKUP sections reference backup_tid/attr
 * whose declarations are commented out above; that configuration would
 * not compile.
 */
SAI void *manager_thread(void *arg)
{
	// pthread_t backup_tid;
	server_t *server = (server_t *)arg;
	struct native_thread *native_thread, *next;
	struct sched_param param;
	// pthread_attr_t attr;
	struct timespec now, deadline;
	int done;

	rclprintf(server, "start: manager");

	// set affinity
	//
	// set exclusive core
	// cpu_set_t cpuset;
	// __memset(&cpuset, 0xFF, sizeof(cpu_set_t));
	// __CPU_CLR_S(APDS_CPU_NR, sizeof(cpu_set_t), &cpuset);
	// pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
	
	server->alive = 1;

	lock_state(server);

	/* make sure that we own the lock when going to FIFO scheduling */
	param.sched_priority = PRIO_MANAGER;

	if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) == -1)
		fatal("pthread_setschedparam failed"); //, strerror(errno));
#ifdef BACKUP
	param.sched_priority = PRIO_BACKUP;
	pthread_attr_init(&attr);
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
	pthread_attr_setschedparam(&attr, &param);

	pthread_create_and_bind(server->core_id, "rcl", &backup_tid, &attr, backup_thread, server);
#endif

	ensure_at_least_one_free_thread(server);

	server->state = SERVER_UP;

	/* wake launch_server() waiting on the state transition */
	pthread_cond_broadcast(&server->cond_state);

	unlock_state(server);

	// #ifndef CONTINUE_MANAGER
	// 	return;
	// #endif
	while (server->state == SERVER_UP)
	{
		rclprintf(server, "manager is working (%d)", server->alive);
		server->wakeup = 0;

		if (!server->alive)
		{
			/* no servicing thread reported progress since last pass:
			 * probably one is stuck in a critical section or in I/O */
			rclprintf(server, "no more alive servicing threads");

			ensure_at_least_one_free_thread(server);

			server->alive = 1;
			done = 0;

			/* round-robin a priority dip over servicing threads until
			 * one stale thread is found and granted the new timestamp */
			while (!done)
			{
				struct native_thread *cur = server->all_threads;
				while (cur)
				{
					if (cur->is_servicing)
					{
						if (done || (cur->timestamp == cur->server->timestamp))
						{
							/* bounce priority to force a reschedule */
							setprio(cur->tid, PRIO_BACKUP);
							setprio(cur->tid, PRIO_SERVICING);
						}
						else
						{
							cur->timestamp = server->timestamp; /* set it here because could be in I/O */
							done = 1;
						}
					}
					cur = cur->all_next;
				}

				if (!done)
					server->timestamp++;
			}
		}
		else
			server->alive = 0;

		ts_gettimeofday(&now, 0);
		ts_add(&deadline, &now, &manager_timeout);

		done = 0;

		//printf("+++++++++++++++++++++++++++++++++++++\n");
		//printf("++++      manager: current time: "); ts_print(&now); printf("\n");
		//printf("++++      manager: initial deadline: "); ts_print(&deadline); printf("\n");
		/* expire timed waits whose deadline passed; otherwise shrink
		 * our own sleep deadline to the earliest pending one */
		while (!done)
		{
			struct fqueue *node = server->mini_thread_timed;
			if (node)
			{
				struct mini_thread *cur = node->content;
				//printf("++++      manager: find waiter: %p\n", cur);
				if (ts_le(&cur->deadline, &now))
				{
					//printf("++++      manager: reinject expired deadline\n");
					fqueue_remove((struct fqueue **)&cur->wait_on->impl.data, &cur->ll_ready, insert_in_ready_and_remove_from_timed);
				}
				else
				{
					struct timespec ddd = cur->deadline;
					if (ts_lt(&ddd, &deadline))
					{
						//printf("++++      manager: change deadline to: "); ts_print(&deadline); printf("\n");
						deadline = ddd;
					}
					done = 1;
				}
			}
			else
				done = 1;
		}
		//printf("++++      manager: next deadline: "); ts_print(&deadline); printf("\n");

		server->next_deadline = deadline;

		ts_sub(&deadline, &deadline, &now);
		//printf("++++      manager: next deadline: "); ts_print(&deadline); printf("\n");

		// rclprintf(server, "manager::sleeping");
		// fprintf(stderr, "?\n");
		/* sleep until woken or until the next timed-wait deadline */
		futex(&server->wakeup, FUTEX_WAIT_PRIVATE, 0, &deadline, 0, 0);
		//rclprintf(server, "manager::sleeping done");
	}

	/* shutdown: unpark every sleeping servicing thread so it can exit */

	for (native_thread = server->all_threads; native_thread; native_thread = native_thread->all_next)
	{
		int state = __sync_lock_test_and_set(&native_thread->is_servicing, 2);

		if (!state)
		{
			// local_fetch_and_add(&server->nb_free_threads, 1);
			faa(&server->nb_free_threads, 1);
			faa(&server->nb_ready_and_servicing, 1);
			futex(&native_thread->is_servicing, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
		}
	}

	//rclprintf(server, "waiting backup");
#ifdef BACKUP
	if (pthread_join(backup_tid, 0) != 0)
		fatal("pthread_join");
#endif

	//rclprintf(server, "waiting servicing threads");

	for (native_thread = server->all_threads; native_thread; native_thread = next)
	{
		next = native_thread->all_next;
		pthread_join(native_thread->tid, 0);
		rcl_free(native_thread->stack, MINI_STACK_SIZE);
		rcl_free(native_thread, sizeof(struct native_thread));
	}

	//rclprintf(server, "freeing mini threads");

	{
		struct fqueue *ll_cur;
		while ((ll_cur = fqueue_dequeue(&server->mini_thread_all)))
		{
			struct mini_thread *cur = (struct mini_thread *)ll_cur->content;
			rcl_free(cur->stack, STACK_SIZE);
			rcl_free(cur, sizeof(struct mini_thread));
		}
	}

	rclprintf(server, "quitting");

	server->mini_thread_timed = 0;
	server->mini_thread_ready = 0;
	server->mini_thread_prepared = 0;

	server->prepared_threads = 0;

	lock_state(server);

	server->state = SERVER_DOWN;
	pthread_cond_broadcast(&server->cond_state);

	unlock_state(server);

	return 0;
}

// construction and destruction
static server_t *servers[RCL_MAX_SERVER]; /* array of server (one per core) */

// launch a server (spawns its manager thread)
/*
 * Start `server` if it is down and block until its manager thread has
 * published SERVER_UP.  Idempotent: returns immediately when the
 * server is already up.
 *
 * lock_state is now held around all state tests and condition waits:
 * pthread_cond_wait() requires the associated mutex to be locked by
 * the calling thread (the old code waited without it, which is
 * undefined behavior and can miss the manager's broadcast).
 */
SAI void launch_server(server_t *server, void (*callback)())
{
	pthread_t tid;

	lock_state(server);

	/* wait out any transient STARTING/STOPPING state */
	while (server->state != SERVER_DOWN && server->state != SERVER_UP)
		pthread_cond_wait(&server->cond_state, &server->lock_state);

	if (server->state == SERVER_UP)
	{
		unlock_state(server);
		return;
	}

	server->callback = callback;

	server->state = SERVER_STARTING;

	pthread_create_and_bind(server->core_id, "rcl", &tid, NULL, manager_thread, server);

	/* the manager broadcasts cond_state once it reaches SERVER_UP;
	 * cond_wait releases the mutex so the manager can acquire it */
	while (server->state == SERVER_STARTING)
		pthread_cond_wait(&server->cond_state, &server->lock_state);

	unlock_state(server);
}

/*
 * Stop `server` if it is up and wait until its manager thread has
 * fully shut it down (state back to SERVER_DOWN).
 *
 * The state mutex is taken here (the original left it commented out
 * with a "should we lock?" note): pthread_cond_wait() requires the
 * associated mutex to be held by the caller, and the wait releases it,
 * so the manager can still acquire lock_state to publish SERVER_DOWN.
 */
SAI void destroy_server(server_t *server)
{
	lock_state(server);

	if (server->state == SERVER_UP)
	{
		server->state = SERVER_STOPPING;
		wakeup_manager(server);

		/* the manager broadcasts cond_state when it reaches SERVER_DOWN */
		while (server->state == SERVER_STOPPING)
			pthread_cond_wait(&server->cond_state, &server->lock_state);
	}

	unlock_state(server);
}

/* Tear down every server in creation order (atexit-style cleanup). */
SAI void force_shutdown()
{
	for (word_t idx = 0; idx != RCL_MAX_SERVER; ++idx)
		destroy_server(servers[idx]);
}

// SAI void rcl_init(rcl_t *l){
// 	static word_t rcl_lockid = 0;
// 	server_t *server = servers[0];
// 	if (faa(&rcl_lockid, 1) == 0)
//     {
// 		rcl_construct_init();
			
// 		server = servers[0];
// 		// ASSUME(server);
//         // fprintf(stderr, "launch rcl server at core 0\n");
//         launch_server(server, NULL);
//     }

// 	l->lk_server = server;
// 	l->locked = 0;
// 	faa(&server->nb_attached_locks, 1);

// }


// SAI rcl_t *rcl_create()
// {
// 	// server_t *server = servers[0];
// 	rcl_t *ret = (rcl_t *)apdsnode_alloc(sizeof(rcl_t));

// 	rcl_init(ret);
// 	// impl->liblock_lock = lock;

// 	// __sync_fetch_and_add(&server->nb_attached_locks, 1);

// 	// liblock_reserve_core_for(core, "rcl");
// 	// 把核心预留给服务器
// 	// cpu_set_t baseset;
// 	// cpu_set_t client_cpuset;
// 	// // __memset(&client_cpuset, 1, sizeof(cpu_set_t));
// 	// // CPU_CLR(0, &client_cpuset);

// 	// pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &baseset);
// 	// // sched_setaffinity(getpid(), sizeof(cpu_set_t), &client_cpuset);
// 	// // if (!self.running_core)
// 	// // {
// 	// // 	CPU_CLR(core->core_id, &baseset);
// 	// // }
// 	// __CPU_CLR_S(0, sizeof(baseset), &baseset);

// 	// pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &baseset);

// 	return ret;
// }

// SAI void rcl_destroy(rcl_t *s)
// {
// 	__memset(s, 0, sizeof(rcl_t));
// 	apdsnode_free(s, sizeof(rcl_t));
// }
