#pragma once


#include <linux/rseq.h>
#include <sys/rseq.h>
typedef __PTRDIFF_TYPE__ ptrdiff_t;

/*
 * Extended rseq critical-section descriptor.
 *
 * The leading anonymous struct mirrors the kernel UAPI struct rseq_cs
 * (version, flags, start_ip, post_commit_offset, abort_ip) and keeps its
 * 32-byte alignment; the fields after it are extensions consumed by this
 * project's lock/checkpoint machinery.
 */
typedef struct
{
	struct
	{
		__u32 version;
		/* enum rseq_cs_flags */
		__u32 flags;
		__u64 start_ip;
		/* Offset from start_ip. */
		__u64 post_commit_offset;
		__u64 abort_ip;
	} __attribute__((aligned(4 * sizeof(__u64))));
	/* Extension: pointer to the lock word to reset on restart. */
	__u64 rst_plock;
	/* Pointer to context-save storage — is it necessary? Could be
	 * optimized away by saving to the stack instead. */
	/* __u64 pctx_tosave; */
	/* Value to store through rst_plock to release the lock. */
	__u64 rst_plock_val;
	/* Pointer to the saved-context pointer. */
	/* A non-zero request state means the context must be saved. */
	union
	{
		__u64 rst_pctx;
		__u64 rst_pstate;
	};
	/* Restart ip and sp; the original values may possibly be reused. */
	__u64 restart_sp;
} rseq_cs_ext_t;
/*
 * struct rseq is aligned on 4 * 8 bytes to ensure it is always
 * contained within a single cache-line.
 *
 * A single struct rseq per thread is allowed.
 */
/*
 * Extended view of the per-thread rseq area. The first four members
 * (cpu_id_start, cpu_id, rseq_cs, flags) mirror the kernel UAPI
 * struct rseq; the trailing union overlays the upstream padding word
 * so this struct stays layout-compatible with what the kernel expects.
 */
struct rseq_ext
{
	/*
	 * Restartable sequences cpu_id_start field. Updated by the
	 * kernel. Read by user-space with single-copy atomicity
	 * semantics. This field should only be read by the thread which
	 * registered this data structure. Aligned on 32-bit. Always
	 * contains a value in the range of possible CPUs, although the
	 * value may not be the actual current CPU (e.g. if rseq is not
	 * initialized). This CPU number value should always be compared
	 * against the value of the cpu_id field before performing a rseq
	 * commit or returning a value read from a data structure indexed
	 * using the cpu_id_start value.
	 */
	__u32 cpu_id_start;
	/*
	 * Restartable sequences cpu_id field. Updated by the kernel.
	 * Read by user-space with single-copy atomicity semantics. This
	 * field should only be read by the thread which registered this
	 * data structure. Aligned on 32-bit. Values
	 * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
	 * have a special semantic: the former means "rseq uninitialized",
	 * and latter means "rseq initialization failed". This value is
	 * meant to be read within rseq critical sections and compared
	 * with the cpu_id_start value previously read, before performing
	 * the commit instruction, or read and compared with the
	 * cpu_id_start value before returning a value loaded from a data
	 * structure indexed using the cpu_id_start value.
	 */
	__u32 cpu_id;
	/*
	 * Restartable sequences rseq_cs field.
	 *
	 * Contains NULL when no critical section is active for the current
	 * thread, or holds a pointer to the currently active struct rseq_cs.
	 *
	 * Updated by user-space, which sets the address of the currently
	 * active rseq_cs at the beginning of assembly instruction sequence
	 * block, and set to NULL by the kernel when it restarts an assembly
	 * instruction sequence block, as well as when the kernel detects that
	 * it is preempting or delivering a signal outside of the range
	 * targeted by the rseq_cs. Also needs to be set to NULL by user-space
	 * before reclaiming memory that contains the targeted struct rseq_cs.
	 *
	 * Read and set by the kernel. Set by user-space with single-copy
	 * atomicity semantics. This field should only be updated by the
	 * thread which registered this data structure. Aligned on 64-bit.
	 *
	 * 32-bit architectures should update the low order bits of the
	 * rseq_cs field, leaving the high order bits initialized to 0.
	 */
	__u64 rseq_cs;

	/*
	 * Restartable sequences flags field.
	 *
	 * This field should only be updated by the thread which
	 * registered this data structure. Read by the kernel.
	 * Mainly used for single-stepping through rseq critical sections
	 * with debuggers.
	 *
	 * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
	 *	 Inhibit instruction sequence block restart on preemption
	 *	 for this thread.
	 * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
	 *	 Inhibit instruction sequence block restart on signal
	 *	 delivery for this thread.
	 * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
	 *	 Inhibit instruction sequence block restart on migration for
	 *	 this thread.
	 */
	__u32 flags;

	/*
	 * Overlay of the upstream padding word. ext_mode is this
	 * project's extension-mode marker (rseqext_setready() stores the
	 * 0xdeadbeef magic here through the pad alias).
	 */
	union
	{
		__u32 pad;
		__u32 ext_mode;
	};
} __attribute__((aligned(4 * sizeof(__u64))));

#ifndef RSEQ_SIG
#define RSEQ_SIG 0x53053053
#endif

extern const ptrdiff_t __rseq_offset;

/*
 * Locate this thread's rseq area.
 *
 * glibc registers the rseq area inside the TCB and exports its distance
 * from the thread pointer as __rseq_offset; adding that offset to the
 * thread pointer yields the per-thread struct.
 *
 * Fixes vs. the previous version:
 *  - `(void)` parameter list: in C, `()` declares a function with
 *    unspecified arguments, not a zero-argument one.
 *  - pointer arithmetic is done on `char *` instead of round-tripping
 *    through __u64, which is cleaner and correct on every ABI.
 *
 * Returns: pointer to the calling thread's rseq area, viewed as
 * struct rseq_ext. Only meaningful once rseq registration succeeded.
 */
static inline __attribute__((always_inline)) struct rseq_ext *get_rseqext(void)
{
	return (struct rseq_ext *)((char *)__builtin_thread_pointer() + __rseq_offset);
}

/*
 * Read the kernel-maintained cpu_id field from the thread's rseq area.
 * The value is a hint: the thread can migrate right after the load, so
 * callers must re-validate inside a critical section before committing.
 */
static inline __attribute__((always_inline)) unsigned int rseqext_get_cpuhint(struct rseq_ext *p)
{
	unsigned int cpu_hint = p->cpu_id;

	return cpu_hint;
}

/*
 * Fill in the critical-section range of an extended descriptor.
 *
 * The protected range is [lock_func, lock_func + lock_func_len); on
 * abort, control resumes at jmp_back with the stack pointer restored
 * from restart_sp.
 */
static inline __attribute__((always_inline)) void rseqext_initcsext(rseq_cs_ext_t *t, void (*jmp_back)(void), void *lock_func, unsigned long lock_func_len, void *restart_sp)
{
	t->start_ip = (__u64)lock_func;
	t->post_commit_offset = (__u64)lock_func_len;
	t->abort_ip = (__u64)jmp_back;
	t->restart_sp = (__u64)restart_sp;
}
/*
 * Fill in the lock/checkpoint extension fields of a descriptor:
 * the lock word to reset (rst_plock), the value it is reset to
 * (always 0 here), and the saved-state pointer (rst_pstate).
 * ctx_area is accepted for interface symmetry but currently unused.
 */
static inline __attribute__((always_inline)) void rseqext_assignlock(rseq_cs_ext_t *t, void *rlock_ptr, void *ctx_area, void *pctx_area)
{
	(void)ctx_area; /* reserved; context is tracked via rst_pstate */

	t->rst_pstate = (__u64)pctx_area;
	t->rst_plock = (__u64)rlock_ptr;
	t->rst_plock_val = 0;
}
/*
 * One-stop initialisation of an extended descriptor: set up both the
 * critical-section range and the lock/checkpoint extension fields.
 * The two helpers write disjoint members, so their order is irrelevant.
 */
static inline __attribute__((always_inline)) void rseqext_regchkpt(rseq_cs_ext_t *t, void *rlock_ptr, void *ctx_area, void *pctx_area, void (*jmp_back)(void), void *lock_func, unsigned long lock_func_len, void *restart_sp)
{
	rseqext_initcsext(t, jmp_back, lock_func, lock_func_len, restart_sp);
	rseqext_assignlock(t, rlock_ptr, ctx_area, pctx_area);
}
/*
 * Mark the extension area as armed by writing a magic marker into the
 * padding/ext_mode word of the rseq area.
 */
static inline __attribute__((always_inline)) void rseqext_setready(struct rseq_ext *rseqp)
{
	const __u32 ready_marker = 0xdeadbeef;

	rseqp->pad = ready_marker;
}
/*
 * Publish an extended critical-section descriptor in the thread's rseq
 * area. Per the rseq contract, rseq_cs is written by user-space with
 * single-copy atomicity and only by the owning thread, so a plain
 * 64-bit store is sufficient here.
 */
static inline __attribute__((always_inline)) void rseqext_setchkpt(struct rseq_ext *rseqp, rseq_cs_ext_t *t)
{
	__u64 descriptor_addr = (__u64)t;

	rseqp->rseq_cs = descriptor_addr;
}