const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = mem.Allocator;
const Alignment = std.mem.Alignment;

// Extern bindings to the C heap implementation provided by umalloc.c.
// Only these three entry points are needed by the allocators below.
const c = struct {
    /// Releases a block previously returned by `malloc`.
    /// NOTE(review): assumed to tolerate `null` like libc free — confirm against umalloc.c.
    extern fn free(ptr: ?*anyopaque) void;
    /// Allocates `nbytes` bytes; returns `null` on failure.
    extern fn malloc(nbytes: usize) ?*anyopaque;
    /// Reports the number of usable bytes in the block at `ptr`,
    /// which may exceed the size originally requested.
    extern fn malloc_usable_size(ptr: ?*anyopaque) usize;
};

/// General-purpose `Allocator` backed by the extern `malloc`/`free` above.
/// Supports arbitrary alignment by over-allocating and stashing the original
/// malloc pointer in a header just before the aligned address (see `CAllocator`).
/// Stateless, so `ptr` is never dereferenced and may be `undefined`.
pub const c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &CAllocator.vtable,
};

// Vtable implementation behind `c_allocator`. This is essentially the
// standard library's CAllocator, reimplemented on top of the extern
// malloc bindings above. Alignment is honored by over-allocating and
// recording the raw malloc pointer in a usize-sized header immediately
// before the address handed to the caller.
const CAllocator = struct {
    const vtable: Allocator.VTable = .{
        .alloc = alloc,
        .resize = resize,
        .remap = remap,
        .free = free,
    };

    /// Allocate `len` bytes at the requested alignment. Returns `null`
    /// on out-of-memory. `len` must be nonzero per the Allocator contract.
    fn alloc(
        _: *anyopaque,
        len: usize,
        alignment: Alignment,
        _: usize,
    ) ?[*]u8 {
        assert(len > 0);
        return alignedAlloc(len, alignment);
    }

    /// In-place resize. Shrinking always succeeds; growing succeeds only
    /// when malloc already gave us enough usable bytes past the request.
    fn resize(
        _: *anyopaque,
        memory: []u8,
        _: Alignment,
        new_len: usize,
        _: usize,
    ) bool {
        if (new_len <= memory.len) return true;
        return new_len <= alignedAllocSize(memory.ptr);
    }

    /// Remap without moving: realloc could relocate the block to an
    /// address that violates the original alignment, so only the
    /// in-place `resize` path is offered.
    fn remap(
        ctx: *anyopaque,
        memory: []u8,
        alignment: Alignment,
        new_len: usize,
        ret_addr: usize,
    ) ?[*]u8 {
        if (resize(ctx, memory, alignment, new_len, ret_addr)) return memory.ptr;
        return null;
    }

    /// Release a block previously produced by `alloc`.
    fn free(
        _: *anyopaque,
        memory: []u8,
        _: Alignment,
        _: usize,
    ) void {
        alignedFree(memory.ptr);
    }

    /// The raw malloc pointer is stashed in the usize-sized slot directly
    /// before the aligned user address; this returns that slot.
    fn getHeader(ptr: [*]u8) *[*]u8 {
        return @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize));
    }

    /// Thin wrapper around plain malloc: over-allocate so that an address
    /// satisfying `alignment` exists with room for the header before it,
    /// then record the raw pointer in that header for `free`/`resize`.
    fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 {
        const align_bytes = alignment.toByteUnits();

        // Worst case we need (align_bytes - 1) padding bytes plus the
        // usize header in addition to the payload itself.
        const raw: [*]u8 = @ptrCast(c.malloc(len + align_bytes - 1 + @sizeOf(usize)) orelse return null);
        const raw_addr = @intFromPtr(raw);

        // Skip past the header, then round up to the requested alignment.
        const user_addr = mem.alignForward(usize, raw_addr + @sizeOf(usize), align_bytes);
        const user_ptr = raw + (user_addr - raw_addr);
        getHeader(user_ptr).* = raw;

        return user_ptr;
    }

    /// Recover the raw malloc pointer from the header and free it.
    fn alignedFree(ptr: [*]u8) void {
        c.free(getHeader(ptr).*);
    }

    /// Usable payload size of an aligned block: whatever malloc reports
    /// for the raw block, minus the header-plus-padding prefix.
    fn alignedAllocSize(ptr: [*]u8) usize {
        const raw = getHeader(ptr).*;
        const prefix = @intFromPtr(ptr) - @intFromPtr(raw);
        return c.malloc_usable_size(raw) - prefix;
    }
};

/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`, so
/// resize/remap always fail and there is no per-allocation header overhead.
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator` for example and is more optimal in such a case
/// than `c_allocator`.
pub const raw_c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &RawCAllocator.vtable,
};

// Vtable implementation behind `raw_c_allocator`: the thinnest possible
// shim over malloc/free. Resize and remap are unsupported.
const RawCAllocator = struct {
    const vtable: Allocator.VTable = .{
        .alloc = alloc,
        .resize = Allocator.noResize,
        .remap = Allocator.noRemap,
        .free = free,
    };

    /// Forward the request straight to `malloc`. Only alignments at or
    /// below `@alignOf(std.c.max_align_t)` are permitted (asserted).
    fn alloc(
        _: *anyopaque,
        nbytes: usize,
        alignment: Alignment,
        _: usize,
    ) ?[*]u8 {
        assert(alignment.compare(.lte, comptime .fromByteUnits(@alignOf(std.c.max_align_t))));
        // The result is deliberately not align-casted to max_align_t:
        // for small requests malloc may legally return weaker alignment.
        // E.g. with a 16-byte max_align_t, an 8-byte request has no C
        // type of size 8 with 16-byte alignment, so the pointer may be
        // only 8-byte aligned; a 1-byte request may be 1-byte aligned.
        return @ptrCast(c.malloc(nbytes));
    }

    /// Hand the block straight back to `free`.
    fn free(
        _: *anyopaque,
        memory: []u8,
        _: Alignment,
        _: usize,
    ) void {
        c.free(memory.ptr);
    }
};
