const PageAllocator = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;
const assert = std.debug.assert;

const os = @import("../os.zig");
const Page = os.mem.Page;
const PGSIZE = os.PGSIZE;

/// The public `std.mem.Allocator` interface backed by this page allocator.
/// `ptr` is left undefined because all state lives in the per-CPU `pas`
/// array; every vtable function ignores its context argument.
pub const allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &vtable,
};

/// Dispatch table wiring the standard allocator hooks to the
/// file-local implementations below.
const vtable: Allocator.VTable = .{
    .alloc = alloc,
    .resize = resize,
    .remap = remap,
    .free = free,
};

/// Points to the first usable byte after kernel, aligned to page boundary.
/// Must be mutable for runtime memory management.
extern var heap_start: u8 align(PGSIZE);
/// The heap viewed as an open-ended array of pages beginning at `heap_start`.
const heap: [*]align(PGSIZE) Page = @ptrCast(&heap_start);

/// a singly linked list of memory pages, each node represents one or more contiguous memory pages
/// A singly linked list of memory pages; each node describes one or more
/// contiguous pages and its header is stored inside the first free page itself.
const Run = struct {
    /// points to the next node in the linked list
    next: ?*align(PGSIZE) Run = null,
    /// the number of pages in this node
    len: usize = 1,

    /// Splits a node, consuming `npages` from its front. Returns the remaining node.
    fn fragment(target: Node, npages: usize) ?Node {
        assert(target.len >= npages);
        // Exact fit: the whole node is consumed; hand back its successor.
        if (target.len == npages) return target.next;

        // Place a fresh node header on the first page past the consumed prefix.
        const base: [*]align(PGSIZE) Page = @ptrCast(target);
        const remainder: Node = @ptrCast(&base[npages]);
        remainder.* = .{
            .next = target.next,
            .len = target.len - npages,
        };
        return remainder;
    }

    /// Attempts to coalesce two contiguous page nodes into a single larger node.
    /// Returns the last node in the chain if merging isn't possible (non-contiguous).
    fn coalesce(a: Node, b: ?Node) Node {
        const other = b orelse {
            // Nothing to merge with; `a` terminates the list.
            a.next = null;
            return a;
        };
        if (a.offset(other) == a.len) {
            // `other` begins exactly where `a` ends: absorb it into `a`.
            a.len += other.len;
            a.next = other.next;
            return a;
        }
        // Non-contiguous: just link them and report the later node.
        a.next = other;
        return other;
    }

    /// Calculates the page offset between two memory nodes.
    fn offset(a: OpaqueNode, b: OpaqueNode) usize {
        const lo: usize = @intFromPtr(a);
        const hi: usize = @intFromPtr(b);
        assert(lo <= hi);
        return (hi - lo) / PGSIZE;
    }
};

/// A mutable pointer to a freelist node stored inside a free page.
const Node = *align(PGSIZE) Run;
/// An opaque pointer to a Run node, used only for address comparison and offset calculation.
/// The referenced memory may not be managed by this allocator and its content is inaccessible.
const OpaqueNode = *align(PGSIZE) const Run;

// Per-CPU page allocator instance fields
freelist: ?Node,
lock: os.Lock.Spin,

/// Array of per-CPU page allocators for SMP systems
var pas: [os.NCPU]PageAllocator = undefined;
/// Pages assigned to each CPU's region; set by `init`, possibly raised by
/// `grow`. `owner` divides by it to map an address back to its allocator.
var pages_per_cpu: usize = 0;
/// Backing storage for spinlock names ("pa0", "pa1", ...).
/// NOTE(review): 4 bytes fits at most "pa99" — assumes fewer than 100 CPUs;
/// `bufPrint` in `init` panics (via errdefer) beyond that. Confirm NCPU.
var lock_names: [os.NCPU][4]u8 = undefined;

/// Initializes the page allocator with a page-aligned heap end address.
/// Initializes the page allocator with a page-aligned heap end address.
/// Splits the heap [heap_start, heap_end) into `ncpus` regions of
/// `pages_per_cpu` pages and seeds each per-CPU allocator with one free run.
/// Any setup failure (e.g. a lock-name buffer overflow) panics via `errdefer`.
pub fn init(heap_end: usize) void {
    // Convert `try`-style propagation into a panic: init failures are fatal.
    errdefer |err| @panic(@errorName(err));
    assert(std.mem.isAligned(heap_end, PGSIZE));
    const total_pages = (heap_end - @intFromPtr(&heap[0])) / PGSIZE;
    // divCeil so every page is covered; trailing CPUs may get fewer pages.
    pages_per_cpu = try std.math.divCeil(usize, total_pages, os.proc.ncpus);

    // Initialize a separate page allocator for each CPU
    for (0..os.proc.ncpus) |i| {
        const name = try std.fmt.bufPrint(&lock_names[i], "pa{d}", .{i});

        // With divCeil, trailing CPUs may start past the heap end (e.g. 5
        // pages over 4 CPUs -> 2 per CPU, so CPU 3 would start at page 6).
        // The previous `total_pages - start` underflowed in that case; give
        // such CPUs an empty freelist instead.
        const start = i * pages_per_cpu;
        if (start >= total_pages) {
            pas[i] = .{ .freelist = null, .lock = .init(name) };
            continue;
        }

        // Clamp the final region so it does not run past the heap end.
        const length = @min(pages_per_cpu, total_pages - start);
        const first_node: Node = @ptrCast(&heap[start]);
        first_node.* = .{ .len = length };
        pas[i] = .{ .freelist = first_node, .lock = .init(name) };
        assert(pas[i].freelist.?.len == length);
    }
}

/// Expands the heap by adding new memory regions without affecting existing allocations.
/// Both from and to addresses should be aligned to page boundaries.
/// Expands the heap by adding new memory regions without affecting existing allocations.
/// Both from and to addresses must be aligned to page boundaries, with from <= to.
pub fn grow(from: usize, to: usize) void {
    assert(std.mem.isAligned(from, PGSIZE));
    assert(std.mem.isAligned(to, PGSIZE));
    // Reject an inverted range explicitly rather than letting `to - from`
    // underflow (a confusing safety panic far from the caller's mistake).
    assert(from <= to);

    // Recalculate pages per CPU when heap grows. The grown memory region may contain holes
    // (non-contiguous physical pages), but we treat it as a contiguous block for simplicity.
    // The allocator will handle any fragmentation internally through its freelist management.
    // NOTE(review): raising pages_per_cpu changes the address->CPU mapping that
    // `owner` computes for existing allocations; they will be freed into a
    // different CPU's freelist than they came from. Structurally harmless, but
    // confirm this rebalancing is intended.
    const total_pages = (to - @intFromPtr(&heap[0])) / PGSIZE;
    if (total_pages > pages_per_cpu * os.proc.ncpus) {
        pages_per_cpu = std.math.divCeil(usize, total_pages, os.proc.ncpus) catch unreachable;
    }

    const len: usize = (to - from) / PGSIZE;
    if (len == 0) return;
    const memory: [*]align(PGSIZE) Page = @ptrFromInt(from);

    // Add the new pages to the allocator's freelist by simulating a free operation.
    const bytes = std.mem.sliceAsBytes(memory[0..len]);
    @memset(bytes[0..], undefined);
    free(allocator.ptr, bytes, .fromByteUnits(PGSIZE), @returnAddress());
}

// Exercises the allocator through the standard library's generic allocator
// test harness: basic alloc/free, aligned allocation, alignments larger than
// a pointer, and shrinking of aligned blocks.
test allocator {
    try std.heap.testAllocator(allocator);
    try std.heap.testAllocatorAligned(allocator);
    try std.heap.testAllocatorLargeAlignment(allocator);
    try std.heap.testAllocatorAlignedShrink(allocator);
}

/// Allocator-vtable alloc hook. Tries the current CPU's freelist first, then
/// falls back round-robin to the other CPUs. Returns null when all are empty.
fn alloc(context: *anyopaque, nbytes: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
    _ = ret_addr;
    _ = context;
    assert(nbytes > 0);

    // Pin to a CPU while we read its id and walk the per-CPU allocators.
    const cpu = os.proc.Cpu.push();
    defer cpu.pop();
    const home = os.proc.Cpu.id();

    var attempt: usize = 0;
    while (attempt < os.proc.ncpus) : (attempt += 1) {
        const idx = (home + attempt) % os.proc.ncpus;
        if (pas[idx].map(nbytes, alignment)) |ptr| return ptr;
    }
    return null;
}

/// Allocator-vtable free hook: returns `memory` to the per-CPU allocator
/// that owns its address range.
fn free(context: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void {
    _ = ret_addr;
    _ = context;
    owner(memory).unmap(memory, alignment);
}

/// Allocator-vtable resize hook: attempts an in-place resize via the owning
/// per-CPU allocator. Returns whether the resize succeeded.
fn resize(context: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
    _ = ret_addr;
    _ = context;
    return owner(memory).realloc(memory, alignment, new_len);
}

/// Allocator-vtable remap hook. This allocator never moves a block, so remap
/// succeeds (returning the original pointer) only when an in-place resize does.
fn remap(context: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
    _ = ret_addr;
    _ = context;
    const resized_in_place = owner(memory).realloc(memory, alignment, new_len);
    return if (resized_in_place) memory.ptr else null;
}

/// Returns the PageAllocator responsible for managing the given memory slice.
/// Returns the PageAllocator responsible for managing the given memory slice,
/// computed from the slice's page index within the heap.
fn owner(memory: []u8) *PageAllocator {
    const base = @intFromPtr(&heap[0]);
    const addr = @intFromPtr(memory.ptr);
    assert(std.mem.isAligned(addr, PGSIZE));
    const page_index = (addr - base) / PGSIZE;
    return &pas[page_index / pages_per_cpu];
}

/// Allocates `nbytes` (rounded up to whole pages) from this CPU's freelist
/// using a first-fit scan. Returns null when no free run is large enough.
/// NOTE(review): `alignment` is ignored — results are only PGSIZE-aligned,
/// which assumes callers never require more than page alignment; confirm.
pub fn map(self: *PageAllocator, nbytes: usize, alignment: Alignment) ?[*]u8 {
    _ = alignment;

    // Guard the round-up against overflow, then convert bytes to pages.
    if (nbytes > std.math.maxInt(usize) - PGSIZE) return null;
    const npages = os.mem.pageRoundUp(nbytes) / PGSIZE;

    // Thread-safe freelist access.
    self.lock.acquire();
    defer self.lock.release();

    // First-fit scan, tracking the predecessor so the list can be re-linked.
    var prev: ?Node = null;
    var it = self.freelist;
    while (it) |node| : (it = node.next) {
        if (node.len >= npages) {
            // Consume `npages` from the front of this run and return them.
            self.claim(prev, node, npages);
            return @ptrCast(node);
        }
        prev = node;
    }
    return null;
}

/// Returns `memory` to this allocator's freelist, coalescing with adjacent
/// free runs on both sides. The freelist stays sorted by address.
pub fn unmap(self: *PageAllocator, memory: []u8, alignment: Alignment) void {
    // Alignment is unused: every block handed out by `map` is PGSIZE-aligned.
    _ = alignment;

    // Calculate page-aligned size and page count
    const buf_aligned_len = os.mem.pageRoundUp(memory.len);
    const npages = buf_aligned_len / PGSIZE;

    self.lock.acquire();
    defer self.lock.release();

    // Initialize the freed block's node metadata in full. Setting only `len`
    // (as before) left `next` undefined; the merge branch of the first
    // coalesce then copied that undefined value into `prev.next`. It was
    // always overwritten by the second coalesce, but initializing `next`
    // (to null via the field default) removes the copy of undefined memory.
    var target: Node = @ptrCast(@alignCast(memory.ptr));
    target.* = .{ .len = npages };
    const prev, const next = self.flank(target);

    // Merge with previous block or insert at list head
    if (prev) |p| {
        target = p.coalesce(target);
    } else {
        self.freelist = target;
    }

    // Merge with next contiguous block if available
    _ = target.coalesce(next);
}

/// Resizes `memory` in place to `new_len` bytes; returns true on success.
/// Shrinking always succeeds (excess pages go back to this allocator's
/// freelist); growing succeeds only when a free run immediately follows
/// the block and is large enough.
pub fn realloc(self: *PageAllocator, memory: []u8, alignment: Alignment, new_len: usize) bool {
    const current = os.mem.pageRoundUp(memory.len) / PGSIZE;
    const new_pages = os.mem.pageRoundUp(new_len) / PGSIZE;

    // Requested size matches current allocation
    if (new_pages == current) return true;

    // Shrinking the allocation - release excess pages.
    // Note: unmap acquires the lock itself, so this must happen (and return)
    // before the acquire below — do not reorder.
    if (new_pages < current) {
        const pages: [*]align(PGSIZE) Page = @ptrCast(@alignCast(memory.ptr));
        const bytes = std.mem.sliceAsBytes(pages[0..current][new_pages..]);
        self.unmap(bytes, alignment);
        return true;
    }

    self.lock.acquire();
    defer self.lock.release();

    // Growing the allocation - need adjacent free space.
    // `target` is the live (not-in-freelist) block itself; it is used purely
    // for address arithmetic and never dereferenced (hence OpaqueNode).
    const target: OpaqueNode = @ptrCast(@alignCast(memory.ptr));
    const prev, const next = self.flank(target);
    const pages_needed = new_pages - current;
    if (next) |n| {
        // `n` directly follows our block iff the page distance equals our
        // size. Since `target` is not in the freelist, flank guarantees
        // `prev.next == n`, so claiming from `n` with `prev` as its
        // predecessor keeps the list correctly linked.
        if (target.offset(n) == current and n.len >= pages_needed) {
            self.claim(prev, n, pages_needed);
            return true;
        }
    }

    return false;
}

/// Returns the nodes immediately preceding and following the target address.
/// Returns the nodes immediately preceding and following the target address
/// in the (address-sorted) freelist. Either side may be null.
fn flank(self: *PageAllocator, target: OpaqueNode) struct { ?Node, ?Node } {
    const addr: usize = @intFromPtr(target);

    // Empty list, or target lies before the first node: no predecessor.
    const head = self.freelist orelse return .{ null, null };
    if (addr < @intFromPtr(head)) return .{ null, head };

    // Advance until the successor would pass the target address.
    var prev: Node = head;
    while (prev.next) |candidate| {
        if (addr < @intFromPtr(candidate)) break;
        prev = candidate;
    }
    return .{ prev, prev.next };
}

/// Allocates `npages` from the target node, updating the freelist pointers.
/// The target node is consumed from the front, and the remainder is re-linked.
/// Allocates `npages` from the target node, updating the freelist pointers.
/// The target node is consumed from the front, and the remainder is re-linked.
fn claim(self: *PageAllocator, prev: ?Node, target: Node, npages: usize) void {
    const remainder = target.fragment(npages);
    // Re-link through the predecessor's next pointer, or the list head.
    const link = if (prev) |p| &p.next else &self.freelist;
    link.* = remainder;
}
