const std = @import("std");
const allocator = std.heap.page_allocator;

const csr = @import("riscv").csr;

const os = @import("os.zig");
const qemu = os.arch.qemu;
const trampoline = os.trap.trampoline;
const KSTACK = os.KSTACK;

const Pte = vms.Pte;
const PhyAddr = vms.PhyAddr;
const PageTable = @import("mem/PageTable.zig").PageTable;

pub const vms = if (os.is_64_bit) os.arch.vms.sv39 else os.arch.vms.sv32;
pub const PGSIZE = vms.PGSIZE;
pub const Page = [PGSIZE]u8;
pub const PageAllocator = @import("mem/PageAllocator.zig");
pub const UserTable = @import("mem/PageTable.zig").User;
pub const VirtAddr = vms.VirtAddr;

/// kernel.ld sets this to end of kernel code.
extern const _text_end: u8;

/// the kernel's page table.
var kpgt: PageTable = undefined;

/// Build the kernel page table: identity-map MMIO devices, kernel
/// text (RX) and data/RAM (RW), map the trampoline page at the top of
/// the virtual address space, and allocate one kernel stack (plus an
/// unmapped guard page) per process.
/// Panics on any failure — the kernel cannot run without paging.
pub fn init() void {
    // `try` needs an error-union return type, so the fallible work lives
    // in a private helper; any error here is fatal at boot.
    initFallible() catch @panic("mem init");
}

/// Fallible body of `init`. Only called once, at boot, before paging is on.
fn initFallible() !void {
    kpgt = try .create();

    // identity-map the MMIO regions QEMU exposes (UART, virtio, PLIC, ...).
    for (qemu.memmap) |map| {
        try kvmmap(map[0], map[1], .{ .readable = true, .writable = true });
    }

    // map kernel text executable and read-only.
    const etext_addr = @intFromPtr(&_text_end);
    try kvmmap(qemu.KERNBASE, etext_addr - qemu.KERNBASE, .{ .readable = true, .executable = true });

    // map kernel data and the physical RAM we'll make use of.
    try kvmmap(etext_addr, qemu.PHYSTOP - etext_addr, .{ .readable = true, .writable = true });

    // map the trampoline for trap entry/exit to the highest virtual address in the kernel.
    const trampoline_addr = @intFromPtr(&trampoline._trampoline);
    try kpgt.map(.trampoline, PGSIZE, @bitCast(trampoline_addr), .{ .readable = true, .executable = true });

    // Allocate pages for each process's kernel stack.
    for (0..os.NPROC) |i| {
        const pages = try allocator.create([KSTACK * PGSIZE]u8);
        @memset(pages, 0);
        const phy_addr: PhyAddr = @bitCast(@intFromPtr(pages));

        // Map it high in memory, just under the trampoline, with an
        // unmapped guard page below each stack to catch overflow.
        const virt_addr: VirtAddr = @bitCast(VirtAddr.TRAMPOLINE - ((i + 1) * (KSTACK + 1) * PGSIZE));
        try kpgt.map(virt_addr, KSTACK * PGSIZE, phy_addr, .{ .readable = true, .writable = true });
    }
}

/// Flush this hart's TLB and order prior page-table memory writes.
fn flush() void {
    // sfence.vma with rs1=zero and rs2=zero invalidates all TLB entries
    // (every virtual address, every address-space identifier).
    asm volatile ("sfence.vma zero, zero");
}

/// Switch the current CPU's h/w page table register to the kernel's page table, and enable paging.
/// Point this hart's satp register at the kernel page table and enable
/// paging. Must be called on every hart after `init` has run once.
pub fn inithart() void {
    // make sure earlier writes to page-table memory have completed
    // before the hardware starts walking those tables.
    flush();

    // install the root page-table PPN and select the paging scheme
    // matching the target's word size.
    csr.satp.set(.{
        .ppn = kpgt.addr.ppn,
        .mode = if (os.is_64_bit) .sv39 else .sv32,
    });

    // discard any stale translations cached before the switch.
    flush();
}

/// add a direct-mapping to the kernel page table. only used when booting.
/// does not flush TLB or enable paging.
/// Install a direct (virtual == physical) mapping of `size` bytes at
/// `addr` in the kernel page table, rounding `size` up to whole pages.
/// Boot-time only: does not flush the TLB or enable paging.
pub fn kvmmap(addr: usize, size: usize, flags: Pte.Flags) !void {
    const va: VirtAddr = @bitCast(addr);
    const pa: PhyAddr = @bitCast(addr);
    const len = pageRoundUp(size);
    try kpgt.map(va, len, pa, flags);
}

/// Round `addr` up to the next page boundary (identity if already aligned).
pub fn pageRoundUp(addr: usize) usize {
    // PGSIZE is a power of two, so align by adding the max offset and
    // clearing the low bits.
    const offset_mask: usize = PGSIZE - 1;
    return (addr + offset_mask) & ~offset_mask;
}

/// Round `addr` down to the containing page boundary.
pub fn pageRoundDown(addr: usize) usize {
    // clearing the low bits drops the in-page offset (PGSIZE is a power of two).
    const offset_mask: usize = PGSIZE - 1;
    return addr & ~offset_mask;
}
