const std = @import("std");
const allocator = std.heap.page_allocator;
const assert = std.debug.assert;

const os = @import("../os.zig");
const fs = os.fs;
const trampoline = os.trap.trampoline;

const mem = @import("../mem.zig");
const vms = mem.vms;
const PGSIZE = vms.PGSIZE;
const Page = mem.Page;
const Pte = vms.Pte;
const PhyAddr = vms.PhyAddr;
const VirtAddr = vms.VirtAddr;

pub const PageTable = packed union {
    /// All union views share a single machine word: the page-aligned address of a
    /// page-table page. This view sees it as one page worth of PTEs.
    ptes: *[vms.ptes]Pte,

    // converts to other types
    /// Same word viewed as the raw backing page (used for allocation/free).
    page: *align(PGSIZE) Page,
    /// Same word viewed as a physical address (used to fill a parent PTE).
    addr: PhyAddr,

    // The union must stay exactly pointer-sized: it only reinterprets one word,
    // it never adds storage.
    comptime {
        assert(@bitSizeOf(PageTable) == @bitSizeOf(usize));
    }

    /// Allocate one zeroed page to serve as an empty page-table page.
    /// Caller owns it and must eventually release it via `destroy`.
    pub fn create() !PageTable {
        const page = try allocator.create(Page);
        @memset(page, 0);

        return .{ .page = @alignCast(page) };
    }

    /// Recursively free page-table pages. All leaf mappings must already have been removed.
    pub fn destroy(self: PageTable) void {
        // there are 2^9 = 512 PTEs in a page table.
        for (self.ptes) |*pte| {
            if (!pte.flags.valid) continue;
            // A valid PTE with any of R/W/X set is a leaf mapping, which the
            // caller was required to unmap before calling destroy.
            if (pte.flags.readable or pte.flags.writable or pte.flags.executable) {
                @panic("destroy: leaf");
            }

            // Valid but no R/W/X: this PTE points at a lower-level page-table
            // page; free that subtree first, then clear the entry.
            const child: PageTable = .fromPhyAddr(pte.getPhyAddr(0));
            child.destroy();
            pte.* = .{};
        }

        allocator.destroy(self.page);
    }

    /// Reinterpret a physical address as a page table (no allocation; the
    /// address must point at a valid page-table page).
    fn fromPhyAddr(addr: PhyAddr) PageTable {
        return .{ .addr = @bitCast(addr) };
    }

    /// Return the address of the PTE in page table pagetable that corresponds to virtual address virt.
    /// If alloc==true, create any required page-table pages.
    /// Errors: NotMapped (lookup hit an invalid entry with is_alloc=false),
    /// Mapped (is_alloc=true but the leaf PTE is already valid), or an
    /// allocation failure from `create`.
    fn walk(self: PageTable, virt: VirtAddr, comptime is_alloc: bool) !*Pte {
        const addr: usize = @bitCast(virt);
        if (addr >= VirtAddr.MAXVA) @panic("walk");

        var pt = self;
        const vpns = virt.vpn();

        // Descend every level except the last; each virtual-page-number field
        // indexes one level of the radix tree.
        for (vpns[0 .. vpns.len - 1]) |index| {
            const pte = &pt.ptes[index];

            if (pte.flags.valid) {
                pt = .fromPhyAddr(pte.getPhyAddr(0));
            } else {
                if (!is_alloc) return error.NotMapped;
                // Install a freshly allocated intermediate page-table page,
                // then mark the parent entry valid.
                pt = try .create();
                pte.ppn = pt.addr.ppn;
                pte.flags.valid = true;
            }
        }

        // Leaf level: when allocating, an already-valid entry is a remap;
        // when only looking up, an invalid entry means "not mapped".
        const pte = &pt.ptes[vpns[vpns.len - 1]];
        if (is_alloc and pte.flags.valid) return error.Mapped;
        if (!is_alloc and !pte.flags.valid) return error.NotMapped;
        return pte;
    }

    /// Create PTEs for virtual addresses starting at virt that refer to physical addresses starting at
    /// pa. va and size MUST be page-aligned. Returns error if walk() couldn't allocate a needed
    /// page-table page.
    pub fn map(self: PageTable, virt: VirtAddr, size: usize, phy: PhyAddr, flags: Pte.Flags) error{OutOfMemory}!void {
        assert(size > 0);
        const virt_addr: usize = @bitCast(virt);
        if (!std.mem.isAligned(virt_addr, PGSIZE) or !std.mem.isAligned(size, PGSIZE))
            @panic("mappages: not aligned");

        // `last` is the first byte of the final page, so the loop below maps
        // size/PGSIZE pages inclusive of both endpoints.
        const last = virt_addr + (size - PGSIZE);
        var va = virt_addr;
        var pa: usize = @bitCast(phy);

        while (true) {
            // walk(is_alloc=true) only fails with Mapped (remap, fatal) or OOM.
            const pte = self.walk(@bitCast(va), true) catch |err| switch (err) {
                error.Mapped => @panic("mappages: remap"),
                else => |e| return e,
            };
            const phy_addr: PhyAddr = @bitCast(pa);
            pte.ppn = phy_addr.ppn;
            pte.flags = flags;
            pte.flags.valid = true;

            if (va == last) break;
            va += PGSIZE;
            pa += PGSIZE;
        }
    }

    /// Remove npages of mappings starting from virt. virt must be page-aligned. It's OK if the mappings
    /// don't exist. Optionally free the physical memory.
    pub fn unmap(self: PageTable, virt: VirtAddr, npages: usize, do_free: bool) void {
        assert(npages >= 1);
        const start: usize = @bitCast(virt);
        assert(std.mem.isAligned(start, PGSIZE));

        var addr: vms.Addr = start;
        const end: vms.Addr = addr + npages * PGSIZE;

        while (addr < end) : (addr += PGSIZE) {
            // was leaf page table allocated?
            const pte = self.walk(.fromInt(addr), false) catch continue;
            // A PTE with *only* valid set (no R/W/X) is an intermediate table
            // pointer, not a leaf mapping — unmapping it here would be a bug.
            if (pte.flags == Pte.Flags{ .valid = true }) @panic("uvmunmap: not a leaf");

            if (do_free) {
                // Reuse the union's page view to free the mapped physical page.
                const pt: PageTable = .fromPhyAddr(pte.getPhyAddr(0));
                allocator.destroy(pt.page);
            }
            pte.* = .{};
        }
    }
};

/// User PageTable
/// User PageTable
pub const User = packed struct(usize) {
    pt: PageTable,

    /// Create a user page table for a given process, with no user memory, but with trampoline and
    /// trapframe pages. Caller must call `destroy` when done.
    pub fn create(tf: *trampoline.TrapFrame) !User {
        // An empty page table.
        const pt: PageTable = try .create();
        errdefer pt.destroy();

        // map the trampoline code (for system call return) at the highest user virtual address. only the
        // supervisor uses it, on the way to/from user space, so not PTE_U.
        try pt.map(.trampoline, PGSIZE, @bitCast(@intFromPtr(&trampoline._trampoline)), .{
            .readable = true,
            .executable = true,
        });
        errdefer pt.unmap(.trampoline, 1, false);

        // map the trapframe page just below the trampoline page
        try pt.map(.trapframe, PGSIZE, @bitCast(@intFromPtr(tf)), .{
            .readable = true,
            .writable = true,
        });
        errdefer pt.unmap(.trapframe, 1, false);

        return .{ .pt = pt };
    }

    /// Free a process's page table, and free the physical memory it refers to.
    /// `size` is the process image size in bytes (need not be page-aligned).
    pub fn destroy(self: User, size: usize) void {
        // trampoline/trapframe are shared or owned elsewhere: unmap, don't free.
        self.pt.unmap(.trampoline, 1, false);
        self.pt.unmap(.trapframe, 1, false);
        if (size > 0) self.pt.unmap(.zero, mem.pageRoundUp(size) / PGSIZE, true);
        self.pt.destroy();
    }

    /// Look up a virtual address, return the physical address, or error if not mapped. Can only be used
    /// to look up user pages.
    /// `check` lists permission flags that must all be set on the PTE (in
    /// addition to the user bit, which is always required).
    fn translate(self: User, virt: VirtAddr, check: Pte.Flags) error{ NotMapped, InvalidAddress }!PhyAddr {
        const addr: usize = @bitCast(virt);
        if (addr >= VirtAddr.MAXVA) return error.InvalidAddress;
        const pte = try self.pt.walk(virt, false);

        // Every required flag (plus user) must be present on the mapping.
        const flags = check.join(.{ .user = true });
        if (!flags.subsetOf(pte.flags)) return error.InvalidAddress;

        return pte.getPhyAddr(virt.offset);
    }

    /// Translates virtual to physical address with lazy allocation. Automatically maps unmapped pages
    /// instead of failing.
    pub fn lazyTranslate(self: User, virt: usize, check: Pte.Flags) error{ OutOfMemory, InvalidAddress }!PhyAddr {
        return self.translate(@bitCast(virt), check) catch |err| switch (err) {
            error.NotMapped => {
                try self.mapLazyPage(virt);
                // The page was just mapped, so a second translate cannot fail.
                return self.translate(@bitCast(virt), check) catch |e| @panic(@errorName(e));
            },
            else => |e| return e,
        };
    }

    /// Handles page faults by lazy-mapping unmapped pages. Used in page fault trap handler. Fails if page
    /// already mapped.
    pub fn pageFault(self: User, virt: usize) error{ OutOfMemory, InvalidAddress, Mapped }!void {
        _ = self.translate(@bitCast(virt), .{}) catch |err| switch (err) {
            error.NotMapped => return self.mapLazyPage(virt),
            else => |e| return e,
        };
        // translate succeeded: faulting on an already-mapped page is an error.
        return error.Mapped;
    }

    /// Allocates and maps user memory if process references a lazily allocated page. Returns error if virtual
    /// address exceeds process size or allocation fails.
    fn mapLazyPage(self: User, virt: usize) error{ OutOfMemory, InvalidAddress }!void {
        const p = os.proc.current().?;
        // Addresses beyond the declared process size are never lazily mapped.
        if (virt >= p.ctx.uproc.sz) return error.InvalidAddress;

        const old = mem.pageRoundDown(virt);
        _ = try self.grow(old, PGSIZE, .{ .writable = true });
    }

    /// Allocate PTEs and physical memory to grow process from oldsz by increase, which need not be page
    /// aligned. Returns new size or error.OutOfMemory.
    pub fn grow(self: User, oldsz: usize, increase: usize, flags: Pte.Flags) error{OutOfMemory}!usize {
        // User pages are always readable and user-accessible.
        const perm = flags.join(.{ .readable = true, .user = true });

        if (increase == 0) return oldsz;
        const newsz: usize = oldsz + increase;
        var addr = mem.pageRoundUp(oldsz);

        while (addr < newsz) : (addr += PGSIZE) {
            // On failure, roll back every page mapped by earlier iterations
            // (shrink back down to oldsz).
            errdefer _ = self.shrink(addr, addr - oldsz);

            const page = try allocator.create(Page);
            // create() pairs with destroy() (single-item pointer), not free().
            errdefer allocator.destroy(page);
            @memset(page, 0);

            try self.pt.map(@bitCast(addr), PGSIZE, @bitCast(@intFromPtr(page)), perm);
        }
        return newsz;
    }

    /// Deallocate user pages to shrink process size from oldsz by decrease. oldsz and newsz need not be
    /// page-aligned. oldsz can be larger than the actual process size. Returns the new process size.
    pub fn shrink(self: User, oldsz: usize, decrease: usize) usize {
        if (decrease == 0) return oldsz;
        // Refuse to shrink below zero; callers may pass an over-large decrease.
        if (oldsz < decrease) return oldsz;
        const newsz: usize = oldsz - decrease;

        // Only whole pages past the new rounded-up size are unmapped and freed.
        const old = mem.pageRoundUp(oldsz);
        const new = mem.pageRoundUp(newsz);
        if (new < old) {
            const npages = (old - new) / PGSIZE;
            self.pt.unmap(@bitCast(new), npages, true);
        }

        return newsz;
    }

    /// Given a parent process's page table, copy its memory into a child's page table. Copies both the
    /// page table and the physical memory.
    pub fn copy(old: User, new: User, size: usize) error{OutOfMemory}!void {
        var i: usize = 0;
        while (i < size) : (i += PGSIZE) {
            // page table hasn't been allocated (lazily-allocated hole): skip.
            const pte = old.pt.walk(@bitCast(i), false) catch continue;
            // Roll back pages copied so far. Guard i > 0: unmap asserts
            // npages >= 1, and nothing is mapped yet if the first page fails.
            errdefer if (i > 0) new.pt.unmap(.zero, i / PGSIZE, true);

            const page = try allocator.create(Page);
            // create() pairs with destroy(), not free().
            errdefer allocator.destroy(page);

            // Copy the parent's physical page contents into the child's page.
            const pt: PageTable = .fromPhyAddr(pte.getPhyAddr(0));
            @memcpy(page, pt.page);

            try new.pt.map(@bitCast(i), PGSIZE, @bitCast(@intFromPtr(page)), pte.flags);
        }
    }

    /// mark a PTE invalid for user access. used by exec for the user stack guard page.
    pub fn clear(self: User, virt: VirtAddr) error{NotMapped}!void {
        const pte = try self.pt.walk(virt, false);
        pte.flags.user = false;
    }

    /// Copy from kernel to user. Copy bytes from source to virtual address dest in a given page table.
    pub fn copyout(self: User, dest: VirtAddr, source: []const u8) error{ OutOfMemory, InvalidAddress }!void {
        var n: usize = 0;
        var virt_addr: usize = @bitCast(dest);

        while (n < source.len) {
            // forbid copyout over read-only user text pages.
            const phy_addr = try self.lazyTranslate(virt_addr, .{ .writable = true });
            const ptr: [*]u8 = phy_addr.toPtr();

            // Copy at most up to the end of the current page.
            const addr = mem.pageRoundDown(virt_addr);
            const nbytes = @min(PGSIZE - (virt_addr - addr), source.len - n);
            @memcpy(ptr[0..nbytes], source[n..][0..nbytes]);

            n += nbytes;
            virt_addr = addr + PGSIZE;
        }
    }

    /// Copy from user to kernel. Copy bytes to dest from virtual address source in a given page table
    /// with optional sentinel termination. With a sentinel, returns the slice of
    /// dest up to (and excluding) the sentinel, or error.InvalidString if dest
    /// fills up before the sentinel is seen.
    pub fn copyin(self: User, dest: []u8, source: VirtAddr, comptime sentinel: ?u8) !if (sentinel) |s| [:s]const u8 else void {
        var n: usize = 0;
        var virt_addr: usize = @bitCast(source);
        var got: bool = false;

        while (n < dest.len) {
            const phy_addr = try self.lazyTranslate(virt_addr, .{});
            const ptr: [*]u8 = phy_addr.toPtr();

            // Copy at most up to the end of the current page.
            const addr = mem.pageRoundDown(virt_addr);
            var nbytes = @min(PGSIZE - (virt_addr - addr), dest.len - n);

            if (sentinel) |s| {
                // Stop at the sentinel; include it so dest holds the terminator.
                if (std.mem.indexOfScalar(u8, ptr[0..nbytes], s)) |i| {
                    got = true;
                    nbytes = i + 1;
                }
            }
            @memcpy(dest[n..][0..nbytes], ptr[0..nbytes]);

            n += nbytes;
            if (sentinel) |s| if (got) return dest[0 .. n - 1 :s];
            virt_addr = addr + PGSIZE;
        } else if (sentinel != null) return error.InvalidString;
    }

    /// Load an ELF program segment into pagetable at virtual address va. va must be page-aligned and the
    /// pages from va to va+sz must already be mapped.
    fn loadSegment(self: User, va: usize, inode: *os.fs.Inode, offset: usize, sz: usize) !void {
        const p = os.proc.current().?;
        var i: usize = 0;
        while (i < sz) : (i += PGSIZE) {
            // Segment pages were mapped by grow(), so translate cannot miss.
            const pa = try self.translate(@bitCast(va + i), .{});
            const addr: usize = @bitCast(pa);
            const n = @min(sz - i, PGSIZE);

            var writer = p.writer(.kernel, &.{}, @ptrFromInt(addr));
            const r = try inode.read(&writer.io_w, @intCast(offset + i), n);
            if (r != n) return error.LoadSegmentFailed;
        }
    }

    /// Loads an ELF executable into memory and returns its entry point and memory size.
    pub fn loadProgram(self: User, path: [*:0]const u8) !struct { usize, usize } {
        const ElfHdr = if (os.is_64_bit) std.elf.Elf64_Ehdr else std.elf.Elf32_Ehdr;
        const ProgHdr = if (os.is_64_bit) std.elf.Elf64_Phdr else std.elf.Elf32_Phdr;

        fs.log.begin();
        defer fs.log.end();

        // Open the executable file.
        const inode = try fs.path.getinode(path);
        defer inode.put();

        inode.lock();
        defer inode.unlock();

        // Read the ELF header.
        const elf = try inode.readStruct(ElfHdr, 0);

        // Is this really an ELF file?
        if (!std.mem.eql(u8, elf.e_ident[0..std.elf.MAGIC.len], std.elf.MAGIC))
            return error.BadElfMagic;

        // Load program into memory.
        var sz: usize = 0;
        var off = elf.e_phoff;
        for (0..elf.e_phnum) |_| {
            // On failure, release everything grown so far.
            errdefer _ = self.shrink(sz, sz);

            const ph = try inode.readStruct(ProgHdr, @intCast(off));
            // Advance to the next program header *before* any `continue`:
            // otherwise a non-PT_LOAD header would be re-read on every
            // remaining iteration and later headers never processed.
            off += @sizeOf(ProgHdr);

            if (ph.p_type != std.elf.PT_LOAD) continue;
            if (ph.p_memsz < ph.p_filesz) return error.InvalidMemSize;
            if (ph.p_vaddr + ph.p_memsz < ph.p_vaddr) return error.InvalidVaddr;
            if (!std.mem.isAligned(ph.p_vaddr, PGSIZE)) return error.UnalignedSegment;

            // Grow only when the segment extends the image; this also avoids
            // usize underflow in `increase` for out-of-order segments
            // (mirrors xv6 uvmalloc's no-op when newsz <= oldsz).
            if (ph.p_vaddr + ph.p_memsz > sz) {
                sz = try self.grow(sz, ph.p_vaddr + ph.p_memsz - sz, .{
                    // ELF PF_X = 0x1, PF_W = 0x2.
                    .executable = ph.p_flags & 0x1 != 0,
                    .writable = ph.p_flags & 0x2 != 0,
                });
            }

            try self.loadSegment(ph.p_vaddr, inode, ph.p_offset, ph.p_filesz);
        }

        return .{ elf.e_entry, sz };
    }

    /// Pushes a single argument onto the user stack with proper alignment.
    /// Returns the new stack pointer, or error.SystemResources if the push
    /// would underflow past `base`.
    fn pushArg(self: User, base: usize, stack: usize, arg: []const u8) !usize {
        var sp = stack - arg.len;
        sp -= sp % 16; // riscv sp must be 16-byte aligned
        if (sp < base) return error.SystemResources;
        try self.copyout(@bitCast(sp), arg);
        return sp;
    }

    /// Builds the complete argument vector on the user stack for process execution. Pushes each
    /// argument string followed by the argv pointer array. Returns the final stack pointer position.
    pub fn pushArgv(self: User, base: usize, stack: usize, argv: []const [:0]const u8) !usize {
        var sp = stack;
        // Pointer array with a trailing null terminator (the sentinel).
        const arg_list = try allocator.allocSentinel(usize, argv.len, 0);
        defer allocator.free(arg_list);

        // Copy argument strings into new stack, remember their addresses in arg_list.
        for (argv, 0..) |arg, i| {
            // arg.len + 1 includes the string's NUL terminator.
            sp = try self.pushArg(base, sp, arg[0 .. arg.len + 1]);
            arg_list[i] = sp;
        }

        // push a copy of arg_list, the array of argv[] pointers (including the
        // null terminator). @ptrCast recomputes the slice length in bytes.
        return self.pushArg(base, sp, @ptrCast(arg_list[0 .. arg_list.len + 1]));
    }
};
