//! Per-process state and process-lifecycle operations (fork/exec/exit/wait).
const Proc = @This();

lock: os.Lock.Spin = .init("proc"),

// p.lock must be held when using these:

/// Process state
state: State = .unused,
/// If non-null, sleeping on chan
chan: ?*anyopaque = null,
/// True if the process has been killed
killed: bool = false,
/// Exit status to be returned to parent's wait
status: i32 = 0,
/// Process ID
pid: pid_t = 0,

// wait_lock must be held when using this:

/// Parent process
parent: ?*Proc = null,

// the following are private to the process, so p.lock need not be held.

/// (NUL-terminated) Process name (debugging)
name: os.lib.Name(32) = .empty,
/// swtch() here to run process
context: Context = .{},
/// Virtual address of kernel stack
kstack: usize = 0,
/// Current directory
cwd: *os.fs.Inode = undefined,
/// Open files, indexed by file descriptor
files: [os.NOFILE]?*os.fs.File = @splat(null),

/// Process execution context (kernel thread or user process)
ctx: Ctx = .{ .kthread = .{} },

/// Process lifecycle states (guarded by p.lock).
const State = enum(u8) {
    unused,
    used,
    sleeping,
    runnable,
    running,
    zombie,
};

/// Discriminator for the two execution-context kinds.
pub const Type = std.meta.Tag(Ctx);
/// Per-process execution context: either an in-kernel thread or a user process.
pub const Ctx = union(enum) { kthread: Kthread, uproc: Uproc };

/// Kernel thread execution context.
pub const Kthread = struct {
    /// Signature of a kernel-thread entry point.
    pub const Fn = *const fn (?*const anyopaque) void;

    /// Thread entry point function
    func: Fn = &idle,
    /// Optional argument passed to the thread function
    arg: ?*const anyopaque = null,

    /// Kernel thread daemon process.
    /// Loops forever reaping child threads via waitAll(), which in turn
    /// sleeps inside wait() when there is nothing to reap.
    fn idle(_: ?*const anyopaque) noreturn {
        // Continuously waits for child threads and sleeps when idle.
        const kthreadd = proc.current().?;
        while (true) {
            kthreadd.waitAll();
        }
    }
};

/// User-process execution context: trapframe, user page table, and image size.
pub const Uproc = struct {
    /// data page for trampoline
    trapframe: *align(os.PGSIZE) TrapFrame,
    /// User page table
    user: UserTable,
    /// Size of process memory (bytes)
    sz: usize = 0,
    /// for trace syscall
    mask: usize = 0,

    /// Allocate a trapframe page and an empty user page table.
    /// On any failure, everything allocated so far is released.
    pub fn create() !Uproc {
        // Allocate a trapframe page.
        const trap_frame = try allocator.create(os.trap.trampoline.TrapFrame);
        errdefer allocator.destroy(trap_frame);
        // allocator is std.heap.page_allocator, so the allocation is
        // page-aligned and this cast holds.
        const trapframe: *align(os.PGSIZE) TrapFrame = @alignCast(trap_frame);

        // An empty user page table.
        const user: UserTable = try .create(trapframe);
        // sz is still 0 at this point, so destroy(0) tears down only the table.
        errdefer user.destroy(0);

        return .{ .trapframe = trapframe, .user = user };
    }

    /// Release the trapframe page and the user page table (unmapping sz bytes).
    pub fn destroy(up: *Uproc) void {
        allocator.destroy(up.trapframe);
        up.user.destroy(up.sz);
    }

    /// Retrieves a raw argument value from the user process's trap frame.
    /// `n` selects saved register a0..a5; any larger index is a compile error.
    pub fn arg(up: *Uproc, comptime n: u8) usize {
        return switch (n) {
            inline 0, 1, 2, 3, 4, 5 => |x| @field(
                up.trapframe.reg,
                std.fmt.comptimePrint("a{}", .{x}),
            ),
            else => @compileError("too many arguments"),
        };
    }

    /// Safely copies a null-terminated string from user space into a provided kernel buffer.
    /// Maps a malformed/unterminated user string to error.InvalidArgument.
    pub fn fetchStr(up: *Uproc, addr: usize, buf: []u8) ![:0]const u8 {
        // NOTE(review): the trailing `0` presumably selects the NUL-terminated
        // copy mode of UserTable.copyin (cf. the `null` passed in Reader.copyin)
        // — confirm against UserTable's declaration.
        return up.user.copyin(buf, .fromInt(addr), 0) catch |err| switch (err) {
            error.InvalidString => return error.InvalidArgument,
            else => |e| return e,
        };
    }
};

/// Serializes wait()/exit() so wakeups of wait()ing parents are not lost,
/// and guards p.parent per the memory model.
/// Lock order: wait_lock must be acquired before any p.lock.
var wait_lock: os.Lock.Spin = .init("wait_lock");

/// Allocate a file descriptor for the given file.
/// Scans the per-process open-file table for the first free slot and
/// returns its index as the new descriptor; errors when the table is full.
pub fn fdalloc(p: *Proc, f: *os.fs.File) error{ProcessFdQuotaExceeded}!os.fs.fd_t {
    for (&p.files, 0..) |*slot, fd| {
        if (slot.* == null) {
            slot.* = f;
            return @intCast(fd);
        }
    }
    return error.ProcessFdQuotaExceeded;
}

/// Create a new process that shares the parent's resources. Used as the common foundation for both
/// fork() and spawn().
/// The child gets dup()'d references to the parent's open files and cwd,
/// a copy of the parent's name, and is adopted as p's child. It is NOT yet
/// runnable; the caller must call run() on it.
fn clone(p: *Proc) !*Proc {
    const new = try proc.alloc(p.ctx);
    // Compile-time assertion that nothing below can fail (no cleanup needed).
    errdefer comptime unreachable;

    // increment reference counts on open file descriptors.
    for (p.files, &new.files) |old_file, *new_file| {
        if (old_file) |of| {
            new_file.* = of.dup();
        }
    }

    new.cwd = p.cwd.dup();
    new.name.set(p.name.get());

    p.adopt(new);
    return new;
}

/// Create a new process, copying the parent. Sets up child kernel stack to return as if from fork()
/// system call.
/// Returns the child's pid in the parent; the child sees a0 == 0.
pub fn fork(p: *Proc) !pid_t {
    const new = try p.clone();
    errdefer {
        // NOTE(review): this error path only marks the slot reusable; the
        // file refs and cwd ref dup()'d inside clone() are not released here
        // (free() doesn't close them either) — confirm this does not leak.
        new.lock.acquire();
        new.state = .unused;
        new.lock.release();
    }

    new.ctx = .{ .uproc = try .create() };
    errdefer new.ctx.uproc.destroy();

    const p_uproc = &p.ctx.uproc;
    const n_uproc = &new.ctx.uproc;

    // Copy user memory from parent to child.
    try p_uproc.user.copy(n_uproc.user, p_uproc.sz);
    // Nothing below may fail (the child is about to become visible to the scheduler).
    errdefer comptime unreachable;

    n_uproc.sz = p_uproc.sz;

    // copy saved user registers.
    n_uproc.trapframe.* = p_uproc.trapframe.*;

    // Cause fork to return 0 in the child.
    n_uproc.trapframe.reg.a0 = 0;

    new.run();

    // Return child's pid in the parent.
    return new.pid;
}

/// Asynchronously create a kernel thread. The thread inherits the parent's
/// open files and cwd (via clone) and won't execute until the scheduler
/// picks it. Panics if a process slot cannot be allocated.
pub fn spawn(p: *Proc, name: []const u8, kthread: Kthread) *Proc {
    const child = p.clone() catch @panic("failed to clone process");
    errdefer comptime unreachable;

    child.ctx = .{ .kthread = kthread };
    child.name.set(name);

    child.run();
    return child;
}

/// Establishes parent-child relationship for thread joining.
/// Must be called WITHOUT holding p.lock: wait_lock is ordered before any
/// p.lock, so taking it here while p.lock is held would invert the lock
/// order and risk deadlock.
pub fn adopt(p: *Proc, child: *Proc) void {
    if (p.lock.holding()) @panic("wait_lock should be acquired first");

    wait_lock.acquire();
    child.parent = p;
    wait_lock.release();
}

/// Marks thread as runnable and ready for scheduling.
/// Takes p.lock briefly so the scheduler observes a consistent state change.
pub fn run(p: *Proc) void {
    p.lock.acquire();
    p.state = .runnable;
    p.lock.release();
}

/// Replace the current process image with the program at `path`.
/// `argv` must hold at least the program name (argv[0]).
/// Returns argc on success (delivered to user main in a0 as the syscall
/// return value); on failure the old image remains in place and intact.
pub fn exec(p: *Proc, path: [*:0]const u8, argv: []const [:0]const u8) !usize {
    std.debug.assert(argv.len >= 1);
    var sz: usize = 0;
    const p_uproc = &p.ctx.uproc;
    // Keep the old image alive until the new one is fully built and committed.
    const old = p_uproc.user;
    const oldsz: usize = p_uproc.sz;

    const new: UserTable = try .create(p_uproc.trapframe);
    errdefer new.destroy(sz);

    // Load program into memory.
    const entry, sz = new.loadProgram(path) catch return error.InvalidExe;

    // Allocate some pages at the next page boundary.
    // Make the first inaccessible as a stack guard. Use the rest as the user stack.
    const PGSIZE = os.PGSIZE;
    sz = mem.pageRoundUp(sz);
    sz = try new.grow(sz, os.USTACK * PGSIZE, .{ .writable = true });
    // clear() presumably revokes user access on the lowest (guard) stack page
    // — TODO confirm against UserTable.clear.
    new.clear(@bitCast(sz - os.USTACK * PGSIZE)) catch unreachable;

    // Push all command line arguments onto the stack.
    // The usable stack excludes the guard page at the bottom.
    var sp = sz;
    const sp_base = sp - (os.USTACK - 1) * PGSIZE;
    sp = try new.pushArgv(sp_base, sp, argv);

    // a0 and a1 contain arguments to user main(argc, argv).
    // argc is returned via the system call return value, which goes in a0.
    p_uproc.trapframe.reg.a1 = sp;

    // Save program name for debugging.
    p.name.set(argv[0]);

    // Commit to the user image. No failure is possible past this point.
    p_uproc.user = new;
    p_uproc.sz = sz;
    p_uproc.trapframe.epc = entry; // sets entry point to ELF entry (usually 0x0)
    p_uproc.trapframe.reg.sp = sp;
    old.destroy(oldsz);

    return argv.len;
}

/// Shrink user memory by n bytes.
/// Fails with InvalidArgument when n exceeds the current image size.
pub fn shrink(p: *Proc, n: usize) !void {
    const up = &p.ctx.uproc;
    if (n > up.sz) return error.InvalidArgument;
    up.sz = up.user.shrink(up.sz, n);
}

/// Switch to scheduler.
/// Must hold only p.lock and have changed p.state. It should be p.interrupt and p.depth == 1,
/// but that would break in the few places where a lock is held but there's no process.
pub fn sched(p: *Proc) void {
    // Sanity checks: interrupts off, p.lock held, state already changed away
    // from .running by the caller.
    if (os.arch.intr.get()) @panic("sched interruptible");
    if (!p.lock.holding()) @panic("missing p.lock");
    if (p.state == .running) @panic("sched running");

    const cpu = Cpu.this();
    // Exactly one lock (p.lock) may be held across the context switch.
    if (cpu.depth != 1) @panic("invalid lock depth");

    // Saves and restores interrupt because interrupt is a property of this kernel thread, not this CPU.
    const enable = cpu.interrupt;
    // Switch from process to scheduler context;
    // scheduler will later switch back when selecting this process to run again
    p.context.swtch(&cpu.context);
    // Back from scheduler - restore interrupt state.
    // Re-fetch the CPU: the process may have been resumed on a different core.
    const new = Cpu.this();
    new.interrupt = enable;
}

/// Give up the CPU for one scheduling round.
/// Marks the process runnable again and enters the scheduler; p.lock is
/// held across the switch (as sched() requires) and released on return.
pub fn yield(p: *Proc) void {
    p.lock.acquire();
    p.state = .runnable;
    p.sched();
    p.lock.release();
}

/// Exit the current process. Does not return.
/// An exited process remains in the zombie state until its parent calls wait().
pub fn exit(p: *Proc, status: i32) noreturn {
    if (p == proc.init_proc) @panic("init exiting");

    // Close all open files.
    var i: usize = 0;
    while (i < p.files.len) : (i += 1) {
        if (p.files[i]) |f| {
            f.close();
            p.files[i] = null;
        }
    }

    // Drop the cwd inode reference inside a filesystem log transaction.
    os.fs.log.begin();
    p.cwd.put();
    os.fs.log.end();
    p.cwd = undefined;

    wait_lock.acquire();

    // Give any children to init.
    p.reparent();

    // Parent might be sleeping in wait().
    // NOTE(review): p.parent.? asserts every exiting process has a parent —
    // confirm all processes except init are adopted at creation.
    proc.wakeup(p.parent.?);

    p.lock.acquire();
    p.status = status;
    p.state = .zombie;

    // release wait_lock after setting state to zombie.
    // ensure parent sees consistent zombie state when it checks.
    wait_lock.release();

    // Jump into the scheduler, never to return.
    p.sched();
    @panic("zombie exit");
}

/// Puts the current process to sleep on the channel `chan`.
/// Releases the condition lock `cond` before sleeping and re-acquires it upon waking.
/// Caller must hold `cond` on entry; on return `cond` is held again and
/// p.lock is not held.
pub fn sleep(p: *Proc, chan: *anyopaque, cond: *os.Lock.Spin) void {
    // Must acquire p.lock in order to change p.state and then call sched.
    // Once we hold p.lock, we can be guaranteed that we won't miss any wakeup (wakeup locks p.lock),
    // so it's okay to release `cond`.
    p.lock.acquire();
    cond.release();

    // Configure sleep state before yielding CPU.
    p.chan = chan;
    p.state = .sleeping;

    // Switch to scheduler context - process will remain asleep until explicitly woken.
    // The process lock is held across this call to maintain synchronization invariants.
    p.sched();

    // Tidy up.
    p.chan = null;

    // Release process lock and re-acquire the original condition lock.
    // This restores the locking state expected by the caller.
    p.lock.release();
    cond.acquire();
}

/// Wait for a child process to exit and return its pid. Return error if this process has no children.
/// If `status` is non-null, the child's exit status is written through it,
/// routed to user or kernel memory depending on the caller's context.
pub fn wait(p: *Proc, status: ?*i32) !pid_t {
    wait_lock.acquire();
    defer wait_lock.release();

    while (true) {
        var havekids: bool = false;

        // Scan the whole process table for children; the `else` branch runs
        // when the scan completes without finding a zombie child.
        for (&proc.all_procs) |*pp| {
            if (pp.parent != p) continue;
            havekids = true;

            // make sure the child isn't still in exit() or swtch().
            pp.lock.acquire();
            defer pp.lock.release();

            if (pp.state != .zombie) continue;
            // Found one.
            const pid = pp.pid;
            if (status) |st| {
                // Copy the status out via a Proc.Writer so it lands in the
                // right address space (user page table vs. kernel memory).
                var w = p.writer(.auto, &.{}, @ptrCast(st));
                w.io_w.writeInt(i32, pp.status, os.native_endian) catch |err| @panic(@errorName(err));
            }

            pp.free();
            return pid;
        } else {
            // No point waiting if we don't have any children.
            if (!havekids or p.isKilled()) return error.NoChildren;

            // Wait for a child to exit. Reacquires wait_lock when awakened.
            // The sleep channel is p itself; exit()/reparent() wake it.
            p.sleep(p, &wait_lock);
        }
    }
}

/// Blocks the current process until all child processes/threads have terminated.
/// Repeatedly reaps children; returns once wait() reports an error
/// (no children remain, or this process was killed).
pub fn waitAll(p: *Proc) void {
    while (p.wait(null)) |_| {} else |_| {}
}

/// free a proc structure and the data hanging from it, including user pages. p.lock must be held.
/// Does NOT close open files or release cwd — on the exit() path those are
/// released before the process becomes a zombie.
pub fn free(p: *Proc) void {
    // Tear down user-mode resources (trapframe + page table), if any.
    switch (p.ctx) {
        .uproc => |*up| up.destroy(),
        else => {},
    }

    // Reset per-process fields so the slot can be reused by proc.alloc().
    p.state = .unused;
    p.chan = null;
    p.killed = false;
    p.status = 0;
    p.pid = 0;
    p.parent = null;
    p.name = .empty;
    p.context = .{};
}

/// Pass p's abandoned children to init — or to kthreadd when p is a kernel
/// thread. Caller must hold wait_lock. The heir is woken for each adopted
/// child so it can reap any that are already zombies.
pub fn reparent(p: *Proc) void {
    const heir = if (p.ctx == .kthread) proc.kthreadd else proc.init_proc;

    for (&proc.all_procs) |*child| {
        if (child.parent != p) continue;
        child.parent = heir;
        proc.wakeup(heir);
    }
}

/// Flag the process as killed (under p.lock).
pub fn setKilled(p: *Proc) void {
    p.lock.acquire();
    p.killed = true;
    p.lock.release();
}

/// Read the killed flag (under p.lock).
pub fn isKilled(p: *Proc) bool {
    p.lock.acquire();
    const was_killed = p.killed;
    p.lock.release();
    return was_killed;
}

/// Controls whether copy operations target user or kernel address space.
/// - `auto`: Automatically detect based on process context
/// - `kernel`: Force kernel address space (bypass user page table)
pub const Direction = enum { auto, kernel };

/// Build a Writer that copies bytes to `dst` in the address space selected
/// by `mode`, buffering through `buffer`.
pub fn writer(p: *Proc, mode: Direction, buffer: []u8, dst: [*]u8) Writer {
    const io_w: Io.Writer = .{
        .vtable = &.{ .drain = Writer.drain },
        .buffer = buffer,
    };
    return .{
        .p = p,
        .mode = mode,
        .dst = dst,
        .io_w = io_w,
    };
}

/// A writer that handles data transfer to either kernel or user address space
/// based on the current process context and specified direction mode.
pub const Writer = struct {
    p: *Proc,
    /// Address-space selection (see Direction).
    mode: Direction,
    /// Destination cursor; advanced as bytes are flushed.
    dst: [*]u8,
    /// Backing std.Io.Writer; its vtable dispatches back to drain().
    io_w: Io.Writer,

    /// Io.Writer vtable hook: recovers the enclosing Writer via
    /// @fieldParentPtr and forwards the buffered data to puts().
    fn drain(io_w: *Io.Writer, data: []const []const u8, splat: usize) !usize {
        const w: *Writer = @alignCast(@fieldParentPtr("io_w", io_w));
        return os.lib.drain(w, puts, io_w, data, splat);
    }

    /// Copy one chunk to the destination and advance the cursor.
    fn puts(w: *Writer, buf: []const u8) Io.Writer.Error!usize {
        copyout(w.p, w.mode, w.dst, buf) catch return error.WriteFailed;
        w.dst += buf.len;
        return buf.len;
    }

    /// copy to either a user address, or kernel address.
    /// Kernel threads (and explicit .kernel mode) use a plain memcpy;
    /// user processes go through the user page table.
    fn copyout(p: *Proc, mode: Direction, dst: [*]u8, src: []const u8) !void {
        if (mode == .kernel or p.ctx == .kthread) {
            @memcpy(dst[0..src.len], src);
        } else {
            try p.ctx.uproc.user.copyout(.fromPtr(dst), src);
        }
    }
};

/// Build a Reader that copies bytes from `src` in the address space selected
/// by `mode`, buffering through `buffer`.
pub fn reader(p: *Proc, mode: Direction, buffer: []u8, src: [*]const u8) Reader {
    const io_r: Io.Reader = .{
        .vtable = &.{ .stream = Reader.stream },
        .buffer = buffer,
        .seek = 0,
        .end = 0,
    };
    return .{
        .p = p,
        .mode = mode,
        .src = src,
        .io_r = io_r,
    };
}

/// A reader that handles data transfer from either kernel or user address space
/// based on the current process context and specified direction mode.
pub const Reader = struct {
    p: *Proc,
    /// Address-space selection (see Direction).
    mode: Direction,
    /// Source cursor; advanced as bytes are consumed.
    src: [*]const u8,
    /// Backing std.Io.Reader; its vtable dispatches back to stream().
    io_r: Io.Reader,

    /// Io.Reader vtable hook: recovers the enclosing Reader via
    /// @fieldParentPtr and streams data out through gets().
    fn stream(io_r: *Io.Reader, io_w: *Io.Writer, limit: Io.Limit) !usize {
        const r: *Reader = @alignCast(@fieldParentPtr("io_r", io_r));
        return os.lib.stream(r, gets, io_w, limit);
    }

    /// Fill `buf` from the source and advance the cursor.
    fn gets(r: *Reader, buf: []u8) Io.Reader.StreamError!usize {
        copyin(r.p, r.mode, buf, r.src) catch return error.ReadFailed;
        r.src += buf.len;
        return buf.len;
    }

    /// copy from either a user address, or kernel address.
    /// Kernel threads (and explicit .kernel mode) use a plain memcpy;
    /// user processes go through the user page table.
    fn copyin(p: *Proc, mode: Direction, dst: []u8, src: [*]const u8) !void {
        if (mode == .kernel or p.ctx == .kthread) {
            @memcpy(dst, src[0..dst.len]);
        } else {
            try p.ctx.uproc.user.copyin(dst, .fromPtr(src), null);
        }
    }
};

const std = @import("std");
const Io = std.Io;
const allocator = std.heap.page_allocator;

const proc = @import("../proc.zig");
const pid_t = proc.pid_t;

const os = @import("../os.zig");
const TrapFrame = os.trap.trampoline.TrapFrame;
const mem = os.mem;
const UserTable = mem.UserTable;

const Cpu = @import("Cpu.zig");
const Context = @import("context.zig").Context;
