//! driver for qemu's virtio disk device.
//! uses qemu's mmio interface to virtio.
//!
//! qemu ... -drive file=fs.img,if=none,format=raw,id=x0 -device virtio-blk-device,drive=x0,bus=virtio-mmio-bus.0

/// feature bits for block device
///
/// backed by the device's 32-bit feature register (the padding fields sum
/// with the flags to exactly 32 bits). the padding fields (@"0-4" etc.) pin
/// each named flag to its spec-assigned bit position.
const Feature = packed struct(virtio.Reg.Int) {
    @"0-4": u5 = 0,
    /// bit 5: device is read-only (VIRTIO_BLK_F_RO)
    ro: bool = false,

    @"6": u1 = 0,
    /// bit 7: legacy scsi command passthrough (VIRTIO_BLK_F_SCSI)
    scsi: bool = false,

    @"8-10": u3 = 0,
    /// bit 11: writeback cache mode is configurable (VIRTIO_BLK_F_CONFIG_WCE)
    config_wce: bool = false,
    /// bit 12: device supports more than one virtqueue (VIRTIO_BLK_F_MQ)
    mq: bool = false,

    @"13-26": u14 = 0,
    /// bit 27: device accepts arbitrary descriptor layouts (VIRTIO_F_ANY_LAYOUT)
    any_layout: bool = false,
    /// bit 28: driver may use indirect descriptors (VIRTIO_F_RING_INDIRECT_DESC)
    indirect_desc: bool = false,
    /// bit 29: used_event/avail_event fields are honored (VIRTIO_F_RING_EVENT_IDX)
    event_idx: bool = false,

    _: u2 = 0,
};

// 5.2.4 Device configuration layout
/// device configuration space, read field-by-field via disk.vdev.get().
/// only the leading fields of the spec's virtio_blk_config are declared;
/// init() reads `capacity` (expressed in 512-byte sectors).
const Config = extern struct {
    capacity: u64,
    size_max: u32,
    seg_max: u32,
};

/// the format of the first descriptor in a disk request. to be followed by two more descriptors
/// containing the block, and a one-byte status.
pub const Request = extern struct {
    /// which operation to perform; see Type below.
    type: Type,
    /// kept zero (readWrite() writes 0 here); part of the spec's header layout.
    reserved: u32 = 0,
    /// the offset (multiplied by 512) where the read or write is to occur
    sector: u64,

    /// these are specific to virtio block devices, e.g. disks, described in Section 5.2 of the spec.
    pub const Type = enum(u32) {
        /// read the disk
        read = 0,
        /// write the disk
        write = 1,
        /// flush any volatile write cache (VIRTIO_BLK_T_FLUSH)
        flush = 4,
        /// return the device id string (VIRTIO_BLK_T_GET_ID)
        get_id = 8,
        get_lifetime = 10,
        discard = 11,
        write_zeroes = 13,
        erase = 14,
    };
};

/// this many virtio descriptors. must be a power of two.
pub const NUM: usize = 8;

comptime {
    // enforce the power-of-two requirement stated above at compile time;
    // the ring index arithmetic (`% NUM` with wrapping counters) relies on it.
    assert(std.math.isPowerOfTwo(NUM));
}

/// all driver state: the device handle, the virtqueue shared with the
/// device (registered via setQueue() in init()), and our book-keeping
/// arrays. `lock` guards everything in here.
const Disk = struct {
    pub const Queue = virtio.Queue(NUM);

    vdev: virtio.Device(Feature),
    queue: *Queue,

    // our own book-keeping
    // all NUM descriptors start out unused.
    /// is a descriptor free?
    free: [NUM]bool = @splat(true),

    /// track info about in-flight operations, for use when completion interrupt arrives. indexed by
    /// first descriptor index of chain.
    /// `status` is primed to 0xff by readWrite(); the device writes 0 into it on success.
    info: [NUM]struct { buf: ?*Buffer = null, status: u8 = 0 } = @splat(.{}),

    /// disk command headers. one-for-one with descriptors, for convenience.
    ops: [NUM]Request = undefined,

    lock: os.Lock.Spin = .init("disk"),
};

/// the single global disk instance; remains `undefined` until init() runs.
var disk: Disk = undefined;

/// initialize the virtio disk device behind `reg`. panics on any failure,
/// since the kernel cannot run without its disk.
pub fn init(reg: virtio.Mmio) void {
    // `try` requires an error-union return type, so the fallible body lives
    // in initInner(); this wrapper keeps init()'s error-free signature.
    initInner(reg) catch @panic("virtio disk init failed");
}

/// fallible body of init(): allocate the virtqueue, negotiate features,
/// read the capacity, and hand the queue to the device.
fn initInner(reg: virtio.Mmio) !void {
    const queue: *Disk.Queue = try .create(allocator);
    errdefer queue.destroy(allocator);

    disk = .{
        .vdev = .{ .reg = reg },
        .queue = queue,
    };

    // presumably (wanted, declined): we require no optional features and
    // decline all the listed block/ring features — TODO confirm against
    // Device.negotiate()'s parameter order.
    try disk.vdev.negotiate(.{}, .{
        .ro = true,
        .scsi = true,
        .config_wce = true,
        .mq = true,
        .any_layout = true,
        .indirect_desc = true,
        .event_idx = true,
    });
    defer disk.vdev.finish();

    // compile-time proof that nothing below can fail (no more `try`).
    errdefer comptime unreachable;

    // 5.2.4 Device configuration layout
    // The capacity of the device (expressed in 512-byte sectors)
    const capacity = disk.vdev.get(Config, "capacity");
    log.debug("capacity {} KiB", .{capacity / 2});

    disk.vdev.setQueue(NUM, queue);
}

/// completion interrupt handler: drain finished requests from the used
/// ring and wake the processes sleeping in readWrite().
pub fn intr() void {
    disk.lock.acquire();
    defer disk.lock.release();

    // the device won't raise another interrupt until we tell it we've seen this interrupt, which the
    // following line does. this may race with the device writing new entries to the "used" ring, in
    // which case we may process the new completion entries in this interrupt, and have nothing to do in
    // the next interrupt, which is harmless.
    disk.vdev.ack();

    fence();

    // the device increments disk.used.index when it adds an entry to the used ring.

    // used_index is our cursor into the used ring; both counters wrap (+%=).
    while (disk.queue.used_index != disk.queue.used.index) : (disk.queue.used_index +%= 1) {
        fence();
        // id is the head descriptor index of the completed chain.
        const id = disk.queue.used.ring[disk.queue.used_index % NUM].id;

        // readWrite() primed status with 0xff; the device writes 0 on success.
        assert(disk.info[id].status == 0);

        if (disk.info[id].buf) |buf| {
            buf.own = false; // disk is done with buf
            os.proc.wakeup(buf);
        } else {
            // NOTE(review): this branch fires when no buffer was recorded for the
            // completed chain, yet the message says "status" — likely carried over
            // from the status check; consider rewording.
            @panic("virtio_disk_intr status");
        }
    }
}

/// read block buf.block.index from the disk into buf.data; blocks until
/// the completion interrupt reports the request done.
pub fn read(buf: *Buffer) void {
    readWrite(buf, false);
}

/// write buf.data to block buf.block.index on the disk; blocks until the
/// completion interrupt reports the request done.
pub fn write(buf: *Buffer) void {
    readWrite(buf, true);
}

/// submit one block read or write for `buf` and sleep until intr() marks
/// it complete. blocks while all NUM descriptors are in flight.
fn readWrite(buf: *Buffer, is_write: bool) void {
    // translate the filesystem block number into a 512-byte sector number.
    const sector: u64 = buf.block.index * (fs.BSIZE / 512);

    disk.lock.acquire();
    defer disk.lock.release();

    // the spec's Section 5.2 says that legacy block operations use three descriptors: one for
    // type/reserved/sector, one for the data, one for a 1-byte status result.

    // allocate the three descriptors.
    // sleep on &disk.free[0] — the channel freeDesc() wakes — until a full
    // chain of three can be allocated.
    const idx: [3]usize = while (true) {
        if (alloc3Descs()) |i| break i;
        const p = os.proc.current().?;
        p.sleep(&disk.free[0], &disk.lock);
    } else unreachable;

    // format the three descriptors. qemu's virtio-blk.c reads them.

    // the request header lives in disk.ops, one-for-one with descriptors.
    var req: *Request = &disk.ops[idx[0]];

    req.type = if (is_write) .write else .read;
    req.reserved = 0;
    req.sector = sector;

    // descriptor 0: the request header, chained to descriptor idx[1].
    disk.queue.desc[idx[0]] = .{
        .addr = @intFromPtr(req),
        .len = @sizeOf(@TypeOf(req.*)),
        .flags = .{ .next = true },
        .next = @intCast(idx[1]),
    };

    // descriptor 1: the data block. for a read, the DEVICE writes the buffer
    // (.write = true); for a write, it only reads it.
    disk.queue.desc[idx[1]] = .{
        .addr = @intFromPtr(&buf.data),
        .len = fs.BSIZE,
        .flags = .{ .write = !is_write, .next = true },
        .next = @intCast(idx[2]),
    };

    // descriptor 2: the one-byte status result, checked by intr().
    disk.info[idx[0]].status = 0xff; // device writes 0 on success
    disk.queue.desc[idx[2]] = .{
        .addr = @intFromPtr(&(disk.info[idx[0]].status)),
        .len = 1,
        .flags = .{ .write = true }, // device writes the status
        .next = 0,
    };

    // record struct Buf for intr().
    buf.own = true;
    disk.info[idx[0]].buf = buf;

    // tell the device the first index in our chain of descriptors.
    disk.queue.avail.ring[disk.queue.avail.index % NUM] = @intCast(idx[0]);

    fence();

    // tell the device another avail ring entry is available.
    disk.queue.avail.index +%= 1;

    fence();

    disk.vdev.notify(0); // value is queue number

    // Wait for intr() to say request is finished.
    // intr() clears buf.own and wakes us on the buf channel.
    const p = os.proc.current().?;
    while (buf.own == true) {
        p.sleep(buf, &disk.lock);
    }

    disk.info[idx[0]].buf = null;
    freeChain(idx[0]);
}

/// return descriptor `index` to the free pool and wake any waiters.
fn freeDesc(index: usize) void {
    assert(index < NUM);
    assert(!disk.free[index]);

    disk.queue.desc[index] = .{};
    disk.free[index] = true;

    // a readWrite() caller may be sleeping on &disk.free[0], waiting for
    // a descriptor to become available.
    os.proc.wakeup(&disk.free[0]);
}

/// free every descriptor in the chain that starts at `start`.
fn freeChain(start: usize) void {
    var current = start;
    while (true) {
        // copy the descriptor out before freeDesc() clears it.
        const desc = disk.queue.desc[current];
        freeDesc(current);
        if (!desc.flags.next) break;
        current = desc.next;
    }
}

/// grab one free descriptor, or return null when all NUM are in flight.
fn allocDesc() ?usize {
    for (&disk.free, 0..) |*slot, index| {
        if (slot.*) {
            slot.* = false;
            return index;
        }
    }
    return null;
}

/// allocate three descriptors (they need not be contiguous). disk transfers always use three
/// descriptors. on failure, any partially-allocated descriptors are released.
fn alloc3Descs() ?[3]usize {
    var picked: [3]usize = undefined;
    for (&picked, 0..) |*slot, got| {
        slot.* = allocDesc() orelse {
            // roll back the ones we already grabbed, then report failure.
            for (picked[0..got]) |d| freeDesc(d);
            return null;
        };
    }
    return picked;
}

const std = @import("std");
const log = std.log.scoped(.disk);
const assert = std.debug.assert;
const allocator = std.heap.page_allocator;
const os = @import("../../os.zig");
const fs = os.fs;
const Buffer = fs.Buffer;
const virtio = os.driver.virtio;
const fence = os.Lock.fence;
