 /*
 * Copyright (C) 2019 Red Hat, Inc.
 *
 * Written By: Gal Hammer <ghammer@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "viofs.h"
#include "ioctl.tmh"

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Map a priority flag to the index of the virt-queue that serves it.
 * The device context is currently unused but kept for interface symmetry. */
static inline int GetVirtQueueIndex(IN PDEVICE_CONTEXT Context,
                                    IN BOOLEAN HighPrio)
{
    int vq_index;

    UNREFERENCED_PARAMETER(Context);

    if (HighPrio)
    {
        vq_index = VQ_TYPE_HIPRIO;
    }
    else
    {
        vq_index = VQ_TYPE_REQUEST;
    }

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "VirtQueueIndex: %d", vq_index);

    return vq_index;
}

/* Number of scatter-gather entries needed for a WDF-backed request:
 * one entry per page of the input buffer plus one per page of the
 * output buffer (lengths rounded up to whole pages). */
static SIZE_T GetRequiredScatterGatherSize(IN PVIRTIO_FS_REQUEST Request)
{
    SIZE_T in_pages;
    SIZE_T out_pages;

    ASSERT(Request->RequestType == VIRTIO_FS_REQUEST_TYPE_WDF);

    in_pages = DIV_ROUND_UP(Request->u.Wdf.InputBufferLength, PAGE_SIZE);
    out_pages = DIV_ROUND_UP(Request->u.Wdf.OutputBufferLength, PAGE_SIZE);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "Required SG Size: %Iu",
        in_pages + out_pages);

    return in_pages + out_pages;
}

/* Allocate TotalBytes of (possibly non-contiguous) physical memory
 * described by an MDL. Pages are not zeroed; the allocation is
 * all-or-nothing (MM_ALLOCATE_FULLY_REQUIRED). Returns NULL on failure. */
static PMDL VirtFsAllocatePages(IN SIZE_T TotalBytes)
{
    PHYSICAL_ADDRESS lowest;
    PHYSICAL_ADDRESS highest;
    PHYSICAL_ADDRESS skip;

    /* No placement restrictions: any physical page is acceptable. */
    lowest.QuadPart = 0;
    highest.QuadPart = -1;
    skip.QuadPart = 0;

    return MmAllocatePagesForMdlEx(lowest, highest, skip, TotalBytes,
        MmCached, MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FULLY_REQUIRED);
}

/* Fill 'sg' with one entry per page of the MDL chain. Each entry gets the
 * page's physical address and up to PAGE_SIZE bytes of the remaining
 * Length; once Length is exhausted, further entries get length 0.
 * Returns the number of entries written. The caller guarantees 'sg' is
 * large enough (see GetRequiredScatterGatherSize). */
static int FillScatterGatherFromMdl(OUT struct scatterlist sg[],
                                    IN PMDL Mdl,
                                    IN size_t Length)
{
    int entries = 0;
    PMDL mdl;

    for (mdl = Mdl; mdl != NULL; mdl = mdl->Next)
    {
        PPFN_NUMBER pfn_array = MmGetMdlPfnArray(mdl);
        ULONG pages = MmGetMdlByteCount(mdl) / PAGE_SIZE;
        ULONG page;

        for (page = 0; page < pages; page++)
        {
            ULONG chunk = (ULONG)(min(Length, PAGE_SIZE));

            sg[entries].physAddr.QuadPart =
                (ULONGLONG)pfn_array[page] << PAGE_SHIFT;
            sg[entries].length = chunk;
            entries += 1;
            Length -= chunk;
        }
    }

    return entries;
}

/* Unlink 'Request' from the device's pending-request list.
 * Returns TRUE if it was found (and removed), FALSE if it was already
 * gone - e.g. completed by the DPC in the meantime. */
BOOLEAN VirtFsRemoveRequest(
    IN PDEVICE_CONTEXT    Context,
    IN PVIRTIO_FS_REQUEST Request)
{
    BOOLEAN found = FALSE;
    PSINGLE_LIST_ENTRY prev;

    WdfSpinLockAcquire(Context->RequestsLock);
    for (prev = &Context->RequestsList; prev->Next != NULL; prev = prev->Next)
    {
        PVIRTIO_FS_REQUEST candidate =
            CONTAINING_RECORD(prev->Next, VIRTIO_FS_REQUEST, ListEntry);

        if (candidate == Request)
        {
            TraceEvents(TRACE_LEVEL_VERBOSE, DBG_DPC,
                "Delete FSReq %p RequestType: %u", candidate,
                candidate->RequestType);
            /* Singly-linked list: bypass the node via its predecessor. */
            prev->Next = candidate->ListEntry.Next;
            found = TRUE;
            break;
        }
    }
    WdfSpinLockRelease(Context->RequestsLock);

    return found;
}

/* FUSE opcodes that must bypass the regular queue and go to the
 * high-priority virt-queue. */
static inline BOOLEAN VirtFsOpcodeIsHighPrio(IN UINT32 Opcode)
{
    switch (Opcode)
    {
        case FUSE_FORGET:
        case FUSE_INTERRUPT:
        case FUSE_BATCH_FORGET:
            return TRUE;
        default:
            return FALSE;
    }
}

#if VIRTFS_ENABLE_GFS
/* TRUE only for the dedicated GFS opcode; used to filter user-mode
 * submissions when GFS support is compiled in. */
static inline BOOLEAN VirtFsOpcodeIsGFS(IN UINT32 Opcode)
{
    if (Opcode == GFS)
    {
        return TRUE;
    }
    return FALSE;
}
#endif // VIRTFS_ENABLE_GFS

EVT_WDF_REQUEST_CANCEL VirtFsEvtRequestCancel;

/*
 * Translate a WDF-backed FUSE request into a scatter-gather list and post
 * it on the selected virt-queue (high-priority or regular).
 *
 * On success the request has been linked on Context->RequestsList and the
 * queue kicked; completion happens later in the queue's DPC. On failure
 * the request is unlinked again and an error is returned - the caller
 * completes the WDF request.
 */
static NTSTATUS VirtFsEnqueueWdfRequest(
    IN PDEVICE_CONTEXT Context,
    IN PVIRTIO_FS_REQUEST fs_req,
    IN BOOLEAN HighPrio)
{
    NTSTATUS status;
    WDFSPINLOCK vq_lock;
    PVIRTIO_FS_REQUEST_WDF Request;
    struct virtqueue *vq;
    struct scatterlist *sg;
    size_t sg_size;
    int vq_index;
    int ret;
    int out_num, in_num;

    Request = &fs_req->u.Wdf;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "--> %!FUNC!");

    vq_index = GetVirtQueueIndex(Context, HighPrio);
    vq = Context->VirtQueues[vq_index];
    vq_lock = Context->VirtQueueLocks[vq_index];

    // Temporary SG array sized one entry per page of both buffers;
    // freed on every path below.
    sg_size = GetRequiredScatterGatherSize(fs_req);
    sg = ExAllocatePoolUninitialized(NonPagedPool,
        sg_size * sizeof(struct scatterlist), VIRT_FS_MEMORY_TAG);
    if (sg == NULL)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "Failed to allocate a %Iu items scatter-gatter list", sg_size);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    // Driver->device entries first (input), then device->driver (output),
    // matching virtqueue_add_buf's out/in ordering.
    out_num = FillScatterGatherFromMdl(sg, Request->InputBuffer,
        Request->InputBufferLength);
    in_num = FillScatterGatherFromMdl(sg + out_num, Request->OutputBuffer,
        Request->OutputBufferLength);
    if ((out_num + in_num) > VIRT_FS_INDIRECT_CAPACITY)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "The buffer is too big to fit into vio-queue: in %d, out %d",
            in_num, out_num);
        ExFreePoolWithTag(sg, VIRT_FS_MEMORY_TAG);
        return STATUS_INVALID_PARAMETER;
    }

    // Indirect descriptor memory for the virtqueue entry; released later
    // as part of fs_req teardown (FreeVirtFsRequest).
    status = GetIndirectPages(Context, out_num + in_num, TRUE, &fs_req->IndirectPages);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "Failed to alloc indirect pages: 0x%x", status);
        ExFreePoolWithTag(sg, VIRT_FS_MEMORY_TAG);
        return status;
    }

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "Push %p Request: %p",
        Request, Request->Request);

    // Link on the pending list *before* exposing the buffer to the device,
    // so the completion DPC can always find it.
    WdfSpinLockAcquire(Context->RequestsLock);
    PushEntryList(&Context->RequestsList, &fs_req->ListEntry);
    WdfSpinLockRelease(Context->RequestsLock);

    // VirtioDeviceReady is checked under vq_lock so submission is mutually
    // exclusive with HandleResetRequest's queue teardown.
    WdfSpinLockAcquire(vq_lock);
    ret = -1;
    if (Context->VirtioDeviceReady) {
        ret = virtqueue_add_buf(vq, sg, out_num, in_num, fs_req,
            fs_req->IndirectPages->VirtAddr,
            fs_req->IndirectPages->PhysAddr.QuadPart);
    }

    if (ret < 0)
    {
        // Device not ready or queue full: undo the list insertion.
        WdfSpinLockRelease(vq_lock);

        VirtFsRemoveRequest(Context, fs_req);
        
        ExFreePoolWithTag(sg, VIRT_FS_MEMORY_TAG);

        return STATUS_UNSUCCESSFUL;
    }

    // call with vq_lock held, to be synced with reset
    virtqueue_kick(vq);

    WdfSpinLockRelease(vq_lock);

    // The queue copied the descriptors; the temporary SG array can go.
    ExFreePoolWithTag(sg, VIRT_FS_MEMORY_TAG);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "<-- %!FUNC!");

    return STATUS_SUCCESS;
}

#if VIRTFS_ENABLE_WINFSP
/*
 * Handle IOCTL_VIRTFS_GET_VOLUME_NAME: read the UTF-8 "Tag" field from the
 * virtio-fs device configuration space, convert it to a NUL-terminated
 * wide string and copy it to the caller's output buffer. Always completes
 * the WDF request.
 */
static VOID HandleGetVolumeName(IN PDEVICE_CONTEXT Context,
    IN WDFREQUEST Request,
    IN size_t OutputBufferLength)
{
    NTSTATUS status;
    WCHAR WideTag[MAX_FILE_SYSTEM_NAME + 1];
    ULONG WideTagActualSize;
    BYTE *out_buf;
    char tag[MAX_FILE_SYSTEM_NAME];
    size_t size;

    // Pre-zero so an empty/failed conversion still yields a terminated
    // (empty) wide string.
    RtlZeroMemory(WideTag, sizeof(WideTag));

    // NOTE(review): 'tag' is read raw from config space and may occupy all
    // sizeof(tag) bytes with no terminating NUL; the "%s" trace below
    // assumes it is shorter - confirm against the device contract.
    VirtIOWdfDeviceGet(&Context->VDevice,
        FIELD_OFFSET(VIRTIO_FS_CONFIG, Tag),
        &tag, sizeof(tag));

    status = RtlUTF8ToUnicodeN(WideTag, sizeof(WideTag), &WideTagActualSize,
        tag, sizeof(tag));

    if (!NT_SUCCESS(status))
    {
        // A malformed tag is non-fatal: report an empty volume name.
        TraceEvents(TRACE_LEVEL_VERBOSE, DBG_POWER,
            "Failed to convert config tag: %!STATUS!", status);
        status = STATUS_SUCCESS;
    }
    else
    {
        TraceEvents(TRACE_LEVEL_INFORMATION, DBG_POWER,
            "Config tag: %s Tag: %S", tag, WideTag);
    }

    // Size returned to the caller includes the terminating NUL.
    size = (wcslen(WideTag) + 1) * sizeof(WCHAR);

    if (OutputBufferLength < size)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "Insufficient out buffer");
        WdfRequestComplete(Request, STATUS_BUFFER_TOO_SMALL);
        return;
    }

    status = WdfRequestRetrieveOutputBuffer(Request, size, &out_buf, NULL);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfRequestRetrieveOutputBuffer failed");
        WdfRequestComplete(Request, status);
        return;
    }

    RtlCopyMemory(out_buf, WideTag, size);
    WdfRequestCompleteWithInformation(Request, STATUS_SUCCESS, size);
}
#endif // VIRTFS_ENABLE_WINFSP

// Thin wrapper over RtlCopyMemory (memcpy semantics: regions must not
// overlap). NOTE(review): presumably exists to give code analysis a single
// audited copy routine - confirm intent before removing.
void CopyBuffer(void* _Dst, void const* _Src, size_t _Size) {
    RtlCopyMemory(_Dst, _Src, _Size);
}

/*
 * Handle IOCTL_VIRTFS_FUSE_REQUEST: validate the caller's buffers, copy the
 * FUSE request into freshly allocated physical pages and submit it to the
 * host via the appropriate virt-queue. On success the WDF request is left
 * pending (marked cancelable) and completed later by the completion DPC or
 * the cancel callback; on failure it is completed here.
 */
static VOID HandleSubmitFuseRequest(IN PDEVICE_CONTEXT Context,
    IN WDFREQUEST Request,
    IN size_t OutputBufferLength,
    IN size_t InputBufferLength)
{
    WDFMEMORY handle;
    NTSTATUS status;
    NTSTATUS cancel_status;
    PVIRTIO_FS_REQUEST fs_req;
    PVIRTIO_FS_REQUEST_WDF fs_req_wdf;
    PVOID in_buf_va;
    PVOID in_buf, out_buf;
    BOOLEAN hiprio;

    // Both buffers must at least hold the fixed FUSE headers.
    if (InputBufferLength < sizeof(struct fuse_in_header))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "Insufficient in buffer");
        status = STATUS_BUFFER_TOO_SMALL;
        goto complete_wdf_req_no_fs_req;
    }

    if (OutputBufferLength < sizeof(struct fuse_out_header))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "Insufficient out buffer");
        status = STATUS_BUFFER_TOO_SMALL;
        goto complete_wdf_req_no_fs_req;
    }

    status = WdfRequestRetrieveInputBuffer(Request, InputBufferLength,
        &in_buf, NULL);

    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfRequestRetrieveInputBuffer failed");
        goto complete_wdf_req_no_fs_req;
    }

    status = WdfRequestRetrieveOutputBuffer(Request, OutputBufferLength,
        &out_buf, NULL);

    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfRequestRetrieveOutputBuffer failed");
        goto complete_wdf_req_no_fs_req;
    }

#if VIRTFS_ENABLE_GFS
    // Block user-mode IOCTL_VIRTFS_FUSE_REQUEST that are not GFS opcode.
    // Checked before any allocation and before the request is marked
    // cancelable: the previous placement (after WdfRequestMarkCancelableEx)
    // completed a still-cancelable request - a WDF contract violation - and
    // completed it with STATUS_SUCCESS.
    if (!VirtFsOpcodeIsGFS(((struct fuse_in_header *)in_buf)->opcode))
    {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_IOCTL, "Opcode not GFS opcode");
        status = STATUS_ACCESS_DENIED;
        goto complete_wdf_req_no_fs_req;
    }
#endif // VIRTFS_ENABLE_GFS

    status = WdfMemoryCreateFromLookaside(Context->RequestsLookaside,
        &handle);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfMemoryCreateFromLookaside failed");
        goto complete_wdf_req_no_fs_req;
    }

    fs_req = WdfMemoryGetBuffer(handle, NULL);
    fs_req->RequestType = VIRTIO_FS_REQUEST_TYPE_WDF;
    fs_req_wdf = &fs_req->u.Wdf;

    fs_req->Handle = handle;
    fs_req_wdf->Request = Request;
    fs_req_wdf->InputBuffer = VirtFsAllocatePages(InputBufferLength);
    fs_req_wdf->InputBufferLength = InputBufferLength;
    fs_req_wdf->OutputBuffer = VirtFsAllocatePages(OutputBufferLength);
    fs_req_wdf->OutputBufferLength = OutputBufferLength;
    fs_req->IndirectPages = NULL; // will be filled in VirtFsEnqueueWdfRequest

    if ((fs_req_wdf->InputBuffer == NULL) || (fs_req_wdf->OutputBuffer == NULL))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "Data allocation failed");
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto complete_wdf_req;
    }

    // Copy the caller's FUSE request into the pages that are actually
    // handed to the device.
    in_buf_va = MmMapLockedPagesSpecifyCache(fs_req_wdf->InputBuffer, KernelMode,
        MmCached, NULL, FALSE, NormalPagePriority);
    if (in_buf_va == NULL)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "MmMapLockedPages failed");
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto complete_wdf_req;
    }

    CopyBuffer(in_buf_va, in_buf, InputBufferLength);
    MmUnmapLockedPages(in_buf_va, fs_req_wdf->InputBuffer);

    status = WdfRequestMarkCancelableEx(Request, VirtFsEvtRequestCancel);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfRequestMarkCancelableEx failed: %!STATUS!", status);
        goto complete_wdf_req;
    }

    hiprio = VirtFsOpcodeIsHighPrio(((struct fuse_in_header *)in_buf)->opcode);

    status = VirtFsEnqueueWdfRequest(Context, fs_req, hiprio);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "VirtFsEnqueueRequest failed: %!STATUS!", status);
        // Keep the enqueue failure code in 'status': previously it was
        // overwritten with the WdfRequestUnmarkCancelable result, so a
        // never-queued request could be completed with STATUS_SUCCESS.
        cancel_status = WdfRequestUnmarkCancelable(Request);
        __analysis_assume(cancel_status != STATUS_NOT_SUPPORTED);
        if (cancel_status != STATUS_CANCELLED)
        {
            goto complete_wdf_req;
        }
        // STATUS_CANCELLED: VirtFsEvtRequestCancel owns completion of
        // Request. NOTE(review): fs_req itself is not released on this
        // path (pre-existing behavior) - confirm ownership against
        // VirtFsEvtRequestCancel / FreeVirtFsRequest.
    }

    return;

complete_wdf_req:
    FreeVirtFsRequest(Context, fs_req);

complete_wdf_req_no_fs_req:
    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL,
        "Complete Request: %p Status: %!STATUS!", Request, status);
    WdfRequestComplete(Request, status);
}

C_ASSERT(sizeof(VIOFS_SG) == sizeof(struct VirtIOBufferDescriptor));

/*
 * Submit an in-kernel FUSE request (caller-provided SG list) to the
 * selected virt-queue. Mirrors VirtFsEnqueueWdfRequest but uses the SG
 * entries embedded in the VIOFS_INKERNEL_REQUEST instead of building them
 * from MDLs. On failure the request is unlinked and an error returned.
 */
static NTSTATUS VirtFsEnqueueKernelRequest(
    IN PDEVICE_CONTEXT Context,
    IN PVIRTIO_FS_REQUEST fs_req,
    IN BOOLEAN HighPrio)
{
    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "--> %!FUNC!");

    PVIOFS_INKERNEL_REQUEST Request = fs_req->u.Direct;

    // Indirect descriptor memory; released as part of fs_req teardown.
    NTSTATUS status;
    status = GetIndirectPages(Context, Request->InputSgCount + Request->OutputSgCount, FALSE,
        &fs_req->IndirectPages);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL, "Failed to alloc indirect pages: 0x%x", status);
        return status;
    }

    int index = GetVirtQueueIndex(Context, HighPrio);
    struct virtqueue* vq = Context->VirtQueues[index];
    WDFSPINLOCK vq_lock = Context->VirtQueueLocks[index];

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "Push %p KernelRequest: %p",
        fs_req, Request);

    // Link on the pending list before the device can complete it.
    WdfSpinLockAcquire(Context->RequestsLock);
    PushEntryList(&Context->RequestsList, &fs_req->ListEntry);
    WdfSpinLockRelease(Context->RequestsLock);

    // VirtioDeviceReady is checked under vq_lock to stay mutually
    // exclusive with HandleResetRequest's queue teardown.
    WdfSpinLockAcquire(vq_lock);
    int ret = -1;
    if (Context->VirtioDeviceReady)
    {
        // Note: Input and Output are inversed, because
        // InputBuffer becomes "output" to host direction
        ret = virtqueue_add_buf(
            vq, (struct VirtIOBufferDescriptor*)Request->Sg,
            Request->InputSgCount, Request->OutputSgCount,
            fs_req,
            fs_req->IndirectPages->VirtAddr,
            fs_req->IndirectPages->PhysAddr.QuadPart);
    }

    if (ret < 0)
    {
        // Device not ready or queue full: undo the list insertion.
        WdfSpinLockRelease(vq_lock);

        VirtFsRemoveRequest(Context, fs_req);

        return STATUS_UNSUCCESSFUL;
    }

    // call with vq_lock held, to be synced with reset
    virtqueue_kick(vq);

    WdfSpinLockRelease(vq_lock);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "<-- %!FUNC!");

    return STATUS_SUCCESS;
}

/* Submit a FUSE request on behalf of an in-kernel client (exported via
 * HandleGetInKernelApi). Wraps the caller's VIOFS_INKERNEL_REQUEST in a
 * VIRTIO_FS_REQUEST from the lookaside list and queues it. Returns
 * STATUS_PENDING on success; the completion path notifies the client. */
static NTSTATUS HandleSubmitInkernelFuseRequest(
    IN  PDEVICE_CONTEXT Context,
    IN  PVIOFS_INKERNEL_REQUEST  ViofsRequest)
{
    WDFMEMORY handle;
    NTSTATUS status;
    PVIRTIO_FS_REQUEST req;

    status = WdfMemoryCreateFromLookaside(Context->RequestsLookaside,
        &handle);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfMemoryCreateFromLookaside failed");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    req = WdfMemoryGetBuffer(handle, NULL);
    req->Handle = handle;
    req->RequestType = VIRTIO_FS_REQUEST_TYPE_INKERNEL;
    req->u.Direct = ViofsRequest;
    req->IndirectPages = NULL; // allocated by VirtFsEnqueueKernelRequest

    status = VirtFsEnqueueKernelRequest(Context, req,
        VirtFsOpcodeIsHighPrio(ViofsRequest->FuseOpcode));
    if (!NT_SUCCESS(status))
    {
        __analysis_assume(status != STATUS_NOT_SUPPORTED);
        FreeVirtFsRequest(Context, req);
        return status;
    }

    return STATUS_PENDING;
}

/* Find the pending VIRTIO_FS_REQUEST wrapping 'pViofsRequest', clear its
 * back-pointer (so the completion DPC won't touch the client's request)
 * and return it. Returns NULL when no matching entry is pending.
 * Note: the entry itself stays on the list; only u.Direct is cleared. */
static PVIRTIO_FS_REQUEST RemoveInkernelRequest(
    IN  PDEVICE_CONTEXT context,
    IN  PVIOFS_INKERNEL_REQUEST  pViofsRequest)
{
    PSINGLE_LIST_ENTRY prev;
    PVIRTIO_FS_REQUEST found = NULL;

    WdfSpinLockAcquire(context->RequestsLock);
    for (prev = &context->RequestsList; prev->Next != NULL; prev = prev->Next)
    {
        PVIRTIO_FS_REQUEST entry = CONTAINING_RECORD(prev->Next,
            VIRTIO_FS_REQUEST, ListEntry);

        if (entry->RequestType == VIRTIO_FS_REQUEST_TYPE_INKERNEL &&
            pViofsRequest == entry->u.Direct)
        {
            TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL,
                "Clear virtio inkernel request %p", entry);
            entry->u.Direct = NULL;
            found = entry;
            break;
        }
    }
    WdfSpinLockRelease(context->RequestsLock);
    return found;
}

/*
 * Cancel an in-kernel FUSE request that the device has not yet completed.
 * Returns TRUE when the request was found and released, FALSE when it
 * appears to have been completed already (nothing to do).
 */
static BOOLEAN HandleCancelInkernelRequest(
    IN  PDEVICE_CONTEXT context,
    IN  PVIOFS_INKERNEL_REQUEST  pViofsRequest)
{
    PVIRTIO_FS_REQUEST pFsRequest;

    // Clears the wrapper's back-pointer so the DPC won't complete the
    // client's request even if it races us.
    pFsRequest = RemoveInkernelRequest(context, pViofsRequest);
    if (!pFsRequest)
    {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_IOCTL,
            "The request %p appears to be completed; cannot cancel", pViofsRequest);
        return FALSE;
    }

    // Try to free the request.
    // We try to detach the request from q
    int index = GetVirtQueueIndex(context, VirtFsOpcodeIsHighPrio(pViofsRequest->FuseOpcode));
    struct virtqueue* vq = context->VirtQueues[index];
    WDFSPINLOCK vq_lock = context->VirtQueueLocks[index];

    // NOTE(review): virtqueue_detach_unused_buf returns *some* unused
    // buffer, not necessarily ours - the mismatch case below is expected
    // and only logged.
    void* detached_buf = NULL;
    WdfSpinLockAcquire(vq_lock);
    // Cannot access queue if device is not ready.
    if (context->VirtioDeviceReady)
    {
        detached_buf = virtqueue_detach_unused_buf(vq);
    }
    WdfSpinLockRelease(vq_lock);

    // It doesn't matter, if detached_buf doesn't match request.
    // We just detach any request available. But need to log this.
    if (detached_buf != pFsRequest)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "detached_buf %p != request %p", pFsRequest, detached_buf);
    }

    // Unlink from the pending list and release wrapper + buffers.
    VirtFsRemoveRequest(context, pFsRequest);
    FreeVirtFsRequest(context, pFsRequest);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "<-- %!FUNC!");
    return TRUE;
}

/*
 * Tear down and re-initialize the virt-queues (device reset requested by
 * an in-kernel client via the exported API).
 *
 * Protocol: VirtioDeviceReady is flipped to FALSE first, then every
 * vq_lock is acquired and released once. Any submitter either finished
 * before we took its lock, or will observe VirtioDeviceReady == FALSE
 * under the lock and skip the queue. Only then is it safe to destroy the
 * queues.
 */
static VOID HandleResetRequest(
    IN  PDEVICE_CONTEXT context)
{
    NTSTATUS status;
    VIRTIO_WDF_QUEUE_PARAM params[VQ_TYPE_MAX];

    // Mark device as not ready during reset
    ULONG deviceReady = InterlockedExchange(&context->VirtioDeviceReady, FALSE);
    if (!deviceReady) {
        return; // Reset is already in progress in another thread;
    }

    // Need go through all queue-locks to sync with submit.
    // LockAcquire/LockRelease will ensure, that submit
    // either finished and will see VirtioDeviceReady as false
    // and will not start request.
    for (ULONG i = 0; i < VQ_TYPE_MAX; i++)
    {
        WDFSPINLOCK vq_lock = context->VirtQueueLocks[i];
        WdfSpinLockAcquire(vq_lock);
        WdfSpinLockRelease(vq_lock);
    }

    //
    // Here we are guaranteed that noone are accessing the queue.
    //
    VirtIOWdfDestroyQueues(&context->VDevice);

    params[VQ_TYPE_HIPRIO].Interrupt = context->WdfInterrupt[VQ_TYPE_HIPRIO];
    params[VQ_TYPE_REQUEST].Interrupt = context->WdfInterrupt[VQ_TYPE_REQUEST];

    status = VirtIOWdfInitQueues(&context->VDevice,
        context->NumQueues, context->VirtQueues, params);
    if (NT_SUCCESS(status))
    {
        // Re-arm interrupts and kick so the host re-examines the queues,
        // then re-open the device for submitters.
        for (ULONG i = 0; i < VQ_TYPE_MAX; i++)
        {
            struct virtqueue* vq = context->VirtQueues[i];

            virtqueue_enable_cb(vq);
            virtqueue_kick(vq);
        }
        VirtIOWdfSetDriverOK(&context->VDevice);
        InterlockedExchange(&context->VirtioDeviceReady, TRUE);
    }
    else
    {
        // Queues could not be rebuilt: leave the device marked not-ready
        // and tell the host the driver failed.
        VirtIOWdfSetDriverFailed(&context->VDevice);
        TraceEvents(TRACE_LEVEL_ERROR, DBG_POWER,
            "VirtIOWdfInitQueues failed with %x", status);
    }
}

/* Handle IOCTL_VIRTFS_GET_INKERNEL_API: hand a kernel-mode client the
 * function table (submit / cancel / reset) plus the device context needed
 * to drive FUSE requests directly. Refused while the device is not in the
 * DRIVER_OK state. Always completes the WDF request. */
static VOID HandleGetInKernelApi(IN PDEVICE_CONTEXT Context,
    IN WDFREQUEST Request,
    IN size_t OutputBufferLength)
{
    NTSTATUS status;
    VIRTIOFS_INKERNEL_FUSE_REQUEST_ROUTINE* api;
    UINT8 dev_status;

    dev_status = virtio_get_status(&Context->VDevice.VIODevice);
    if ((dev_status & VIRTIO_CONFIG_S_DRIVER_OK) == 0)
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "GetInKernelApi: virtio_device is not ready");
        WdfRequestComplete(Request, STATUS_DEVICE_NOT_READY);
        return;
    }

    if (OutputBufferLength < sizeof(*api))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "Insufficient out buffer for GetInKernelApi");
        WdfRequestComplete(Request, STATUS_BUFFER_TOO_SMALL);
        return;
    }

    status = WdfRequestRetrieveOutputBuffer(Request, sizeof(*api), &api, NULL);
    if (!NT_SUCCESS(status))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_IOCTL,
            "WdfRequestRetrieveOutputBuffer failed for GetInKernelApi");
        WdfRequestComplete(Request, status);
        return;
    }

    api->ApiVersion = VIRTFS_INKERNEL_API_VERSION;
    api->ViofsContext = Context;
    api->Func = HandleSubmitInkernelFuseRequest;
    api->CancelFunc = HandleCancelInkernelRequest;
    api->ResetFunc = HandleResetRequest;

    WdfRequestCompleteWithInformation(Request, STATUS_SUCCESS, sizeof(*api));
}

/*
 * EVT_WDF_IO_QUEUE_IO_DEVICE_CONTROL: dispatch the driver's IOCTLs.
 * Every path completes the request exactly once (directly or via a
 * Handle* helper / the virt-queue completion path).
 */
VOID VirtFsEvtIoDeviceControl(IN WDFQUEUE Queue,
                              IN WDFREQUEST Request,
                              IN size_t OutputBufferLength,
                              IN size_t InputBufferLength,
                              IN ULONG IoControlCode)
{
    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL,
        "--> %!FUNC! Queue: %p Request: %p IoCtrl: %x InLen: %Iu OutLen: %Iu",
        Queue, Request, IoControlCode, InputBufferLength, OutputBufferLength);

#if !VIRTFS_ENABLE_WINFSP && !VIRTFS_ENABLE_GFS
    // With no user-mode consumer enabled, only kernel-mode callers may
    // talk to this device.
    PIRP pIrp = WdfRequestWdmGetIrp(Request);
    if (!pIrp || pIrp->RequestorMode != KernelMode) {
        TraceEvents(TRACE_LEVEL_WARNING, DBG_IOCTL,
            "%!FUNC! Access from user level is forbidden: AccessMode = %d",
            pIrp ? pIrp->RequestorMode : -1);
        WdfRequestComplete(Request, STATUS_ACCESS_DENIED);
        // Fix: without this return the code fell through into the switch
        // and dispatched (and could complete) an already-completed request.
        return;
    }
#endif

    PDEVICE_CONTEXT context = GetDeviceContext(WdfIoQueueGetDevice(Queue));

    switch (IoControlCode)
    {
#if VIRTFS_ENABLE_WINFSP
        case IOCTL_VIRTFS_GET_VOLUME_NAME:
            HandleGetVolumeName(context, Request, OutputBufferLength);
            break;
#endif // VIRTFS_ENABLE_WINFSP

        case IOCTL_VIRTFS_FUSE_REQUEST:
            HandleSubmitFuseRequest(context, Request, OutputBufferLength,
                InputBufferLength);
            break;

        case IOCTL_VIRTFS_GET_INKERNEL_API:
            HandleGetInKernelApi(context, Request, OutputBufferLength);
            break;

        default:
            WdfRequestComplete(Request, STATUS_INVALID_DEVICE_REQUEST);
            break;
    }

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "<-- %!FUNC!");
}

/*
 * EVT_WDF_IO_QUEUE_IO_STOP: called for each in-flight request when the
 * queue is stopped (power-down / surprise-removal / purge). Either
 * acknowledges the stop (request stays queued across the transition) or
 * completes the request after detaching it from the pending list.
 */
VOID VirtFsEvtIoStop(IN WDFQUEUE Queue,
                     IN WDFREQUEST Request,
                     IN ULONG ActionFlags)
{
    PDEVICE_CONTEXT context = GetDeviceContext(WdfIoQueueGetDevice(Queue));

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL,
        "--> %!FUNC! Request: %p ActionFlags: 0x%08x", Request, ActionFlags);

    if (ActionFlags & WdfRequestStopRequestCancelable)
    {
        // Take back ownership from the cancel path first; if cancellation
        // already won, let VirtFsEvtRequestCancel do the completion.
        NTSTATUS status = WdfRequestUnmarkCancelable(Request);
        __analysis_assume(status != STATUS_NOT_SUPPORTED);
        if (status == STATUS_CANCELLED)
        {
            WdfRequestStopAcknowledge(Request, FALSE);
            goto end_io_stop;
        }
    }

    if (ActionFlags & WdfRequestStopActionSuspend)
    {
        // Device is only suspending: requeue the request for resume.
        WdfRequestStopAcknowledge(Request, TRUE);
    }
    else if (ActionFlags & WdfRequestStopActionPurge)
    {
        PSINGLE_LIST_ENTRY iter;

        // Clear the back-pointer in the matching pending entry so the
        // completion DPC won't complete this request a second time.
        WdfSpinLockAcquire(context->RequestsLock);
        iter = &context->RequestsList;
        while (iter->Next != NULL)
        {
            PVIRTIO_FS_REQUEST removed = CONTAINING_RECORD(iter->Next,
                VIRTIO_FS_REQUEST, ListEntry);

            if (removed->RequestType == VIRTIO_FS_REQUEST_TYPE_WDF &&
                Request == removed->u.Wdf.Request)
            {
                removed->u.Wdf.Request = NULL;
                break;
            }

            iter = iter->Next;
        };
        WdfSpinLockRelease(context->RequestsLock);

        WdfRequestComplete(Request, STATUS_CANCELLED);
    }
    else
    {
        WdfRequestComplete(Request, STATUS_UNSUCCESSFUL);
    }

end_io_stop:
    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "<-- %!FUNC!");
}

/*
 * EVT_WDF_REQUEST_CANCEL: complete the cancelled request and clear the
 * back-pointer in the matching pending VIRTIO_FS_REQUEST so the device
 * completion DPC will not complete it a second time.
 *
 * NOTE(review): the request is completed *before* the list is scanned
 * under RequestsLock - confirm the completion DPC cannot observe a
 * still-set u.Wdf.Request in that window.
 */
VOID VirtFsEvtRequestCancel(IN WDFREQUEST Request)
{
    PDEVICE_CONTEXT context = GetDeviceContext(WdfIoQueueGetDevice(
        WdfRequestGetIoQueue(Request)));
    PSINGLE_LIST_ENTRY iter;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL,
        "--> %!FUNC! Cancelled Request: %p", Request);
    
    WdfRequestComplete(Request, STATUS_CANCELLED);

    // The wrapper entry stays on the list (the device still owns its
    // buffers); only the WDF back-pointer is cleared here.
    WdfSpinLockAcquire(context->RequestsLock);
    iter = &context->RequestsList;
    while (iter->Next != NULL)
    {
        PVIRTIO_FS_REQUEST entry = CONTAINING_RECORD(iter->Next,
            VIRTIO_FS_REQUEST, ListEntry);

        if (entry->RequestType == VIRTIO_FS_REQUEST_TYPE_WDF &&
            Request == entry->u.Wdf.Request)
        {
            TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL,
                "Clear virtio fs request %p", entry);
            entry->u.Wdf.Request = NULL;
            break;
        }

        iter = iter->Next;
    };
    WdfSpinLockRelease(context->RequestsLock);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_IOCTL, "<-- %!FUNC!");
}
