#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <stdatomic.h>
#include <liburing.h>
#include <sys/mman.h>

#include <unistd.h>


#include "iouring_lib.h"

/**
io_uring - 异步I/O 设施

提供两个内核空间与用户空间共享的环形队列（ring queue）或环形缓冲区（ring buffer）

1、SQ（submission queue，请求提交队列）和 SQE（submission queue entry，提交队列项）
用户态程序将需要执行的 I/O 请求构造成一个 SQE ，然后放入 SQ 中，并修改 SQ 的 tail

2、CQ（completion queue，请求完成队列） 和 CQE（completion queue entry，完成队列项）
内核从SQ中读取SQE，并修改SQ的 head，然后执行 SQE 对应的 I/O 操作，执行结束之后，将执行结果构造成一个 CQE 放入 CQ 中，
并修改 CQ 的 tail

*/

/* Per-ring context behind the opaque IOURING_HANDLE.
 * All unsigned int* members point into the kernel-shared mmap'd ring memory
 * (see IOURING_Create); they must be accessed with the acquire/release
 * helpers, never cached across submissions. */
typedef struct IOURingContext
{
    int iRunFlag;                       // non-zero while IOURING_Schedule keeps polling
    int iRingFd;                        // fd returned by io_uring_setup()

    unsigned int *puiSQTail;            // submission ring: address of the tail index
    unsigned int *puiSQMask;            // submission ring: address of the index mask
    struct io_uring_sqe* pstSQEnties;   // submission queue entry (SQE) array base

    unsigned int *puiCQHead;            // completion ring: address of the head index
    unsigned int *puiCQTail;            // completion ring: address of the tail index
    unsigned int *puiCQMask;            // completion ring: address of the index mask
    struct io_uring_cqe* pstCQEnties;   // completion queue entry (CQE) array base
}IOURING_CONTEXT_S;

// /* Macros for barriers needed by io_uring */
// #define io_uring_smp_store_release(p, v)            \
//     atomic_store_explicit((_Atomic typeof(*(p)) *)(p), (v), \
//                     memory_order_release)

// #define io_uring_smp_load_acquire(p)                \
//     atomic_load_explicit((_Atomic typeof(*(p)) *)(p),   \
//                 memory_order_acquire)

/**
 * Create an io_uring instance with (at least) uiEntries SQ slots.
 *
 * Sets up the ring fd, maps the SQ/CQ rings and the SQE array into user
 * space, initializes the SQ indirection array, and returns an opaque handle.
 *
 * @param uiEntries  requested submission-queue depth (kernel may round up)
 * @return handle on success, IOURING_INVALID_HANDLE on failure
 */
IOURING_HANDLE IOURING_Create(unsigned int uiEntries)
{
    struct io_uring_params stParams = {};
    IOURING_CONTEXT_S* pstCtx;
    void *pCQ;

    /* 1. Create the shared rings. io_uring_setup() allocates the SQ/CQ rings
     * in the kernel and fills stParams with the mmap offsets; flags in
     * stParams (e.g. IORING_SETUP_IOPOLL) could tune the ring behaviour. */
    int iFd = io_uring_setup(uiEntries, &stParams);
    if (iFd < 0)
    {
        printf("io_uring_setup error, %s\n", strerror(errno));
        return IOURING_INVALID_HANDLE;
    }

    /* 2. Map the ring memory. The kernel already allocated it during
     * io_uring_setup(); mmap() only exposes it to this address space. */
    size_t ulSQSize = stParams.sq_off.array + stParams.sq_entries * sizeof(unsigned int);
    size_t ulCQSize = stParams.cq_off.cqes + stParams.cq_entries * sizeof(struct io_uring_cqe);
    if (stParams.features & IORING_FEAT_SINGLE_MMAP)
    {
        /* SQ and CQ rings share one mapping (kernels >= 5.4): map the larger
         * of the two sizes once. */
        if (ulSQSize < ulCQSize)
        {
            ulSQSize = ulCQSize;
        }
    }

    /* Map in the submission (and, with SINGLE_MMAP, completion) ring. */
    void *pSQ = mmap(0, ulSQSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iFd, IORING_OFF_SQ_RING);
    if (pSQ == MAP_FAILED)
    {
        printf("mmap IORING_OFF_SQ_RING error, %s\n", strerror(errno));
        close(iFd);
        return IOURING_INVALID_HANDLE;
    }

    if (stParams.features & IORING_FEAT_SINGLE_MMAP)
    {
        pCQ = pSQ;
    }
    else
    {
        /* Kernels < 5.4 need a separate mapping for the CQ ring. */
        pCQ = mmap(0, ulCQSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, iFd, IORING_OFF_CQ_RING);
        if (pCQ == MAP_FAILED)
        {
            printf("mmap IORING_OFF_CQ_RING error, %s\n", strerror(errno));

            munmap(pSQ, ulSQSize);
            close(iFd);
            return IOURING_INVALID_HANDLE;
        }
    }

    /* Map in the submission queue entries (SQE) array. */
    void* pSQEntries = mmap(0, stParams.sq_entries * sizeof(struct io_uring_sqe),
                            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                            iFd, IORING_OFF_SQES);
    if (pSQEntries == MAP_FAILED)
    {
        printf("mmap IORING_OFF_SQES error, %s\n", strerror(errno));

        /* BUGFIX: with IORING_FEAT_SINGLE_MMAP, pCQ aliases pSQ - unmapping
         * the same range twice was wrong. Unmap the CQ ring only when it is a
         * distinct mapping. */
        if (pCQ != pSQ)
        {
            munmap(pCQ, ulCQSize);
        }
        munmap(pSQ, ulSQSize);
        close(iFd);
        return IOURING_INVALID_HANDLE;
    }

    /* BUGFIX: the SQ indirection array (at sq_off.array) was never
     * initialized (the original line was a no-op expression statement).
     * The kernel consumes array[head & mask] as the index into the SQE
     * array, so fill it with the identity mapping once; the submit paths
     * index SQEs with (tail & mask), which matches this mapping. */
    unsigned int *puiSQArray = pSQ + stParams.sq_off.array;
    for (unsigned int i = 0; i < stParams.sq_entries; i++)
    {
        puiSQArray[i] = i;
    }

    pstCtx = (IOURING_CONTEXT_S*)malloc(sizeof(IOURING_CONTEXT_S));
    if (pstCtx == NULL)
    {
        /* BUGFIX: release all three mappings here, not just the fd. */
        munmap(pSQEntries, stParams.sq_entries * sizeof(struct io_uring_sqe));
        if (pCQ != pSQ)
        {
            munmap(pCQ, ulCQSize);
        }
        munmap(pSQ, ulSQSize);
        close(iFd);
        return IOURING_INVALID_HANDLE;
    }

    /* Record submission-queue bookkeeping pointers. */
    pstCtx->puiSQTail = pSQ + stParams.sq_off.tail;
    pstCtx->puiSQMask = pSQ + stParams.sq_off.ring_mask;
    pstCtx->pstSQEnties = pSQEntries;

    /* Record completion-queue bookkeeping pointers. */
    pstCtx->puiCQHead = pCQ + stParams.cq_off.head;
    pstCtx->puiCQTail = pCQ + stParams.cq_off.tail;
    pstCtx->puiCQMask = pCQ + stParams.cq_off.ring_mask;
    pstCtx->pstCQEnties = pCQ + stParams.cq_off.cqes;

    pstCtx->iRingFd = iFd;
    pstCtx->iRunFlag = 1;

    return pstCtx;
}

/**
 * Destroy a ring handle created by IOURING_Create.
 *
 * Closes the ring fd and frees the context. Safe to call with NULL.
 *
 * NOTE(review): the three mmap() regions created in IOURING_Create are not
 * unmapped here because their addresses/sizes are not stored in the context;
 * they are reclaimed only at process exit. TODO: store and munmap() them for
 * long-lived processes.
 */
void IOURING_Destroy(IOURING_HANDLE hIOURing)
{
    if (hIOURing == NULL)
    {
        return ;
    }

    IOURING_CONTEXT_S* pstCtx = (IOURING_CONTEXT_S*)hIOURing;

    /* BUGFIX: fd 0 is a valid descriptor; the old "if (iRingFd)" test would
     * leak it. IOURING_Create only stores fds it successfully obtained. */
    if (pstCtx->iRingFd >= 0)
    {
        close(pstCtx->iRingFd);
    }

    free(pstCtx);
}

int io_uring_SubmitToSQ(struct IOURingContext* pstCtx, int iFd, int iOpCode)
{
    unsigned int uiSQTail;
    struct io_uring_sqe* pstSQEntry;

    uiSQTail = io_uring_smp_load_acquire(pstCtx->puiSQTail);

    pstSQEntry = &pstCtx->pstSQEnties[uiSQTail & *pstCtx->puiSQMask];
    pstSQEntry->opcode = iOpCode;
    pstSQEntry->fd = iFd;

    uiSQTail++;

    /* Update the tail */
    // 更新提交队列尾索引
    io_uring_smp_store_release(pstCtx->puiSQTail, uiSQTail);

    /*
    * Tell the kernel we have submitted events with the io_uring_enter()
    * system call. We also pass in the IOURING_ENTER_GETEVENTS flag which
    * causes the io_uring_enter() call to wait until min_complete
    * (the 3rd param) events complete.
    * */

    // 调用 io_uring_enter 系统调用通知内核我们已经提交了事件
    int ret = io_uring_enter(pstCtx->iRingFd, 1, 1, IORING_ENTER_GETEVENTS, NULL);
    if(ret < 0) {
        printf("io_uring_enter error, %s\n", strerror(errno));
        return -1;
    }

    return ret;
}

/**
 * Submit an asynchronous IORING_OP_READ of up to ulBuffLen bytes from iFd
 * into pcBuff. The result arrives later on the completion queue; pcBuff must
 * stay valid until the matching CQE is reaped.
 *
 * @return io_uring_enter() result (>= 0) on success, -1 on error
 */
int IOURING_Read(IOURING_HANDLE hIOURing, int iFd, char* pcBuff, size_t ulBuffLen)
{
    IOURING_CONTEXT_S* pstCtx;
    unsigned int uiSQTail;
    struct io_uring_sqe* pstSQEntry;

    if (hIOURing == IOURING_INVALID_HANDLE || pcBuff == NULL)
    {
        return -1;
    }

    pstCtx = (IOURING_CONTEXT_S*)hIOURing;
    /* Acquire-load of the shared tail index. */
    uiSQTail = io_uring_smp_load_acquire(pstCtx->puiSQTail);

    pstSQEntry = &pstCtx->pstSQEnties[uiSQTail & *pstCtx->puiSQMask];
    /* BUGFIX: clear the reused SQE slot so stale fields from a previous
     * submission are not carried into this request. */
    memset(pstSQEntry, 0, sizeof(*pstSQEntry));
    pstSQEntry->opcode = IORING_OP_READ;
    pstSQEntry->fd = iFd;
    pstSQEntry->addr = (unsigned long)pcBuff;
    pstSQEntry->len = ulBuffLen;

    uiSQTail++;

    /* Release-store publishes the SQE contents before the new tail becomes
     * visible to the kernel. */
    io_uring_smp_store_release(pstCtx->puiSQTail, uiSQTail);

    /* Tell the kernel we submitted one event; IORING_ENTER_GETEVENTS makes
     * io_uring_enter() wait until min_complete (3rd param) completions are
     * posted. */
    int ret = io_uring_enter(pstCtx->iRingFd, 1, 1, IORING_ENTER_GETEVENTS, NULL);
    if(ret < 0) {
        printf("io_uring_enter error, %s\n", strerror(errno));
        return -1;
    }

    return ret;
}

/**
 * Submit an asynchronous IORING_OP_WRITE of the NUL-terminated string pcData
 * to iFd. pcData must stay valid until the matching CQE is reaped.
 *
 * @return io_uring_enter() result (>= 0) on success, -1 on error
 */
int IOURING_Write(IOURING_HANDLE hIOURing, int iFd, const char* pcData)
{
    IOURING_CONTEXT_S* pstCtx;
    unsigned int uiSQTail;
    struct io_uring_sqe* pstSQEntry;

    /* BUGFIX: also reject NULL data - strlen(NULL) below is undefined. */
    if (hIOURing == IOURING_INVALID_HANDLE || pcData == NULL)
    {
        return -1;
    }

    pstCtx = (IOURING_CONTEXT_S*)hIOURing;
    /* BUGFIX: restore the acquire load that was commented out with a "??"
     * note. Every other access to this shared index uses the barrier
     * helpers; a plain load here was inconsistent and relies on the
     * submitter being strictly single-threaded. */
    uiSQTail = io_uring_smp_load_acquire(pstCtx->puiSQTail);

    pstSQEntry = &pstCtx->pstSQEnties[uiSQTail & *pstCtx->puiSQMask];
    /* BUGFIX: clear the reused SQE slot so stale fields from a previous
     * submission are not carried into this request. */
    memset(pstSQEntry, 0, sizeof(*pstSQEntry));
    pstSQEntry->opcode = IORING_OP_WRITE;
    pstSQEntry->fd = iFd;
    pstSQEntry->addr = (unsigned long)pcData;
    pstSQEntry->len = strlen(pcData);

    uiSQTail++;

    /* Release-store publishes the SQE contents before the new tail becomes
     * visible to the kernel. */
    io_uring_smp_store_release(pstCtx->puiSQTail, uiSQTail);

    /* Tell the kernel we submitted one event; IORING_ENTER_GETEVENTS makes
     * io_uring_enter() wait until min_complete (3rd param) completions are
     * posted. */
    int ret = io_uring_enter(pstCtx->iRingFd, 1, 1, IORING_ENTER_GETEVENTS, NULL);
    if(ret < 0) {
        printf("io_uring_enter error, %s\n", strerror(errno));
        return -1;
    }

    return ret;
}

/**
 * Reap one completion from the CQ ring.
 *
 * @return the CQE's res field (bytes transferred, or negative errno from the
 *         kernel) on success; -1 when the handle is NULL or the ring is empty.
 */
int IOURING_ReadFromCQ(IOURING_HANDLE hIOURing)
{
    struct io_uring_cqe *pstCQEntry;
    unsigned int uiCQHead;
    int iRes;

    if (hIOURing == NULL)
    {
        return -1;
    }

    IOURING_CONTEXT_S* pstCtx = (IOURING_CONTEXT_S*)hIOURing;

    /* Acquire-load so CQE contents written by the kernel are visible. */
    uiCQHead = io_uring_smp_load_acquire(pstCtx->puiCQHead);

    /* Ring is empty when the head index catches up with the tail. */
    if (uiCQHead == *pstCtx->puiCQTail)
    {
        return -1;
    }

    /* Head entry of the completion queue. */
    pstCQEntry = &pstCtx->pstCQEnties[uiCQHead & (*pstCtx->puiCQMask)];

    /* BUGFIX: copy res BEFORE publishing the new head - once the head
     * advances the kernel is free to reuse this CQE slot, so the old code's
     * "return pstCQEntry->res" after the release was a use-after-release. */
    iRes = pstCQEntry->res;
    if (iRes < 0)
    {
        /* BUGFIX: res carries a NEGATIVE errno; strerror() needs it negated
         * (strerror(-EINVAL) etc.), otherwise it prints "Unknown error". */
        printf("Failed to handle I/O request, %s\n", strerror(-iRes));
    }

    uiCQHead++;

    /* Release-store so the head update is made visible to the kernel. */
    io_uring_smp_store_release(pstCtx->puiCQHead, uiCQHead);

    return iRes;
}

/* Pop the head CQE from the completion ring and return a pointer to it, or
 * NULL when the ring is empty.
 *
 * WARNING(review): the CQ head is advanced (released) BEFORE the caller reads
 * the returned entry, so the kernel may reuse the slot while the caller is
 * still looking at it. Copying res/flags out before the release store would
 * be safer - confirm against the CQE lifetime rules in io_uring(7). */
static struct io_uring_cqe *__GetHeadCQE(IOURING_CONTEXT_S* pstCtx)
{
    unsigned int uiCQHead;
    struct io_uring_cqe *pstCQEntry;
    
    /* Acquire-load so CQE contents written by the kernel are visible. */
    uiCQHead = io_uring_smp_load_acquire(pstCtx->puiCQHead);

    /* Ring buffer is empty when the head index equals the tail index. */
    if (uiCQHead == *pstCtx->puiCQTail)
    {
        return NULL;
    }

    pstCQEntry = &pstCtx->pstCQEnties[uiCQHead & (*pstCtx->puiCQMask)];

    uiCQHead++;

    /* Write barrier so that update to the head are made visible */
    // Release-store so the head update is visible to the kernel
    io_uring_smp_store_release(pstCtx->puiCQHead, uiCQHead);

    return pstCQEntry;
}

/**
 * Poll the completion queue in a loop and print each completion, sleeping
 * one second between empty polls. Runs until pstCtx->iRunFlag is cleared
 * (by another thread or a signal handler - not done anywhere in this file).
 */
void IOURING_Schedule(IOURING_HANDLE hIOURing)
{
    IOURING_CONTEXT_S* pstCtx;
    struct io_uring_cqe *pstCQEntry;

    if (hIOURing == NULL)
    {
        return;
    }

    pstCtx = (IOURING_CONTEXT_S*)hIOURing;

    while (pstCtx->iRunFlag)
    {
        pstCQEntry = __GetHeadCQE(pstCtx);
        if (pstCQEntry == NULL)
        {
            printf("No completion queue entry\n");
            sleep(1);
            continue;
        }

        /* NOTE(review): __GetHeadCQE() already advanced the CQ head, so the
         * kernel may overwrite *pstCQEntry while we read it here - confirm
         * with the CQE lifetime rules in io_uring(7). */
        if (pstCQEntry->res < 0)
        {
            /* BUGFIX: the error message was missing its trailing newline. */
            printf("Failed to handle I/O request\n");
        }
        else
        {
            printf("res  : %d\n", pstCQEntry->res);
            printf("flags: 0x%x\n", pstCQEntry->flags);
        }
    }
}