#include <linux/init.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/poll.h>

#include "log.h"

#define SCULL_PIPE_BUF_SIZE 5

/*
 * Per-device state for the scull pipe: a circular byte buffer guarded
 * by @sem, with separate wait queues for blocked readers and writers.
 * One buffer slot is always left unused so rp == wp unambiguously
 * means "empty" (see scull_p_buffer_avai/scull_p_buffer_empty).
 */
struct scull_pipe {
    wait_queue_head_t inq, outq;       /* blocked readers / blocked writers */
    char *buffer, *end;                /* data spans [buffer, end) */
    int buffersize;                    /* total capacity incl. the sentinel slot */
    char *rp, *wp;                     /* read / write cursors; rp == wp => empty */
    int nreaders, nwriters;            /* NOTE(review): declared but never updated anywhere in this file */
    struct semaphore sem;              /* serializes access to the fields above */
    struct cdev cdev;                  /* embedded char-device object */
    struct fasync_struct *async_queue; /* SIGIO subscribers (see scull_p_fasync) */
};

/* Single global device instance and the dev_t allocated for it in init. */
struct scull_pipe g_dev;
dev_t g_devno;

/*
 * Number of bytes writable without overtaking the read cursor.  One
 * slot is always kept free as a sentinel so that rp == wp can mean
 * "empty" rather than "full".  Caller normally holds pdev->sem; the
 * write path also calls this locklessly as a pre-sleep recheck.
 */
static int scull_p_buffer_avai(const struct scull_pipe *pdev) {
    int free_bytes;

    if (pdev->wp == pdev->rp) {
        /* empty: everything but the sentinel slot is available */
        free_bytes = pdev->buffersize - 1;
    } else {
        free_bytes = ((pdev->rp + pdev->buffersize - pdev->wp) % pdev->buffersize) - 1;
    }
    return free_bytes;
}

/* True when the circular buffer holds no unread data (read cursor has caught up). */
static inline int scull_p_buffer_empty(const struct scull_pipe *pdev) {
    return (pdev->wp == pdev->rp) ? 1 : 0;
}

/*
 * open(): recover the scull_pipe that embeds this inode's cdev and
 * stash it in filp->private_data for the other file operations.
 */
static int scull_p_open(struct inode *inode, struct file *filp) {
    struct scull_pipe *pdev = container_of(inode->i_cdev, struct scull_pipe, cdev);

    LOGI("open\n");
    filp->private_data = pdev;
    return 0;
}

/*
 * read(): copy up to @count bytes out of the circular buffer.
 *
 * Blocks while the buffer is empty unless O_NONBLOCK is set (then
 * -EAGAIN).  Copies at most one contiguous run per call (up to wp, or
 * up to the wrap point), so the return value may be less than @count.
 * Returns -ERESTARTSYS when interrupted, -EFAULT on a bad user buffer.
 *
 * BUGFIX: the log format strings used %ld for size_t; the correct
 * specifier is %zu (mismatched printk specifiers are flagged by
 * -Wformat and are wrong on some ABIs).
 */
static ssize_t scull_p_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) {
    LOGI("read\n");
    struct scull_pipe *pdev;
    char comm[TASK_COMM_LEN] = {0};

    pdev = filp->private_data;
    if (down_interruptible(&pdev->sem)) {
        return -ERESTARTSYS;
    }

    while (scull_p_buffer_empty(pdev)) {
        LOGI("buffer empty\n");
        /* Nothing to read: release the lock before deciding how to wait. */
        up(&pdev->sem);
        if (filp->f_flags & O_NONBLOCK) {
            LOGI("nonblock read\n");
            return -EAGAIN;
        }

        LOGI("%s(%d) waiting for read\n", get_task_comm(comm, current), task_pid_nr(current));
        if (wait_event_interruptible(pdev->inq, !scull_p_buffer_empty(pdev))) {
            LOGE("waiting interrupted\n");
            return -ERESTARTSYS; /* signal: let the fs layer restart the syscall */
        }

        /* Reacquire and loop: another reader may have drained the data. */
        if (down_interruptible(&pdev->sem)) {
            return -ERESTARTSYS;
        }
    }

    /* Copy one contiguous run: up to wp, or up to the wrap point. */
    if (pdev->rp < pdev->wp) {
        count = min((size_t)(pdev->wp - pdev->rp), count);
    } else {
        count = min((size_t)(pdev->end - pdev->rp), count);
    }
    if (copy_to_user(buf, pdev->rp, count)) {
        LOGE("copy data failed\n");
        up(&pdev->sem);
        return -EFAULT;
    }
    pdev->rp += count;
    if (pdev->rp == pdev->end) {
        pdev->rp = pdev->buffer; /* wrap around */
    }
    up(&pdev->sem);

    /* Space was freed: unblock any waiting writer. */
    wake_up_interruptible(&pdev->outq);

    LOGI("%s(%d) read %zu bytes\n", get_task_comm(comm, current), task_pid_nr(current), count);
    return count;
}

/*
 * write(): copy up to @count bytes into the circular buffer.
 *
 * Blocks while the buffer is full unless O_NONBLOCK is set.  Writes at
 * most one contiguous run per call (bounded by the read cursor or the
 * wrap point), so the return value may be less than @count.  Wakes
 * readers and fires SIGIO on the fasync queue after a successful write.
 *
 * BUGFIXES vs. the original:
 *  - O_NONBLOCK was ignored on the write side, so a nonblocking writer
 *    would sleep indefinitely on a full buffer; it now gets -EAGAIN
 *    (matching the read path).
 *  - Log format strings used %ld for size_t values; %zu is correct.
 * Also: DEFINE_WAIT moved inside the loop so each sleep starts from a
 * freshly initialized wait entry.
 */
static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) {
    LOGI("write\n");
    struct scull_pipe *pdev;
    size_t avai = 0;

    pdev = filp->private_data;

    if (down_interruptible(&pdev->sem)) {
        return -ERESTARTSYS;
    }
    LOGI("got semphore\n");

    /* Sleep manually while the buffer is full. */
    while ((avai = scull_p_buffer_avai(pdev)) == 0) {
        DEFINE_WAIT(wait);

        LOGI("buffer full\n");
        up(&pdev->sem);

        /* BUGFIX: a nonblocking writer must not sleep. */
        if (filp->f_flags & O_NONBLOCK) {
            LOGI("nonblock write\n");
            return -EAGAIN;
        }

        /* Queue ourselves BEFORE re-testing so a wakeup arriving between
         * the test and schedule() cannot be lost. */
        prepare_to_wait(&pdev->outq, &wait, TASK_INTERRUPTIBLE);
        if (scull_p_buffer_avai(pdev) == 0) {
            schedule();
        }
        finish_wait(&pdev->outq, &wait);

        if (signal_pending(current)) {
            LOGI("write interrupted\n");
            return -ERESTARTSYS;
        }

        if (down_interruptible(&pdev->sem)) {
            return -ERESTARTSYS;
        }
    }
    LOGI("available buffer %zu bytes\n", avai);

    /* Write one contiguous run: up to the read cursor or the wrap point. */
    count = min(avai, count);
    if (pdev->wp < pdev->rp) {
        count = min((size_t)(pdev->rp - pdev->wp - 1), count);
    } else {
        count = min((size_t)(pdev->end - pdev->wp), count);
    }
    if (copy_from_user(pdev->wp, buf, count)) {
        LOGE("copy data failed\n");
        up(&pdev->sem);
        return -EFAULT;
    }
    pdev->wp += count;
    if (pdev->wp == pdev->end) {
        pdev->wp = pdev->buffer; /* wrap around */
    }
    up(&pdev->sem);

    /* Data is available: unblock any waiting reader. */
    wake_up_interruptible(&pdev->inq);

    if (pdev->async_queue) {
        kill_fasync(&pdev->async_queue, SIGIO, POLL_IN);
    }
    LOGI("write %zu bytes\n", count);

    return count;
}

/*
 * poll(): register on both wait queues and report current readiness.
 *
 * BUGFIX: .poll returns an unsigned event mask, not an errno.  The
 * original returned -ERESTARTSYS on a failed lock acquisition, which
 * the poll core would misinterpret as a mask with (nearly) all bits
 * set.  Report POLLERR instead.
 */
static unsigned int scull_p_poll(struct file *filp, poll_table *wait) {
    LOGI("poll\n");
    struct scull_pipe *pdev = filp->private_data;
    unsigned int mask = 0;

    if (down_interruptible(&pdev->sem)) {
        return POLLERR; /* interrupted; cannot return an errno from poll */
    }

    poll_wait(filp, &pdev->inq, wait);
    poll_wait(filp, &pdev->outq, wait);

    if (!scull_p_buffer_empty(pdev)) {
        mask |= POLLIN | POLLRDNORM; /* data available for reading */
    }
    if (scull_p_buffer_avai(pdev) > 0) {
        mask |= POLLOUT | POLLWRNORM; /* space available for writing */
    }
    up(&pdev->sem);

    return mask;
}

/*
 * fasync(): add or remove this struct file on the device's SIGIO
 * notification list; fasync_helper does all the bookkeeping.
 */
static int scull_p_fasync(int fd, struct file *filp, int mode) {
    struct scull_pipe *pdev;

    LOGI("fasync\n");
    pdev = filp->private_data;
    return fasync_helper(fd, filp, mode, &pdev->async_queue);
}

/*
 * release(): last close of this struct file — detach it from the
 * fasync (SIGIO) list.  fd == -1 / mode == 0 means "remove".
 */
static int scull_p_release(struct inode *inode, struct file *filp) {
    LOGI("release\n");
    (void)scull_p_fasync(-1, filp, 0);
    return 0;
}

/* File operations for the scullpipe character device (wired up in init). */
const struct file_operations fops = {
    .owner = THIS_MODULE,
    .open = scull_p_open,
    .read = scull_p_read,
    .write = scull_p_write,
    .poll = scull_p_poll,
    .fasync = scull_p_fasync,
    .release = scull_p_release,
};

/*
 * Module init: allocate the circular buffer, set up the device state,
 * then register the char device.
 *
 * BUGFIXES: the original leaked the buffer when alloc_chrdev_region
 * failed, leaked both the buffer and the chrdev region when cdev_add
 * failed, and returned -EFAULT instead of the real error code.  Error
 * paths now unwind in reverse order and propagate @err.
 */
static int __init scull_p_init(void) {
    char buf[32]; /* for format_dev_t(), which needs >= 20 bytes */
    int err;

    LOGI("init\n");

    g_dev.buffer = kmalloc(SCULL_PIPE_BUF_SIZE, GFP_KERNEL);
    if (!g_dev.buffer) {
        LOGE("alloc buffer");
        return -ENOMEM;
    }
    // buffer ranges [buffer, end)
    g_dev.end = g_dev.buffer + SCULL_PIPE_BUF_SIZE;
    g_dev.buffersize = SCULL_PIPE_BUF_SIZE;
    g_dev.rp = g_dev.wp = g_dev.buffer;
    g_dev.nreaders = g_dev.nwriters = 0;

    sema_init(&g_dev.sem, 1);
    init_waitqueue_head(&g_dev.inq);
    init_waitqueue_head(&g_dev.outq);

    cdev_init(&g_dev.cdev, &fops);
    err = alloc_chrdev_region(&g_devno, 0, 1, "scullpipe");
    if (err) {
        LOGE("alloc dev_t\n");
        goto err_free_buffer;
    }
    // device goes live here: fops may be called from this point on
    err = cdev_add(&g_dev.cdev, g_devno, 1);
    if (err) {
        LOGE("add scullpipe dev\n");
        goto err_unregister;
    }

    format_dev_t(buf, g_devno);
    LOGI("add scullpipe dev at %s\n", buf);
    return 0;

err_unregister:
    unregister_chrdev_region(g_devno, 1);
err_free_buffer:
    kfree(g_dev.buffer);
    g_dev.buffer = g_dev.end = NULL;
    return err;
}
module_init(scull_p_init);

/*
 * Module exit: tear down in reverse order of init.
 *
 * BUGFIX: the original freed the buffer BEFORE deleting the cdev, so
 * an already-open file could still reach scull_p_read/write against
 * freed memory during the window.  Unregister the device first, then
 * release the buffer.
 */
static void __exit scull_p_exit(void) {
    LOGI("exit\n");
    cdev_del(&g_dev.cdev);
    unregister_chrdev_region(g_devno, 1);

    kfree(g_dev.buffer); /* kfree(NULL) is a no-op, no guard needed */
    g_dev.buffer = g_dev.end = NULL;
    g_dev.buffersize = 0;
    g_dev.rp = g_dev.wp = NULL;
}
module_exit(scull_p_exit);

MODULE_LICENSE("GPL");