#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>

MODULE_LICENSE("GPL");

#ifndef SCULL_QUANTUM
#define SCULL_QUANTUM 4000
#endif

#ifndef SCULL_QSET
#define SCULL_QSET    1000
#endif

#ifndef SCULL_MAJOR
#define SCULL_MAJOR 0   /* dynamic major by default */
#endif

#ifndef SCULL_NR_DEVS
#define SCULL_NR_DEVS 4    /* scull0 through scull3 */
#endif

/*
 * Per-device state for one scull pipe: a fixed-size circular buffer
 * protected by "sem", with blocking read/write wait queues.  One slot
 * of the ring is always kept unused so that rp == wp means "empty"
 * and wp one-behind-rp means "full".
 */
typedef struct Scull_Dev {
    wait_queue_head_t inq, outq;  /* read and write queues */
    char *buffer, *end;             /* begin of buf, end of buf (one past last byte) */
    int buffersize;                 /* used in pointer arithmetic */
    char *rp, *wp;                  /* where to read, where to write */
    int nreaders, nwriters;         /* number of openings for r/w */
    struct fasync_struct *async_queue; /* asynchronous readers */
    struct semaphore sem;     /* mutual exclusion semaphore     */
    struct cdev cdev;	  /* Char device structure		*/
    int flag;                 /* writer wakeup condition; NOTE(review): written after up() in read/write — racy, confirm intent */
} Scull_Dev;

Scull_Dev *scull_devices = NULL; /* per-device array, allocated in scull_init_module */
int scull_major =   SCULL_MAJOR;  /* 0 => major number assigned dynamically */
int scull_minor =   0;            /* first minor number */
int scull_nr_devs = SCULL_NR_DEVS;    /* number of bare scull devices */
int scull_quantum = SCULL_QUANTUM;    /* circular-buffer size in bytes */
int scull_qset =    SCULL_QSET;       /* NOTE(review): unused in this file — confirm before removing */
static struct class *scull_class = NULL;          /* sysfs class for /dev node creation */
static struct device **scull_device_fsys = NULL;  /* devices returned by device_create() */

/*
 * Tear down everything scull_init_module() set up.  This is also used
 * as the error-unwind path of init, so every step must tolerate a
 * partially initialised state (NULL/ERR class, NULL device array).
 */
static void scull_cleanup_module(void)
{
    dev_t devno = MKDEV(scull_major, scull_minor);
    int i;

    if (scull_devices) {
        for (i = 0; i < scull_nr_devs; i++) {
            cdev_del(&scull_devices[i].cdev);
            if (!IS_ERR_OR_NULL(scull_class))
                device_destroy(scull_class, MKDEV(scull_major, scull_minor + i));
        }
        kfree(scull_devices);
        scull_devices = NULL;
    }
    /*
     * Fix: the original freed scull_device_fsys only inside the
     * scull_devices branch, leaking it when scull_devices allocation
     * failed during init.  kfree(NULL) is a no-op, so free it always.
     */
    kfree(scull_device_fsys);
    scull_device_fsys = NULL;

    unregister_chrdev_region(devno, scull_nr_devs);
    if (!IS_ERR_OR_NULL(scull_class)) {
        class_destroy(scull_class);
        scull_class = NULL;
    }

    printk(KERN_DEBUG "exit scull pipe.\n");
}

ssize_t scull_read(struct file *filp, char *buf, size_t count,
                loff_t *f_pos)
{
    Scull_Dev *dev = filp->private_data;

    if (down_interruptible(&dev->sem)) {
        return -ERESTARTSYS;
    }

    while (dev->rp == dev->wp)
    {
        up(&dev->sem);
        if (filp->f_flags & O_NONBLOCK) {
            return -EAGAIN;
        }
        printk(KERN_NOTICE "%s reading going to sleep.\n", current->comm);
        wait_event_interruptible(dev->inq, dev->rp != dev->wp);
        if (signal_pending(current)) {
            return -ERESTARTSYS;
        }
        if (down_interruptible(&dev->sem)) {
            return -ERESTARTSYS;
        }
    }

    if (dev->wp >dev->rp) {
        count = min(count, (size_t)(dev->wp-dev->rp));
    } else
    {
        count = min(count, (size_t)(dev->end - dev->rp));
    }

    if (copy_to_user(buf, dev->rp, count)) {
        up(&dev->sem);
        return -EFAULT;
    }
    dev->rp += count;

    if (dev->rp == dev->end) {
        dev->rp = dev->buffer;
    }
    up(&dev->sem);

    dev->flag = 1;
    wake_up_interruptible(&dev->outq);
    printk(KERN_NOTICE "%s read %li bytes\n", current->comm, (long)count);
    return count;
}

ssize_t scull_write(struct file *filp, const char *buf, size_t count,
                loff_t *f_pos)
{
    Scull_Dev *dev = filp->private_data;
    int left;

    if (down_interruptible(&dev->sem)) {
        return -ERESTARTSYS;
    }

    left = (dev->rp + dev->buffersize - dev->wp - 1) % dev->buffersize + 1;
    printk(KERN_NOTICE "write: left %i wp %lx rp %lx\n", left, (long unsigned int)dev->wp, (long unsigned int)dev->rp);
    while(left == 1) {
        up(&dev->sem);
        if (filp->f_flags & O_NONBLOCK) {
            return -EAGAIN;
        }

        dev->flag = 0;
        wait_event_interruptible(dev->outq, dev->flag);
        if (signal_pending(current)) {
            return -ERESTARTSYS;
        }
        if (down_interruptible(&dev->sem)) {
            return -ERESTARTSYS;
        }
        left = (dev->rp + dev->buffersize - dev->wp - 1) % dev->buffersize + 1;
    }

    printk(KERN_NOTICE "write: wp %lx rp %lx\n", (long unsigned int)dev->wp, (long unsigned int)dev->rp);
    if (dev->wp >= dev->rp) {
        count = min(count, (size_t)(dev->end - dev->wp));
        if (count == left) {
            count--;
        }
    } else
    {
        count = min(count, (size_t)(dev->rp - dev->wp - 1));
    }

    if (copy_from_user(dev->wp, buf, count)) {
        up(&dev->sem);
        return -EFAULT;
    }
    dev->wp += count;
    if (dev->wp == dev->end) {
        dev->wp = dev->buffer;
    }
    printk(KERN_NOTICE "write: wp %lx rp %lx\n", (long unsigned int)dev->wp, (long unsigned int)dev->rp);
    up(&dev->sem);

    dev->flag = 1;
    wake_up_interruptible(&dev->inq);

    if (dev->async_queue) {
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
    }

    printk(KERN_NOTICE "%s write %li bytes\n", current->comm, (long)count);
    return count;
}

int scull_open(struct inode *inode, struct file *filp)
{
	Scull_Dev *dev; /* device information */

    dev = container_of(inode->i_cdev, Scull_Dev, cdev);
    filp->private_data = dev;
    if (down_interruptible(&dev->sem)) {
        return -ERESTARTSYS;
    }
    if (!dev->buffer) {
        dev->buffer = kmalloc(scull_quantum, GFP_KERNEL);
        if (!dev->buffer) {
            up(&dev->sem);
            return -ENOMEM;
        }
        dev->buffersize = scull_quantum;
        dev->end = dev->buffer + dev->buffersize;
        printk(KERN_NOTICE "open: buffer %lx end %lx\n", (long unsigned int)dev->buffer, (long unsigned int)dev->end);
    }
    if (!dev->rp) {
        dev->rp = dev->buffer;
    }
    if (!dev->wp) {
        dev->wp = dev->buffer;
    }
    if (filp->f_mode & FMODE_READ) {
        dev->nreaders++;
    }
    if (filp->f_mode & FMODE_WRITE) {
        dev->nwriters++;
    }
    up(&dev->sem);

    return 0;
}
/* Add or remove this file descriptor on the device's async-notify list. */
int scull_fasync(int fd, struct file *filp, int mode)
{
    Scull_Dev *device = filp->private_data;
    struct fasync_struct **queue = &device->async_queue;

    return fasync_helper(fd, filp, mode, queue);
}
/*
 * Poll: report whether the pipe is readable and/or writable.
 * Returns a mask of POLLIN|POLLRDNORM and/or POLLOUT|POLLWRNORM.
 */
unsigned int scull_poll(struct file *filp, poll_table *wait)
{
    Scull_Dev *dev = filp->private_data;
    unsigned int mask = 0;
    int left;

    printk(KERN_NOTICE "%s poll.\n", current->comm);
    /*
     * Fix: rp/wp/buffersize are shared with read/write; sample them
     * under dev->sem.  Uninterruptible down() — poll has no way to
     * report -ERESTARTSYS and the critical section is tiny.
     */
    down(&dev->sem);
    poll_wait(filp, &dev->inq,  wait);
    poll_wait(filp, &dev->outq, wait);

    /*
     * The buffer is circular; it is considered full if "wp" is right
     * behind "rp".  "left" is 0 if the buffer is empty, and 1 if it
     * is completely full.
     */
    left = (dev->rp + dev->buffersize - dev->wp) % dev->buffersize;
    if (dev->rp != dev->wp)
        mask |= POLLIN | POLLRDNORM;   /* readable */
    if (left != 1)
        mask |= POLLOUT | POLLWRNORM;  /* writable */
    up(&dev->sem);
    printk(KERN_NOTICE "%s poll.\n", current->comm);

    return mask;
}
int scull_release(struct inode *inode, struct file *filp)
{
    Scull_Dev *dev = filp->private_data;

    scull_fasync(-1, filp, 0);
    if (down_interruptible(&dev->sem)) {
        return -ERESTARTSYS;
    }
    if (filp->f_mode & FMODE_READ) {
        dev->nreaders--;
    }

    if (filp->f_mode & FMODE_WRITE) {
        dev->nwriters--;
    }
    if (dev->nwriters + dev->nreaders == 0) {
        kfree(dev->buffer);
        dev->buffer = NULL;
        printk(KERN_NOTICE "%s release.\n", current->comm);
    }
    up(&dev->sem);
    printk(KERN_NOTICE "%s release.\n", current->comm);
	return 0;
}
struct file_operations scull_fops = {
    .owner = THIS_MODULE,
    .read = scull_read,
    .write = scull_write,
    .open = scull_open,
    .poll = scull_poll,
    .release = scull_release,
};

/*
 * Register one cdev and create its /dev/scullpipeN node through the
 * scull class.  Failures are logged but not fatal (matches the
 * original best-effort behaviour).
 */
static void scull_setup_cdev(Scull_Dev *dev, int index)
{
    int err;
    dev_t devno = MKDEV(scull_major, scull_minor + index);

    cdev_init(&dev->cdev, &scull_fops);
    dev->cdev.owner = THIS_MODULE;
    dev->cdev.ops = &scull_fops;
    err = cdev_add(&dev->cdev, devno, 1);
    if (err)
        printk(KERN_NOTICE "Error %d adding scull %d\n", err, index);

    scull_device_fsys[index] = device_create(scull_class, NULL, devno, NULL,
                                             "scullpipe%d", index);
    /*
     * Fix: device_create() reports failure with ERR_PTR(), never a
     * negative pointer value, so the original "< 0" test could not
     * fire.  Store NULL on failure so cleanup can skip it safely.
     */
    if (IS_ERR(scull_device_fsys[index])) {
        printk(KERN_WARNING "scull is not able to create device.\n");
        scull_device_fsys[index] = NULL;
    }
}
static int scull_init_module(void)
{
    int result, i;
    dev_t dev = 0;

    result = alloc_chrdev_region(&dev, scull_minor, scull_nr_devs,
    "scullpipe");
    if (result < 0) {
        printk(KERN_WARNING "scullpipe is not able to get major.\n");
        goto fail;
    }
    scull_class = class_create(THIS_MODULE, "scullpipe");
    if (IS_ERR(scull_class)) {
        printk(KERN_WARNING "scull is not create class.\n");
        return -EBUSY;
    }

    scull_device_fsys = kmalloc(scull_nr_devs*sizeof(struct device*), GFP_KERNEL);
    if (!scull_device_fsys) {
        class_destroy(scull_class);
        result = -ENOMEM;
        return result;
    }

    scull_major = MAJOR(dev);
    scull_devices = kmalloc(scull_nr_devs * sizeof(Scull_Dev), GFP_KERNEL);
    if (!scull_devices) {
        result = -ENOMEM;
        goto fail;
    }
    memset(scull_devices, 0, scull_nr_devs * sizeof(Scull_Dev));
    for (i = 0; i < scull_nr_devs; i++) {
        init_waitqueue_head(&(scull_devices[i].inq));
        init_waitqueue_head(&(scull_devices[i].outq));
        sema_init(&scull_devices[i].sem, 1);
        scull_setup_cdev(&scull_devices[i], i);
    }

    printk(KERN_DEBUG "init scull pipe.\n");
    return 0;

fail:
    scull_cleanup_module();
    return result;
}

/* Module entry and exit points. */
module_init(scull_init_module);
module_exit(scull_cleanup_module);