/*************************************************************************
    > File Name: pipe.c
    > Author: Xing.Dai
    > Mail: Xing.Dai@aliyun.com 
    > Created Time: 2018-02-27 11:09
 ************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

#include "scull.h"

/*
 * Per-device state for one scullpipe: a fixed-size circular byte buffer
 * guarded by a semaphore, with separate wait queues for blocked readers
 * and writers.  rp == wp means "empty"; one slot is always kept unused
 * so that full and empty states are distinguishable (see spacefree()).
 */
struct scull_pipe {
	wait_queue_head_t inq, outq;	/* read and write wait queues. */
	char *buffer, *end;				/* begin of buf, one past end of buf. */
	int buffersize;					/* used in pointer arithmetic. */
	char *rp, *wp;					/* where to read, where to write. */
	int nreaders, nwriters;			/* open counts per f_mode. */
	struct fasync_struct *async_queue;	/* asynchronous readers (SIGIO). */
	struct semaphore sem;			/* protects every field above. */
	struct cdev cdev;				/* char device structure. */
};

/* Module parameters, settable at load time (insmod/modprobe). */
static int scull_p_nr_devs = SCULL_P_NR_DEVS;	/* how many pipe devices to create. */
int scull_p_buffer = SCULL_P_BUFFER;			/* circular-buffer size in bytes (shared with scull.h users). */
dev_t scull_p_devno;		/* Our first device number. */

module_param(scull_p_nr_devs, int, 0);
module_param(scull_p_buffer, int, 0);

/* Device array, allocated in scull_p_init(), freed in scull_p_cleanup(). */
static struct scull_pipe *scull_p_devices;

/* Forward declarations: used by fops / release before their definitions. */
static int scull_p_fasync(int fd, struct file *filp, int mode);
static int spacefree(struct scull_pipe *dev);

/**
 * scull_p_open - open one scullpipe device node.
 * @inode: inode of the device file; i_cdev locates the scull_pipe.
 * @filp: open file; private_data is pointed at the device for later ops.
 *
 * Bumps the reader/writer open counts under the device semaphore.  The
 * data buffer itself is allocated once at init time in
 * scull_p_setup_cdev(), not here.
 *
 * Returns 0 on success, or -ERESTARTSYS if interrupted while waiting
 * for the semaphore.
 */
static int scull_p_open(struct inode *inode, struct file *filp)
{
	struct scull_pipe *dev;

	dev = container_of(inode->i_cdev, struct scull_pipe, cdev);
	filp->private_data = dev;

	if(down_interruptible(&dev->sem))
		return -ERESTARTSYS;
	if(filp->f_mode & FMODE_READ)
		dev->nreaders++;
	if(filp->f_mode & FMODE_WRITE)
		dev->nwriters++;
	up(&dev->sem);

	/* A pipe has no notion of file position. */
	return nonseekable_open(inode, filp);
}


/**
 * scull_p_release - called on the last close of a file descriptor.
 * @inode: inode of the device file (unused beyond the fops contract).
 * @filp: file being closed.
 *
 * Detaches this filp from the fasync notification list and drops the
 * reader/writer open counts.  The data buffer is released only in
 * scull_p_cleanup(), so nothing is freed here.
 *
 * Returns 0 always.
 */
static int scull_p_release(struct inode *inode, struct file *filp)
{
	struct scull_pipe *dev = filp->private_data;

	/* Remove this filp from the asynchronously notified filp's. */
	scull_p_fasync(-1, filp, 0);
	down(&dev->sem);
	if(filp->f_mode & FMODE_READ)
		dev->nreaders--;
	if(filp->f_mode & FMODE_WRITE)
		dev->nwriters--;
	up(&dev->sem);
	return 0;
}


/**
 * scull_p_read - read up to @count bytes from the circular buffer.
 * @filp: open file; private_data holds the scull_pipe.
 * @buf: userspace destination buffer.
 * @count: maximum number of bytes requested.
 * @f_pos: ignored; the device is not seekable.
 *
 * Blocks until data is available unless O_NONBLOCK is set.  rp == wp
 * means the buffer is empty.  At most one contiguous run is returned
 * per call; a read spanning the wrap point returns only the bytes up
 * to dev->end, and the caller must read again for the rest.
 *
 * Returns the number of bytes copied, -EAGAIN (non-blocking, empty),
 * -ERESTARTSYS (interrupted), or -EFAULT (bad user buffer).
 */
static ssize_t scull_p_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct scull_pipe *dev = filp->private_data;

	if(down_interruptible(&dev->sem)) 
		return -ERESTARTSYS;

	while (dev->rp == dev->wp) { /* nothing to read */
		up(&dev->sem);	/* release the lock before sleeping. */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		PDEBUG("\"%s\" reading: going to sleep\n", current->comm);
		/* Condition is tested lock-free here; the loop re-checks it
		 * under the semaphore, so a stale wakeup is harmless. */
		if (wait_event_interruptible(dev->inq, (dev->rp != dev->wp)))
			return -ERESTARTSYS;
		/* otherwise loop, but first reacquire the lock */
		if (down_interruptible(&dev->sem))
			return -ERESTARTSYS;
	}

	/* Data is there; clamp count to one contiguous region. */
	if (dev->wp > dev->rp)
		count = min(count, (size_t)(dev->wp - dev->rp));
	else  /* the write pointer has wrapped, return data up to dev->end */
		count = min(count, (size_t)(dev->end - dev->rp));
	if (copy_to_user(buf, dev->rp, count)) {
		up(&dev->sem);
		return -EFAULT;
	}
	dev->rp += count;
	if (dev->rp == dev->end)
		dev->rp = dev->buffer; /* wrap the read pointer. */
	up(&dev->sem);

	/* Finally, awake any writers blocked on a full buffer. */
	wake_up_interruptible(&dev->outq);
	PDEBUG("\"%s\" did read %li bytes.\n", current->comm, (long)count);
	return count;

}

/**
 * scull_getwritespace - wait until there is room to write.
 * @dev: the scull_pipe device; its semaphore must be held by the caller.
 * @filp: open file, consulted for O_NONBLOCK.
 *
 * Uses the manual prepare_to_wait/schedule/finish_wait sequence rather
 * than wait_event so the semaphore can be dropped between the space
 * check and the sleep without losing a wakeup: prepare_to_wait() puts
 * us on the queue BEFORE the final spacefree() test, so a writer-side
 * wake_up after that test still ends the schedule() promptly.
 *
 * Returns 0 with the semaphore held, or a negative errno (-EAGAIN,
 * -ERESTARTSYS) with the semaphore RELEASED.
 */
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
	while(spacefree(dev) == 0) {
		DEFINE_WAIT(wait);

		up(&dev->sem);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		PDEBUG("\"%s\" writing: going to sleep \n", current->comm);
		prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
		if (spacefree(dev) == 0)
			schedule();
		finish_wait(&dev->outq, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;	/* caller will issue the restart. */
		if (down_interruptible(&dev->sem))
			return -ERESTARTSYS;
	}
	return 0;	/* semaphore held, space available. */
}

/*
 * spacefree - bytes writable without clobbering unread data.
 * One slot is always left unused so that rp == wp can unambiguously
 * mean "empty" rather than "full"; hence the trailing "- 1".
 */
static int spacefree(struct scull_pipe *dev)
{
	int avail;

	if (dev->wp == dev->rp)
		avail = dev->buffersize;	/* buffer is empty. */
	else
		avail = (dev->rp + dev->buffersize - dev->wp) % dev->buffersize;
	return avail - 1;
}

/**
 * scull_p_write - write up to @count bytes into the circular buffer.
 * @filp: open file; private_data holds the scull_pipe.
 * @buf: userspace source buffer.
 * @count: maximum number of bytes offered.
 * @f_pos: ignored; the device is not seekable.
 *
 * Blocks until at least one byte of space is available unless
 * O_NONBLOCK is set.  At most one contiguous region is filled per
 * call (writes stop at the wrap point); the caller may need to write
 * again for the remainder.
 *
 * Returns the number of bytes accepted, -EAGAIN (non-blocking, full),
 * -ERESTARTSYS (interrupted), or -EFAULT (bad user buffer).
 */
static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct scull_pipe *dev = filp->private_data;
	int result;

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* Make sure there's space to write. */
	result = scull_getwritespace(dev, filp);
	if (result)
		return result; /* on error scull_getwritespace already called up(&dev->sem). */

	/* Space is there; clamp count to free space, then to one contiguous run. */
	count = min(count, (size_t)spacefree(dev));
	if (dev->wp >= dev->rp)
		count = min(count, (size_t)(dev->end - dev->wp)); /* to end-of-buf */
	else 
		count = min(count, (size_t)(dev->rp - dev->wp - 1)); /* keep the empty slot. */
	PDEBUG("Going to accept %li bytes to %p from %p\n", (long)count, dev->wp, buf);
	if (copy_from_user(dev->wp, buf, count)) {
		up(&dev->sem);
		return -EFAULT;
	}
	dev->wp += count;
	if (dev->wp == dev->end)
		dev->wp = dev->buffer; /* wrapped */
	up(&dev->sem);

	/* Finally, awake any reader blocked in read() or select()/poll(). */
	wake_up_interruptible(&dev->inq);

	/* Signal asynchronous readers registered via fasync. */
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	PDEBUG("\"%s\" did write %li bytes\n", current->comm, (long)count);
	return count;
}

/*
 * scull_p_fasync - register/unregister this filp for SIGIO delivery.
 * Thin wrapper: all bookkeeping is done by the kernel's fasync_helper()
 * on the device's async_queue list.  Returns its result unchanged.
 */
static int scull_p_fasync(int fd, struct file *filp, int mode)
{
	struct scull_pipe *pipe = filp->private_data;

	return fasync_helper(fd, filp, mode, &pipe->async_queue);
}

/*
 * scull_p_poll - report readiness for select()/poll()/epoll().
 *
 * The buffer is circular: it is full when wp sits right behind rp and
 * empty when the two are equal.  Both wait queues are registered so a
 * sleeping poller is woken by either readers or writers.
 */
static unsigned int scull_p_poll(struct file *filp, poll_table *wait)
{
	struct scull_pipe *pipe = filp->private_data;
	unsigned int events = 0;

	down(&pipe->sem);
	poll_wait(filp, &pipe->inq, wait);
	poll_wait(filp, &pipe->outq, wait);
	if (pipe->rp != pipe->wp)
		events |= POLLIN | POLLRDNORM;	/* data available to read. */
	if (spacefree(pipe) > 0)
		events |= POLLOUT | POLLWRNORM;	/* room available to write. */
	up(&pipe->sem);

	return events;
}

/* File operations for the scullpipe devices; llseek is disabled because
 * a pipe has no file position (matches nonseekable_open in open()). */
struct file_operations scull_pipe_fops = {
	.owner	= THIS_MODULE,
	.open	= scull_p_open,
	.release = scull_p_release,
	.read	= scull_p_read,
	.write	= scull_p_write,
	.fasync = scull_p_fasync,
	.llseek = no_llseek,
	.poll   = scull_p_poll,
};

static void scull_p_setup_cdev(struct scull_pipe *dev, int index)
{
	int err, devno;
	
	dev->buffer = kmalloc(scull_p_buffer, GFP_KERNEL);
	if (!dev->buffer) {
		printk(KERN_NOTICE "Error! %s alloc mem failed.\n", __func__);
		return;
	} 
	dev->buffersize = scull_p_buffer;
	dev->end = dev->buffer + dev->buffersize;
	dev->rp = dev->wp = dev->buffer;	/* rd and wr from the beginning. */

	devno = scull_p_devno + index;
	cdev_init(&dev->cdev, &scull_pipe_fops);
	dev->cdev.owner = THIS_MODULE;
	err = cdev_add(&dev->cdev, devno, 1);
	if (err) {
		printk(KERN_NOTICE "Error %d adding scullpipe %d.\n", err, index);
	}
}

/**
 * scull_p_init - create the scullpipe devices; called from scull init.
 * @firstdev: first device number (major + base minor) to claim.
 *
 * Returns the number of devices set up, or 0 on failure (this module's
 * convention: the caller treats the return as a device count, so errors
 * surface as "zero pipe devices" rather than a negative errno).
 */
int scull_p_init(dev_t firstdev)
{
	int i, result;

	/* Claim scull_p_nr_devs contiguous minors starting at firstdev. */
	result = register_chrdev_region(firstdev, scull_p_nr_devs, "scullp");
	if(result < 0) {
		printk(KERN_NOTICE "Unable to get scullp region, error %d.\n", result);
		return 0;
	}
	scull_p_devno = firstdev;
	/* kcalloc zeroes the array and checks the n*size multiplication
	 * for overflow, replacing the old kmalloc+memset pair. */
	scull_p_devices = kcalloc(scull_p_nr_devs, sizeof(struct scull_pipe), GFP_KERNEL);
	if(scull_p_devices == NULL) {
		unregister_chrdev_region(firstdev, scull_p_nr_devs);
		return 0;
	}
	for(i = 0; i < scull_p_nr_devs; i++) {
		init_waitqueue_head(&(scull_p_devices[i].inq));
		init_waitqueue_head(&(scull_p_devices[i].outq));
		sema_init(&scull_p_devices[i].sem, 1);
		scull_p_setup_cdev(scull_p_devices + i, i);
		/* NOTE(review): raw %p leaks a kernel address to the log;
		 * modern kernels hash it, but %pK (or dropping the print)
		 * would be preferable. */
		printk(KERN_NOTICE "scull_p_dev[%d] addr: %p\n", i, &scull_p_devices[i]);
	}

	return scull_p_nr_devs;
}

void scull_p_cleanup(void)
{
	int i;

	if(!scull_p_devices)
		return;
	for(i = 0; i < scull_p_nr_devs; i++) {
		cdev_del(&scull_p_devices[i].cdev);
		kfree(scull_p_devices[i].buffer);
	}
	kfree(scull_p_devices);
	unregister_chrdev_region(scull_p_devno, scull_p_nr_devs);
	scull_p_devices = NULL;
}
