#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/poll.h>

#include "scullpipe.h"

static dev_t scullpipe_devno;                   /* first device number (major + BASEMINOR) */
static int scullpipe_buffer = SCULLPIPE_BUFFER; /* circular-buffer size in bytes */
static int scullpipe_major = 0;                 /* 0 => allocate a major dynamically */
static int scullpipe_nr_devs = DEVCOUNT;        /* how many pipe devices to create */
static struct scull_pipe *scullpipe_devs;       /* device array, allocated in init */

module_param(scullpipe_major, int, 0);
module_param(scullpipe_nr_devs, int, 0);

/*page 171*/
/*page 171*/
/*
 * fasync method: add/remove this file from the device's SIGIO
 * notification list via the fasync_helper boilerplate.
 */
static int scullpipe_fasync (int fd, struct file * filp, int mode)
{
	struct scull_pipe *dev;

	dev = filp->private_data;
	return fasync_helper(fd, filp, mode, &dev->async_queue);
}


/*
 * Open: locate the device from the inode, allocate the circular buffer
 * on first open and count the new reader/writer.
 *
 * Fix: the read/write pointers (and buffersize/end) are initialised
 * ONLY when the buffer is freshly allocated.  The original reset them
 * on every open, so a second opener silently discarded any data still
 * queued in the pipe.
 */
int scullpipe_open (struct inode * inode, struct file * filp)
{
	struct scull_pipe *dev;

	dev = container_of(inode->i_cdev, struct scull_pipe, cdev);
	filp->private_data = dev;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;
	if (!dev->buffer) {
		/* first open: allocate and reset the circular buffer */
		dev->buffer = kmalloc(scullpipe_buffer, GFP_KERNEL);
		if (!dev->buffer) {
			mutex_unlock(&dev->lock);
			return -ENOMEM;
		}
		dev->buffersize = scullpipe_buffer;
		dev->end = dev->buffer + dev->buffersize;
		dev->rp = dev->wp = dev->buffer;
	}

	if (filp->f_mode & FMODE_READ)
		dev->nreaders++;
	if (filp->f_mode & FMODE_WRITE)
		dev->nwriters++;
	mutex_unlock(&dev->lock);

	/*page 172*/
	/* a pipe device is a stream: no seeking */
	return nonseekable_open(inode, filp);
}

/*
 * Release: remove this filp from the async-notification list, drop the
 * reader/writer counts, and free the buffer once the last user leaves.
 */
int scullpipe_release (struct inode * inode, struct file * filp)
{
	struct scull_pipe *dev = filp->private_data;

	/* unregister from SIGIO delivery (mode 0 == remove) */
	scullpipe_fasync(-1, filp, 0);

	mutex_lock(&dev->lock);
	if (filp->f_mode & FMODE_READ)
		dev->nreaders--;
	if (filp->f_mode & FMODE_WRITE)
		dev->nwriters--;
	if (!dev->nreaders && !dev->nwriters) {
		/* last close: release the circular buffer */
		kfree(dev->buffer);
		dev->buffer = NULL;
	}
	mutex_unlock(&dev->lock);
	return 0;
}

/*page 154*/
/*page 154*/
/*
 * Blocking read from the circular buffer.
 * Sleeps (unless O_NONBLOCK) while the pipe is empty; returns at most
 * one contiguous chunk up to the buffer wrap point, so a single call
 * may return fewer bytes than requested.
 */
ssize_t scullpipe_read (struct file * filp, char __user * buf, 
			size_t count , loff_t *pos)
{
	struct scull_pipe *dev = filp->private_data;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	while (dev->rp == dev->wp) { /* nothing to read */
		mutex_unlock(&dev->lock); /* release the lock before sleeping */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		PDEBUG("\"%s\" reading: going to sleep...\n", current->comm);
		/* the emptiness test is repeated under the lock at the top of
		 * the loop, so a racing reader here just sends us back to sleep */
		if (wait_event_interruptible(dev->inq, (dev->rp != dev->wp)))
			return -ERESTARTSYS; /* signal: let the fs layer handle it */
		if (mutex_lock_interruptible(&dev->lock))
			return -ERESTARTSYS;
	}

	/* data is there; limit count to one contiguous region */
	if (dev->wp > dev->rp)
		count = min(count, (size_t)(dev->wp - dev->rp));
	else
		count = min(count, (size_t)(dev->end - dev->rp)); /* wp wrapped: read up to end */

	if (copy_to_user(buf, dev->rp, count)) {
		mutex_unlock(&dev->lock);
		return -EFAULT;
	}

	dev->rp += count;
	if (dev->rp == dev->end)
		dev->rp = dev->buffer; /* wrap the read pointer */
	mutex_unlock(&dev->lock);

	/* finally, wake any sleeping writers */
	wake_up_interruptible(&dev->outq);
	PDEBUG("\"%s\" did read %li bytes\n", current->comm, (long)count);
	return count;
}

/*page 158*/
/*page 158*/
/*
 * Bytes that may be written without clobbering unread data.  One slot
 * is always kept empty so that rp == wp unambiguously means "empty"
 * rather than "full".  Called with dev->lock held at every call site.
 */
static int spacefree(struct scull_pipe *dev)
{
	int avail;

	if (dev->rp == dev->wp)
		avail = dev->buffersize;	/* pipe is empty */
	else
		avail = (dev->rp + dev->buffersize - dev->wp) % dev->buffersize;
	return avail - 1;			/* reserve the sentinel slot */
}


/*page 159*/
/*page 159*/
/*
 * Wait until there is room to write.  Must be called with dev->lock
 * held.  On success (return 0) the lock is held again and space is
 * available; on error the lock has been RELEASED and the caller must
 * simply return the error code.
 * The manual prepare_to_wait/schedule sequence (instead of wait_event)
 * closes the race between testing spacefree() and going to sleep.
 */
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
	while (spacefree(dev) == 0) { /* full */
		DEFINE_WAIT(wait);

		mutex_unlock(&dev->lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
		/* re-check after queueing ourselves: a wake-up arriving between
		 * the test and schedule() is not lost */
		if (spacefree(dev) == 0)
			schedule();
		finish_wait(&dev->outq, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS; /* signal: let the fs layer handle it */
		if (mutex_lock_interruptible(&dev->lock))
			return -ERESTARTSYS;
	}
	return 0;
}

/*page158*/
/*page158*/
/*
 * Blocking write into the circular buffer.
 * Sleeps (unless O_NONBLOCK) while the pipe is full; accepts at most
 * one contiguous chunk up to the wrap point, so a call may write fewer
 * bytes than requested.
 */
ssize_t scullpipe_write (struct file * filp, const char __user *buf, 
			size_t count, loff_t * pos)
{
	struct scull_pipe *dev = filp->private_data;
	int result;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;
	result = scull_getwritespace(dev, filp);
	if (result)
		return result; /* scull_getwritespace released dev->lock on error */

	/* ok, space is there, accept something */
	count = min(count, (size_t)spacefree(dev));
	if (dev->wp >= dev->rp)
		count = min(count, (size_t)(dev->end - dev->wp)); /* up to end-of-buffer */
	else
		count = min(count, (size_t)(dev->rp - dev->wp - 1)); /* wp wrapped: fill up to rp-1 */
	PDEBUG("Going to accept %li bytes to %p from %p\n",
			(long)count, dev->wp, buf);

	if (copy_from_user(dev->wp, buf, count)) {
		mutex_unlock(&dev->lock);
		return -EFAULT;
	}
	dev->wp += count;
	if (dev->wp == dev->end)
		dev->wp = dev->buffer; /* wrap the write pointer */
	mutex_unlock(&dev->lock);

	/* wake any sleeping reader */
	wake_up_interruptible(&dev->inq);

	/* and signal asynchronous readers registered via fasync */
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	PDEBUG("\"%s\" did write %li bytes\n", current->comm, (long)count);
	return count;
}

/*page 165*/
/*page 165*/
/*
 * Poll method: report readability (buffer non-empty) and writability
 * (at least one free byte) to select/poll/epoll.
 */
static unsigned int scullpipe_poll (struct file *filp, 
				struct poll_table_struct *wait)
{
	struct scull_pipe *dev = filp->private_data;
	unsigned int events = 0;

	mutex_lock(&dev->lock);
	/* register both queues; the core wakes us on either */
	poll_wait(filp, &dev->inq, wait);
	poll_wait(filp, &dev->outq, wait);
	if (dev->rp != dev->wp)
		events |= POLLIN | POLLRDNORM;	/* readable */
	if (spacefree(dev) != 0)
		events |= POLLOUT | POLLWRNORM;	/* writable */
	mutex_unlock(&dev->lock);

	return events;
}

/*
 * File operations for the pipe devices.  llseek is no_llseek because
 * the device is a stream (page 173).
 */
static struct file_operations scullpipe_fops = {
	.owner   = THIS_MODULE,
	.llseek  = no_llseek,
	.read    = scullpipe_read,
	.write   = scullpipe_write,
	.poll    = scullpipe_poll,
	.fasync  = scullpipe_fasync,
	.open    = scullpipe_open,
	.release = scullpipe_release,
};

/*
 * Obtain the device number region: use the major requested on the
 * module command line if any, otherwise let the kernel pick one.
 */
static int scullpipe_register_dev(void)
{
	int result;

	if (scullpipe_major == 0) {
		/* dynamic allocation */
		result = alloc_chrdev_region(&scullpipe_devno, BASEMINOR,
				scullpipe_nr_devs, DEVNAME);
		return result < 0 ? result : 0;
	}

	/* static major supplied via scullpipe_major */
	scullpipe_devno = MKDEV(scullpipe_major, BASEMINOR);
	result = register_chrdev_region(scullpipe_devno,
			scullpipe_nr_devs, DEVNAME);
	if (result < 0)
		return result;
	scullpipe_major = MAJOR(scullpipe_devno);
	return 0;
}

static void scullpipe_setup_cdev(struct scull_pipe * dev, int index)
{
	int err, devno = scullpipe_devno + index;
	cdev_init(&dev->cdev, &scullpipe_fops);
	dev->cdev.owner = THIS_MODULE;
	err = cdev_add(&dev->cdev, devno, 1);
	if (err < 0)
		printk(KERN_NOTICE "Error %d adding scullpipe%d", err, index);
}


static void scullpipe_exit(void)
{
	int i;
	for (i = 0; i < scullpipe_nr_devs; i++)
		cdev_del(&scullpipe_devs[i].cdev);
	unregister_chrdev_region(scullpipe_devno, scullpipe_nr_devs);
}


/*
 * Module init: register the device number region, allocate and zero the
 * device array, then set up each device's wait queues, lock and cdev.
 *
 * Fixes: registration failure now propagates the error instead of
 * returning 0 (the original left the module loaded with no devices and
 * a NULL device array, crashing later in exit); kmalloc+memset is
 * replaced by kcalloc, which zero-initialises and checks the size
 * multiplication for overflow.
 */
static int scullpipe_init(void)
{
	int i, result;

	result = scullpipe_register_dev();
	if (result < 0) {
		printk(KERN_NOTICE "scullpipe: Unable to register dev!!\n");
		return result;
	}
	PDEBUG("dev major = %i\n", MAJOR(scullpipe_devno));

	scullpipe_devs = kcalloc(scullpipe_nr_devs, sizeof(struct scull_pipe),
				GFP_KERNEL);
	if (scullpipe_devs == NULL) {
		unregister_chrdev_region(scullpipe_devno, scullpipe_nr_devs);
		return -ENOMEM;
	}

	for (i = 0; i < scullpipe_nr_devs; i++) {
		init_waitqueue_head(&scullpipe_devs[i].inq);
		init_waitqueue_head(&scullpipe_devs[i].outq);
		mutex_init(&scullpipe_devs[i].lock);
		scullpipe_setup_cdev(scullpipe_devs + i, i);
	}
	return 0;
}

module_init(scullpipe_init);
module_exit(scullpipe_exit);
MODULE_LICENSE("Dual BSD/GPL");

