#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h> //kmalloc();
#include <linux/errno.h>  // error code
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/fcntl.h>
#include <linux/seq_file.h>
#include <linux/cdev.h>

//#include <asm/system.h>
#include <asm/uaccess.h>

#include "scull.h"

MODULE_AUTHOR("WCF");
MODULE_LICENSE("GPL");

/*
 * Tunables, exported as read-only module parameters (S_IRUGO).
 * scull_major == 0 requests dynamic major allocation at load time.
 */
int scull_major = 0;
int scull_minor = 0;
int scull_quantum = SCULL_QUANTUM;  /* bytes per quantum */
int scull_qset =    SCULL_QSET;     /* quantum pointers per qset node */

struct scull_dev *scull_devices;    /* allocated in scull_init_module() */
unsigned int  scull_nr_devs = 4;    /* number of scull minors to create */

module_param(scull_major, int, S_IRUGO);
module_param(scull_minor, int, S_IRUGO);
/* BUGFIX: scull_nr_devs is unsigned int, so the parameter type must be
 * `uint`; declaring it `int` trips the module_param type check. */
module_param(scull_nr_devs, uint, S_IRUGO);
module_param(scull_quantum, int, S_IRUGO);
module_param(scull_qset, int, S_IRUGO);

/*
 * Free the whole data area of @dev and reset it to a pristine, empty
 * state with the current module-wide quantum/qset settings.
 * Caller must hold dev->sem (or otherwise guarantee exclusion).
 * Always returns 0.
 */
int scull_trim(struct scull_dev *dev)
{
  struct scull_qset *next, *dptr;
  int qset = dev->qset;  /* array size valid for the nodes we traverse */
  int i;

  for(dptr = dev->data; dptr; dptr = next){
    if(dptr->data){
      for(i = 0; i < qset; i++)
	kfree(dptr->data[i]);  /* free each quantum */
      kfree(dptr->data);       /* BUGFIX: free the pointer array itself
                                * (was leaked: only set to NULL before) */
      dptr->data = NULL;
    }
    next = dptr->next;
    kfree(dptr);
  }
  dev->size = 0;
  dev->quantum = scull_quantum;
  dev->qset = scull_qset;
  dev->data = NULL;
  return 0;
}
/*
 * Return the n-th qset node of @dev, allocating (zeroed) intermediate
 * nodes on demand.  Returns NULL on allocation failure; any nodes
 * already allocated are left in place and will be reclaimed by
 * scull_trim().  Caller must hold dev->sem.
 */
struct scull_qset *scull_follow(struct scull_dev *dev, int n)
{
  struct scull_qset *qs = dev->data;

  /* Allocate the first qset node explicitly if need be. */
  if (! qs) {
    /* kzalloc replaces the kmalloc+memset pair */
    qs = dev->data = kzalloc(sizeof(struct scull_qset), GFP_KERNEL);
    if (qs == NULL)
      return NULL;  /* Never mind */
  }

  /* Then follow (and extend) the list n more nodes. */
  while (n--) {
    if (!qs->next) {
      qs->next = kzalloc(sizeof(struct scull_qset), GFP_KERNEL);
      if (qs->next == NULL)
	return NULL;  /* Never mind */
    }
    qs = qs->next;
  }
  return qs;
}

/*
 * Read up to @count bytes at *@f_pos into the user buffer @buf.
 * A read never crosses a quantum boundary: it is silently shortened to
 * the end of the current quantum (callers loop as usual).  Reading a
 * hole (unallocated quantum) returns 0, like EOF.
 */
ssize_t scull_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct scull_dev *dev = filp->private_data;
	struct scull_qset *node;
	int quantum = dev->quantum;
	int qset = dev->qset;
	int node_bytes = quantum * qset;	/* bytes held by one list node */
	int node_idx, slot, byte_off, within;
	ssize_t retval = 0;

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* Nothing beyond the recorded size; clamp reads that cross it. */
	if (*f_pos >= dev->size)
		goto out;
	if (*f_pos + count > dev->size)
		count = dev->size - *f_pos;

	/* Translate *f_pos into list node, quantum slot, byte offset. */
	node_idx = (long)*f_pos / node_bytes;
	within   = (long)*f_pos % node_bytes;
	slot     = within / quantum;
	byte_off = within % quantum;

	/* Walk the list up to the right node (allocating as needed). */
	node = scull_follow(dev, node_idx);
	if (node == NULL || !node->data || !node->data[slot])
		goto out;	/* don't fill holes */

	/* Serve at most the remainder of this quantum. */
	if (count > quantum - byte_off)
		count = quantum - byte_off;

	if (copy_to_user(buf, node->data[slot] + byte_off, count)) {
		retval = -EFAULT;
		goto out;
	}
	*f_pos += count;
	retval = count;

  out:
	up(&dev->sem);
	return retval;
}

/*
 * Write up to @count bytes from the user buffer @buf at *@f_pos,
 * allocating the qset pointer array and the target quantum on demand.
 * Like scull_read(), a single call never crosses a quantum boundary.
 * Returns bytes written, -ENOMEM on allocation failure, -EFAULT on a
 * bad user pointer, -ERESTARTSYS if interrupted waiting for the lock.
 */
ssize_t scull_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
  struct scull_dev *dev = filp->private_data;
  struct scull_qset *node;
  int quantum = dev->quantum;
  int qset = dev->qset;
  int node_bytes = quantum * qset;  /* bytes addressable per list node */
  int node_idx, slot, byte_off, within;
  ssize_t retval = -ENOMEM;         /* covers every allocation failure below */

  if (down_interruptible(&dev->sem))
    return -ERESTARTSYS;

  /* Translate *f_pos into list node, quantum slot, byte offset. */
  node_idx = (long)*f_pos / node_bytes;
  within   = (long)*f_pos % node_bytes;
  slot     = within / quantum;
  byte_off = within % quantum;

  node = scull_follow(dev, node_idx);
  if (node == NULL)
    goto out;

  /* First write into this node: allocate its pointer array. */
  if (!node->data) {
    node->data = kmalloc(qset * sizeof(char *), GFP_KERNEL);
    if (!node->data)
      goto out;
    memset(node->data, 0, qset * sizeof(char *));
  }
  /* First write into this slot: allocate the quantum. */
  if (!node->data[slot]) {
    node->data[slot] = kmalloc(quantum, GFP_KERNEL);
    if (!node->data[slot])
      goto out;
  }

  /* Write no further than the end of this quantum. */
  if (count > quantum - byte_off)
    count = quantum - byte_off;

  if (copy_from_user(node->data[slot] + byte_off, buf, count)) {
    retval = -EFAULT;
    goto out;
  }
  *f_pos += count;
  retval = count;

  /* Grow the recorded size if we wrote past the old end. */
  if (dev->size < *f_pos)
    dev->size = *f_pos;

 out:
  up(&dev->sem);
  return retval;
}


/*
 * Legacy (pre-2.6.36 style) ioctl handler; currently not wired into
 * scull_fops (see the commented-out .ioctl entry).
 *
 * NOTE(review): the final parameter is `unsigned`, but the ioctl ABI
 * delivers an `unsigned long`; on 64-bit kernels this truncates
 * pointer-valued arguments.  It should be `unsigned long` — change it
 * together with the prototype in scull.h.
 */
int scull_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned arg)
{
  int err = 0, tmp;
  int retval = 0;

  /* Reject commands that are not ours before touching user memory. */
  if(_IOC_TYPE(cmd) != SCULL_IOC_MAGIC) return -ENOTTY;
  if(_IOC_NR(cmd) > SCULL_IOC_MAXNR) return -ENOTTY;

  /* Verify the whole user area once, so __get_user/__put_user below
   * are safe without further checks. */
  if(_IOC_DIR(cmd) & _IOC_READ)
    err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd) );
  else if (_IOC_DIR(cmd) & _IOC_WRITE)
    err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd) );
  if(err) return -EFAULT;

  switch(cmd){
  case SCULL_IOCRESET:
    scull_quantum = SCULL_QUANTUM;
    scull_qset = SCULL_QSET;
    break;
  case SCULL_IOCSQUANTUM:  /* Set: new value through pointer */
    if(! capable (CAP_SYS_ADMIN))
      return -EPERM;
    retval = __get_user(scull_quantum, (int __user *)arg );
    break;
  case SCULL_IOCTQUANTUM:  /* Tell: new value in arg itself */
    if(! capable(CAP_SYS_ADMIN))
      return -EPERM;
    scull_quantum = arg;
    break;
  case SCULL_IOCGQUANTUM:  /* Get: current value through pointer */
    retval = __put_user(scull_quantum, (int __user *)arg );
    break;
  case SCULL_IOCQQUANTUM:  /* Query: current value as return code */
    return scull_quantum;
  case SCULL_IOCXQUANTUM:  /* eXchange: swap old/new through pointer */
    if(! capable (CAP_SYS_ADMIN) )
      return -EPERM;
    tmp = scull_quantum;
    retval = __get_user(scull_quantum,(int __user *)arg);
    if(retval == 0)
      retval = __put_user(tmp, (int __user *)arg);
    break;
  case SCULL_IOCHQUANTUM:  /* sHift: new in arg, old value returned */
    if(!capable (CAP_SYS_ADMIN))
      return -EPERM;
    tmp = scull_quantum;
    scull_quantum = arg;
    return tmp;
  case SCULL_IOCTQSET:
    if(! capable (CAP_SYS_ADMIN))
      return -EPERM;
    retval = __get_user(scull_qset, (int __user *) arg);
    break;
  default:
    /* BUGFIX: unhandled (but in-range) commands must fail with
     * -ENOTTY, not fall through and return 0 as success. */
    return -ENOTTY;
  }
  return retval;
}

/*
 * Reposition the file offset.  SEEK_END is measured against the
 * device's current data size.  Returns the new position, or -EINVAL
 * for an unknown whence or a negative resulting offset.
 */
loff_t scull_llseek(struct file *filp, loff_t off, int whence)
{
  struct scull_dev *dev = filp->private_data;
  loff_t newpos;

  /* Use the named SEEK_* constants instead of magic 0/1/2. */
  switch(whence){
  case SEEK_SET:
    newpos = off;
    break;
  case SEEK_CUR:
    newpos = filp->f_pos + off;
    break;
  case SEEK_END:
    newpos = dev->size + off;
    break;
  default:
    return -EINVAL;
  }
  if(newpos < 0) return -EINVAL;
  filp->f_pos = newpos;

  return newpos;
}
/*
 * Stash the per-device structure for the other file operations and,
 * when the file is opened write-only, truncate the device to length
 * zero (classic scull semantics).
 */
int scull_open(struct inode *inode, struct file *filp)
{
  struct scull_dev *dev = container_of(inode->i_cdev, struct scull_dev, cdev);

  filp->private_data = dev;

  if ((filp->f_flags & O_ACCMODE) != O_WRONLY)
    return 0;

  if (down_interruptible(&dev->sem))
    return -ERESTARTSYS;
  scull_trim(dev);
  up(&dev->sem);
  return 0;
}

/*
 * Nothing to do on last close: all per-device state lives in
 * scull_dev and is torn down at module unload, not at release time.
 */
int scull_release(struct inode *inode, struct file *filp)
{
  return 0;
}

/*
 * File operations for the bare scull device.  scull_ioctl still uses
 * the legacy ioctl prototype, hence its entry is left commented out.
 */
struct file_operations scull_fops = {
  .owner   = THIS_MODULE,
  .llseek  = scull_llseek,
  .read    = scull_read,
  .write   = scull_write,
  //.ioctl = scull_ioctl,
  .open    = scull_open,
  .release = scull_release,
};

/*
 * Undo everything scull_init_module() set up.  This also serves as the
 * init error path, so it must tolerate partial initialization
 * (scull_devices may still be NULL).
 */
void scull_cleanup_module(void)
{
  dev_t first_devno = MKDEV(scull_major, scull_minor);
  int i;

  if (scull_devices) {
    for (i = 0; i < scull_nr_devs; i++) {
      scull_trim(scull_devices + i);
      cdev_del(&scull_devices[i].cdev);
    }
    kfree(scull_devices);
  }
#ifdef SCULL_DEBUG
 
#endif

  unregister_chrdev_region(first_devno, scull_nr_devs);
  //scull_p_cleanup();
  //scull_accsess_cleanup();
  printk(KERN_ALERT "rmmod scull\n");
}

static void scull_setup_cdev(struct scull_dev * dev, int index)
{
  int err, devno = MKDEV(scull_major, scull_minor + index);
  
  cdev_init(&dev->cdev,&scull_fops);
  dev->cdev.owner = THIS_MODULE;
  dev->cdev.ops = &scull_fops;
  err = cdev_add(&dev->cdev, devno, 1);
  if(err)
    printk(KERN_NOTICE "Error %d adding scull%d \n",err,index);
 
}
//
/*
 * Module init: obtain a char device region (static major if the
 * scull_major parameter is set, dynamic otherwise), allocate and
 * initialize scull_nr_devs device structures, and register their
 * cdevs.  On failure everything done so far is undone via
 * scull_cleanup_module().
 */
static int __init scull_init_module(void)
{
  int result,i;
  dev_t dev = 0;
  if(scull_major){
    dev = MKDEV(scull_major, scull_minor);
    result = register_chrdev_region(dev, scull_nr_devs, "scull");
  }else{
    result =alloc_chrdev_region(&dev, scull_minor, scull_nr_devs, "scull");
    scull_major = MAJOR(dev);
  }
  if(result < 0){
    printk(KERN_WARNING "scull: can't get major %d\n", scull_major);
    return result;
  }
  /* kcalloc zero-fills (replacing the kmalloc+memset pair) and checks
   * the n*size multiplication for overflow. */
  scull_devices = kcalloc(scull_nr_devs, sizeof(struct scull_dev), GFP_KERNEL);
  if(!scull_devices){
    result = -ENOMEM;
    goto fail;
  }
  for(i = 0; i < scull_nr_devs; i++){
    scull_devices[i].quantum = scull_quantum;
    scull_devices[i].qset = scull_qset;
    sema_init(&scull_devices[i].sem,1);  /* init_MUTEX removed after 2.6.25 */
    scull_setup_cdev(&scull_devices[i],i);
  }
    dev = MKDEV(scull_major, scull_minor + scull_nr_devs);
    //dev += scull_p_init(dev);
    //dev += scull_access_init(dev);
#ifdef SCULL_DEBUG

#endif
    printk(KERN_ALERT "insmod scull\n");
    return 0;
  fail:
    scull_cleanup_module();
    return result;
}

module_init(scull_init_module);
module_exit(scull_cleanup_module);
