/* Design for this sample:
 * 	1.request dev_t content; alloc_chrdev_region, major is determined by the OS and 
 * 	  minor should start from BEGIN_NUMBER;
 * 	2.a list of memory blocks is allocated for every device each time it is written, 
 * 	  and the list of memory blocks will grow when the device content is enlarged;
 * 	3.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#define BEGIN_NUMBER  0
#define COUNT  4
#define MY_BUF_LEN	20
enum {
	CMD1,
	CMD2,
	CMD3
};
#define JD_QUTO_COUNT 	1000
#define JD_QUTO_SIZE	1000
static unsigned int  jd_quto_count = JD_QUTO_COUNT;
static unsigned int  jd_quto_size = JD_QUTO_SIZE;
module_param(jd_quto_size, int , 0);
module_param(jd_quto_count, int , 0);

/*
 * One node in the per-device storage chain.
 * `data` is an array of jd_quto_count pointers, each pointing at a
 * jd_quto_size-byte block allocated from jd_cache; nodes are linked
 * through `next` (see write() for how the chain is built).
 */
typedef struct jd_data_set{
	struct jd_data_set *next;
	void **data;
} jd_data_set_t;

/*
 * Per-device state.
 * jd_node points at the head of the jd_data_set chain (NULL when the
 * device holds no content); jd_quto_count/jd_quto_size are snapshots of
 * the module parameters taken at init; jd_lock serializes read/write.
 */
typedef struct jd_dev{
	jd_data_set_t *jd_node;		/* head of the block-set chain */
	int jd_quto_count;		/* blocks per set */
	int jd_quto_size;		/* bytes per block */
	struct mutex jd_lock;		/* protects jd_node and jd_size */
	struct cdev jd_dev;		/* embedded char device */
	long jd_size;			/* content size in bytes */

} jd_dev_t;

struct kmem_cache * jd_cache = NULL;	/* slab cache for jd_quto_size-byte blocks */
static jd_dev_t my_dev;			/* the single device instance in this sample */
static const char *MY_CHAR_NAME = "mychar";
wait_queue_head_t my_queue;		/* used only by the #if 0 blocking paths */
dev_t id;				/* device number region base, set in jd_init() */
int tag = 0;				/* wait condition flag (currently unused: #if 0) */
int cache_count = 0;			/* debug counter (currently unused) */
void *ptr = NULL;			/* NOTE(review): appears unused — candidate for removal */
/*
 * Slab constructor: zero each object as it enters the cache.
 * NOTE(review): this prototype matches the legacy ctor signature the
 * 5-argument kmem_cache_create() in jd_init() expects — confirm against
 * the target kernel version before building.
 */
static void item_ctor(struct kmem_cache * cache, void *item){
	//printk("destroy item.\n");
	//cache_count--;
	memset(item, 0, jd_quto_size);
}
/*
static jd_data_set_t * get_set(int item){
	jd_data_set_t *set = my_dev.node;
	while(item--){
		if(set->next != NULL)
			set = set->next;
	};
}
*/
/*
 * One-time module state setup: pick the device number, reset the device
 * struct, and create the slab cache the block allocator draws from.
 * On cache-creation failure jd_cache stays NULL; init() checks for that.
 */
static void jd_init(void){
	/* NOTE(review): static MKDEV(253, 2) — the file header says
	 * alloc_chrdev_region was intended; confirm which is wanted. */
	id = MKDEV(253, 2);
	my_dev.jd_node = NULL;
	my_dev.jd_size = 0;
	my_dev.jd_quto_size = jd_quto_size;
	my_dev.jd_quto_count = jd_quto_count;
	mutex_init(&my_dev.jd_lock);
	init_waitqueue_head(&my_queue);
	/* BUG FIX: kmem_cache_create(name, size, align, flags, ctor) —
	 * SLAB_HWCACHE_ALIGN was being passed as the align argument and 0
	 * as flags; the two were swapped. */
	jd_cache = kmem_cache_create("jd_cache", jd_quto_size, 0, SLAB_HWCACHE_ALIGN, item_ctor);
}


/*
 * Release every quota block and index array hanging off dev->jd_node,
 * then free the node array itself and mark the device empty.
 *
 * The whole chain of jd_data_set_t nodes is one kmalloc'd array whose
 * base is dev->jd_node (see write()), so a single kfree() of the base
 * releases all nodes once their per-node resources are freed.
 * Caller must hold dev->jd_lock if readers/writers may race.
 */
static void jd_trim(jd_dev_t *dev){
	jd_data_set_t *set;
	int i;

	if(dev == NULL || dev->jd_node == NULL)	/* nothing allocated yet */
		return;
	for(set = dev->jd_node; set != NULL; set = set->next){
		for(i = 0; i < dev->jd_quto_count; i++){
			kmem_cache_free(jd_cache, set->data[i]);
		}
		kfree(set->data);
	}
	/* BUG FIX: the node array itself was leaked and jd_node was left
	 * dangling; free it and reset the bookkeeping. */
	kfree(dev->jd_node);
	dev->jd_node = NULL;
	dev->jd_size = 0;
}
static void jd_release(void){
	jd_trim(&my_dev);
	kmem_cache_destroy(jd_cache);
}
/*
 * read() - currently a stub: takes the device lock, logs, and returns 0
 * (EOF). The blocking wait on my_queue is compiled out with #if 0.
 * NOTE(review): no data is copied to userspace yet — see header item 3.
 */
static ssize_t read(struct file *f, char __user *data, size_t len, loff_t *off){
	int res = 0;

	/* BUG FIX: loff_t requires %lld, not %d. */
	printk("off = %lld.\n", (long long)*off);
	mutex_lock(&my_dev.jd_lock);//read operation should be in the following section.
#if 0
	if(!wait_event_timeout(my_queue, false , 10000)){
		printk("timeout\n");
		//return -ERESTARTSYS;
	}
	tag = 0;
#endif
	mutex_unlock(&my_dev.jd_lock);
#ifdef DEBUG
	/* BUG FIX: size_t requires %zu, not %d. */
	printk("read len = %zu.\n", len);
#endif
	return res;
}
/*
 * write() - (re)allocate backing storage sized to hold `len` bytes.
 *
 * NOTE(review): this sample only (re)builds the block list; the user
 * buffer is not copied into the blocks yet (see header item 3).
 *
 * Returns `len` on success — returning 0 (as before) makes userspace
 * retry forever — or -ENOMEM if any allocation fails, in which case
 * everything allocated so far is unwound.
 */
static ssize_t write(struct file *f, const char __user *data, size_t len, loff_t *off){
	jd_dev_t *dev = (jd_dev_t *) f->private_data;
	int set_len = jd_quto_size * jd_quto_count;
	int set_count, item_pos, pos;
	ssize_t res;
	jd_data_set_t *set;
	int i, j;

	/* BUG FIX: len == 0 made (len - 1) wrap (size_t) and produced a
	 * huge set_count. */
	if(len == 0)
		return 0;
	set_count = (len - 1) / set_len + 1;
	item_pos = (len % set_len - 1) / jd_quto_size + 1;
	pos = (len % set_len) % jd_quto_size;

	*off = 10;	/* NOTE(review): debug leftover from the sample — confirm intended. */
	printk("off = %lld.\n", (long long)*off);	/* BUG FIX: %lld for loff_t */
	printk("set_len = %d, set_count = %d, item_pos = %d ,pos = %d\n", set_len, set_count, item_pos, pos);

	/* BUG FIX: trimming and rebuilding must both happen under the lock,
	 * otherwise a concurrent reader can observe a half-torn-down chain. */
	mutex_lock(&my_dev.jd_lock);//write action should be in the following section.
	if(dev->jd_node == NULL)
		printk("null.\n");
	jd_trim(dev);
	dev->jd_node = NULL;	/* defensive: never leave a dangling head */

	/* BUG FIX: every allocation below was unchecked. */
	set = kmalloc(set_count * sizeof(jd_data_set_t), GFP_KERNEL);
	if(set == NULL){
		res = -ENOMEM;
		goto out_unlock;
	}
	for(i = 0; i < set_count; i++){
		printk("alloc : %d\n", i);
		set[i].next = (i + 1 < set_count) ? &set[i + 1] : NULL;
		set[i].data = kmalloc(jd_quto_count * sizeof(void *), GFP_KERNEL);
		if(set[i].data == NULL)
			goto fail;
		for(j = 0; j < jd_quto_count; j++){
			set[i].data[j] = kmem_cache_alloc(jd_cache, GFP_KERNEL);
			if(set[i].data[j] == NULL){
				/* free the partially-filled node, then unwind */
				while(j-- > 0)
					kmem_cache_free(jd_cache, set[i].data[j]);
				kfree(set[i].data);
				goto fail;
			}
		}
	}
	dev->jd_node = &set[0];
	dev->jd_size = len;
	res = len;
#ifdef DEBUG
	printk("write len = %zu.\n", len);	/* BUG FIX: %zu for size_t */
#endif
out_unlock:
	mutex_unlock(&my_dev.jd_lock);
	return res;

fail:
	/* unwind the fully-populated nodes [0, i) and the node array */
	while(i-- > 0){
		for(j = 0; j < jd_quto_count; j++)
			kmem_cache_free(jd_cache, set[i].data[j]);
		kfree(set[i].data);
	}
	kfree(set);
	res = -ENOMEM;
	goto out_unlock;
}
/*
 * open() - recover the jd_dev_t that embeds this inode's cdev and stash
 * it in file->private_data so read/write/ioctl can reach it directly.
 */
static int open(struct inode *in, struct file *f){
	jd_dev_t *dev;

	dev = container_of(in->i_cdev, jd_dev_t, jd_dev);
	f->private_data = dev;
#ifdef DEBUG
	printk("open dev file major = %d, minor = %d dev->queue = %d.\n", imajor(in), iminor(in), dev->jd_quto_count);
#endif
	return 0;
}
/*
 * Legacy ioctl entry point (pre-2.6.36 prototype, wired via .ioctl).
 * Accepts CMD1..CMD3, rejects anything else with -EINVAL.
 */
static int ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long ptr){
	printk("cmd = %u.\n", cmd);	/* BUG FIX: cmd is unsigned — %u */
	/* BUG FIX: `cmd < CMD1` could never be true (cmd is unsigned and
	 * CMD1 == 0), so only the upper bound needs checking. */
	if(cmd > CMD3){
		return -EINVAL;
	}
	return 0;
}
/*
 * File operations table for the char device.
 * NOTE(review): `.ioctl` is the legacy (pre-2.6.36) slot; modern kernels
 * use `.unlocked_ioctl` — confirm against the target kernel version.
 */
struct file_operations c_fops = {
	.owner = THIS_MODULE,
	.open = open,
	.read = read,
	.ioctl = ioctl,
	.write = write,
};
/*
 * Module entry point: set up state and the slab cache, register the
 * device-number region, and add the cdev. Resources are unwound in
 * reverse order on any failure so a failed load leaks nothing.
 */
static int __init init(void){
	int res;

	printk("count = %u, size = %u.\n", jd_quto_count, jd_quto_size);
	jd_init();
	/* BUG FIX: the old `goto out` here returned 0 — a broken load was
	 * reported to the kernel as success. */
	if(jd_cache == NULL){
		printk("failed to create slab cache.\n");
		return -ENOMEM;
	}
	/* BUG FIX: the return value was ignored, and only 1 minor was
	 * registered while cdev_add covered 4 — use COUNT consistently. */
	res = register_chrdev_region(id, COUNT, MY_CHAR_NAME);
	if(res != 0){
		printk("failed to alloc device id.\n");
		goto out_cache;
	}
	cdev_init(&my_dev.jd_dev, &c_fops);
	res = cdev_add(&my_dev.jd_dev, id, COUNT);
	if(res < 0){
		/* BUG FIX: the region was leaked and the success message was
		 * printed even on this path. */
		printk("failed to add char device.\n");
		goto out_region;
	}
	printk("success to load driver for device major = %d, minor = %d.\n", MAJOR(id), MINOR(id));
	return 0;	/* 0 means no error during loading process, then module will
			   be inserted into kernel. */

out_region:
	unregister_chrdev_region(id, COUNT);
out_cache:
	kmem_cache_destroy(jd_cache);
	jd_cache = NULL;
	return res;
}
/*
 * Module exit: tear down in reverse order of init(). The cdev must be
 * removed first (so no new opens can race the memory release), then the
 * device memory and slab cache are freed, and the device-number region
 * is returned last.
 */
static void __exit final(void){
	/* BUG FIX: the region was unregistered before cdev_del. */
	cdev_del(&my_dev.jd_dev);
	jd_release();
	unregister_chrdev_region(id, COUNT);
}

module_init(init);
module_exit(final);
MODULE_LICENSE("GPL");
