/*
 * kernel hash table 
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <linux/vmalloc.h>

#include "./include/khtable.h"

//#define HASH_DEBUG 1 
#define USE_CACHE 1

#ifdef HASH_DEBUG
/*
 * error_handler(str, exit_value): log str at KERN_ERR, then return
 * exit_value FROM THE CALLING FUNCTION.
 *
 * NOTE(review): the hidden `return` makes control flow non-obvious and
 * skips any cleanup the caller still owes (e.g. kfree of a local
 * buffer) -- several call sites in this file leak because of it.
 */
#define error_handler(str, exit_value) do{\
	printk(KERN_ERR str);\
	printk(KERN_ERR "\n");\
	return exit_value;\
}while(0)
#else
/* release build: return the error code silently, no logging */
#define error_handler(str, exit_value) do{\
	return exit_value;\
}while(0)
#endif

/* string(key-str and value-str) length MAX limit */
#define KH_ALLOC_LIMIT 100

/* entry of ubuntu's sys_call_table */
#define SYS_CALL_TABLE_ADDR 0xffffffff81a001c0

/* sys_call number */
#define SYSCALL_GET_222 222
#define SYSCALL_PUT_223 223
#define SYSCALL_DEL_251 251

/* saved CR0 value, so write protection can be restored after patching */
u64 orig_cr0;
/* location of the kernel's sys_call_table (hard-coded address above) */
unsigned long *sys_call_table_addr = NULL;

/* backups of the original handlers for slots 222/223/251,
 * restored when the module is unloaded */
static int(*syscall222_backup)(void);
static int(*syscall223_backup)(void);
static int(*syscall251_backup)(void);

/*
 * Forward declarations of the replacement syscall handlers.
 * NOTE(review): the definitions below are marked `asmlinkage` but these
 * declarations are not -- they should agree; confirm and unify.
 */
int sys_khash_put (const char *key, size_t ksize, const char *value, size_t vsize);
int sys_khash_get (const char *key, size_t ksize, char *value, size_t vsize);
int sys_khash_del (const char *key, size_t ksize);

/* get write permissions on the sys_call_table */
/*
 * Drop the CPU's write protection so the (read-only) sys_call_table
 * can be patched.  Returns the original CR0 value for later restore
 * via setback_cr0().
 */
static inline u64 clear_cr0(void)
{
        u64 cr0 = 0;
        u64 ret;

        /* read current CR0 */
        asm volatile ("movq %%cr0, %0"
                        : "=a"(cr0));
        ret = cr0;

        /* clear bit 16 of CR0 (0x10000), the WP (Write Protect) bit */
        cr0 &= ~0x10000LL;

        asm volatile ("movq %0, %%cr0"
                        :
                        : "a"(cr0));
        return ret;
}

/* restore origin permissions on the sys_call_table */
/* restore the CR0 value saved by clear_cr0(), re-enabling write protection */
static inline void setback_cr0(u64 val) {
	asm volatile ("movq %0, %%cr0"
	 				 :
					 : "a"(val));
}

/*
 * Replace the entries in the original sys_call_table with our own
 * sys_call functions.  Write permission on the sys_call_table must be
 * obtained (clear_cr0) before doing this.
 */
static __init void install_sys_call(void){
    sys_call_table_addr=(unsigned long*)(SYS_CALL_TABLE_ADDR);
    printk("call_init......\n");

	/*
	 * The value in the table is backed up when the module is loaded 
	 * them should be restored when the module is unloaded.
	 */
    syscall222_backup=(int(*)(void))(sys_call_table_addr[SYSCALL_GET_222]);
    syscall223_backup=(int(*)(void))(sys_call_table_addr[SYSCALL_PUT_223]);
    syscall251_backup=(int(*)(void))(sys_call_table_addr[SYSCALL_DEL_251]);

    orig_cr0=clear_cr0();

	/* now, replace the original function with our sys_call function */
    sys_call_table_addr[SYSCALL_GET_222]=(unsigned long)sys_khash_get;
    sys_call_table_addr[SYSCALL_PUT_223]=(unsigned long)sys_khash_put;
    sys_call_table_addr[SYSCALL_DEL_251]=(unsigned long)sys_khash_del;

    setback_cr0(orig_cr0);
}

/*
 * Put the backed-up original handlers back into the sys_call_table.
 * The three stores must stay bracketed between clear_cr0() and
 * setback_cr0(), while write protection is disabled.
 */
static __exit void uninstall_sys_call(void){
    printk(KERN_INFO "uninstall_sys_call......\n");

	orig_cr0=clear_cr0();

	/* restore the original sys_call functions in the sys_call_table */
    sys_call_table_addr[SYSCALL_GET_222]=(unsigned long)syscall222_backup;
    sys_call_table_addr[SYSCALL_PUT_223]=(unsigned long)syscall223_backup;
    sys_call_table_addr[SYSCALL_DEL_251]=(unsigned long)syscall251_backup;

    setback_cr0(orig_cr0);
}

/* 
 * Prime table: 30 entries -- index 0 (the bucket count actually used)
 * plus 29 candidate sizes. Used by the hash algorithm for the modulus.
 */
/* index 0 is the only slot read today (see hash_init / ELFhash);
 * the remaining entries look intended for future resizing */
static unsigned int prime_tlb[] = {
	2000003,			/* [0] the bucket count currently in use */

	7,				/* 0 */
    17,             /* 1 */
    37,             /* 2 */
    79,             /* 3 */
    163,            /* 4 */
    331,            /* 5 */
    673,            /* 6 */
    1361,           /* 7 */
    2729,           /* 8 */
    5471,           /* 9 */
    10949,          /* 10 */
    21911,          /* 11 */
    43853,          /* 12 */
    87719,          /* 13 */
    175447,         /* 14 */
    350899,         /* 15 */
    701819,         /* 16 */
    1403641,        /* 17 */
    2807303,        /* 18 */
    5614657,        /* 19 */
    11229331,       /* 20 */
    22458671,       /* 21 */
    44917381,       /* 22 */
    89834777,       /* 23 */
    179669557,      /* 24 */
    359339171,      /* 25 */
    718678369,      /* 26 */
    1437356741,     /* 27 */
    2147483647      /* 28 (largest signed int prime, INT_MAX) */
};

/* the single global hash table; zero-initialized means "not yet init" */
struct kh_table_head hash_table = {0};

#ifdef USE_CACHE
/* slab cache for struct kh_node allocations (created in hash_init) */
struct kmem_cache *kh_node_cachep = NULL;
#endif

/* alloc memery for hash table and do something initialization work. */
/*
 * Allocate the bucket array and (optionally) the kh_node slab cache.
 * Returns 0 on success, a negative errno on failure.  A second call
 * while already initialized fails with -EPERM.
 */
static int hash_init (void){
	int byte_size;
	struct hlist_head *standby = NULL;

	/* refuse to initialize twice */
	if(hash_table.init_flag == KH_HAS_INIT)
		error_handler("do nothing, hash table was initialized already.", -EPERM);

	byte_size = prime_tlb[0] * sizeof(struct hlist_head);

	/*
	 * kmalloc tops out far below the ~16MB the bucket array needs, so
	 * use vmalloc.  vzalloc returns zeroed memory, which leaves every
	 * hlist_head with .first == NULL -- exactly the empty-list state,
	 * so no explicit init loop is required.
	 */
	standby = vzalloc(byte_size);
	if(!standby)
		error_handler("hash_init vzalloc failed.", -ENOMEM);

#ifdef USE_CACHE
	/*
	 * Slab cache for kh_node objects: cuts allocator overhead on the
	 * put/del paths.
	 */
	kh_node_cachep = kmem_cache_create("struct kh_node", 
							sizeof(struct kh_node), 0, 0, NULL);
	if(!kh_node_cachep){
		vfree(standby);		/* BUG FIX: bucket array leaked here before */
		error_handler("kmem_cache_create", -ENOMEM);
	}
#endif

	/* publish the table (NOTE(review): unlocked -- confirm init cannot
	 * race with the syscall handlers) */
	hash_table.entry = standby;
	hash_table.record_cnt = 0;
	hash_table.bucket_cnt_index = 0;
	hash_table.init_flag = KH_HAS_INIT;

	printk(KERN_INFO "Hash table init finished.\n");	
	return 0;	
}

/* free memery of kh_node and hash table. */
/*
 * Free every stored key/value pair and node, destroy the slab cache,
 * release the bucket array, and reset the table state so hash_init()
 * can be called again.  Returns 0.
 */
static int hash_exit (void){
	int index;
	struct hlist_node *tmp = NULL;
	struct kh_node *tpos = NULL;

	if(hash_table.init_flag != KH_HAS_INIT)
		error_handler("The hash table is not initialized, do nothing", 0);

	/* walk every bucket, releasing each record */
	for(index = 0; index < prime_tlb[hash_table.bucket_cnt_index]; index++){

		hlist_for_each_entry_safe(tpos, tmp, &hash_table.entry[index], hl_node){
			/* free the key-value pair */
			kfree(tpos->kvpair.key);
			kfree(tpos->kvpair.value);

			/* unlink and free the node itself */
			hlist_del_init(&tpos->hl_node);
#ifndef USE_CACHE
			kfree(tpos);		
#else
			kmem_cache_free(kh_node_cachep, tpos);
#endif
		}
	}

#ifdef USE_CACHE		
	/* all cached objects have been returned above */
	kmem_cache_destroy(kh_node_cachep);
	kh_node_cachep = NULL;
#endif

	/* allocated with vmalloc/vzalloc, so vfree (not kfree) */
	vfree(hash_table.entry);

	/* BUG FIX: reset state so the table is not left half-torn-down
	 * (entry dangling, init_flag still set) and re-init is possible */
	hash_table.entry = NULL;
	hash_table.record_cnt = 0;
	hash_table.init_flag = 0;

	printk(KERN_INFO "Hash table free complete.\n");	
	return 0;
}

/*
 * hash algorithm 
 * ELF hash 
 */
/*
 * ELF string hash of the NUL-terminated key, reduced modulo the
 * current bucket count.
 *
 * BUG FIX: bytes are now read through unsigned char.  The canonical
 * ELF hash operates on unsigned bytes; with plain char, characters
 * >= 0x80 would be sign-extended on signed-char platforms and hash
 * differently.
 */
unsigned long ELFhash(const char *key){
	const unsigned char *p = (const unsigned char *)key;
	unsigned long h = 0;
	unsigned long x;

	while(*p){
		h = (h << 4) + (*p++);
		x = h & 0xF0000000UL;
		if(x != 0){
			h ^= (x >> 24);
			h &= ~x;
		}
	}
	return h % prime_tlb[hash_table.bucket_cnt_index];
}

/*
 * Insert a key-value pair into the hash table.
 * @key: key buf entry
 * @ksize: key buf size
 * @value: value buf entry
 * @vsize: value buf size
 * @return: if error, return error no. else return the byte count of store.
 */
/*
 * Insert a key-value pair into the hash table, or overwrite the value
 * of an existing key.
 * @key: user-space key buffer (NUL-terminated string)
 * @ksize: key buffer size
 * @value: user-space value buffer (NUL-terminated string)
 * @vsize: value buffer size
 * @return: negative errno on error; on success the number of bytes
 *          stored (value incl. NUL for an overwrite, key + value incl.
 *          both NULs for a fresh insert).
 */
asmlinkage int sys_khash_put (const char *key, size_t ksize, const char *value, size_t vsize){
	int ret = -EFAULT;
	size_t index;
	int act_klen;
	int act_vlen;
	int min_ksize;
	int min_vsize;
	char *new_kbuf = NULL;
	char *new_vbuf = NULL;
	struct hlist_node *tmp = NULL;
	struct kh_node *tpos = NULL;
	struct kh_node *new_node = NULL;

	min_ksize = min((int)ksize, KH_ALLOC_LIMIT);
	/* BUG FIX: reject empty buffers instead of kmalloc(0) */
	if(min_ksize <= 0){
		ret = -EINVAL;
		goto _put_ret_err;
	}
	new_kbuf = kmalloc(min_ksize, GFP_KERNEL);
	if(!new_kbuf){
		ret = -ENOMEM;
		printk(KERN_ERR "put: new_kbuf kmalloc err\n");
		goto _put_ret_err;
	}

	if(copy_from_user(new_kbuf, key, min_ksize)){
		ret = -EFAULT;
		printk(KERN_ERR "put: copy from user key err\n");
		goto _put_free_kbuf;
	}

	/* strnlen == min_ksize means no NUL inside the copied region */
	act_klen = strnlen(new_kbuf, min_ksize);
	if(act_klen == min_ksize){
		ret = -EFAULT;
		printk(KERN_ERR "put: key no null-termated err\n");
		goto _put_free_kbuf;
	}

	min_vsize = min((int)vsize, KH_ALLOC_LIMIT);
	if(min_vsize <= 0){
		ret = -EINVAL;
		goto _put_free_kbuf;
	}
	new_vbuf = kmalloc(min_vsize, GFP_KERNEL);
	if(!new_vbuf){
		ret = -ENOMEM;
		printk(KERN_ERR "put: new_vbuf kmalloc err\n");
		goto _put_free_kbuf;
	}

	if(copy_from_user(new_vbuf, value, min_vsize)){
		ret = -EFAULT;
		/* BUG FIX: message said "key" for a value-copy failure */
		printk(KERN_ERR "put: copy from user value err\n");
		goto _put_free_all_buf;
	}

	/*
	 * BUG FIX: this assigned act_klen, leaving act_vlen uninitialized
	 * (undefined behavior in the return-value computations below) and
	 * clobbering the key length used for the lookup comparison.
	 */
	act_vlen = strnlen(new_vbuf, min_vsize);
	if(act_vlen == min_vsize){
		ret = -EFAULT;
		printk(KERN_ERR "put: value no null-termated err\n");
		goto _put_free_all_buf;
	}

	/* allocate the node up front so the bucket walk can't fail midway */
#ifndef USE_CACHE
	new_node = kmalloc(sizeof(struct kh_node), GFP_KERNEL);
#else
	new_node = kmem_cache_alloc(kh_node_cachep, GFP_KERNEL);
#endif
	if (!new_node){
		ret = -ENOMEM;
		printk(KERN_ERR "put: _new_node_alloc_err\n");
		goto _put_free_all_buf;
	}

	index = ELFhash(new_kbuf);
	hlist_for_each_entry_safe(tpos, tmp, &hash_table.entry[index], hl_node){
		/* same key already present: swap in the new value buffer */
		if(strcmp(new_kbuf, tpos->kvpair.key) == 0){
			kfree(tpos->kvpair.value);
			tpos->kvpair.value = new_vbuf;

			/*
			 * BUG FIX: the spare node was freed with kmem_cache_free
			 * unconditionally; honor USE_CACHE like the allocation did.
			 */
#ifndef USE_CACHE
			kfree(new_node);
#else
			kmem_cache_free(kh_node_cachep, new_node);
#endif

			/* the key string already exists, so drop our copy */
			ret = act_vlen + 1;
			goto _put_free_kbuf;
		}
	}

	/* fresh key: link the node at the head of the bucket */
	INIT_HLIST_NODE(&new_node->hl_node);
	new_node->kvpair.key = new_kbuf;
	new_node->kvpair.value = new_vbuf;

	hlist_add_head(&new_node->hl_node, &(hash_table.entry)[index]);
	hash_table.record_cnt++;

	ret = act_vlen + 1 + act_klen + 1;
	return ret;

_put_free_all_buf:
	kfree(new_vbuf);	

_put_free_kbuf:
	kfree(new_kbuf);	

_put_ret_err:
	return ret;
}

/*
 * if entry exists alreay, copy value to user space and return
 * byte count of copied. if entry does not exist, returns 0.
 * If error, return error value.
 */
/*
 * Look up a key and copy its value out to user space.
 * @return: byte count copied (value length incl. NUL) when found;
 *          0 when the key does not exist (an empty string is written
 *          to the user buffer); negative errno on error.
 */
asmlinkage int sys_khash_get (const char *key, size_t ksize, char *value, size_t vsize){
	/*
	 * BUG FIX: ret was uninitialized; the not-found path fell through
	 * to `return ret` with an indeterminate value (undefined behavior).
	 * Default to 0 = "entry does not exist", per the contract above.
	 */
	int ret = 0;
	size_t index;
	int act_vlen;
	int act_klen;
	int min_size;
	struct kh_node *tpos = NULL;
	char *key_buf = NULL;

	min_size = min((int)ksize, KH_ALLOC_LIMIT);
	/* BUG FIX: reject empty buffers instead of kmalloc(0) */
	if(min_size <= 0){
		ret = -EINVAL;
		goto _get_ret;
	}
	key_buf = kmalloc(min_size, GFP_KERNEL);
	if(!key_buf){
		ret = -ENOMEM;
		printk(KERN_ERR "get: key_buf kmalloc err\n");
		goto _get_ret;
	}

	if(copy_from_user(key_buf, key, min_size)){
		ret = -EFAULT;
		printk(KERN_ERR "get: key_buf copy_from_user err\n");
		goto _get_free_kbuf;
	}

	/* strnlen == min_size means no NUL inside the copied region */
	act_klen = strnlen(key_buf, min_size);
	if(act_klen == min_size){
		ret = -EFAULT;
		printk(KERN_ERR "get: key-str has no null-termined\n");
		goto _get_free_kbuf;
	}

	index = ELFhash(key_buf);

	hlist_for_each_entry(tpos, &hash_table.entry[index], hl_node){
		if(strcmp(tpos->kvpair.key, key_buf) == 0){
			/*
			 * Values were stored via sys_khash_put, so they are
			 * NUL-terminated and shorter than KH_ALLOC_LIMIT.
			 */
			act_vlen = strlen(tpos->kvpair.value);

			/* make sure the user buffer can hold value + NUL */
			if(vsize < (size_t)(act_vlen + 1)){
				ret = -EFAULT;
				/* BUG FIX: message was the unrelated NUL-check text */
				printk(KERN_ERR "get: user value buffer too small\n");
				goto _get_free_kbuf;
			}
			if(copy_to_user(value, tpos->kvpair.value, act_vlen + 1)){
				ret = -EFAULT;
				printk(KERN_ERR "get: copy_to_user\n");
				goto _get_free_kbuf;
			}

			/* BUG FIX: the success path returned without freeing key_buf */
			ret = act_vlen + 1;
			goto _get_free_kbuf;
		}	
	}

	/* key not present: hand an empty string back (ret stays 0) */
	if(copy_to_user(value, "", 1)){
		/* BUG FIX: error_handler here returned without freeing key_buf */
		ret = -EFAULT;
		printk(KERN_ERR "get: copy_to_user empty string\n");
	}

_get_free_kbuf:
	kfree(key_buf);

_get_ret:
	return ret;
}

/*
 * Delete the entry for a key.
 * @return: 1 when the entry was found and freed; -EPERM when the key
 *          does not exist (kept for caller compatibility); other
 *          negative errno on error.
 */
asmlinkage int sys_khash_del (const char *key, size_t ksize){
	size_t index;
	size_t act_klen;
	char *key_buf = NULL;
	struct hlist_node *tmp = NULL;
	struct kh_node *tpos = NULL;
	int min_size;

	min_size = min((int)ksize, KH_ALLOC_LIMIT);
	/* BUG FIX: reject empty buffers instead of kmalloc(0) */
	if(min_size <= 0)
		return -EINVAL;
	key_buf = kmalloc(min_size, GFP_KERNEL);
	if(!key_buf)
		error_handler("del: kmalloc", -ENOMEM);

	if(copy_from_user(key_buf, key, min_size)){
		kfree(key_buf);
		error_handler("del: copy_from_user", -EFAULT);
	}

	/* strnlen == min_size means key_buf is not NUL-terminated */
	act_klen = strnlen(key_buf, min_size);
	if(act_klen == min_size){
		kfree(key_buf);		/* BUG FIX: was leaked on this path */
		error_handler("del: key-str has no null-termined", -EINVAL);
	}

	index = ELFhash(key_buf);
	hlist_for_each_entry_safe(tpos, tmp, &hash_table.entry[index], hl_node){
		if(strcmp(key_buf, tpos->kvpair.key) == 0){
			/* found: release the pair, unlink and free the node */
			kfree(tpos->kvpair.key);
			kfree(tpos->kvpair.value);
			hlist_del_init(&tpos->hl_node);

#ifndef USE_CACHE
			kfree(tpos);
#else
			kmem_cache_free(kh_node_cachep, tpos);
#endif

			hash_table.record_cnt--;
			kfree(key_buf);	/* BUG FIX: was leaked on the success path */
			return 1;
		}
	}

	/* key not found */
	kfree(key_buf);
	return -EPERM;
}


/*
 * Module entry point.
 *
 * BUG FIX: the syscalls were installed BEFORE hash_init() ran, so a
 * process invoking slot 222/223/251 in that window would dereference
 * the still-NULL bucket array.  Initialize the table first, and
 * propagate hash_init() failure instead of ignoring it.
 */
static int __init mod_init (void){
	int ret;

	ret = hash_init();
	if(ret)
		return ret;

	install_sys_call();

	return 0;
}

/*
 * Module exit point.
 *
 * BUG FIX: the table was freed while our syscalls were still wired
 * into the sys_call_table, leaving a use-after-free window.  Restore
 * the original handlers first, then tear the table down.
 */
static void __exit mod_exit (void){
	uninstall_sys_call();

	hash_exit();
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_LICENSE("GPL");
