#include <linux/errno.h>
#include <linux/fs2.h>
#include <linux/hash.h>
#include <linux/gfp.h>
#include <linux/cache.h>

#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

static struct hlist_head *inode_hashtable;

/*
 * Map (superblock, hash value) to a bucket index in inode_hashtable.
 * The superblock pointer is mixed in so that identical inode numbers
 * on different filesystems land in different buckets.
 */
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long mix;

	mix = (hashval * (unsigned long)sb) ^
	      (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
	mix ^= (mix ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS;
	return mix & I_HASHMASK;
}

/*
 * Search one hash chain for the inode identified by (sb, ino).
 *
 * Returns the matching inode, or NULL if none is currently hashed.
 * The caller is expected to hold whatever lock protects the chain
 * (inode_lock in the kernel original; currently compiled out here).
 *
 * NOTE(review): the I_FREEING/I_CLEAR/I_WILL_FREE wait-and-restart
 * path is compiled out, so an inode that is mid-teardown could be
 * returned once that state tracking is re-enabled — restore the
 * __wait_on_freeing_inode() retry (and its "repeat" label, removed
 * here because the dead label triggered -Wunused-label) when it is.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	hlist_for_each_entry(inode, node, head, i_hash) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		break;
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	/* node is non-NULL iff the loop broke out on a match */
	return node ? inode : NULL;
}

/*
 * Fast-path cache lookup: return the hashed inode for (sb, ino), or
 * NULL if it is not present.  Locking, reference counting (__iget)
 * and wait_on_inode() from the kernel original are compiled out.
 */
static struct inode *ifind_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *found;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	found = find_inode_fast(sb, head, ino);
	if (!found)
		return NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return found;
}

/*
 * One-time, filesystem-independent initialisation of an inode: zero
 * the whole structure, then set up the embedded list heads, the
 * page-cache radix tree and the address_space locks.  Per-superblock
 * state is filled in later by inode_init_always().
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	/* GFP_ATOMIC: radix-tree node allocation may happen in atomic
	 * context when pages are inserted into the page cache */
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	spin_lock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	/* prio-tree, ordered-i_size, inotify and fsnotify setup from
	 * the kernel original remain compiled out below */
	// INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	// i_size_ordered_init(inode);
// #ifdef CONFIG_INOTIFY
// 	INIT_LIST_HEAD(&inode->inotify_watches);
// 	mutex_init(&inode->inotify_mutex);
// #endif
// #ifdef CONFIG_FSNOTIFY
// 	INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
// #endif
}

/*
 * Per-allocation initialisation of a freshly allocated inode for
 * superblock @sb: generic fields are reset, empty default operation
 * tables are installed (the filesystem overrides them later), and the
 * embedded address_space is pointed back at the inode.
 *
 * Returns 0 on success.  The original failure paths
 * (security_inode_alloc()/ima_inode_alloc() -> -ENOMEM) are compiled
 * out, so the function currently always succeeds; callers should
 * still check the result so those paths can be re-enabled.  The dead
 * "out: return -ENOMEM;" tail (whose only goto was commented out and
 * which triggered -Wunused-label plus unreachable code) was removed.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct address_space_operations empty_aops;
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	inode_init_once(inode);

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;	/* fs installs real ops later */
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->dirtied_when = 0;

	/* quota, pipe/bdev/cdev fields, security_inode_alloc(),
	 * ima_inode_alloc() and the i_lock/i_mutex/i_alloc_sem
	 * initialisation from the kernel original are compiled out */

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->writeback_index = 0;

	/*
	 * In the kernel original, if the block device provides a
	 * backing_dev_info for client inodes then that is used;
	 * otherwise the inodes share the bdev's backing_dev_info.
	 * That selection is compiled out here.
	 */
	inode->i_private = NULL;
	inode->i_mapping = mapping;

	/* POSIX ACL and fsnotify initialisation compiled out */

	return 0;
}

/*
 * Allocate a new inode for @sb, preferring the superblock's own
 * alloc_inode() hook and falling back to a plain kmalloc() (the slab
 * cache of the kernel original is compiled out).
 *
 * Returns the initialised inode, or NULL on allocation or
 * initialisation failure.
 *
 * BUG FIX: the previous version fell through and returned the inode
 * even when inode_init_always() failed, handing a half-initialised
 * inode to the caller.  We now return NULL on that path.
 */
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmalloc(sizeof(*inode));

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	if (!inode)
		return NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	if (inode_init_always(sb, inode)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* TODO(review): free via s_op->destroy_inode() /
		 * kfree() once those paths are wired up; until then
		 * the inode leaks here, but we must not return it. */
		return NULL;
	}

	return inode;
}

/*
 * Link a freshly created inode into the global structures.  The
 * nr_inodes accounting and the in-use/per-superblock list insertion
 * from the kernel original are compiled out; only the hash-chain
 * insertion is live.  @head may be NULL for anonymous (unhashed)
 * inodes, in which case nothing is inserted.
 */
static inline void __inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
			struct inode *inode)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	if (head != NULL) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		hlist_add_head(&inode->i_hash, head);
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
}

/*
 * Allocate a new anonymous inode for @sb and assign it the next
 * sequential inode number.  The inode is NOT inserted into the hash
 * table (head == NULL).  Returns NULL on allocation failure.
 */
struct inode *new_inode(struct super_block *sb)
{
	/*
	 * On a 32bit, non-LFS stat() call glibc reports EOVERFLOW when
	 * st_ino does not fit in the target struct field; a 32-bit
	 * counter is used here to make that less likely.
	 */
	static unsigned int last_ino;
	struct inode *inode = alloc_inode(sb);

	if (!inode)
		return NULL;

	__inode_add_to_lists(sb, NULL, inode);
	inode->i_ino = ++last_ino;
	inode->i_state = 0;
	return inode;
}

/*
 * Slow path of iget_locked(): allocate a new inode for (sb, ino),
 * re-check the hash chain for a racing insertion, and either hash the
 * new inode or hand back the one that beat us to it.
 *
 * Returns the inode, or NULL if allocation failed.
 *
 * BUG FIX: when a duplicate was found, the previous version returned
 * the freshly allocated, UNHASHED inode instead of the existing one
 * ("inode = old;" was commented out), so two distinct in-memory
 * inodes could exist for the same inode number.  We now return the
 * existing inode.  TODO(review): destroy_inode(inode) and
 * __iget(old)/wait_on_inode() remain compiled out, so the duplicate
 * allocation currently leaks and no reference is taken.
 */
static struct inode *get_new_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* Re-check: someone may have inserted ino meanwhile. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			inode->i_ino = ino;
			__inode_add_to_lists(sb, head, inode);
			/* Caller is responsible for filling in the
			 * contents of the new inode. */
			return inode;
		}

		/*
		 * Somebody else created the same inode under us; use
		 * the old inode instead of the one we just allocated.
		 */
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		inode = old;
	}
	return inode;
}

/*
 * Look up or create the inode identified by (sb, ino).  A cache hit
 * returns the existing inode; otherwise a new one is allocated and
 * hashed.  Returns NULL only on allocation failure.
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *cached;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	cached = ifind_fast(sb, head, ino);
	if (cached != NULL)
		return cached;

	/*
	 * Not cached: get_new_inode_fast() re-checks the chain, so a
	 * racing insertion between the lookup above and the insert is
	 * handled there.
	 */
	return get_new_inode_fast(sb, head, ino);
}

/*
 * Try to insert @inode, whose i_ino is already set, into the hash
 * table.  Returns 0 on success and -EBUSY if a live inode with the
 * same (sb, ino) is already hashed.  The loop retries when the
 * duplicate it found has meanwhile been unhashed.
 * Locking and reference counting (__iget/iput/wait_on_inode) from the
 * kernel original are compiled out.
 */
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	inode->i_state |= I_LOCK|I_NEW;
	for (;;) {
		struct hlist_node *pos;
		struct inode *old = NULL;

		hlist_for_each_entry(old, pos, head, i_hash) {
			if (old->i_ino != ino || old->i_sb != sb)
				continue;
			if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
				continue;
			break;
		}
		if (!pos) {
			/* no live duplicate: claim the hash slot */
			hlist_add_head(&inode->i_hash, head);
			return 0;
		}
		if (!hlist_unhashed(&old->i_hash))
			return -EBUSY;
		/* duplicate disappeared while we looked; retry */
	}
}

static unsigned long ihash_entries = 32768;

/*
 * Set up the global inode hash table.  alloc_large_system_hash()
 * sizes the table from ihash_entries and fills in i_hash_shift and
 * i_hash_mask; every bucket is then initialised to an empty hlist.
 * The slab cache and shrinker registration of the kernel original are
 * compiled out.
 *
 * FIX: the bucket count is now computed as 1UL << i_hash_shift with
 * an unsigned long loop counter; the previous (int)1 << shift is
 * undefined behavior for shifts >= 31 and overflows `int` loop
 * comparison for very large tables.
 */
void inode_init(void)
{
	unsigned long nr_buckets;
	unsigned long i;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,	/* scale */
					0,	/* flags */
					&i_hash_shift,
					&i_hash_mask,
					0);	/* limit: none */

	nr_buckets = 1UL << i_hash_shift;
	for (i = 0; i < nr_buckets; i++)
		INIT_HLIST_HEAD(&inode_hashtable[i]);
}
