/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 * Rewritten for Xen ARM 2010 by Mirrodance
 */

#include <xen/init.h>
#include <fs/fs.h>
#include <fs/hash.h>
#include <xen/kernel.h>

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

/*
 * Geometry of the inode hash table, read by hash() via the macros
 * above.  NOTE(review): nothing in this file assigns these, so hash()
 * currently masks with 0 and every inode lands in bucket 0 — confirm
 * whether they are meant to be set during boot.
 */
static unsigned int i_hash_mask = 0;
static unsigned int i_hash_shift = 0;

/* Bucket array for the inode hash; allocated in inode_init(). */
static struct hlist_head *inode_hashtable;

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * Resets every per-use field of @inode to a pristine state.  These
 * fields must be set on each allocation because they are not covered
 * by the once-only initialisation done in inode_init_once().
 *
 * Returns @inode.
 */
struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
{
	/* Zero-filled fallbacks for filesystems that supply no ops. */
	static struct inode_operations empty_iops;
	static const struct file_operations empty_fops;

	struct address_space * const data = &inode->i_data;

	/* Superblock linkage and identity. */
	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	inode->i_generation = 0;
	inode->i_rdev = 0;

	/* Reference counts and operation tables. */
	atomic_set(&inode->i_count, 1);
	atomic_set(&inode->i_writecount, 0);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;

	/* Ownership and size accounting. */
	inode->i_uid = 0;
	inode->i_gid = 0;
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif

	/* Special-file pointers and writeback timestamp. */
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->dirtied_when = 0;

	/*
	 * The security hooks, lock initialisation, a_ops assignment and
	 * backing_dev_info selection done by the original Linux version
	 * are stubbed out in this port; only the address_space linkage
	 * below survives.
	 */
	data->host = inode;
	data->flags = 0;
	data->assoc_mapping = NULL;

	inode->i_private = NULL;
	inode->i_mapping = data;

	return inode;
}
EXPORT_SYMBOL(inode_init_always);

void inode_init_once(struct inode *inode);

/*
 * alloc_inode - allocate a fresh inode for @sb.
 *
 * Uses the filesystem's own allocator when one is provided, otherwise
 * falls back to a plain xmalloc_bytes() plus inode_init_once().  The
 * per-use fields are then set up by inode_init_always().
 *
 * Returns the initialised inode, or NULL on allocation failure.
 */
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode) {
		inode = sb->s_op->alloc_inode(sb);
	} else {
		inode = xmalloc_bytes(sizeof(struct inode));
		/*
		 * Fix: xmalloc_bytes() can return NULL; the previous code
		 * passed that straight to inode_init_once(), which memsets
		 * through the pointer.
		 */
		if (!inode)
			return NULL;
		inode_init_once(inode);
	}

	if (inode)
		return inode_init_always(sb, inode);
	return NULL;
}

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so the slab allocator can be made aware of that.
 */
void inode_init_once(struct inode *inode)
{
	/* Start from all-zero state, then set up the list linkage. */
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	/* Radix tree and lock initialisation from Linux are stubbed out
	 * in this port; only the embedded address_space lists remain. */
	//INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	//spin_lock_init(&inode->i_data.tree_lock);
	//spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	//spin_lock_init(&inode->i_data.private_lock);
	//INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	//i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
	INIT_LIST_HEAD(&inode->inotify_watches);
	//mutex_init(&inode->inotify_mutex);
#endif
}

EXPORT_SYMBOL(inode_init_once);

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 *
 * Walks the hash bucket @head looking for an inode with number @ino
 * belonging to @sb.  Returns the inode, or NULL if none is present.
 */
static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode * inode = NULL;

	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			/*
			 * Fix: upstream Linux calls __wait_on_freeing_inode()
			 * and retries, but that helper is stubbed out here, so
			 * the old "goto repeat" span forever on a dying inode.
			 * Treat an inode under teardown as not present instead,
			 * which is what the lookup would observe once the free
			 * completed.
			 */
			continue;
		}
		break;
	}
	/* node is NULL iff the loop ran off the end of the bucket. */
	return node ? inode : NULL;
}

/*
 * Map (superblock, hashval) to an index in inode_hashtable.  Mixes the
 * superblock pointer into the hash so equal inode numbers on different
 * filesystems land in different buckets.
 */
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long mix;

	/* Note: '/' binds tighter than '^' — this matches upstream Linux. */
	mix = (hashval * (unsigned long)sb) ^
	      ((GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES);
	mix ^= (mix ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS;
	return mix & I_HASHMASK;
}

/*
 * Link a freshly allocated inode into the global accounting lists and,
 * when @head is non-NULL, into the inode hash.  In this port the global
 * lists and statistics do not exist, so the body is entirely stubbed
 * out and the function is a deliberate no-op kept for call-site
 * compatibility with the Linux original.
 */
static inline void
__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
                        struct inode *inode)
{
        //inodes_stat.nr_inodes++;
        //list_add(&inode->i_list, &inode_in_use);
        //list_add(&inode->i_sb_list, &sb->s_inodes);
        //if (head)
        //        hlist_add_head(&inode->i_hash, head);
}


/**
 *      new_inode       - obtain an inode
 *      @sb: superblock
 *
 *      Allocates a new inode for given superblock and assigns it a
 *      fresh inode number from a file-local counter.
 *
 *      Returns the new inode, or NULL if allocation failed.
 */
struct inode *new_inode(struct super_block *sb)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static unsigned int last_ino;
        struct inode *inode = alloc_inode(sb);

        if (!inode)
                return NULL;

        /* No global inode lists in this port; this call is a no-op. */
        __inode_add_to_lists(sb, NULL, inode);
        inode->i_ino = ++last_ino;
        inode->i_state = 0;
        return inode;
}

EXPORT_SYMBOL(new_inode);

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int counter;
	ino_t candidate;

	for (;;) {
		/* Keep the counter strictly above the reserved range. */
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		candidate = counter++;
		/* A number nobody is using yet is the answer. */
		if (!find_inode_fast(sb, inode_hashtable + hash(sb, candidate),
				     candidate))
			return candidate;
	}
}
EXPORT_SYMBOL(iunique);

/*
 * igrab - take an extra reference on @inode, unless it is already being
 * torn down.  Returns @inode on success, NULL if the inode is in the
 * middle of being freed (covers the window where s_op->clear_inode has
 * not been called yet while somebody calls igrab).
 *
 * NOTE: the actual reference bump (__iget) is stubbed out in this port.
 */
struct inode *igrab(struct inode *inode)
{
	if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
		return NULL;

	//__iget(inode);
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	hlist_add_head(&inode->i_hash,
		       inode_hashtable + hash(inode->i_sb, hashval));
}
EXPORT_SYMBOL(__insert_inode_hash);

/*
 * inode_init - boot-time setup of the inode hash table.
 *
 * Allocates a fixed 1024-bucket hash table and publishes matching
 * hash parameters.  Fix: previously i_hash_shift and i_hash_mask were
 * left at 0, so hash() masked every result down to bucket 0 and the
 * initialisation loop (bounded by 1 << i_hash_shift == 1) touched only
 * the first of the 1024 allocated buckets, leaving the rest
 * uninitialised.
 */
void __init inode_init(void)
{
	unsigned int i;

	/* Linux's alloc_large_system_hash() sizing is unavailable here;
	 * use a fixed 2^10 == 1024 bucket table. */
	i_hash_shift = 10;
	i_hash_mask = (1U << i_hash_shift) - 1;

	inode_hashtable = xmalloc_bytes((1U << i_hash_shift) *
					sizeof(struct hlist_head));
	/* NOTE(review): no recovery path if xmalloc_bytes() fails — a NULL
	 * table faults on first use; confirm the preferred boot-failure
	 * policy for this environment. */

	for (i = 0; i < (1U << i_hash_shift); i++)
		INIT_HLIST_HEAD(&inode_hashtable[i]);
}
