#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/fs2.h>
#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/memory.h>
#include <asm/thread_info.h>

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)		(addr)
#endif

/*
 * find_vma_prepare - locate the first VMA with vm_end > @addr and record
 * the rbtree insertion point a node for @addr would use.
 *
 * @mm:        address space whose VMA rbtree is searched
 * @addr:      virtual address to look up
 * @pprev:     out: VMA immediately preceding @addr, or NULL if none
 * @rb_link:   out: child-slot (left/right edge) where a new rb node for
 *             @addr would be linked
 * @rb_parent: out: parent rb node of that slot; NULL for an empty tree
 *
 * Returns the first VMA whose vm_end lies above @addr (it may or may not
 * actually contain @addr), or NULL if there is no such VMA.
 *
 * NOTE(review): the printf calls throughout look like temporary debug
 * tracing; their output includes __LINE__ values.
 */
static struct vm_area_struct *find_vma_prepare(struct mm_struct *mm, unsigned long addr,
		struct vm_area_struct **pprev, struct rb_node ***rb_link,
		struct rb_node ** rb_parent)
{
	struct vm_area_struct * vma;
	struct rb_node ** __rb_link, * __rb_parent, * rb_prev;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;
	vma = NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* Ordered descent of the rbtree keyed on the VMA address ranges. */
	while (*__rb_link) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			/* Candidate answer; stop early if it contains addr. */
			vma = vma_tmp;
			if (vma_tmp->vm_start <= addr)
				break;
			__rb_link = &__rb_parent->rb_left;
		} else {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			/* Node ends at or below addr: remember it as the
			 * closest predecessor seen so far and go right. */
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return vma;
}

/*
 * Look up the first VMA which satisfies  addr < vm_end,  NULL if none.
 * The returned VMA need not contain @addr; it is merely the lowest VMA
 * ending above it.  A one-entry cache (mm->mmap_cache) is consulted
 * before falling back to the rbtree.  A NULL @mm yields NULL.
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	if (mm) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* Check the cache first. */
		/* (Cache hit rate is typically around 35%.) */
		vma = mm->mmap_cache;
		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			struct rb_node * rb_node;

			rb_node = mm->mm_rb.rb_node;
			vma = NULL;

			/* Same descent as find_vma_prepare(), without the
			 * insertion-point bookkeeping. */
			while (rb_node) {
				struct vm_area_struct * vma_tmp;

				vma_tmp = rb_entry(rb_node,
						struct vm_area_struct, vm_rb);

				if (vma_tmp->vm_end > addr) {
					vma = vma_tmp;
					if (vma_tmp->vm_start <= addr)
						break;
					rb_node = rb_node->rb_left;
				} else
					rb_node = rb_node->rb_right;
			}
			/* Refresh the lookup cache on a successful search. */
			if (vma)
				mm->mmap_cache = vma;
		}
	}
	return vma;
}

/*
 * vma_merge() is compiled out: it depends on vma_adjust(), mempolicy and
 * anon_vma support that this port does not provide yet.  Kept verbatim
 * from the upstream kernel as a reference for when VMA merging is
 * implemented.
 */
#if 0
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
		     	struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
  			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
						anon_vma, file, pgoff)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma)) {
							/* cases 1, 6 */
			vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
 			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		return area;
	}

	return NULL;
}
#endif

/*
 * __vma_link_list - splice @vma into mm's address-ordered singly linked
 * VMA list, directly after @prev.
 *
 * When @prev is NULL, @vma becomes the new list head; in that case the
 * @rb_parent produced by find_vma_prepare() (non-NULL whenever the tree
 * was not empty) is used as @vma's successor for vm_next.
 */
static inline void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	if (prev) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* Insert between prev and prev's old successor. */
		vma->vm_next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* No predecessor: vma is the lowest mapping. */
		mm->mmap = vma;
		if (rb_parent)
			vma->vm_next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			vma->vm_next = NULL;
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
}

/*
 * __vma_link_rb - insert @vma into mm's rbtree at the slot previously
 * located by find_vma_prepare() (@rb_link under @rb_parent), then
 * rebalance/recolor the tree.
 */
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
}

/*
 * __vma_link - link @vma into both lookup structures: the ordered list
 * and the rbtree.  anon_vma linkage from upstream is not ported yet.
 */
static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
	// __anon_vma_link(vma);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
}

/*
 * vma_link - insert @vma into @mm's list and rbtree and bump map_count.
 * The upstream file-mapping (i_mmap) and anon_vma locking is not ported
 * yet, hence the commented-out sections below.
 */
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	// struct address_space *mapping = NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// if (vma->vm_file)
		// mapping = vma->vm_file->f_mapping;

	// if (mapping) {
		// spin_lock(&mapping->i_mmap_lock);
		// vma->vm_truncate_count = mapping->truncate_count;
	// }
	// anon_vma_lock(vma);

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	// __vma_link_file(vma);

	// anon_vma_unlock(vma);
	// if (mapping)
		// spin_unlock(&mapping->i_mmap_lock);

	mm->map_count++;
	// validate_mm(mm);
}

/*
 * mmap_region - create a mapping covering [addr, addr + len) in the
 * current task's address space and link it into mm's list and rbtree.
 *
 * @file:     backing file, or NULL for an anonymous mapping
 * @addr:     page-aligned start address (validated by the caller)
 * @len:      length of the mapping in bytes
 * @flags:    MAP_* flags from the caller (currently unused here)
 * @vm_flags: VM_* bits stored into the new VMA
 * @pgoff:    offset into @file, in pages
 *
 * Returns the start address of the new mapping, or a negative errno.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long flags,
			  unsigned int vm_flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	int error;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/*
	 * Check for an overlap with an existing mapping.  Upstream calls
	 * do_munmap() and retries the lookup; unmapping is not implemented
	 * in this port, so an overlap is a hard failure.  (The previous
	 * code did "goto munmap_back" without unmapping anything, which
	 * looped forever on any overlap.)
	 */
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* TODO: do_munmap(mm, addr, len) and retry the lookup. */
		return -ENOMEM;
	}

	/* TODO: address-space limit and overcommit accounting (upstream:
	 * may_expand_vm, MAP_NORESERVE, security_vm_enough_memory). */

	/* TODO: try vma_merge() with an adjacent VMA before allocating. */

	/*
	 * Allocate the new VMA.  NOTE(review): this kmalloc() does not
	 * zero the structure (upstream used kmem_cache_zalloc), so every
	 * field read after this point must be initialised explicitly.
	 */
	vma = kmalloc(sizeof(struct vm_area_struct));
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;
	/* Always set vm_file (NULL for anonymous mappings): it is read
	 * again after vma_link() below, and the old code left it
	 * uninitialised on the anonymous path. */
	vma->vm_file = file;

	if (file) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* TODO: the return value of ->mmap() is currently ignored;
		 * the upstream unmap_and_free_vma error path (undoing a
		 * partial driver mapping) is not implemented yet. */
		error = file->f_op->mmap(file, vma);

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 */
		addr = vma->vm_start;
		pgoff = vma->vm_pgoff;
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* TODO: shmem_zero_setup() for shared anonymous maps. */
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	vma_link(mm, vma, prev, rb_link, rb_parent);
	file = vma->vm_file;
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	mm->total_vm += len >> PAGE_SHIFT;
	/* TODO: VM_LOCKED / MAP_POPULATE handling from upstream. */
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return addr;

unacct_error:
	/* TODO: vm_unacct_memory() once charge accounting exists. */
	return error;
}

/*
 * arch_get_unmapped_area - classic bottom-up search for a free address
 * range of @len bytes.
 *
 * If @addr is supplied and [addr, addr+len) is free and below TASK_SIZE
 * it is used as-is.  Otherwise the walk starts from the free-area cache
 * (or TASK_UNMAPPED_BASE) and advances VMA by VMA, tracking the largest
 * hole seen so far in mm->cached_hole_size.
 *
 * Returns the chosen address, or -ENOMEM if nothing fits.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	if (len > TASK_SIZE)
		return -ENOMEM;

	// if (flags & MAP_FIXED)
	// 	return addr;

	/* Honor the caller's hint when the range there is free. */
	if (addr) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* Start from the cached position if the cached hole is too small
	 * to matter; otherwise restart from the bottom. */
	if (len > mm->cached_hole_size) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
	        start_addr = addr = mm->free_area_cache;
	} else {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
	        start_addr = addr = TASK_UNMAPPED_BASE;
	        mm->cached_hole_size = 0;
	}

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				addr = TASK_UNMAPPED_BASE;
			        start_addr = addr;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Record the largest hole seen during this walk. */
		if (addr + mm->cached_hole_size < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}
}

/*
 * get_unmapped_area - pick and validate an address for a new mapping.
 *
 * Currently always delegates to arch_get_unmapped_area(); the upstream
 * per-mm / per-file get_unmapped_area hooks are commented out.  Returns
 * a page-aligned address on success or a negative errno (-ENOMEM when
 * the result would exceed TASK_SIZE, -EINVAL when it is misaligned).
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	// unsigned long (*get_area)(struct file *, unsigned long,
	// 			  unsigned long, unsigned long, unsigned long);

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// get_area = current->mm->get_unmapped_area;
	// if (file && file->f_op && file->f_op->get_unmapped_area)
	// 	get_area = file->f_op->get_unmapped_area;
	// addr = get_area(file, addr, len, pgoff, flags);
	addr = arch_get_unmapped_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (addr & ~PAGE_MASK)
		return -EINVAL;

	return arch_rebalance_pgtables(addr, len);
}


/*
 * do_mmap_pgoff - top-level mmap entry: choose an address and create the
 * mapping.
 *
 * @file:  backing file, or NULL for an anonymous mapping
 * @addr:  address hint from the caller
 * @len:   requested length in bytes
 * @prot:  PROT_* bits (not yet translated into vm_flags in this port)
 * @flags: MAP_* bits
 * @pgoff: file offset in pages
 *
 * Returns the mapped address or a negative errno.
 *
 * Most of the upstream validation (length/overflow checks, MAP_TYPE
 * permission handling, mlock limits, security hooks) is not ported yet;
 * see the TODOs below.
 */
unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff)
{
	/*
	 * NOTE(review): vm_flags used to be passed to mmap_region()
	 * uninitialised because the calc_vm_prot_bits() translation below
	 * is still commented out — that read was undefined behaviour.
	 * Initialise it to 0 so the new VMA gets deterministic flags
	 * until the @prot/@flags translation is implemented.
	 */
	unsigned int vm_flags = 0;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* TODO (upstream checks not yet ported):
	 *  - READ_IMPLIES_EXEC personality handling
	 *  - len == 0 rejection, PAGE_ALIGN(len), len > TASK_SIZE
	 *  - pgoff overflow, sysctl_max_map_count, MAP_HUGETLB setup
	 *  - arch_mmap_check() / round_hint_to_min()
	 */

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	/* Errors from get_unmapped_area() are negative and therefore not
	 * page aligned; propagate them to the caller unchanged. */
	if (addr & ~PAGE_MASK)
		return addr;
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* TODO: vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags)
	 *        | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	 * plus the MAP_SHARED/MAP_PRIVATE permission checks, mlock limits
	 * and security_file_mmap() from the upstream kernel. */

	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}

int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
	struct vm_area_struct * __vma, * prev;
	struct rb_node ** rb_link, * rb_parent;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first wfault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap_pgoff and in do_brk.
	 */
	if (!vma->vm_file) {
		// BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}
	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
	if (__vma && __vma->vm_start < vma->vm_end)
		return -ENOMEM;
	// if ((vma->vm_flags & VM_ACCOUNT) &&
	//      security_vm_enough_memory_mm(mm, vma_pages(vma)))
	// 	return -ENOMEM;
	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}
