#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Minimal mm globals normally set up by the kernel's memory-init code.
 * Only declared here; this trimmed-down port fills them in elsewhere.
 */
struct page *mem_map;
void *high_memory;
unsigned long highest_memmap_pfn;

/* The kernel's initial address space; defined elsewhere. */
extern struct mm_struct init_mm;

/*
 * Copy a single pte from the parent (@src_mm) into the child (@dst_mm)
 * during fork.  In this trimmed-down port every entry is treated as a
 * COW mapping: the source pte is write-protected in place, and the copy
 * installed in the child is write-protected and marked old, so the first
 * write on either side will fault.  The upstream swap-entry, shared-
 * mapping and rmap handling is commented out.
 *
 * NOTE(review): vm_flags and page feed only the disabled paths, and
 * out_set_pte is targeted only by a commented-out goto, so the compiler
 * may warn about unused variables/labels here.
 */
static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* pte contains position in swap or file, so copy. */
	// if (!pte_present(pte)) {
	// 	if (!pte_file(pte)) {
	// 		swp_entry_t entry = pte_to_swp_entry(pte);

	// 		swap_duplicate(entry);
	// 		/* make sure dst_mm is on swapoff's mmlist. */
	// 		if (unlikely(list_empty(&dst_mm->mmlist))) {
	// 			spin_lock(&mmlist_lock);
	// 			if (list_empty(&dst_mm->mmlist))
	// 				list_add(&dst_mm->mmlist,
	// 					 &src_mm->mmlist);
	// 			spin_unlock(&mmlist_lock);
	// 		}
	// 		if (is_write_migration_entry(entry) &&
	// 				is_cow_mapping(vm_flags)) {
	// 			/*
	// 			 * COW mappings require pages in both parent
	// 			 * and child to be set to read.
	// 			 */
	// 			make_migration_entry_read(&entry);
	// 			pte = swp_entry_to_pte(entry);
	// 			set_pte_at(src_mm, addr, src_pte, pte);
	// 		}
	// 	}
	// 	goto out_set_pte;
	// }

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	// if (is_cow_mapping(vm_flags)) {
	// 	printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* Unconditional here: the is_cow_mapping() guard is disabled. */
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	// }

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	// if (vm_flags & VM_SHARED)
		// pte = pte_mkclean(pte);
	/* Clear the accessed bit so the child's first touch refaults/ages. */
	pte = pte_mkold(pte);

	// page = vm_normal_page(vma, addr, pte);
	// if (page) {
	// 	get_page(page);
	// 	page_dup_rmap(page);
	// 	rss[PageAnon(page)]++;
	// }

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

/*
 * Copy one pte-table's worth of entries [addr, end) from @src_mm to
 * @dst_mm for the pmd pair (@dst_pmd, @src_pmd).  Allocates the child's
 * pte table on demand, then walks both tables in lockstep, copying each
 * present entry via copy_one_pte().  Returns 0 on success or -ENOMEM if
 * the child's pte table cannot be allocated.
 *
 * NOTE(review): the locking/lazy-mmu variants are commented out, so
 * orig_src_pte/orig_dst_pte and the rss[] counters are computed but
 * never consumed (add_mm_rss() is disabled), and the `progress` throttle
 * no longer breaks out of the loop.
 */
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

	printf("this is %s(): %d\r\n", __func__, __LINE__);

again:
	rss[1] = rss[0] = 0;
	// dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
	if (!dst_pte)
		return -ENOMEM;
	// src_pte = pte_offset_map_nested(src_pmd, addr);
	src_pte = pte_offset_map(src_pmd, addr);
	// src_ptl = pte_lockptr(src_mm, src_pmd);
	// spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	// arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			// if (need_resched() ||
			//     spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
			// 	break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	// arch_leave_lazy_mmu_mode();
	// spin_unlock(src_ptl);
	// pte_unmap_nested(orig_src_pte);
	// add_mm_rss(dst_mm, rss[0], rss[1]);
	// pte_unmap_unlock(orig_dst_pte, dst_ptl);
	// cond_resched();
	/* Only reachable via the (disabled) early break above. */
	if (addr != end)
		goto again;
	return 0;
}

/*
 * Copy the pmd-level range [addr, end) for the pud pair
 * (@dst_pud, @src_pud): allocate the child's pmd on demand and recurse
 * into copy_pte_range() for every pmd slot.  Returns 0 on success or
 * -ENOMEM on any allocation failure.  The upstream skip of empty/bad
 * source pmds is commented out, so every slot is descended into.
 */
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		// if (pmd_none_or_clear_bad(src_pmd))
		// 	continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Copy the pud-level range [addr, end) for the pgd pair
 * (@dst_pgd, @src_pgd): allocate the child's pud on demand and recurse
 * into copy_pmd_range() for every pud slot.  Returns 0 on success or
 * -ENOMEM on any allocation failure.  The upstream skip of empty/bad
 * source puds is commented out, so every slot is descended into.
 */
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		// if (pud_none_or_clear_bad(src_pud))
		// 	continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

/*
 * Fork-time entry point: copy the page tables for the whole of @vma
 * ([vm_start, vm_end)) from @src_mm into @dst_mm by walking the pgd
 * level and recursing down through copy_pud_range().
 *
 * Returns 0 on success or -ENOMEM if any table allocation fails.
 * The upstream fast-path skips (no anon_vma, hugetlb, pfn mappings) and
 * the mmu-notifier invalidation around COW downgrades are commented out,
 * so every pgd slot in the range is copied unconditionally.
 */
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	// if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
	// 	if (!vma->anon_vma)
	// 		return 0;
	// }

	// if (is_vm_hugetlb_page(vma))
	// 	return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	// if (unlikely(is_pfn_mapping(vma))) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		// ret = track_pfn_vma_copy(vma);
		// if (ret)
			// return ret;
	// }

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	// if (is_cow_mapping(vma->vm_flags))
	// 	mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		// if (pgd_none_or_clear_bad(src_pgd))
		// 	continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next)) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	// if (is_cow_mapping(vma->vm_flags))
	// 	mmu_notifier_invalidate_range_end(src_mm,
	// 					  vma->vm_start, end);
	return ret;
}

#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <asm/map.h>
#include <asm/memory.h>
/*
 * PTE protection bits for device-style mappings: present, young, dirty,
 * writable.  Parenthesized so the OR-expression stays one unit when the
 * macro is expanded inside a larger expression (precedence hazard fix).
 */
#define PROT_PTE_DEVICE		(L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE)
/*
 * Map one freshly allocated (order-1) page at @addr in @mm's page
 * tables, creating the pte table first if the pmd slot is empty.  The
 * page is installed with PROT_PTE_DEVICE protections via set_pte_ext().
 *
 * Fix: the result of alloc_pages() is now checked before use;
 * previously a failed allocation was passed straight to page_to_pfn()
 * and mapped, dereferencing/encoding a NULL page.
 */
static void map_address(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	/* Order-1 allocation: two contiguous pages; only the first is mapped. */
	page = alloc_pages(PGALLOC_GFP, 1);
	if (!page) {
		printf("this is %s(): %d	alloc_pages failed\r\n", __func__, __LINE__);
		return;
	}
	pfn = page_to_pfn(page);
	printf("this is %s(): %d	page = 0x%x\r\n", __func__, __LINE__, page);
	printf("this is %s(): %d	pfn = 0x%x\r\n", __func__, __LINE__, pfn);

#if 0
	struct map_desc map;
	// execv
	// map.pfn = __phys_to_pfn(addr);
	map.pfn = pfn;
	map.virtual = 0x1000f000;
	map.length = PAGE_SIZE;
	map.type = MT_UNCACHED;
	create_mapping(&map);
	return;
#endif

	// pgd = pgd_offset_k(addr);
	pgd = pgd_offset(mm, addr);
	pmd = pmd_offset(pgd, addr);
	printf("this is %s(): %d	pgd = 0x%x\r\n", __func__, __LINE__, pgd);
	printf("this is %s(): %d	pmd = 0x%x\r\n", __func__, __LINE__, pmd);

	if (pmd_none(*pmd)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* No pte table yet: allocate one and hook it into the pmd. */
		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | PMD_TYPE_TABLE);
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	pte = pte_offset_kernel(pmd, addr);
	printf("this is %s(): %d	pte = 0x%x\r\n", __func__, __LINE__, pte);
	/* Install the single pte for this address. */
	set_pte_ext(pte, pfn_pte(pfn, __pgprot(PROT_PTE_DEVICE)), 0);
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 *
 * Handle a fault on an anonymous mapping: allocate a private zeroed
 * (order-1) page and install a present/young/user/writable/exec pte
 * for it.  Returns 0 on success — including when we lose the race and
 * find the pte already populated — or VM_FAULT_OOM if the page
 * allocation fails.
 *
 * Fix: the NULL check of the freshly allocated page now happens BEFORE
 * the page is zeroed; previously memset(page_address(page), ...) ran
 * ahead of the `if (!page)` test, dereferencing NULL on OOM.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;	/* used only by the commented-out locked paths */
	pte_t entry;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

#if 0
	map_address(mm, address);
	return 0;
#endif

	// if (!(flags & FAULT_FLAG_WRITE)) {
	// 	entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
	// 					vma->vm_page_prot));
	// 	ptl = pte_lockptr(mm, pmd);
	// 	spin_lock(ptl);
	// 	if (!pte_none(*page_table))
	// 		goto unlock;
	// 	goto setpte;
	// }

	/* Allocate our own private page. */
	// pte_unmap(page_table);

	// if (unlikely(anon_vma_prepare(vma)))
	// 	goto oom;
	// page = alloc_zeroed_user_highpage_movable(vma, address);
	page = alloc_pages(PGALLOC_GFP, 1);
	if (!page)
		goto oom;	/* must check before touching the page */
	memset(page_address(page), 0, PAGE_SIZE);
	printf("this is %s(): %d	page = 0x%x\r\n", __func__, __LINE__, page);
	printf("this is %s(): %d	page_address(page) = 0x%x\r\n", __func__, __LINE__, page_address(page));
	// __SetPageUptodate(page);
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
	// 	goto oom_free_page;

	// entry = mk_pte(page, vma->vm_page_prot);
	entry = mk_pte(page, L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC);
	// entry = ((pte_t)(page)) | pgprot_val(vma->vm_page_prot);
	// entry = ((pte_t)(page));
	printf("this is %s(): %d	entry = 0x%x\r\n", __func__, __LINE__, entry);
	// if (vma->vm_flags & VM_WRITE) {
	// 	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// 	entry = pte_mkwrite(pte_mkdirty(entry));
	// 	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// }

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	page_table = pte_offset_map(pmd, address);
	printf("this is %s(): %d	page_table = 0x%x\r\n", __func__, __LINE__, page_table);
	if (!pte_none(*page_table)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		goto release;
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// inc_mm_counter(mm, anon_rss);
	// page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, entry);
unlock:
	// pte_unmap_unlock(page_table, ptl);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return 0;
release:
	// mem_cgroup_uncharge_page(page);
	// page_cache_release(page);
	goto unlock;
oom_free_page:
	// page_cache_release(page);
oom:
	return VM_FAULT_OOM;
}

/*
 * TLB-flush stub: this port does not flush a single user TLB entry, so
 * pte updates rely on the caller/arch code to make them visible.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{

}

/*
 * Install *__ptep = __entry if it differs from the current pte, and
 * flush the (stubbed) TLB entry for __address.  Evaluates to non-zero
 * iff the pte was changed.  NOTE: arguments may be evaluated more than
 * once, and __dirty is currently ignored by this implementation.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

/*
 * Fault in a page through the vma's fault handler (vma->vm_ops->fault)
 * and, if the pte at @address still matches @orig_pte, install a
 * present/young/user/writable/exec mapping for the returned page.
 * Returns whatever the ->fault handler returned.
 *
 * The upstream early-COW-break, page locking, cgroup charging and
 * dirty-page writeback logic is commented out.
 *
 * NOTE(review): anon/charged/dirty_page/page_mkwrite/ptl feed only the
 * disabled paths, and the out:/unwritable_page: labels are reached only
 * by fall-through or commented-out gotos, so the compiler may warn.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	pte_t entry;
	int anon = 0;
	int charged = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;

	/* Describe the fault for the vma's handler. */
	vmf.virtual_address = (void *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	ret = vma->vm_ops->fault(vma, &vmf);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
	// 	return ret;

	// if (unlikely(PageHWPoison(vmf.page))) {
	// 	if (ret & VM_FAULT_LOCKED)
	// 		unlock_page(vmf.page);
	// 	return VM_FAULT_HWPOISON;
	// }

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	// if (unlikely(!(ret & VM_FAULT_LOCKED)))
	// 	lock_page(vmf.page);
	// else
	// 	VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
	 */
	page = vmf.page;
	// if (flags & FAULT_FLAG_WRITE) {
	// 	if (!(vma->vm_flags & VM_SHARED)) {
	// 		anon = 1;
	// 		if (unlikely(anon_vma_prepare(vma))) {
	// 			ret = VM_FAULT_OOM;
	// 			goto out;
	// 		}
	// 		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
	// 					vma, address);
	// 		if (!page) {
	// 			ret = VM_FAULT_OOM;
	// 			goto out;
	// 		}
	// 		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
	// 			ret = VM_FAULT_OOM;
	// 			page_cache_release(page);
	// 			goto out;
	// 		}
	// 		charged = 1;
	// 		/*
	// 		 * Don't let another task, with possibly unlocked vma,
	// 		 * keep the mlocked page.
	// 		 */
	// 		if (vma->vm_flags & VM_LOCKED)
	// 			clear_page_mlock(vmf.page);
	// 		copy_user_highpage(page, vmf.page, address, vma);
	// 		__SetPageUptodate(page);
	// 	} else {
	// 		/*
	// 		 * If the page will be shareable, see if the backing
	// 		 * address space wants to know that the page is about
	// 		 * to become writable
	// 		 */
	// 		if (vma->vm_ops->page_mkwrite) {
	// 			int tmp;

	// 			unlock_page(page);
	// 			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
	// 			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
	// 			if (unlikely(tmp &
	// 				  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
	// 				ret = tmp;
	// 				goto unwritable_page;
	// 			}
	// 			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
	// 				lock_page(page);
	// 				if (!page->mapping) {
	// 					ret = 0; /* retry the fault */
	// 					unlock_page(page);
	// 					goto unwritable_page;
	// 				}
	// 			} else
	// 				VM_BUG_ON(!PageLocked(page));
	// 			page_mkwrite = 1;
	// 		}
	// 	}

	// }

	// page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	page_table = pte_offset_map(pmd, address);
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (pte_same(*page_table, orig_pte)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		// flush_icache_page(vma, page);
		printf("this is %s(): %d	page = 0x%x\r\n", __func__, __LINE__, page);
		printf("this is %s(): %d	page_address(page) = 0x%x\r\n", __func__, __LINE__, page_address(page));
		// entry = mk_pte(page, vma->vm_page_prot);
		// entry = mk_pte(page, L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_USER);
		entry = mk_pte(page, L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC);
		// if (flags & FAULT_FLAG_WRITE)
		// 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		// if (anon) {
		// 	inc_mm_counter(mm, anon_rss);
		// 	page_add_new_anon_rmap(page, vma, address);
		// } else {
		// 	inc_mm_counter(mm, file_rss);
		// 	page_add_file_rmap(page);
		// 	if (flags & FAULT_FLAG_WRITE) {
		// 		dirty_page = page;
		// 		get_page(dirty_page);
		// 	}
		// }
		set_pte_at(mm, address, page_table, entry);
		printf("this is %s(): %d\r\n", __func__, __LINE__);

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, entry);
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	} else {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		// if (charged)
		// 	mem_cgroup_uncharge_page(page);
		// if (anon)
		// 	page_cache_release(page);
		// else
		// 	anon = 1; /* no anon but release faulted_page */
	}

	// pte_unmap_unlock(page_table, ptl);

out:
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// if (dirty_page) {
	// 	struct address_space *mapping = page->mapping;

	// 	if (set_page_dirty(dirty_page))
	// 		page_mkwrite = 1;
	// 	unlock_page(dirty_page);
	// 	put_page(dirty_page);
	// 	if (page_mkwrite && mapping) {
	// 		/*
	// 		 * Some device drivers do not set page.mapping but still
	// 		 * dirty their pages
	// 		 */
	// 		balance_dirty_pages_ratelimited(mapping);
	// 	}

	// 	/* file_update_time outside page_lock */
	// 	if (vma->vm_file)
	// 		file_update_time(vma->vm_file);
	// } else {
	// 	unlock_page(vmf.page);
	// 	if (anon)
	// 		page_cache_release(vmf.page);
	// }

	return ret;

unwritable_page:
	// page_cache_release(page);
	return ret;
}

/*
 * Resolve a fault on a linear (ordinary, non-nonlinear) mapping: derive
 * the file page offset for @address from the vma's start and vm_pgoff,
 * then hand off to __do_fault().  Returns __do_fault()'s result.
 */
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * Dispatch one pte-level fault.  A none pte on a vma that provides a
 * ->fault handler goes to do_linear_fault(); any other none pte is
 * treated as anonymous and goes to do_anonymous_page().  A present pte
 * is simply marked young and written back via ptep_set_access_flags().
 * Swap, nonlinear-file and write-protect (COW) handling is commented
 * out, as is all pte locking.  Returns 0 or a VM_FAULT_* code from the
 * handlers it dispatches to.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, unsigned int flags)
{
	pte_t entry;
	// spinlock_t *ptl;

	printf("this is %s(): %d	address = 0x%x\r\n", __func__, __LINE__, address);

	/* Snapshot the pte; re-checked below to detect concurrent changes. */
	entry = *pte;
	printf("this is %s(): %d	*pte = 0x%x\r\n", __func__, __LINE__, *pte);

			// return do_anonymous_page(mm, vma, address,
			// 			 pte, pmd, flags);

# if 1
	printf("this is %s(): %d	!pte_present(entry) = 0x%x\r\n", __func__, __LINE__, !pte_present(entry));
	printf("this is %s(): %d	pte_none(entry) = 0x%x\r\n", __func__, __LINE__, pte_none(entry));
	if (!pte_present(entry)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);

		if (pte_none(entry)) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);

			// if (address > 0x80000000) {
				printf("this is %s(): %d\r\n", __func__, __LINE__);

				if (vma->vm_ops) {
					printf("this is %s(): %d\r\n", __func__, __LINE__);

					if (vma->vm_ops->fault) {
						printf("this is %s(): %d\r\n", __func__, __LINE__);

						return do_linear_fault(mm, vma, address,
							pte, pmd, flags, entry);
					}
				}
			// }

			printf("this is %s(): %d\r\n", __func__, __LINE__);
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, flags);
		}
		// if (pte_file(entry))
		// 	return do_nonlinear_fault(mm, vma, address,
		// 			pte, pmd, flags, entry);
		// return do_swap_page(mm, vma, address,
		// 			pte, pmd, flags, entry);
	}
#endif

	// ptl = pte_lockptr(mm, pmd);
	// spin_lock(ptl);
	if (!pte_same(*pte, entry)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		goto unlock;
	}
	// if (flags & FAULT_FLAG_WRITE) {
	// 	if (!pte_write(entry))
	// 		return do_wp_page(mm, vma, address,
	// 				pte, pmd, ptl, entry);
	// 	entry = pte_mkdirty(entry);
	// }
	/* Present pte: record the access by setting the young bit. */
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		update_mmu_cache(vma, address, entry);
	} else {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		// if (flags & FAULT_FLAG_WRITE)
		// 	flush_tlb_page(vma, address);
	}
unlock:
	// pte_unmap_unlock(pte, ptl);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return 0;
}

/*
 * By the time we get here, we already hold the mm semaphore.
 *
 * Page-fault entry point: walk (and allocate on demand) the page-table
 * hierarchy pgd -> pud -> pmd -> pte for @address, then hand the
 * pte-level work to handle_pte_fault().  Returns VM_FAULT_OOM if any
 * table allocation fails, otherwise handle_pte_fault()'s result.
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// __set_current_state(TASK_RUNNING);

	// count_vm_event(PGFAULT);

	// if (unlikely(is_vm_hugetlb_page(vma)))
	// 	return hugetlb_fault(mm, vma, address, flags);

	pgd = pgd_offset(mm, address);
	printf("this is %s(): %d   pgd = 0x%x\r\n", __func__, __LINE__, pgd);
	pud = pud_alloc(mm, pgd, address);
	printf("this is %s(): %d   pud = 0x%x\r\n", __func__, __LINE__, pud);
	if (!pud)
		return VM_FAULT_OOM;
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	pmd = pmd_alloc(mm, pud, address);
	printf("this is %s(): %d   pmd = 0x%x\r\n", __func__, __LINE__, pmd);
	if (!pmd)
		return VM_FAULT_OOM;
	printf("this is %s(): %d   pmd_present(*(pmd)) = 0x%x\r\n", __func__, __LINE__, pmd_present(*(pmd)));
	pte = pte_alloc_map(mm, pmd, address);
	printf("this is %s(): %d   pte = 0x%x\r\n", __func__, __LINE__, pte);
	if (!pte)
		return VM_FAULT_OOM;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

/*
 * Allocate a user pte table for @pmd at @address and publish it under
 * mm->page_table_lock if nobody else populated the pmd first.
 * Returns 0 on success, -ENOMEM if pte_alloc_one() fails.
 *
 * NOTE(review): when we lose the populate race, `new` stays non-NULL
 * but pte_free() is commented out below, so the freshly allocated
 * table leaks.  Confirm whether that is intentional in this port.
 */
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	pgtable_t new = pte_alloc_one(mm, address);
	printf("this is %s(): %d   new = 0x%x\r\n", __func__, __LINE__, new);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	// smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	// if (new)
	// 	pte_free(mm, new);
	return 0;
}

/*
 * Allocate a kernel pte table for @pmd at @address and hook it up,
 * unless another path has populated the pmd in the meantime — in which
 * case the freshly allocated table is released again.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *fresh = pte_alloc_one_kernel(&init_mm, address);

	if (fresh == NULL)
		return -ENOMEM;

	// smp_wmb(); /* See comment in __pte_alloc */

	// spin_lock(&init_mm.page_table_lock);
	if (pmd_present(*pmd)) {
		/* Lost the race: somebody else populated it; drop ours. */
		pte_free_kernel(&init_mm, fresh);
	} else {
		pmd_populate_kernel(&init_mm, pmd, fresh);
	}
	// spin_unlock(&init_mm.page_table_lock);
	return 0;
}
