#include <linux/sched.h>
// #include <linux/elf.h>
#include <linux/elf2.h>
#include <linux/fs2.h>
#include <linux/mm.h>
#include <linux/binfmts.h>

#include <asm/mmu_context.h>

/* Minimum alignment unit for ELF segment mapping: one MMU page. */
#define ELF_MIN_ALIGN	PAGE_SIZE

/* Round an address down to the start of its page. */
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
/* Byte offset of an address within its page. */
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
/* Round a size or address up to the next page boundary. */
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

/*
 * Map one PT_LOAD segment of an ELF image.
 *
 * @filep:      the ELF file being mapped
 * @addr:       requested virtual address (page-rounded down here)
 * @eppnt:      program header of the segment to map
 * @prot:       PROT_* protection bits for the mapping
 * @type:       MAP_* flags for the mapping
 * @total_size: full size of the ELF (interpreter) image for the first
 *              mapping of a dynamic object, or 0 to map just this segment
 *
 * Returns the address returned by do_mmap(), or the page-aligned @addr
 * directly for a segment with zero file size.
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	/* Extend size/offset so the in-page offset of p_vaddr is covered. */
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// down_write(&current->mm->mmap_sem);
	/*
	* total_size is the size of the ELF (interpreter) image.
	* The _first_ mmap needs to know the full size, otherwise
	* randomization might put this image into an overlapping
	* position with the ELF binary image. (since size < total_size)
	* So we first map the 'big' image - and unmap the remainder at
	* the end. (which unmap is needed for ELF images with holes.)
	*/
	if (total_size) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/*
		 * Upstream maps the whole image and then unmaps the tail
		 * (see the commented do_munmap() below).  do_munmap() is
		 * not wired up yet, so map the full page-aligned image
		 * for now.  Previously this branch was entirely commented
		 * out, so map_addr was returned uninitialized (UB).
		 */
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		// if (!BAD_ADDR(map_addr))
		// 	do_munmap(current->mm, map_addr+size, total_size-size);
	} else {
		map_addr = do_mmap(filep, addr, size, prot, type, off);
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// up_write(&current->mm->mmap_sem);
	return(map_addr);
}

/*
 * Load an ELF executable into the current process image and start it.
 *
 * Work-in-progress port of Linux's fs/binfmt_elf.c: the interpreter
 * (PT_INTERP) path, PT_GNU_STACK handling, most consistency checks and
 * the brk/bss setup are still commented out; only the straight PT_LOAD
 * mapping path is active.  The disabled upstream code is kept in place
 * to document intent.
 *
 * @bprm: binary parameter block (exec file, first BINPRM_BUF_SIZE bytes
 *        of the image in bprm->buf, argc/argv, stack pointer bprm->p)
 * @regs: register frame to be primed with the new entry point
 *
 * Returns 0 on success, negative errno on failure.
 */
int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	// int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	/* Scratch area for the exec and interpreter ELF headers. */
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	loc = kmalloc(sizeof(*loc));
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	// if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
	// 	goto out;

	// if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
	// 	goto out;
	// if (!elf_check_arch(&loc->elf_ex))
	// 	goto out;
	// if (!bprm->file->f_op||!bprm->file->f_op->mmap)
	// 	goto out;

	/* Now read in all of the header information */
	// if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
	// 	goto out;
	/* Sanity-check e_phnum before sizing the allocation, so a corrupt
	 * header cannot overflow 'size' or request a huge kmalloc. */
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	// elf_phdata = kmalloc(size, GFP_KERNEL);
	elf_phdata = kmalloc(size);
	if (!elf_phdata)
		goto out;

	/* Read the program header table from the file. */
	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/* NOTE(review): the short-read bail below is deliberately
		 * disabled during bring-up; if kernel_read() really fails
		 * here we proceed with garbage phdrs.  Re-enable once
		 * kernel_read()'s return convention is settled. */
		// if (retval >= 0)
		// 	retval = -EIO;
		// goto out_free_ph;
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);

#if 0
	printf("loc->elf_ex.e_phoff = %x\r\n", loc->elf_ex.e_phoff);	
	printf("loc->elf_ex.e_entry = %x\r\n", loc->elf_ex.e_entry);

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		printf("elf_phdata->p_type = %x\r\n", elf_phdata->p_type);	
		printf("elf_phdata->p_vaddr = %x, elf_phdata->p_offset = %x, elf_phdata->p_filesz = %x\r\n",
					elf_phdata->p_vaddr, elf_phdata->p_offset, elf_phdata->p_filesz);	
		// if (CHECK_PT_TYPE_LOAD(phdr)) {
		// 	memcpy((char *)elf_phdata->p_vaddr, buf_read + elf_phdata->p_offset, elf_phdata->p_filesz);
		// }
		elf_phdata++;
	}

	printf("this is %s(): %d while1\r\n", __func__, __LINE__);
	while (1);
#endif

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* PT_INTERP scan disabled: no dynamic-linker support yet. */
	// for (i = 0; i < loc->elf_ex.e_phnum; i++) {
	// 	if (elf_ppnt->p_type == PT_INTERP) {
	// 		/* This is the program interpreter used for
	// 		 * shared libraries - for now assume that this
	// 		 * is an a.out format binary
	// 		 */
	// 		retval = -ENOEXEC;
	// 		if (elf_ppnt->p_filesz > PATH_MAX || 
	// 		    elf_ppnt->p_filesz < 2)
	// 			goto out_free_ph;

	// 		retval = -ENOMEM;
	// 		elf_interpreter = kmalloc(elf_ppnt->p_filesz,
	// 					  GFP_KERNEL);
	// 		if (!elf_interpreter)
	// 			goto out_free_ph;

	// 		retval = kernel_read(bprm->file, elf_ppnt->p_offset,
	// 				     elf_interpreter,
	// 				     elf_ppnt->p_filesz);
	// 		if (retval != elf_ppnt->p_filesz) {
	// 			if (retval >= 0)
	// 				retval = -EIO;
	// 			goto out_free_interp;
	// 		}
	// 		/* make sure path is NULL terminated */
	// 		retval = -ENOEXEC;
	// 		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
	// 			goto out_free_interp;

	// 		/*
	// 		 * The early SET_PERSONALITY here is so that the lookup
	// 		 * for the interpreter happens in the namespace of the 
	// 		 * to-be-execed image.  SET_PERSONALITY can select an
	// 		 * alternate root.
	// 		 *
	// 		 * However, SET_PERSONALITY is NOT allowed to switch
	// 		 * this task into the new images's memory mapping
	// 		 * policy - that is, TASK_SIZE must still evaluate to
	// 		 * that which is appropriate to the execing application.
	// 		 * This is because exit_mmap() needs to have TASK_SIZE
	// 		 * evaluate to the size of the old image.
	// 		 *
	// 		 * So if (say) a 64-bit application is execing a 32-bit
	// 		 * application it is the architecture's responsibility
	// 		 * to defer changing the value of TASK_SIZE until the
	// 		 * switch really is going to happen - do this in
	// 		 * flush_thread().	- akpm
	// 		 */
	// 		SET_PERSONALITY(loc->elf_ex);

	// 		interpreter = open_exec(elf_interpreter);
	// 		retval = PTR_ERR(interpreter);
	// 		if (IS_ERR(interpreter))
	// 			goto out_free_interp;

	// 		/*
	// 		 * If the binary is not readable then enforce
	// 		 * mm->dumpable = 0 regardless of the interpreter's
	// 		 * permissions.
	// 		 */
	// 		if (file_permission(interpreter, MAY_READ) < 0)
	// 			bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

	// 		retval = kernel_read(interpreter, 0, bprm->buf,
	// 				     BINPRM_BUF_SIZE);
	// 		if (retval != BINPRM_BUF_SIZE) {
	// 			if (retval >= 0)
	// 				retval = -EIO;
	// 			goto out_free_dentry;
	// 		}

	// 		/* Get the exec headers */
	// 		loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
	// 		break;
	// 	}
	// 	elf_ppnt++;
	// }

	// elf_ppnt = elf_phdata;
	// for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
	// 	if (elf_ppnt->p_type == PT_GNU_STACK) {
	// 		if (elf_ppnt->p_flags & PF_X)
	// 			executable_stack = EXSTACK_ENABLE_X;
	// 		else
	// 			executable_stack = EXSTACK_DISABLE_X;
	// 		break;
	// 	}

	/* Some simple consistency checks for the interpreter */
	// if (elf_interpreter) {
	// 	retval = -ELIBBAD;
	// 	/* Not an ELF interpreter */
	// 	if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
	// 		goto out_free_dentry;
	// 	/* Verify the interpreter has a valid arch */
	// 	if (!elf_check_arch(&loc->interp_elf_ex))
	// 		goto out_free_dentry;
	// } else {
	// 	/* Executables without an interpreter also need a personality  */
	// 	SET_PERSONALITY(loc->elf_ex);
	// }

	pgd_t *pgd = cpu_get_pgd();
	printf("this is %s(): %d >>> current pgd = 0x%x\r\n", __func__, __LINE__, pgd);

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	pgd = cpu_get_pgd();
	printf("this is %s(): %d >>> current pgd = 0x%x\r\n", __func__, __LINE__, pgd);

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	/* OK, This is the point of no return */
	// current->flags &= ~PF_FORKNOEXEC;
	// current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	// SET_PERSONALITY(loc->elf_ex);
	// if (elf_read_implies_exec(loc->elf_ex, executable_stack))
	// 	current->personality |= READ_IMPLIES_EXEC;

	// if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
	// 	current->flags |= PF_RANDOMIZE;
	// arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	// retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
	// 			 executable_stack);
	// if (retval < 0) {
	// 	send_sig(SIGKILL, current, 0);
	// 	goto out_free_dentry;
	// }
	
	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		/* elf_flags was previously left uninitialized and passed to
		 * elf_map() (UB) because the PF_* -> PROT_* / MAP_* setup
		 * below is commented out.  Zero both until it is enabled;
		 * NOTE(review): this presumably relies on do_mmap() applying
		 * default protections - confirm against its implementation. */
		int elf_prot = 0, elf_flags = 0;
		unsigned long k, vaddr;

		printf("this is %s(): %d\r\n", __func__, __LINE__);

		/* Only PT_LOAD segments occupy memory. */
		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		printf("this is %s(): %d\r\n", __func__, __LINE__);

		// if (unlikely (elf_brk > elf_bss)) {
		// 	unsigned long nbyte;
	            
		// 	/* There was a PT_LOAD segment with p_memsz > p_filesz
		// 	   before this one. Map anonymous pages, if needed,
		// 	   and clear the area.  */
		// 	retval = set_brk (elf_bss + load_bias,
		// 			  elf_brk + load_bias);
		// 	if (retval) {
		// 		send_sig(SIGKILL, current, 0);
		// 		goto out_free_dentry;
		// 	}
		// 	nbyte = ELF_PAGEOFFSET(elf_bss);
		// 	if (nbyte) {
		// 		nbyte = ELF_MIN_ALIGN - nbyte;
		// 		if (nbyte > elf_brk - elf_bss)
		// 			nbyte = elf_brk - elf_bss;
		// 		if (clear_user((void __user *)elf_bss +
		// 					load_bias, nbyte)) {
		// 			/*
		// 			 * This bss-zeroing can fail if the ELF
		// 			 * file specifies odd protections. So
		// 			 * we don't check the return value
		// 			 */
		// 		}
		// 	}
		// }

		// if (elf_ppnt->p_flags & PF_R)
		// 	elf_prot |= PROT_READ;
		// if (elf_ppnt->p_flags & PF_W)
		// 	elf_prot |= PROT_WRITE;
		// if (elf_ppnt->p_flags & PF_X)
		// 	elf_prot |= PROT_EXEC;

		// elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
// 		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
// 			elf_flags |= MAP_FIXED;
// 		} else if (loc->elf_ex.e_type == ET_DYN) {
// 			/* Try and get dynamic programs out of the way of the
// 			 * default mmap base, as well as whatever program they
// 			 * might try to exec.  This is because the brk will
// 			 * follow the loader, and is not movable.  */
// #ifdef CONFIG_X86
// 			load_bias = 0;
// #else
// 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
// #endif
// 		}

		/* total_size == 0: map just this segment. */
		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		// if (BAD_ADDR(error)) {
		// 	send_sig(SIGKILL, current, 0);
		// 	retval = IS_ERR((void *)error) ?
		// 		PTR_ERR((void*)error) : -EINVAL;
		// 	goto out_free_dentry;
		// }

		// if (!load_addr_set) {
		// 	load_addr_set = 1;
		// 	load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			// if (loc->elf_ex.e_type == ET_DYN) {
			// 	load_bias += error -
			// 	             ELF_PAGESTART(load_bias + vaddr);
			// 	load_addr += load_bias;
			// 	reloc_func_desc = load_bias;
			// }
		// }
		/* Track the lowest code address and highest data/code/bss
		 * extents across all PT_LOAD segments. */
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		// if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		//     elf_ppnt->p_memsz > TASK_SIZE ||
		//     TASK_SIZE - elf_ppnt->p_memsz < k) {
		// 	/* set_brk can never work. Avoid overflows. */
		// 	send_sig(SIGKILL, current, 0);
		// 	retval = -EINVAL;
		// 	goto out_free_dentry;
		// }

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	}

	/* Relocate all recorded addresses by the load bias (0 for ET_EXEC
	 * while the ET_DYN path above is disabled). */
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	// retval = set_brk(elf_bss, elf_brk);
	// if (retval) {
		// send_sig(SIGKILL, current, 0);
		// goto out_free_dentry;
	// }
	// if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
	// 	send_sig(SIGSEGV, current, 0);
	// 	retval = -EFAULT; /* Nobody gets to see this, but.. */
	// 	goto out_free_dentry;
	// }

	if (elf_interpreter) {
		/* Unreachable while the PT_INTERP scan above is disabled:
		 * elf_interpreter is always NULL. */
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		// unsigned long uninitialized_var(interp_map_addr);

		// elf_entry = load_elf_interp(&loc->interp_elf_ex,
		// 			    interpreter,
		// 			    &interp_map_addr,
		// 			    load_bias);
		// if (!IS_ERR((void *)elf_entry)) {
		// 	/*
		// 	 * load_elf_interp() returns relocation
		// 	 * adjustment
		// 	 */
		// 	interp_load_addr = elf_entry;
		// 	elf_entry += loc->interp_elf_ex.e_entry;
		// }
		// if (BAD_ADDR(elf_entry)) {
		// 	force_sig(SIGSEGV, current);
		// 	retval = IS_ERR((void *)elf_entry) ?
		// 			(int)elf_entry : -EINVAL;
		// 	goto out_free_dentry;
		// }
		// reloc_func_desc = interp_load_addr;

		// allow_write_access(interpreter);
		// fput(interpreter);
		// kfree(elf_interpreter);
	} else {
		printf("this is %s(): %d >>> loc->elf_ex.e_entry = %x\r\n", __func__, __LINE__, loc->elf_ex.e_entry);
		/* Static binary: jump straight to the image's entry point. */
		elf_entry = loc->elf_ex.e_entry;
		// if (BAD_ADDR(elf_entry)) {
		// 	force_sig(SIGSEGV, current);
		// 	retval = -EINVAL;
		// 	goto out_free_dentry;
		// }
	}

	/* NOTE(review): elf_phdata and loc are never freed on the success
	 * path (kfree calls below are commented out) - presumably kfree()
	 * is not available yet; re-enable once it is. */
	// kfree(elf_phdata);

	// set_binfmt(&elf_format);

// #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
// 	retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
// 	if (retval < 0) {
// 		send_sig(SIGKILL, current, 0);
// 		goto out;
// 	}
// #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	// install_exec_creds(bprm);
	// current->flags &= ~PF_FORKNOEXEC;
	// retval = create_elf_tables(bprm, &loc->elf_ex,
			//   load_addr, interp_load_addr);
	// if (retval < 0) {
		// send_sig(SIGKILL, current, 0);
		// goto out;
	// }
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

// #ifdef arch_randomize_brk
// 	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
// 		current->mm->brk = current->mm->start_brk =
// 			arch_randomize_brk(current->mm);
// #endif

	// if (current->personality & MMAP_PAGE_ZERO) {
	// 	/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
	// 	   and some applications "depend" upon this behavior.
	// 	   Since we do not have the power to recompile these, we
	// 	   emulate the SVr4 behavior. Sigh. */
	// 	down_write(&current->mm->mmap_sem);
	// 	error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
	// 			MAP_FIXED | MAP_PRIVATE, 0);
	// 	up_write(&current->mm->mmap_sem);
	// }

// #ifdef ELF_PLAT_INIT
// 	/*
// 	 * The ABI may specify that certain registers be set up in special
// 	 * ways (on i386 %edx is the address of a DT_FINI function, for
// 	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
// 	 * that the e_entry field is the address of the function descriptor
// 	 * for the startup routine, rather than the address of the startup
// 	 * routine itself.  This macro performs whatever initialization to
// 	 * the regs structure is required as well as any relocations to the
// 	 * function descriptor entries when executing dynamically links apps.
// 	 */
// 	ELF_PLAT_INIT(regs, reloc_func_desc);
// #endif

	pgd = cpu_get_pgd();
	printf("this is %s(): %d >>> current pgd = 0x%x\r\n", __func__, __LINE__, pgd);

	// start_thread(regs, elf_entry, bprm->p);

	/* Interim stack setup (instead of create_elf_tables/setup_arg_pages):
	 * hand the new thread a fresh block whose first two words are
	 * argc and argv.  NOTE(review): argv is presumably a pointer;
	 * the cast keeps the conversion explicit either way. */
	unsigned long *kstack = (unsigned long *)kmalloc(THREAD_SIZE);
	if (!kstack) {
		/* Previously dereferenced without a check - NULL deref
		 * on allocation failure. */
		retval = -ENOMEM;
		goto out;
	}
	kstack[0] = bprm->argc;
	kstack[1] = (unsigned long)bprm->argv;
	printf("this is %s(): %d >>> bprm->argc = %d\r\n", __func__, __LINE__, bprm->argc);
	printf("this is %s(): %d >>> bprm->argv = %x\r\n", __func__, __LINE__, bprm->argv);
	printf("this is %s(): %d >>> kstack[0] = %d\r\n", __func__, __LINE__, kstack[0]);
	printf("this is %s(): %d >>> kstack[1] = %x\r\n", __func__, __LINE__, kstack[1]);
	start_thread(regs, elf_entry, (unsigned long)kstack);

	// start_thread(regs, elf_entry, (unsigned long)kmalloc(THREAD_SIZE));
	retval = 0;
out:
	// kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	// allow_write_access(interpreter);
	// if (interpreter)
		// fput(interpreter);
out_free_interp:
	// kfree(elf_interpreter);
out_free_ph:
	// kfree(elf_phdata);
	goto out;
}
