#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

/*
 * Release all resources owned by a dead task.
 *
 * NOTE(review): every teardown step is currently stubbed out, so this is
 * a no-op placeholder — the task_struct and its kernel stack are never
 * reclaimed (get_task_struct_addr() only ever bumps its cursor forward
 * and has no matching free).  TODO: implement real freeing once the
 * allocator supports it.
 */
void free_task(struct task_struct *tsk)
{
	// prop_local_destroy_single(&tsk->dirties);
	// account_kernel_stack(tsk->stack, -1);
	// free_thread_info(tsk->stack);
	// rt_mutex_debug_task_free(tsk);
	// ftrace_graph_exit_task(tsk);
	// free_task_struct(tsk);
}

/*
 * Final drop of a task_struct reference.
 *
 * Upstream this runs when the task's usage count hits zero; the sanity
 * WARN_ONs and the credential/delay-accounting teardown are stubbed out
 * here, so this simply forwards to free_task() unconditionally (the
 * profile_handoff_task() gate is also disabled).
 */
void __put_task_struct(struct task_struct *tsk)
{
	// WARN_ON(!tsk->exit_state);
	// WARN_ON(atomic_read(&tsk->usage));
	// WARN_ON(tsk == current);

	// exit_creds(tsk);
	// delayacct_tsk_free(tsk);

	// if (!profile_handoff_task(tsk))
		free_task(tsk);
}

/*
 * Hand out consecutive THREAD_SIZE-sized slots from a fixed bump region.
 *
 * Slots are carved from a static region starting at 0xD5000000 and are
 * never reclaimed (there is no matching free).  Used for both task_struct
 * and thread_info allocations.
 *
 * Fix: the cursor was kept in a signed int — 0xD5000000 does not fit in a
 * 32-bit int, and advancing it past INT_MAX is signed-overflow UB.  Keep
 * the cursor in an unsigned long (pointer-sized on this target) instead.
 */
static void *get_task_struct_addr(void)
{
	static unsigned long task_stack_addr_base = 0xD5000000UL;
	// static unsigned long task_stack_addr_base = phys_to_virt(0x90000000);

	void *p = (void *)task_stack_addr_base;
	task_stack_addr_base += THREAD_SIZE;

	return p;
}

/*
 * Allocate and zero a task_struct.
 *
 * Memory comes from the bump allocator in get_task_struct_addr(), so each
 * call consumes one THREAD_SIZE (2^1 * 4K = 8K) slot that is never freed.
 * The returned struct is fully zeroed; never returns NULL.
 */
static inline struct task_struct *alloc_task_struct(void)
{
	// struct task_struct *tsk = (struct task_struct *)__get_free_pages(GFP_KERNEL, 1);
	struct task_struct *tsk = (struct task_struct *)get_task_struct_addr();
	// struct task_struct *tsk = (struct task_struct *)kmalloc(THREAD_SIZE);

	memset(tsk, 0 , sizeof(struct task_struct));

	/* Fix: printing a pointer with %x is undefined behavior (format /
	 * argument mismatch); use %p with a void * argument. */
	printf("this is %s(): %d	tsk = %p\r\n", __func__, __LINE__, (void *)tsk);
	return tsk;
}

static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	// struct thread_info *ti = (struct thread_info *)__get_free_pages(GFP_KERNEL, 1);
	struct thread_info *ti = (struct thread_info *)get_task_struct_addr();
	// struct thread_info *ti = (struct thread_info *)kmalloc(THREAD_SIZE);
	memset(ti, 0 , sizeof(struct task_struct));

	printf("this is %s(): %d	ti = 0x%x\r\n", __func__, __LINE__, ti);
	return ti;	//2^1 * 4K = 8K
}

/*
 * Allocate a fresh task_struct + thread_info pair for a child of @orig.
 *
 * Returns the new (zeroed) task with tsk->stack pointing at its
 * thread_info, or NULL on allocation failure.  Nothing is copied from
 * @orig yet; copy_process() fills in the fields afterwards.
 *
 * Fix: removed the unused 'err' variable and the dead, unreferenced
 * 'out:' label/cleanup tail (no goto ever targeted it).
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		// free_task_struct(tsk);
		return NULL;
	}

	tsk->stack = ti;

	/*
	 * NOTE(review): upstream passes 'orig' here so the child starts
	 * from a copy of the parent's thread_info; this version passes
	 * 'tsk' (a self-copy) and relies on the zeroed thread_info from
	 * alloc_thread_info() — confirm this is intentional.
	 */
	// setup_thread_stack(tsk, orig);
	setup_thread_stack(tsk, tsk);

	// account_kernel_stack(ti, 1);

	return tsk;
}

/*
 * Copy the VMA list and page tables of @oldmm into the freshly created
 * @mm.  Returns 0 on success, -ENOMEM if a vm_area_struct allocation
 * fails, or the error from copy_page_range().
 *
 * NOTE(review): all locking (mmap_sem), cache flushing, file-mapping
 * bookkeeping and accounting from upstream are stubbed out, so this is
 * only safe while a single context manipulates both mm's.
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;	/* set but unused: VM_ACCOUNT path is stubbed */
	// struct mempolicy *pol;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// down_write(&oldmm->mmap_sem);
	// flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	// down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* Start the child mm with an empty mapping list and rbtree. */
	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	// cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;	/* tail pointer of the singly linked vm_next chain */
	// retval = ksm_fork(mm, oldmm);
	// if (retval)
	// 	goto out;

	/* Walk the parent's VMAs in address order, cloning each one. */
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		struct file *file;	/* assigned below; file-backed handling is stubbed */

		// if (mpnt->vm_flags & VM_DONTCOPY) {
		// 	long pages = vma_pages(mpnt);
		// 	mm->total_vm -= pages;
		// 	vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
		// 						-pages);
		// 	continue;
		// }
		charge = 0;
		// if (mpnt->vm_flags & VM_ACCOUNT) {
		// 	unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
		// 	if (security_vm_enough_memory(len))
		// 		goto fail_nomem;
		// 	charge = len;
		// }
		// tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		tmp = kmalloc(sizeof(struct vm_area_struct));
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;	/* struct copy, then re-point the mm/link fields */
		// pol = mpol_dup(vma_policy(mpnt));
		// retval = PTR_ERR(pol);
		// if (IS_ERR(pol))
			// goto fail_nomem_policy;
		// vma_set_policy(tmp, pol);
		// tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		// anon_vma_link(tmp);
		file = tmp->vm_file;
		// if (file) {
		// 	struct inode *inode = file->f_path.dentry->d_inode;
		// 	struct address_space *mapping = file->f_mapping;

		// 	get_file(file);
		// 	if (tmp->vm_flags & VM_DENYWRITE)
		// 		atomic_dec(&inode->i_writecount);
		// 	spin_lock(&mapping->i_mmap_lock);
		// 	if (tmp->vm_flags & VM_SHARED)
		// 		mapping->i_mmap_writable++;
		// 	tmp->vm_truncate_count = mpnt->vm_truncate_count;
		// 	flush_dcache_mmap_lock(mapping);
		// 	/* insert tmp into the share list, just after mpnt */
		// 	vma_prio_tree_add(tmp, mpnt);
		// 	flush_dcache_mmap_unlock(mapping);
		// 	spin_unlock(&mapping->i_mmap_lock);
		// }

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		// if (is_vm_hugetlb_page(tmp))
		// 	reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		/*
		 * NOTE(review): each insertion descends the previous node's
		 * right child, so the rbtree is built as a right-leaning
		 * chain — correctness relies on __vma_link_rb() rebalancing.
		 */
		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		// if (tmp->vm_ops && tmp->vm_ops->open)
		// 	tmp->vm_ops->open(tmp);

		if (retval)
			goto out;	/* tmp stays linked; child mm is partially copied */
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	}
	/* a new mm has just been created */
	// arch_dup_mmap(oldmm, mm);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	retval = 0;
out:
	// up_write(&mm->mmap_sem);
	// flush_tlb_mm(oldmm);
	// up_write(&oldmm->mmap_sem);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return retval;
fail_nomem_policy:
	// kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	// vm_unacct_memory(charge);
	goto out;
}

/*
 * Allocate a page global directory for @mm.
 * Returns 0 on success, -ENOMEM if pgd_alloc() fails.
 */
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	return mm->pgd ? 0 : -ENOMEM;
}

/*
 * Minimal initialisation of a fresh mm_struct: empty mmlist, reset
 * allocation caches, and a newly allocated page directory.
 *
 * Most upstream bookkeeping (refcounts, locks, RSS counters, AIO,
 * owner tracking) is not implemented in this kernel.  @p is unused.
 *
 * Returns @mm on success, NULL if the pgd allocation fails (note: the
 * caller still owns the mm_struct memory in that case).
 */
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
	INIT_LIST_HEAD(&mm->mmlist);
	mm->nr_ptes = 0;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (mm_alloc_pgd(mm) != 0)
		return NULL;

	return mm;
}

/*
 * Allocate and initialise a brand-new, empty mm_struct.
 * Returns NULL if the allocation or mm_init() fails.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct *new_mm = kmalloc(sizeof(struct mm_struct));

	if (!new_mm)
		return NULL;

	memset(new_mm, 0, sizeof(*new_mm));
	return mm_init(new_mm, current);
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 *
 * Returns the new mm, or NULL if the parent has no mm or any step
 * fails.  NOTE(review): every failure path after the kmalloc leaks the
 * mm_struct (and, past mm_init, its pgd) because free_mm()/mm_free_pgd()
 * are stubbed out in this kernel.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* Kernel threads have no mm; nothing to duplicate. */
	if (!oldmm)
		return NULL;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// mm = allocate_mm();
	mm = kmalloc(sizeof(struct mm_struct));
	if (!mm)
		goto fail_nomem;

	/* Start from a byte-wise copy of the parent's mm, then reset the
	 * fields mm_init() owns (lists, caches, fresh pgd). */
	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	// mm->token_priority = 0;
	// mm->last_interval = 0;

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* Architecture-specific MMU context (e.g. ASID) for the new mm. */
	if (init_new_context(tsk, mm)) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		goto fail_nocontext;
	}

	// dup_mm_exe_file(oldmm, mm);

	/* Clone the VMA list and page tables from the parent. */
	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	// mm->hiwater_rss = get_mm_rss(mm);
	// mm->hiwater_vm = mm->total_vm;

	// if (mm->binfmt && !try_module_get(mm->binfmt->module))
	// 	goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	// mm->binfmt = NULL;
	// mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	// mm_free_pgd(mm);
	// free_mm(mm);
	return NULL;
}

/*
 * Set up the child's address space for fork.
 *
 * Kernel threads (current->mm == NULL) keep mm/active_mm NULL and
 * succeed immediately.  User tasks always get a full private copy of
 * the parent's mm via dup_mm() — CLONE_VM sharing is not implemented.
 *
 * Returns 0 on success, -ENOMEM if the mm copy fails.
 *
 * Fix: removed the unused 'good_mm:' label (its goto was commented out,
 * leaving a defined-but-unused label) and the redundant retval shuffle.
 */
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct *mm, *oldmm;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 * Kernel threads run borrowed address spaces, so there is
	 * nothing to copy.
	 */
	oldmm = current->mm;
	if (!oldmm) {
		printf("this is %s(): %d	########i am a kernel thread######\r\n", __func__, __LINE__);
		return 0;
	}

	// if (clone_flags & CLONE_VM) {
	// 	atomic_inc(&oldmm->mm_users);
	// 	mm = oldmm;
	// 	goto good_mm;
	// }

	mm = dup_mm(tsk);
	if (!mm)
		return -ENOMEM;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;
}

/*
 * Give the child its own copy of the parent's fs_struct (cwd/root).
 *
 * CLONE_FS sharing is not implemented here; every child gets a private
 * copy.  Returns 0 on success, -ENOMEM if copy_fs_struct() fails.
 */
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *parent_fs = current->fs;

	tsk->fs = copy_fs_struct(parent_fs);
	return tsk->fs ? 0 : -ENOMEM;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	// oldf = current->files;
	// if (!oldf)
	// 	goto out;

	// if (clone_flags & CLONE_FILES) {
	// 	atomic_inc(&oldf->count);
	// 	goto out;
	// }

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 * Allocate a fresh signal_struct for the child.
 *
 * Only the child-exit wait queue is initialised; the rest of upstream's
 * signal bookkeeping (counts, timers, rlimits, accounting) is not
 * implemented in this kernel.  CLONE_THREAD sharing is likewise absent,
 * so every task gets its own signal_struct.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (tsk->signal is
 * left NULL in that case).
 */
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *new_sig = kmalloc(sizeof(struct signal_struct));

	tsk->signal = new_sig;
	if (!new_sig)
		return -ENOMEM;

	init_waitqueue_head(&new_sig->wait_chldexit);

	return 0;
}

/*
 * Build a new task as a copy of the current one: allocate the
 * task_struct/stack, assign a PID and default scheduling parameters,
 * duplicate files/fs/signal/mm, set up the thread context, and link the
 * task into the global task list and its parent's children list.
 *
 * Returns the new task, or NULL on any failure.
 *
 * Fix: the bad_fork_cleanup_* labels previously fell through to
 * "return p", handing a half-initialized task back to the caller on
 * failure as if fork had succeeded.  They now return NULL, matching the
 * dup_task_struct() failure path.  NOTE(review): the per-label resource
 * cleanup is still unimplemented, so files/fs/signal/mm leak on failure.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int *child_tidptr,
					// struct pid *pid,
					int *ppid,
					int trace)
{
	struct task_struct *p;
	int retval;
	static int pid = 1;	/* monotonically increasing PID source; never recycled */

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	p = dup_task_struct(current);
	if (!p)
		return NULL;

	p->state = TASK_RUNNING;
	p->pid = pid++;
	p->prio = MAX_PRIO-20;
	p->static_prio = MAX_PRIO-20;
	p->normal_prio = MAX_PRIO-20;
	p->policy = SCHED_NORMAL;
	p->rt.time_slice = HZ;
	INIT_LIST_HEAD(&p->tasks);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, 0);

	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	p->exit_state = 0;

	p->real_parent = current;

	/* Make the fully constructed task globally visible. */
	list_add_tail(&p->tasks, &init_task.tasks);
	list_add_tail(&p->sibling, &p->real_parent->children);

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	return p;

bad_fork_cleanup_io:
bad_fork_cleanup_signal:
bad_fork_cleanup_sighand:
bad_fork_cleanup_files:
bad_fork_cleanup_semundo:
	return NULL;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int *parent_tidptr,
	      int *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	wake_up_new_task(p, 0);

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	return p->pid;
	// return 0;
}

/*
 * Dump a one-line summary (PID, state, command name) of every task in
 * the global task list to the console — a minimal 'ps'.
 */
void sys_show_task(void)
{
	struct task_struct *iter;

	printf("%-6s %-6s %-20s\r\n", "PID", "STAT", "COMMAND");

	for_each_process(iter) {
		printf("%-6d %-6d %-20s\r\n", iter->pid, iter->state, iter->comm);
	}
}
