#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/bug.h>

/*
 * Detach @p from the global task bookkeeping lists.
 *
 * Trimmed-down port of the Linux __unhash_process(): pid-hash detaching
 * and the thread-group-leader handling are still commented out as
 * reference.  The active work unlinks the task from the global ->tasks
 * list and from its parent's children list (->sibling), using
 * list_del_init() so the entries are left re-initialized rather than
 * poisoned (the original used list_del_rcu()).
 *
 * NOTE(review): the original runs under tasklist_lock write-locked;
 * confirm the caller (__exit_signal via release_task) serializes this.
 */
static void __unhash_process(struct task_struct *p)
{
	// nr_threads--;
	// detach_pid(p, PIDTYPE_PID);
	// if (thread_group_leader(p)) {
		// detach_pid(p, PIDTYPE_PGID);
		// detach_pid(p, PIDTYPE_SID);

		// list_del_rcu(&p->tasks);
		list_del_init(&p->tasks);
		// __get_cpu_var(process_counts)--;
	// }
	// list_del_rcu(&p->thread_group);
	list_del_init(&p->sibling);
}

/*
 * Tear down the signal-handling state of an exiting task.
 *
 * Port of the Linux __exit_signal().  Nearly all of the original body —
 * signal_struct refcounting, CPU-timer teardown, per-thread accounting
 * roll-up, sighand locking and pending-signal flushing — is still
 * commented out as reference.  The only active work is unhashing @tsk
 * from the global lists via __unhash_process().
 *
 * NOTE(review): the original holds sighand->siglock (and runs under
 * tasklist_lock from release_task()); no locking is in place here yet —
 * confirm callers serialize against concurrent list walkers.
 */
static void __exit_signal(struct task_struct *tsk)
{
	// struct signal_struct *sig = tsk->signal;
	// struct sighand_struct *sighand;

	// BUG_ON(!sig);
	// BUG_ON(!atomic_read(&sig->count));

	// sighand = rcu_dereference(tsk->sighand);
	// spin_lock(&sighand->siglock);

	// posix_cpu_timers_exit(tsk);
	// if (atomic_dec_and_test(&sig->count))
	// 	posix_cpu_timers_exit_group(tsk);
	// else {
	// 	/*
	// 	 * If there is any task waiting for the group exit
	// 	 * then notify it:
	// 	 */
	// 	if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
	// 		wake_up_process(sig->group_exit_task);

	// 	if (tsk == sig->curr_target)
	// 		sig->curr_target = next_thread(tsk);
	// 	/*
	// 	 * Accumulate here the counters for all threads but the
	// 	 * group leader as they die, so they can be added into
	// 	 * the process-wide totals when those are taken.
	// 	 * The group leader stays around as a zombie as long
	// 	 * as there are other threads.  When it gets reaped,
	// 	 * the exit.c code will add its counts into these totals.
	// 	 * We won't ever get here for the group leader, since it
	// 	 * will have been the last reference on the signal_struct.
	// 	 */
	// 	sig->utime = cputime_add(sig->utime, task_utime(tsk));
	// 	sig->stime = cputime_add(sig->stime, task_stime(tsk));
	// 	sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
	// 	sig->min_flt += tsk->min_flt;
	// 	sig->maj_flt += tsk->maj_flt;
	// 	sig->nvcsw += tsk->nvcsw;
	// 	sig->nivcsw += tsk->nivcsw;
	// 	sig->inblock += task_io_get_inblock(tsk);
	// 	sig->oublock += task_io_get_oublock(tsk);
	// 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	// 	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	// 	sig = NULL; /* Marker for below. */
	// }

	/* Active portion: remove the task from the global lists. */
	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	// flush_sigqueue(&tsk->pending);

	// tsk->signal = NULL;
	// tsk->sighand = NULL;
	// spin_unlock(&sighand->siglock);

	// __cleanup_sighand(sighand);
	// clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	// if (sig) {
	// 	flush_sigqueue(&sig->shared_pending);
	// 	taskstats_tgid_free(sig);
	// 	/*
	// 	 * Make sure ->signal can't go away under rq->lock,
	// 	 * see account_group_exec_runtime().
	// 	 */
	// 	task_rq_unlock_wait(tsk);
	// 	__cleanup_signal(sig);
	// }
}

/*
 * Final release of a dead task: remove every remaining trace of @p.
 *
 * Port of the Linux release_task().  Credential accounting, /proc
 * flushing, tasklist locking, zombie-leader reaping and the final RCU
 * put of the task_struct are all still commented out; the active work
 * is the list/signal teardown done by __exit_signal().
 *
 * Called from wait_task_zombie() once the waiter has claimed the child.
 */
void release_task(struct task_struct * p)
{
// 	struct task_struct *leader;
// 	int zap_leader;
// repeat:
	// tracehook_prepare_release_task(p);
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials */
	// atomic_dec(&__task_cred(p)->user->processes);

	// proc_flush_task(p);

	// write_lock_irq(&tasklist_lock);
	// tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	// zap_leader = 0;
	// leader = p->group_leader;
	// if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
	// 	BUG_ON(task_detached(leader));
	// 	do_notify_parent(leader, leader->exit_signal);
	// 	/*
	// 	 * If we were the last child thread and the leader has
	// 	 * exited already, and the leader's parent ignores SIGCHLD,
	// 	 * then we are the one who should release the leader.
	// 	 *
	// 	 * do_notify_parent() will have marked it self-reaping in
	// 	 * that case.
	// 	 */
	// 	zap_leader = task_detached(leader);

	// 	/*
	// 	 * This maintains the invariant that release_task()
	// 	 * only runs on a task in EXIT_DEAD, just for sanity.
	// 	 */
	// 	if (zap_leader)
	// 		leader->exit_state = EXIT_DEAD;
	// }

	// write_unlock_irq(&tasklist_lock);
	// release_thread(p);
	// call_rcu(&p->rcu, delayed_put_task_struct);
	/* NOTE(review): "tsk" below is from the original kernel source;
	 * this function's parameter is "p". Harmless while commented out. */
	// put_task_struct(tsk);

	// p = leader;
	// if (unlikely(zap_leader))
		// goto repeat;
}

/*
 * Wake one waiter sleeping on @parent's signal->wait_chldexit queue.
 *
 * The exiting child @p is passed as the wake key so the wait-queue
 * callback (child_wait_callback) receives it.  Uses a synchronous
 * wake-up of at most one TASK_INTERRUPTIBLE sleeper.
 */
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

/*
 * Notify the parent that @tsk has exited.
 *
 * Trimmed-down port of the Linux do_notify_parent(): instead of raising
 * SIGCHLD it only wakes any thread sleeping in do_wait() on the real
 * parent's wait queue.  @sig is currently unused.
 *
 * Returns the death "signal" the caller uses to pick an exit_state.
 * We always notify the parent here, so we return 0 (meaning "not
 * self-reaping": the parent must reap us → EXIT_ZOMBIE).
 *
 * Fix: the function was declared int but had no return statement, so
 * any use of its value was undefined behavior.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// __wake_up_parent(tsk, tsk->parent);
	__wake_up_parent(tsk, tsk->real_parent);

	return 0;	/* parent notified; not self-reaping (not DEATH_REAP) */
}

#define DEATH_REAP			-1

/*
 * Tell the parent we died and pick our exit_state.
 *
 * Wakes the parent via do_notify_parent() and then marks the task
 * EXIT_ZOMBIE (parent must reap) or EXIT_DEAD (self-reaping) based on
 * the death signal.  @group_dead is currently unused.
 *
 * Fix: `signal` was read at the exit_state decision while uninitialized
 * — undefined behavior that could randomly yield EXIT_DEAD and leak the
 * task past do_wait().  It is now explicitly 0 ("parent notified"), so
 * the task deterministically becomes EXIT_ZOMBIE and is reaped by the
 * parent in do_wait()/release_task().
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal = 0;	/* 0 = parent must reap us; DEATH_REAP = self-reap */

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	preempt_disable();
	do_notify_parent(tsk, 0);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
	preempt_enable();
}

/*
 * Terminate the current task.
 *
 * Notifies the parent (exit_notify), marks the task TASK_DEAD and
 * schedules away, never to run again.  If schedule() ever returns to a
 * dead task that is a bug: BUG() fires, and the infinite loop keeps the
 * CPU from running off the end of the function.
 *
 * NOTE(review): the original kernel do_exit() takes an exit code and is
 * marked noreturn; this port takes none — confirm callers don't expect
 * an exit status to be recorded.
 */
void do_exit(void)
{
	struct task_struct *tsk = current;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	exit_notify(tsk, 0);

	/* Mark dead and give up the CPU for good. */
	// preempt_disable();
	tsk->state = TASK_DEAD;
	schedule();
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;) {}
		// cpu_relax();	/* For when BUG is null */
}

/*
 * exit() system-call entry point.
 *
 * NOTE(review): the original passes an encoded exit status
 * ((error_code & 0xff) << 8); this port's do_exit() takes no argument,
 * so the exit code is currently discarded.
 */
void sys_exit(void)
{
	// do_exit((error_code&0xff)<<8);
	do_exit();
}

/*
 * Per-call options and bookkeeping for one do_wait() invocation.
 * Modelled on the Linux struct wait_opts but simplified: the pid is a
 * plain int pointer instead of a struct pid, and the info/stat/rusage
 * copy-out pointers are not yet carried.
 */
struct wait_opts {
	// enum pid_type		wo_type;
	int			wo_flags;	/* wait flags; only referenced by commented-out code so far */
	// struct pid		*wo_pid;
	int			*wo_pid;	/* pid to wait for (points at caller's local) */

	// struct siginfo __user	*wo_info;
	// int __user		*wo_stat;
	// struct rusage __user	*wo_rusage;

	wait_queue_t		child_wait;	/* entry queued on current->signal->wait_chldexit */
	int			notask_error;	/* set to -ECHILD each scan; error when no eligible child */
};

/*
 * Return nonzero when task @p carries exactly the pid the waiter
 * asked for in @wo.
 */
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	int wanted;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// return	wo->wo_type == PIDTYPE_MAX ||
	// 	task_pid_type(p, wo->wo_type) == wo->wo_pid;
	wanted = *wo->wo_pid;
	return wanted == p->pid;
}

/*
 * Decide whether @p is a child this waiter may consider at all.
 * Currently only the pid match is active; the clone/__WCLONE/__WALL
 * filtering from the original is still commented out.
 */
static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
	int matches;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	matches = eligible_pid(wo, p);
	if (!matches)
		return 0;
	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	// if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
	//     && !(wo->wo_flags & __WALL))
	// 	return 0;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return 1;
}

/*
 * Reap a child that is already a zombie.
 *
 * Heavily trimmed port of the Linux wait_task_zombie(): the EXIT_DEAD
 * xchg claim, parent-side accounting roll-up, status/rusage copy-out
 * and ptrace handling are all still commented out as reference.  The
 * active work releases the task and reports success to do_wait().
 *
 * Fix: the locals `state`, `status` and `traced` were declared but
 * never used by any active code (unused-variable warnings); they are
 * dropped here and must be re-declared when the commented-out code is
 * re-enabled.  `retval = 1; return retval;` collapsed to `return 1;`.
 *
 * Returns nonzero (1) to tell the caller the child was reaped.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	// pid_t pid = task_pid_vnr(p);
	// uid_t uid = __task_cred(p)->uid;
	// struct siginfo __user *infop;

	// if (!likely(wo->wo_flags & WEXITED))
	// 	return 0;

	// if (unlikely(wo->wo_flags & WNOWAIT)) {
	// 	int exit_code = p->exit_code;
	// 	int why, status;

	// 	get_task_struct(p);
	// 	read_unlock(&tasklist_lock);
	// 	if ((exit_code & 0x7f) == 0) {
	// 		why = CLD_EXITED;
	// 		status = exit_code >> 8;
	// 	} else {
	// 		why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
	// 		status = exit_code & 0x7f;
	// 	}
	// 	return wait_noreap_copyout(wo, p, pid, uid, why, status);
	// }

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	// state = xchg(&p->exit_state, EXIT_DEAD);
	// if (state != EXIT_ZOMBIE) {
	// 	BUG_ON(state != EXIT_DEAD);
	// 	return 0;
	// }

	// traced = ptrace_reparented(p);
	/*
	 * It can be ptraced but not reparented, check
	 * !task_detached() to filter out sub-threads.
	 */
	// if (likely(!traced) && likely(!task_detached(p))) {
	// 	struct signal_struct *psig;
	// 	struct signal_struct *sig;
	// 	unsigned long maxrss;

	// 	/*
	// 	 * The resource counters for the group leader are in its
	// 	 * own task_struct.  Those for dead threads in the group
	// 	 * are in its signal_struct, as are those for the child
	// 	 * processes it has previously reaped.  All these
	// 	 * accumulate in the parent's signal_struct c* fields.
	// 	 *
	// 	 * We don't bother to take a lock here to protect these
	// 	 * p->signal fields, because they are only touched by
	// 	 * __exit_signal, which runs with tasklist_lock
	// 	 * write-locked anyway, and so is excluded here.  We do
	// 	 * need to protect the access to parent->signal fields,
	// 	 * as other threads in the parent group can be right
	// 	 * here reaping other children at the same time.
	// 	 */
	// 	spin_lock_irq(&p->real_parent->sighand->siglock);
	// 	psig = p->real_parent->signal;
	// 	sig = p->signal;
	// 	psig->cutime =
	// 		cputime_add(psig->cutime,
	// 		cputime_add(p->utime,
	// 		cputime_add(sig->utime,
	// 			    sig->cutime)));
	// 	psig->cstime =
	// 		cputime_add(psig->cstime,
	// 		cputime_add(p->stime,
	// 		cputime_add(sig->stime,
	// 			    sig->cstime)));
	// 	psig->cgtime =
	// 		cputime_add(psig->cgtime,
	// 		cputime_add(p->gtime,
	// 		cputime_add(sig->gtime,
	// 			    sig->cgtime)));
	// 	psig->cmin_flt +=
	// 		p->min_flt + sig->min_flt + sig->cmin_flt;
	// 	psig->cmaj_flt +=
	// 		p->maj_flt + sig->maj_flt + sig->cmaj_flt;
	// 	psig->cnvcsw +=
	// 		p->nvcsw + sig->nvcsw + sig->cnvcsw;
	// 	psig->cnivcsw +=
	// 		p->nivcsw + sig->nivcsw + sig->cnivcsw;
	// 	psig->cinblock +=
	// 		task_io_get_inblock(p) +
	// 		sig->inblock + sig->cinblock;
	// 	psig->coublock +=
	// 		task_io_get_oublock(p) +
	// 		sig->oublock + sig->coublock;
	// 	maxrss = max(sig->maxrss, sig->cmaxrss);
	// 	if (psig->cmaxrss < maxrss)
	// 		psig->cmaxrss = maxrss;
	// 	task_io_accounting_add(&psig->ioac, &p->ioac);
	// 	task_io_accounting_add(&psig->ioac, &sig->ioac);
	// 	spin_unlock_irq(&p->real_parent->sighand->siglock);
	// }

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	// read_unlock(&tasklist_lock);

	// retval = wo->wo_rusage
	// 	? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	// status = (p->signal->flags & SIGNAL_GROUP_EXIT)
	// 	? p->signal->group_exit_code : p->exit_code;
	// if (!retval && wo->wo_stat)
	// 	retval = put_user(status, wo->wo_stat);

	// infop = wo->wo_info;
	// if (!retval && infop)
	// 	retval = put_user(SIGCHLD, &infop->si_signo);
	// if (!retval && infop)
	// 	retval = put_user(0, &infop->si_errno);
	// if (!retval && infop) {
	// 	int why;

	// 	if ((status & 0x7f) == 0) {
	// 		why = CLD_EXITED;
	// 		status >>= 8;
	// 	} else {
	// 		why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
	// 		status &= 0x7f;
	// 	}
	// 	retval = put_user((short)why, &infop->si_code);
	// 	if (!retval)
	// 		retval = put_user(status, &infop->si_status);
	// }
	// if (!retval && infop)
	// 	retval = put_user(pid, &infop->si_pid);
	// if (!retval && infop)
	// 	retval = put_user(uid, &infop->si_uid);
	// if (!retval)
	// 	retval = pid;

	// if (traced) {
	// 	write_lock_irq(&tasklist_lock);
	// 	/* We dropped tasklist, ptracer could die and untrace */
	// 	ptrace_unlink(p);
	// 	/*
	// 	 * If this is not a detached task, notify the parent.
	// 	 * If it's still not detached after that, don't release
	// 	 * it now.
	// 	 */
	// 	if (!task_detached(p)) {
	// 		do_notify_parent(p, p->exit_signal);
	// 		if (!task_detached(p)) {
	// 			p->exit_state = EXIT_ZOMBIE;
	// 			p = NULL;
	// 		}
	// 	}
	// 	write_unlock_irq(&tasklist_lock);
	// }
	/* p can be cleared by the (commented) ptrace path above. */
	if (p != NULL)
		release_task(p);

	return 1;
}

/*
 * Consider one child @p for reaping.
 *
 * Returns 0 when @p is not eligible or not reapable yet, and the
 * (nonzero) result of wait_task_zombie() when @p was a zombie and got
 * reaped.  The security, ptrace-hiding, EXIT_DEAD and stopped/continued
 * paths from the original are still commented out, as is the
 * delay_group_leader() check, so any EXIT_ZOMBIE child is reaped
 * immediately.  @ptrace is currently unused.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int ret = eligible_child(wo, p);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	if (!ret)
		return ret;

	// ret = security_task_wait(p);
	// if (unlikely(ret < 0)) {
	// 	/*
	// 	 * If we have not yet seen any eligible child,
	// 	 * then let this error code replace -ECHILD.
	// 	 * A permission error will give the user a clue
	// 	 * to look for security policy problems, rather
	// 	 * than for mysterious wait bugs.
	// 	 */
	// 	if (wo->notask_error)
	// 		wo->notask_error = ret;
	// 	return 0;
	// }

	// if (likely(!ptrace) && unlikely(task_ptrace(p))) {
	// 	/*
	// 	 * This child is hidden by ptrace.
	// 	 * We aren't allowed to see it now, but eventually we will.
	// 	 */
	// 	wo->notask_error = 0;
	// 	return 0;
	// }

	// if (p->exit_state == EXIT_DEAD)
	// 	return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	// if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
	if (p->exit_state == EXIT_ZOMBIE)
		return wait_task_zombie(wo, p);

	// /*
	//  * It's stopped or running now, so it might
	//  * later continue, exit, or stop again.
	//  */
	// wo->notask_error = 0;

	// if (task_stopped_code(p, ptrace))
	// 	return wait_task_stopped(wo, ptrace, p);

	// return wait_task_continued(wo, p);
	return 0;
}

/*
 * Walk @tsk's children list and try to reap each one.
 *
 * Returns the first nonzero result from wait_consider_task() (a child
 * was reaped), or 0 when nothing was reapable this pass.
 *
 * The extra indentation of the loop body is left over from the
 * commented-out task_detached() guard around it.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	list_for_each_entry(p, &tsk->children, sibling) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		/*
		 * Do not consider detached threads.
		 */
		// if (!task_detached(p)) {
			int ret = wait_consider_task(wo, 0, p);
			if (ret)
				return ret;
		// }
	}

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return 0;
}

/*
 * Wait-queue wake callback installed by do_wait() on the parent's
 * wait_chldexit queue.
 *
 * The original filtered wake-ups by pid eligibility and __WNOTHREAD
 * (still commented out); currently every wake-up falls through to
 * default_wake_function(), so the sleeping waiter is always woken.
 * @key is the exiting child task passed by __wake_up_parent().
 */
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// struct wait_opts *wo = container_of(wait, struct wait_opts,
	// 					child_wait);
	// struct task_struct *p = key;

	// if (!eligible_pid(wo, p))
	// 	return 0;

	// if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
	// 	return 0;

	return default_wake_function(wait, mode, sync, key);
}

/*
 * Core of the wait() family: block until an eligible child is reaped.
 *
 * Registers a callback wait-queue entry on current->signal->wait_chldexit
 * (so an exiting child's do_notify_parent()/__wake_up_parent() wakes us),
 * then loops: mark ourselves TASK_INTERRUPTIBLE *before* scanning so a
 * wake-up between the scan and schedule() is not lost, scan our children,
 * and if nothing was reapable sleep in schedule() and rescan on wake-up.
 *
 * Returns the nonzero value produced by wait_task_zombie() via
 * do_wait_thread().  NOTE(review): with the WNOHANG/signal_pending
 * checks commented out, this loops unconditionally until a child is
 * reaped — confirm that is intended for now.
 */
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	// if ((wo->wo_type < PIDTYPE_MAX) &&
	//    (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
	// 	goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// read_lock(&tasklist_lock);
	tsk = current;
	// do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		// retval = ptrace_do_wait(wo, tsk);
		// if (retval)
		// 	goto end;

		// if (wo->wo_flags & __WNOTHREAD)
		// 	break;
	// } while_each_thread(current, tsk);
	// read_unlock(&tasklist_lock);

/* Currently reached only by falling through (the goto above is
 * commented out): nothing reapable yet, so sleep and retry. */
notask:
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// retval = wo->notask_error;
	// if (!retval && !(wo->wo_flags & WNOHANG)) {
		// retval = -ERESTARTSYS;
		// if (!signal_pending(current)) {
			schedule();
			goto repeat;
		// }
	// }
end:
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

long sys_wait(int upid, int *stat_addr, int options, struct rusage *ru)
{
	struct wait_opts wo;
	// struct pid *pid = NULL;
	int pid = upid;
	// enum pid_type type;
	long ret;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
	// 		__WNOTHREAD|__WCLONE|__WALL))
	// 	return -EINVAL;

	// if (upid == -1)
	// 	type = PIDTYPE_MAX;
	// else if (upid < 0) {
	// 	type = PIDTYPE_PGID;
	// 	pid = find_get_pid(-upid);
	// } else if (upid == 0) {
	// 	type = PIDTYPE_PGID;
	// 	pid = get_task_pid(current, PIDTYPE_PGID);
	// } else /* upid > 0 */ {
	// 	type = PIDTYPE_PID;
	// 	pid = find_get_pid(upid);
	// }

	// wo.wo_type	= type;
	// wo.wo_pid	= pid;
	wo.wo_pid	= &pid;

	// wo.wo_flags	= options | WEXITED;
	// wo.wo_info	= NULL;
	// wo.wo_stat	= stat_addr;
	// wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	// put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	// asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}

/*
 * Minimal kill() system call.
 *
 * NOTE(review): the signal number is hardcoded to SIGINT — the
 * signature carries no sig parameter, so callers cannot choose the
 * signal.  Extend the interface when other signals are needed.
 */
void sys_kill(int pid)
{
	send_signal(pid, SIGINT);
}

/*
 * Send signal @sig to every task in process group @pgrp.
 *
 * Not yet implemented: the group walk is still commented out, so this
 * is a stub that always reports success (0).  The unused local it used
 * to carry (struct task_struct *tsk — an unused-variable warning) has
 * been folded into the commented-out code; restore it when the loop is
 * re-enabled.
 */
int kill_pg(int pgrp, int sig)
{
	// struct task_struct *tsk;

	// // disable_irq();
	// list_for_each_entry(tsk, &task_head, tasks) {
	// 	if (tsk->pgrp == pgrp) {
    //         tsk->signal.action[sig].sa_flags = 1;
	// 	}
    // }
	// // enable_irq();

	return 0;
}
