/*
 * procccs - stub for process checkpoint support.
 *
 * The original checkpoint/restart implementation is retained below in
 * commented-out form; this stub simply reports success.
 *
 * Returns: always 0.
 *
 * Note: declared with (void) rather than () — in C, an empty parameter
 * list means "unspecified arguments", not "no arguments".
 */
int procccs(void)
{
	return 0;
}
///*
// *  Checkpoint task structure
// *
// *  Copyright (C) 2008-2009 Oren Laadan
// *
// *  This file is subject to the terms and conditions of the GNU General Public
// *  License.  See the file COPYING in the main directory of the Linux
// *  distribution for more details.
// */
//
///* default debug level for output */
//#define CKPT_DFLAG  CKPT_DSYS
//
//#include <linux/sched.h>
//#include <linux/nsproxy.h>
//#include <linux/posix-timers.h>
//#include <linux/futex.h>
//#include <linux/compat.h>
//#include <linux/poll.h>
//#include <linux/utsname.h>
//#include <linux/syscalls.h>
//#include <linux/pid_namespace.h>
//#include <linux/user_namespace.h>
//#include <linux/syscalls.h>
//#include <linux/security.h> /* security_task_setpgid()   */
//#include <linux/checkpoint.h>
//
//#ifdef CONFIG_FUTEX
//static void save_task_robust_futex_list(struct ckpt_hdr_task *h,
//					struct task_struct *t)
//{
//	/*
//	 * These are __user pointers and thus can be saved without
//	 * the objhash.
//	 */
//	h->robust_futex_list = (unsigned long)t->robust_list;
//	h->robust_futex_head_len = sizeof(*t->robust_list);
//#ifdef CONFIG_COMPAT
//	h->compat_robust_futex_list = ptr_to_compat(t->compat_robust_list);
//	h->compat_robust_futex_head_len = sizeof(*t->compat_robust_list);
//#endif
//}
//
//static void restore_task_robust_futex_list(struct ckpt_hdr_task *h)
//{
//	/* Since we restore the memory map the address remains the same and
//	 * this is safe. This is the same as [compat_]sys_set_robust_list() */
//	if (h->robust_futex_list) {
//		struct robust_list_head __user *rfl;
//		rfl = (void __user *)(unsigned long) h->robust_futex_list;
//		do_set_robust_list(rfl, h->robust_futex_head_len);
//	}
//#ifdef CONFIG_COMPAT
//	if (h->compat_robust_futex_list) {
//		struct compat_robust_list_head __user *crfl;
//		crfl = compat_ptr(h->compat_robust_futex_list);
//		do_compat_set_robust_list(crfl, h->compat_robust_futex_head_len);
//	}
//#endif
//}
//#else /* !CONFIG_FUTEX */
//static inline void save_task_robust_futex_list(struct ckpt_hdr_task *h,
//					       struct task_struct *t)
//{
//}
//
//static inline void restore_task_robust_futex_list(struct ckpt_hdr_task *h)
//{
//}
//#endif /* CONFIG_FUTEX */
//
//
///***********************************************************************
// * Checkpoint
// */

/* dump the task_struct of a given task */
//static int checkpoint_task_struct(struct ckpt_ctx *ctx, struct proc_struct *t)
//{
//	struct ckpt_hdr_task *h;
//	int ret;
//
//	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_TASK);
//	if (!h)
//		return -ENOMEM;
//
//	if (t->flags & CKPT_PF_FORKNOEXEC)
//		h->flags |= CKPT_PF_FORKNOEXEC;
//	if (t->flags & CKPT_PF_SUPERPRIV)
//		h->flags |= CKPT_PF_SUPERPRIV;
//
//	h->state = t->state;
//	h->exit_state = t->exit_state;
//	h->exit_code = t->exit_code;
//
//	h->sas_ss_sp = t->sas_ss_sp;
//	h->sas_ss_size = t->sas_ss_size;
//
//	if (t->exit_state) {
//		/* zombie - skip remaining state */
//		BUG_ON(t->exit_state != EXIT_ZOMBIE);
//	} else {
//		/* FIXME: save remaining relevant task_struct fields */
//		h->exit_signal = t->exit_signal;
//		h->pdeath_signal = t->pdeath_signal;
//
//		h->set_child_tid = (unsigned long) t->set_child_tid;
//		h->clear_child_tid = (unsigned long) t->clear_child_tid;
//		save_task_robust_futex_list(h, t);
//	}
//
//	ret = ckpt_write_obj(ctx, &h->h);
//	ckpt_hdr_put(ctx, h);
//	if (ret < 0)
//		return ret;
//
//	return ckpt_write_string(ctx, t->comm, TASK_COMM_LEN);
//}

//static int checkpoint_task_ns(struct ckpt_ctx *ctx, struct task_struct *t)
//{
//	struct ckpt_hdr_task_ns *h;
//	struct nsproxy *nsproxy;
//	int ns_objref;
//	int ret;
//
//	rcu_read_lock();
//	nsproxy = task_nsproxy(t);
//	get_nsproxy(nsproxy);
//	rcu_read_unlock();
//
//	ns_objref = checkpoint_obj(ctx, nsproxy, CKPT_OBJ_NS);
//	put_nsproxy(nsproxy);
//
//	ckpt_debug("nsproxy: objref %d\n", ns_objref);
//	if (ns_objref < 0)
//		return ns_objref;
//
//	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_TASK_NS);
//	if (!h)
//		return -ENOMEM;
//	h->ns_objref = ns_objref;
//	ret = ckpt_write_obj(ctx, &h->h);
//	ckpt_hdr_put(ctx, h);
//
//	return ret;
//}
//
//static int checkpoint_task_creds(struct ckpt_ctx *ctx, struct task_struct *t)
//{
//	int realcred_ref, ecred_ref;
//	struct cred *rcred, *ecred;
//	struct ckpt_hdr_task_creds *h;
//	int ret;
//
//	rcred = (struct cred *) get_cred(t->real_cred);
//	ecred = (struct cred *) get_cred(t->cred);
//
//	realcred_ref = checkpoint_obj(ctx, rcred, CKPT_OBJ_CRED);
//	if (realcred_ref < 0) {
//		ret = realcred_ref;
//		goto error;
//	}
//
//	ecred_ref = checkpoint_obj(ctx, ecred, CKPT_OBJ_CRED);
//	if (ecred_ref < 0) {
//		ret = ecred_ref;
//		goto error;
//	}
//
//	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_TASK_CREDS);
//	if (!h) {
//		ret = -ENOMEM;
//		goto error;
//	}
//
//	h->cred_ref = realcred_ref;
//	h->ecred_ref = ecred_ref;
//	ret = ckpt_write_obj(ctx, (struct ckpt_hdr *) h);
//	ckpt_hdr_put(ctx, h);
//
//error:
//	put_cred(rcred);
//	put_cred(ecred);
//	return ret;
//}
//
//static int checkpoint_task_objs(struct ckpt_ctx *ctx, struct task_struct *t)
//{
//	struct ckpt_hdr_task_objs *h;
//	int files_objref;
//	int mm_objref;
//	int fs_objref;
//	int sem_undo_objref;
//	int sighand_objref;
//	int signal_objref;
//	int first, ret;
//
//	/*
//	 * Shared objects may have dependencies among them: task->mm
//	 * depends on task->nsproxy (by ipc_ns). Therefore first save
//	 * the namespaces, and then the remaining shared objects.
//	 * During restart a task will already have its namespaces
//	 * restored when it gets to restore, e.g. its memory.
//	 */
//
//	ret = checkpoint_task_creds(ctx, t);
//	ckpt_debug("cred: objref %d\n", ret);
//	if (ret < 0) {
//		ckpt_err(ctx, ret, "%(T)process credentials\n");
//		return ret;
//	}
//
//	ret = checkpoint_task_ns(ctx, t);
//	ckpt_debug("ns: objref %d\n", ret);
//	if (ret < 0) {
//		ckpt_err(ctx, ret, "%(T)process namespaces\n");
//		return ret;
//	}
//
//	files_objref = checkpoint_obj_file_table(ctx, t);
//	ckpt_debug("files: objref %d\n", files_objref);
//	if (files_objref < 0) {
//		ckpt_err(ctx, files_objref, "%(T)files_struct\n");
//		return files_objref;
//	}
//
//	mm_objref = checkpoint_obj_mm(ctx, t);
//	ckpt_debug("mm: objref %d\n", mm_objref);
//	if (mm_objref < 0) {
//		ckpt_err(ctx, mm_objref, "%(T)mm_struct\n");
//		return mm_objref;
//	}
//
//	/* note: this must come *after* file-table and mm */
//	fs_objref = checkpoint_obj_fs(ctx, t);
//	if (fs_objref < 0) {
//		ckpt_err(ctx, fs_objref, "%(T)process fs\n");
//		return fs_objref;
//	}
//
//	sem_undo_objref = checkpoint_obj_sem_undo(ctx, t);
//	if (sem_undo_objref < 0) {
//		ckpt_err(ctx, sem_undo_objref, "%(T)process sem_undo\n");
//		return sem_undo_objref;
//	}
//
//	sighand_objref = checkpoint_obj_sighand(ctx, t);
//	ckpt_debug("sighand: objref %d\n", sighand_objref);
//	if (sighand_objref < 0) {
//		ckpt_err(ctx, sighand_objref, "%(T)sighand_struct\n");
//		return sighand_objref;
//	}
//
//	/*
//	 * Handle t->signal differently because the checkpoint method
//	 * for t->signal needs access to owning task_struct to access
//	 * t->sighand (to lock/unlock). First explicitly determine if
//	 * need to save, and only below invoke checkpoint_obj_signal()
//	 * if needed.
//	 */
//	signal_objref = ckpt_obj_lookup_add(ctx, t->signal,
//					    CKPT_OBJ_SIGNAL, &first);
//	ckpt_debug("signal: objref %d\n", signal_objref);
//	if (signal_objref < 0) {
//		ckpt_err(ctx, signal_objref, "%(T)process signals\n");
//		return signal_objref;
//	}
//
//	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_TASK_OBJS);
//	if (!h)
//		return -ENOMEM;
//	h->files_objref = files_objref;
//	h->mm_objref = mm_objref;
//	h->fs_objref = fs_objref;
//	h->sem_undo_objref = sem_undo_objref;
//	h->sighand_objref = sighand_objref;
//	h->signal_objref = signal_objref;
//
//	ret = ckpt_write_obj(ctx, &h->h);
//	ckpt_hdr_put(ctx, h);
//	if (ret < 0)
//		return ret;
//
//	/* actually save t->signal, if need to */
//	if (first)
//		ret = checkpoint_obj_signal(ctx, t);
//	if (ret < 0)
//		ckpt_err(ctx, ret, "%(T)signal_struct\n");
//
//	return ret;
//}
//
///* dump the task_struct of a given task */
//int checkpoint_restart_block(struct ckpt_ctx *ctx, struct task_struct *t)
//{
//	struct ckpt_hdr_restart_block *h;
//	struct restart_block *restart_block;
//	long (*fn)(struct restart_block *);
//	s64 base, expire = 0;
//	int ret;
//
//	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_RESTART_BLOCK);
//	if (!h)
//		return -ENOMEM;
//
//	base = ktime_to_ns(ctx->ktime_begin);
//	restart_block = &task_thread_info(t)->restart_block;
//	fn = restart_block->fn;
//
//	/* FIX: enumerate clockid_t so we're immune to changes */
//
//	if (fn == do_no_restart_syscall) {
//
//		h->function_type = CKPT_RESTART_BLOCK_NONE;
//		ckpt_debug("restart_block: none\n");
//
//	} else if (fn == hrtimer_nanosleep_restart) {
//
//		h->function_type = CKPT_RESTART_BLOCK_HRTIMER_NANOSLEEP;
//		h->arg_0 = restart_block->nanosleep.index;
//		h->arg_1 = (unsigned long) restart_block->nanosleep.rmtp;
//		expire = restart_block->nanosleep.expires;
//		ckpt_debug("restart_block: hrtimer expire %lld now %lld\n",
//			 expire, base);
//
//	} else if (fn == posix_cpu_nsleep_restart) {
//		struct timespec ts;
//
//		h->function_type = CKPT_RESTART_BLOCK_POSIX_CPU_NANOSLEEP;
//		h->arg_0 = restart_block->arg0;
//		h->arg_1 = restart_block->arg1;
//		ts.tv_sec = restart_block->arg2;
//		ts.tv_nsec = restart_block->arg3;
//		expire = timespec_to_ns(&ts);
//		ckpt_debug("restart_block: posix_cpu expire %lld now %lld\n",
//			 expire, base);
//
//#ifdef CONFIG_COMPAT
//	} else if (fn == compat_nanosleep_restart) {
//
//		h->function_type = CKPT_RESTART_BLOCK_COMPAT_NANOSLEEP;
//		h->arg_0 = restart_block->nanosleep.index;
//		h->arg_1 = (unsigned long)restart_block->nanosleep.rmtp;
//		h->arg_2 = (unsigned long)restart_block->nanosleep.compat_rmtp;
//		expire = restart_block->nanosleep.expires;
//		ckpt_debug("restart_block: compat expire %lld now %lld\n",
//			 expire, base);
//
//	} else if (fn == compat_clock_nanosleep_restart) {
//
//		h->function_type = CKPT_RESTART_BLOCK_COMPAT_CLOCK_NANOSLEEP;
//		h->arg_0 = restart_block->nanosleep.index;
//		h->arg_1 = (unsigned long)restart_block->nanosleep.rmtp;
//		h->arg_2 = (unsigned long)restart_block->nanosleep.compat_rmtp;
//		expire = restart_block->nanosleep.expires;
//		ckpt_debug("restart_block: compat_clock expire %lld now %lld\n",
//			 expire, base);
//
//#endif
//	} else if (fn == futex_wait_restart) {
//
//		h->function_type = CKPT_RESTART_BLOCK_FUTEX;
//		h->arg_0 = (unsigned long) restart_block->futex.uaddr;
//		h->arg_1 = restart_block->futex.val;
//		h->arg_2 = restart_block->futex.flags;
//		h->arg_3 = restart_block->futex.bitset;
//		expire = restart_block->futex.time;
//		ckpt_debug("restart_block: futex expire %lld now %lld\n",
//			 expire, base);
//
//	} else if (fn == do_restart_poll) {
//		struct timespec ts;
//
//		h->function_type = CKPT_RESTART_BLOCK_POLL;
//		h->arg_0 = (unsigned long) restart_block->poll.ufds;
//		h->arg_1 = restart_block->poll.nfds;
//		h->arg_2 = restart_block->poll.has_timeout;
//		ts.tv_sec = restart_block->poll.tv_sec;
//		ts.tv_nsec = restart_block->poll.tv_nsec;
//		expire = timespec_to_ns(&ts);
//		ckpt_debug("restart_block: poll expire %lld now %lld\n",
//			 expire, base);
//
//	} else {
//
//		BUG();
//
//	}
//
//	/* common to all restart blocks: */
//	h->arg_4 = (base < expire ? expire - base : 0);
//
//	ckpt_debug("restart_block: args %#llx %#llx %#llx %#llx %#llx\n",
//		 h->arg_0, h->arg_1, h->arg_2, h->arg_3, h->arg_4);
//
//	ret = ckpt_write_obj(ctx, &h->h);
//	ckpt_hdr_put(ctx, h);
//
//	ckpt_debug("restart_block ret %d\n", ret);
//	return ret;
//}
//
///* dump the entire state of a given task */
//int checkpoint_task(struct ckpt_ctx *ctx, struct task_struct *t)
//{
//	int ret;
//
//	ctx->tsk = t;
//
//	ret = checkpoint_task_struct(ctx, t);
//	ckpt_debug("task %d\n", ret);
//	if (ret < 0)
//		goto out;
//
//	/* zombie - we're done here */
//	if (t->exit_state)
//		return 0;
//
//	ret = checkpoint_thread(ctx, t);
//	ckpt_debug("thread %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = checkpoint_restart_block(ctx, t);
//	ckpt_debug("restart-blocks %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = checkpoint_cpu(ctx, t);
//	ckpt_debug("cpu %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = checkpoint_task_objs(ctx, t);
//	ckpt_debug("objs %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = checkpoint_task_signal(ctx, t);
//	ckpt_debug("task-signal %d\n", ret);
// out:
//	ctx->tsk = NULL;
//	return ret;
//}

//int ckpt_collect_task(struct ckpt_ctx *ctx, struct task_struct *t)
//{
//	int ret;
//
//	// U11 : no need for Ucore
//	//ret = ckpt_collect_ns(ctx, t);
//	//if (ret < 0)
//	//	return ret;
//	ret = ckpt_collect_file_table(ctx, t);
//	if (ret < 0)
//		return ret;
//	ret = ckpt_collect_mm(ctx, t);
//	if (ret < 0)
//		return ret;
//	ret = ckpt_collect_fs(ctx, t);
//	if (ret < 0)
//		return ret;
//	ret = ckpt_collect_sighand(ctx, t);
//
//	return ret;
//}

///***********************************************************************
// * Restart
// */
//
//static inline int valid_exit_code(int exit_code)
//{
//	if (exit_code >= 0x10000)
//		return 0;
//	if (exit_code & 0xff) {
//		if (exit_code & ~0xff)
//			return 0;
//		if (!valid_signal(exit_code & 0xff))
//			return 0;
//	}
//	return 1;
//}
//
///* read the task_struct into the current task */
//static int restore_task_struct(struct ckpt_ctx *ctx)
//{
//	struct ckpt_hdr_task *h;
//	struct task_struct *t = current;
//	int ret;
//
//	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_TASK);
//	if (IS_ERR(h))
//		return PTR_ERR(h);
//
//	ret = -EINVAL;
//	if (h->state == TASK_DEAD) {
//		if (h->exit_state != EXIT_ZOMBIE)
//			goto out;
//		if (!valid_exit_code(h->exit_code))
//			goto out;
//		t->exit_code = h->exit_code;
//	} else {
//		if (h->exit_code)
//			goto out;
//		if ((thread_group_leader(t) && !valid_signal(h->exit_signal)) ||
//		    (!thread_group_leader(t) && h->exit_signal != -1))
//			goto out;
//		if (!valid_signal(h->pdeath_signal))
//			goto out;
//
//		if (h->flags & ~CKPT_PF_VALID)
//			goto out;
//		if (h->flags & CKPT_PF_FORKNOEXEC)
//			t->flags |= PF_FORKNOEXEC;
//		if (h->flags & CKPT_PF_SUPERPRIV)
//			t->flags |= PF_SUPERPRIV;
//
//		t->exit_signal = h->exit_signal;
//		t->pdeath_signal = h->pdeath_signal;
//
//		t->sas_ss_sp = (unsigned long) h->sas_ss_sp;
//		t->sas_ss_size = h->sas_ss_size;
//
//		t->set_child_tid =
//			(int __user *) (unsigned long) h->set_child_tid;
//		t->clear_child_tid =
//			(int __user *) (unsigned long) h->clear_child_tid;
//
//		/* FIXME: restore remaining relevant task_struct fields */
//
//		restore_task_robust_futex_list(h);
//	}
//
//	memset(t->comm, 0, TASK_COMM_LEN);
//	ret = _ckpt_read_string(ctx, t->comm, TASK_COMM_LEN);
//	if (ret < 0)
//		goto out;
//
//	/* return 1 for zombie, 0 otherwise */
//	ret = (h->state == TASK_DEAD ? 1 : 0);
// out:
//	ckpt_hdr_put(ctx, h);
//	return ret;
//}
//
///*
// * restart is currently serialized, but if/when that changes we want
// * to make sure that setting nsproxy->pidns in restore_task_ns() is only
// * done once.  That's what checkpoint_nslock is for
// */
//DEFINE_SPINLOCK(checkpoint_nslock);
//
//static int restore_task_ns(struct ckpt_ctx *ctx)
//{
//	struct ckpt_hdr_task_ns *h;
//	struct nsproxy *nsproxy;
//	int ret = 0;
//
//	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_TASK_NS);
//	if (IS_ERR(h))
//		return PTR_ERR(h);
//
//	nsproxy = ckpt_obj_fetch(ctx, h->ns_objref, CKPT_OBJ_NS);
//	if (IS_ERR(nsproxy)) {
//		ret = PTR_ERR(nsproxy);
//		goto out;
//	}
//
//	if (nsproxy != current->nsproxy) {
//		spin_lock(&checkpoint_nslock);
//		if (!nsproxy->pid_ns)
//			nsproxy->pid_ns = get_pid_ns(current->nsproxy->pid_ns);
//		spin_unlock(&checkpoint_nslock);
//		get_nsproxy(nsproxy);
//		switch_task_namespaces(current, nsproxy);
//	}
// out:
//	ckpt_debug("nsproxy: ret %d (%p)\n", ret, current->nsproxy);
//	ckpt_hdr_put(ctx, h);
//	return ret;
//}
//
//static int restore_task_creds(struct ckpt_ctx *ctx)
//{
//	struct ckpt_hdr_task_creds *h;
//	struct cred *realcred, *ecred;
//	int ret = 0;
//
//	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_TASK_CREDS);
//	if (IS_ERR(h))
//		return PTR_ERR(h);
//
//	realcred = ckpt_obj_fetch(ctx, h->cred_ref, CKPT_OBJ_CRED);
//	if (IS_ERR(realcred)) {
//		ckpt_debug("Error %ld fetching realcred (ref %d)\n",
//			PTR_ERR(realcred), h->cred_ref);
//		ret = PTR_ERR(realcred);
//		goto out;
//	}
//	ecred = ckpt_obj_fetch(ctx, h->ecred_ref, CKPT_OBJ_CRED);
//	if (IS_ERR(ecred)) {
//		ckpt_debug("Error %ld fetching ecred (ref %d)\n",
//			PTR_ERR(ecred), h->ecred_ref);
//		ret = PTR_ERR(ecred);
//		goto out;
//	}
//	ctx->realcred = realcred;
//	ctx->ecred = ecred;
//
//out:
//	ckpt_debug("Returning %d\n", ret);
//	ckpt_hdr_put(ctx, h);
//	return ret;
//}
//
//static int restore_task_objs(struct ckpt_ctx *ctx)
//{
//	struct ckpt_hdr_task_objs *h;
//	int ret;
//
//	/*
//	 * Namespaces come first, because ->mm depends on ->nsproxy,
//	 * and because shared objects are restored before they are
//	 * referenced. See comment in checkpoint_task_objs.
//	 */
//	ret = restore_task_creds(ctx);
//	if (ret < 0) {
//		ckpt_debug("restore_task_creds returned %d\n", ret);
//		return ret;
//	}
//	ret = restore_task_ns(ctx);
//	if (ret < 0) {
//		ckpt_debug("restore_task_ns returned %d\n", ret);
//		return ret;
//	}
//
//	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_TASK_OBJS);
//	if (IS_ERR(h)) {
//		ckpt_debug("Error fetching task obj\n");
//		return PTR_ERR(h);
//	}
//
//	ret = restore_obj_file_table(ctx, h->files_objref);
//	ckpt_debug("file_table: ret %d (%p)\n", ret, current->files);
//	if (ret < 0)
//		goto out;
//
//	ret = restore_obj_mm(ctx, h->mm_objref);
//	ckpt_debug("mm: ret %d (%p)\n", ret, current->mm);
//	if (ret < 0)
//		goto out;
//
//	ret = restore_obj_fs(ctx, h->fs_objref);
//	ckpt_debug("fs: ret %d (%p)\n", ret, current->fs);
//	if (ret < 0)
//		return ret;
//
//	ret = restore_obj_sem_undo(ctx, h->sem_undo_objref);
//	ckpt_debug("sem_undo: ret %d\n", ret);
//	if (ret < 0)
//		return ret;
//
//	ret = restore_obj_sighand(ctx, h->sighand_objref);
//	ckpt_debug("sighand: ret %d (%p)\n", ret, current->sighand);
//	if (ret < 0)
//		goto out;
//
//	ret = restore_obj_signal(ctx, h->signal_objref);
//	ckpt_debug("signal: ret %d (%p)\n", ret, current->signal);
// out:
//	ckpt_hdr_put(ctx, h);
//	return ret;
//}
//
//static int restore_creds(struct ckpt_ctx *ctx)
//{
//	int ret;
//	const struct cred *old;
//	struct cred *rcred, *ecred;
//
//	rcred = ctx->realcred;
//	ecred = ctx->ecred;
//
//	/* commit_creds will take one ref for the eff creds, but
//	 * expects us to hold a ref for the obj creds, so take a
//	 * ref here */
//	get_cred(rcred);
//	ret = commit_creds(rcred);
//	if (ret)
//		return ret;
//
//	if (ecred == rcred)
//		return 0;
//
//	old = override_creds(ecred); /* override_creds otoh takes new ref */
//	put_cred(old);
//
//	ctx->realcred = ctx->ecred = NULL;
//	return 0;
//}
//
//int restore_restart_block(struct ckpt_ctx *ctx)
//{
//	struct ckpt_hdr_restart_block *h;
//	struct restart_block restart_block;
//	struct timespec ts;
//	clockid_t clockid;
//	s64 expire;
//	int ret = 0;
//
//	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_RESTART_BLOCK);
//	if (IS_ERR(h))
//		return PTR_ERR(h);
//
//	expire = ktime_to_ns(ctx->ktime_begin) + h->arg_4;
//	restart_block.fn = NULL;
//
//	ckpt_debug("restart_block: expire %lld begin %lld\n",
//		 expire, ktime_to_ns(ctx->ktime_begin));
//	ckpt_debug("restart_block: args %#llx %#llx %#llx %#llx %#llx\n",
//		 h->arg_0, h->arg_1, h->arg_2, h->arg_3, h->arg_4);
//
//	switch (h->function_type) {
//	case CKPT_RESTART_BLOCK_NONE:
//		restart_block.fn = do_no_restart_syscall;
//		break;
//	case CKPT_RESTART_BLOCK_HRTIMER_NANOSLEEP:
//		clockid = h->arg_0;
//		if (clockid < 0 || invalid_clockid(clockid))
//			break;
//		restart_block.fn = hrtimer_nanosleep_restart;
//		restart_block.nanosleep.index = clockid;
//		restart_block.nanosleep.rmtp =
//			(struct timespec __user *) (unsigned long) h->arg_1;
//		restart_block.nanosleep.expires = expire;
//		break;
//	case CKPT_RESTART_BLOCK_POSIX_CPU_NANOSLEEP:
//		clockid = h->arg_0;
//		if (clockid < 0 || invalid_clockid(clockid))
//			break;
//		restart_block.fn = posix_cpu_nsleep_restart;
//		restart_block.arg0 = clockid;
//		restart_block.arg1 = h->arg_1;
//		ts = ns_to_timespec(expire);
//		restart_block.arg2 = ts.tv_sec;
//		restart_block.arg3 = ts.tv_nsec;
//		break;
//#ifdef CONFIG_COMPAT
//	case CKPT_RESTART_BLOCK_COMPAT_NANOSLEEP:
//		clockid = h->arg_0;
//		if (clockid < 0 || invalid_clockid(clockid))
//			break;
//		restart_block.fn = compat_nanosleep_restart;
//		restart_block.nanosleep.index = clockid;
//		restart_block.nanosleep.rmtp =
//			(struct timespec __user *) (unsigned long) h->arg_1;
//		restart_block.nanosleep.compat_rmtp =
//			(struct compat_timespec __user *)
//				(unsigned long) h->arg_2;
//		restart_block.nanosleep.expires = expire;
//		break;
//	case CKPT_RESTART_BLOCK_COMPAT_CLOCK_NANOSLEEP:
//		clockid = h->arg_0;
//		if (clockid < 0 || invalid_clockid(clockid))
//			break;
//		restart_block.fn = compat_clock_nanosleep_restart;
//		restart_block.nanosleep.index = clockid;
//		restart_block.nanosleep.rmtp =
//			(struct timespec __user *) (unsigned long) h->arg_1;
//		restart_block.nanosleep.compat_rmtp =
//			(struct compat_timespec __user *)
//				(unsigned long) h->arg_2;
//		restart_block.nanosleep.expires = expire;
//		break;
//#endif
//	case CKPT_RESTART_BLOCK_FUTEX:
//		restart_block.fn = futex_wait_restart;
//		restart_block.futex.uaddr = (u32 *) (unsigned long) h->arg_0;
//		restart_block.futex.val = h->arg_1;
//		restart_block.futex.flags = h->arg_2;
//		restart_block.futex.bitset = h->arg_3;
//		restart_block.futex.time = expire;
//		break;
//	case CKPT_RESTART_BLOCK_POLL:
//		restart_block.fn = do_restart_poll;
//		restart_block.poll.ufds =
//			(struct pollfd __user *) (unsigned long) h->arg_0;
//		restart_block.poll.nfds = h->arg_1;
//		restart_block.poll.has_timeout = h->arg_2;
//		ts = ns_to_timespec(expire);
//		restart_block.poll.tv_sec = ts.tv_sec;
//		restart_block.poll.tv_nsec = ts.tv_nsec;
//		break;
//	default:
//		break;
//	}
//
//	if (restart_block.fn)
//		task_thread_info(current)->restart_block = restart_block;
//	else
//		ret = -EINVAL;
//
//	ckpt_hdr_put(ctx, h);
//	return ret;
//}
//
///* prepare the task for restore */
//int pre_restore_task(void)
//{
//	sigset_t sigset;
//
//	/*
//	 * Block task's signals to avoid interruptions due to signals,
//	 * say, from restored timers, file descriptors etc. Signals
//	 * will be unblocked when restore completes.
//	 *
//	 * NOTE: tasks with file descriptors set to send a SIGKILL as
//	 * i/o notification may fail the restart if a signal occurs
//	 * before that task completed its restore. FIX ?
//	 */
//	current->saved_sigmask = current->blocked;
//
//	sigfillset(&sigset);
//	sigdelset(&sigset, SIGKILL);
//	sigdelset(&sigset, SIGSTOP);
//	sigprocmask(SIG_SETMASK, &sigset, NULL);
//
//	return 0;
//}
//
///* finish up task restore */
//void post_restore_task(void)
//{
//	/* only now is it safe to unblock the restored task's signals */
//	sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
//}
//
///* read the entire state of the current task */
//int restore_task(struct ckpt_ctx *ctx)
//{
//	int ret;
//
//	ret = restore_task_struct(ctx);
//	ckpt_debug("task %d\n", ret);
//	if (ret < 0)
//		goto out;
//
//	/* zombie - we're done here */
//	if (ret)
//		goto out;
//
//	ret = restore_thread(ctx);
//	ckpt_debug("thread %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = restore_restart_block(ctx);
//	ckpt_debug("restart-blocks %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = restore_cpu(ctx);
//	ckpt_debug("cpu %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = restore_task_objs(ctx);
//	ckpt_debug("objs %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = restore_creds(ctx);
//	ckpt_debug("creds: ret %d\n", ret);
//	if (ret < 0)
//		goto out;
//	ret = restore_task_signal(ctx);
//	ckpt_debug("signal: ret %d\n", ret);
// out:
//	return ret;
//}
