/*
 *  Checkpoint logic and helpers
 *
 *  Copyright (C) 2009 Oren Laadan
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

/* default debug level for output */
#define CKPT_DFLAG  CKPT_DIPC

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/sched.h>
#include <linux/ipc_namespace.h>
#include <linux/checkpoint.h>

#include "util.h"

/* for ckpt_debug */
#ifdef CONFIG_CHECKPOINT_DEBUG
static char *ipc_ind_to_str[] = { "sem", "msg", "shm" };
#endif

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

/**************************************************************************
 * Checkpoint
 */

/*
 * Requires that ids->rw_mutex be held; this is sufficient because:
 *
 * (a) The data accessed either may not change at all (e.g. id, key,
 * seq), or may only change by ipc_update_perm() (e.g. uid, cuid, gid,
 * cgid, mode), which is only called with the mutex write-held.
 *
 * (b) The function ipcperms() relies solely on the latter (uid, cuid,
 * gid, cgid, mode)
 *
 * (c) The security context perm->security also may only change when the
 * mutex is taken.
 */
/*
 * checkpoint_fill_ipc_perms - record a kern_ipc_perm into an image header
 * @ctx:  checkpoint context
 * @h:    image header to fill
 * @perm: kernel ipc permission structure to record
 *
 * Copies the identity and ownership fields of @perm into @h, masking the
 * mode down to the permission bits, and checkpoints the attached security
 * context (recording its objref in h->sec_ref).
 *
 * Caller must hold the corresponding ids->rw_mutex (see comment above).
 * Returns 0 on success, -EACCES if the caller lacks read permission on
 * the object, or a negative error from the security hook.
 */
int checkpoint_fill_ipc_perms(struct ckpt_ctx *ctx,
			      struct ckpt_hdr_ipc_perms *h,
			      struct kern_ipc_perm *perm)
{
	/* refuse to checkpoint an object the caller cannot even read */
	if (ipcperms(perm, S_IROTH))
		return -EACCES;

	h->id = perm->id;
	h->key = perm->key;
	h->uid = perm->uid;
	h->gid = perm->gid;
	h->cuid = perm->cuid;
	h->cgid = perm->cgid;
	/* only the rwx permission bits are meaningful in the image */
	h->mode = perm->mode & S_IRWXUGO;
	h->seq = perm->seq;

	h->sec_ref = security_checkpoint_obj(ctx, perm->security,
					     CKPT_SECURITY_IPC);
	if (h->sec_ref < 0) {
		ckpt_err(ctx, h->sec_ref, "%(T)ipc_perm->security\n");
		return h->sec_ref;
	}

	return 0;
}

/*
 * checkpoint_ipc_any - checkpoint one class of ipc objects (sem, msg or shm)
 * @ctx:      checkpoint context
 * @ipc_ns:   ipc namespace being checkpointed
 * @ipc_ind:  index into ipc_ns->ids[] (IPC_SEM_IDS/IPC_MSG_IDS/IPC_SHM_IDS)
 * @ipc_type: image header type for this class (CKPT_HDR_IPC_*)
 * @func:     per-object checkpoint callback, called by idr_for_each()
 *            with the ckpt_ctx as @data
 *
 * Writes a CKPT_HDR_IPC header carrying the in-use object count, then
 * iterates over every id of this class with @func.  The ids' rw_mutex
 * is read-held for the whole operation so the id set (and the count
 * written to the header) cannot change underneath us.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int checkpoint_ipc_any(struct ckpt_ctx *ctx,
			      struct ipc_namespace *ipc_ns,
			      int ipc_ind, int ipc_type,
			      int (*func)(int id, void *p, void *data))
{
	struct ckpt_hdr_ipc *h;
	struct ipc_ids *ipc_ids = &ipc_ns->ids[ipc_ind];
	int ret = -ENOMEM;

	down_read(&ipc_ids->rw_mutex);
	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_IPC);
	if (!h)
		goto out;

	h->ipc_type = ipc_type;
	h->ipc_count = ipc_ids->in_use;
	ckpt_debug("ipc-%s count %d\n", ipc_ind_to_str[ipc_ind], h->ipc_count);

	ret = ckpt_write_obj(ctx, &h->h);
	ckpt_hdr_put(ctx, h);
	if (ret < 0)
		goto out;

	/* idr_for_each() stops and propagates the first negative return */
	ret = idr_for_each(&ipc_ids->ipcs_idr, func, ctx);
	ckpt_debug("ipc-%s ret %d\n", ipc_ind_to_str[ipc_ind], ret);
 out:
	up_read(&ipc_ids->rw_mutex);
	return ret;
}

/*
 * checkpoint_ipc_ns - checkpoint an entire ipc namespace
 * @ctx: checkpoint context
 * @ptr: the ipc_namespace to checkpoint (opaque callback argument)
 *
 * First writes a CKPT_HDR_IPC_NS header with the namespace tunables
 * (shm/msg limits and sem_ctls), each group sampled under its own
 * rw_mutex read lock, then checkpoints the shm, msg and sem object
 * sets in that fixed order (restore_ipc_ns() relies on this order).
 *
 * Returns 0 on success, negative error otherwise.
 */
static int checkpoint_ipc_ns(struct ckpt_ctx *ctx, void *ptr)
{
	struct ipc_namespace *ipc_ns = ptr;
	struct ckpt_hdr_ipcns *h;
	int ret;

	h = ckpt_hdr_get_type(ctx, sizeof(*h), CKPT_HDR_IPC_NS);
	if (!h)
		return -ENOMEM;

	down_read(&shm_ids(ipc_ns).rw_mutex);
	h->shm_ctlmax = ipc_ns->shm_ctlmax;
	h->shm_ctlall = ipc_ns->shm_ctlall;
	h->shm_ctlmni = ipc_ns->shm_ctlmni;
	up_read(&shm_ids(ipc_ns).rw_mutex);

	down_read(&msg_ids(ipc_ns).rw_mutex);
	h->msg_ctlmax = ipc_ns->msg_ctlmax;
	h->msg_ctlmnb = ipc_ns->msg_ctlmnb;
	h->msg_ctlmni = ipc_ns->msg_ctlmni;
	up_read(&msg_ids(ipc_ns).rw_mutex);

	down_read(&sem_ids(ipc_ns).rw_mutex);
	/* sem_ctls[] order: SEMMSL, SEMMNS, SEMOPM, SEMMNI */
	h->sem_ctl_msl = ipc_ns->sem_ctls[0];
	h->sem_ctl_mns = ipc_ns->sem_ctls[1];
	h->sem_ctl_opm = ipc_ns->sem_ctls[2];
	h->sem_ctl_mni = ipc_ns->sem_ctls[3];
	up_read(&sem_ids(ipc_ns).rw_mutex);

	ret = ckpt_write_obj(ctx, &h->h);
	ckpt_hdr_put(ctx, h);
	if (ret < 0)
		return ret;

	ret = checkpoint_ipc_any(ctx, ipc_ns, IPC_SHM_IDS,
				 CKPT_HDR_IPC_SHM, checkpoint_ipc_shm);
	if (ret < 0)
		return ret;
	ret = checkpoint_ipc_any(ctx, ipc_ns, IPC_MSG_IDS,
				 CKPT_HDR_IPC_MSG, checkpoint_ipc_msg);
	if (ret < 0)
		return ret;
	ret = checkpoint_ipc_any(ctx, ipc_ns, IPC_SEM_IDS,
				 CKPT_HDR_IPC_SEM, checkpoint_ipc_sem);
	return ret;
}

/**************************************************************************
 * Collect
 */

/*
 * ckpt_collect_ipc_ns - collect objects referenced by an ipc namespace
 * @ctx:    checkpoint context
 * @ipc_ns: namespace whose objects should be collected
 *
 * Only shared-memory segments hold references to other checkpointable
 * objects (each holds a file pointer), so a single pass over the SHM
 * id set suffices; msg queues and semaphores need no collection.
 *
 * Returns 0 on success, negative error from the per-segment callback.
 */
int ckpt_collect_ipc_ns(struct ckpt_ctx *ctx, struct ipc_namespace *ipc_ns)
{
	struct ipc_ids *shm = &ipc_ns->ids[IPC_SHM_IDS];
	int err;

	down_read(&shm->rw_mutex);
	err = idr_for_each(&shm->ipcs_idr, ckpt_collect_ipc_shm, ctx);
	up_read(&shm->rw_mutex);

	return err;
}

/**************************************************************************
 * Restart
 */

/*
 * check whether current task may create ipc object with
 * checkpointed uids and gids.
 * Return 1 if ok, 0 if not.
 */
/*
 * validate_created_perms - may current create an ipc object with these ids?
 * @h: checkpointed ipc permissions read from the image
 *
 * The restored owner/creator uids must each match the caller's real or
 * effective uid, and the restored gids must be in the caller's group
 * set, unless the caller has CAP_IPC_OWNER which overrides all four
 * checks.
 *
 * Return 1 if ok, 0 if not.
 *
 * Fix: the original tested (h->uid != uid && h->uid == euid), which
 * rejected the legitimate "owner is our euid" case and accepted any
 * uid that matched neither id; use != like the cuid clause below.
 */
static int validate_created_perms(struct ckpt_hdr_ipc_perms *h)
{
	const struct cred *cred = current_cred();
	uid_t uid = cred->uid, euid = cred->euid;

	/* actually I don't know - is CAP_IPC_OWNER the right one? */
	if (((h->uid != uid && h->uid != euid) ||
			(h->cuid != uid && h->cuid != euid) ||
			!in_group_p(h->cgid) ||
			!in_group_p(h->gid)) &&
			!capable(CAP_IPC_OWNER))
		return 0;
	return 1;
}

/*
 * Requires that ids->rw_mutex be held; this is sufficient because:
 *
 * (a) The data accessed either may only change by ipc_update_perm()
 * or by security hooks (perm->security), all of which are only called
 * with the mutex write-held.
 *
 * (b) During restart, we are guaranteed to be using a brand new
 * ipc-ns, only accessible to us, so there will be no attempt for
 * access validation while we restore the state (by other tasks).
 */
/*
 * restore_load_ipc_perms - apply checkpointed ipc perms to a live object
 * @ctx:  restart context
 * @h:    checkpointed permissions read from the image
 * @perm: kernel ipc permission structure to populate
 *
 * Validates the image values (non-negative id, values that fit the
 * possibly-16-bit kernel fields, sane seq, only rwx mode bits), checks
 * the caller may create an object with these ids, then copies the
 * ownership/mode fields and restores the security context.
 *
 * Returns 0 on success, -EINVAL on bad image data, -EPERM if the
 * caller may not assume these ids, or a security-hook error.
 */
int restore_load_ipc_perms(struct ckpt_ctx *ctx,
			   struct ckpt_hdr_ipc_perms *h,
			   struct kern_ipc_perm *perm)
{
	if (h->id < 0)
		return -EINVAL;
	/* kernel perm fields may be 16-bit; reject values that truncate */
	if (CKPT_TST_OVERFLOW_16(h->uid, perm->uid) ||
	    CKPT_TST_OVERFLOW_16(h->gid, perm->gid) ||
	    CKPT_TST_OVERFLOW_16(h->cuid, perm->cuid) ||
	    CKPT_TST_OVERFLOW_16(h->cgid, perm->cgid) ||
	    CKPT_TST_OVERFLOW_16(h->mode, perm->mode))
		return -EINVAL;
	if (h->seq >= USHRT_MAX)
		return -EINVAL;
	if (h->mode & ~S_IRWXUGO)
		return -EINVAL;

	/* ->mode was verified against S_IRWXUGO above */

	if (!validate_created_perms(h))
		return -EPERM;
	/*
	 * NOTE(review): h->seq is range-checked above but never written
	 * to perm->seq here — presumably the caller/ipc_addid() sets it;
	 * confirm.
	 */
	perm->uid = h->uid;
	perm->gid = h->gid;
	perm->cuid = h->cuid;
	perm->cgid = h->cgid;
	perm->mode = h->mode;

	return security_restore_obj(ctx, (void *)perm,
				    CKPT_SECURITY_IPC,
				    h->sec_ref);
}

/*
 * restore_ipc_any - restore one class of ipc objects (sem, msg or shm)
 * @ctx:      restart context
 * @ipc_ns:   ipc namespace being populated
 * @ipc_ind:  index into ipc_ns->ids[] (used for debug strings only)
 * @ipc_type: expected image header type for this class (CKPT_HDR_IPC_*)
 * @func:     per-object restore callback
 *
 * Reads the CKPT_HDR_IPC header written by checkpoint_ipc_any(),
 * verifies it describes the expected class, and invokes @func once
 * per recorded object.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int restore_ipc_any(struct ckpt_ctx *ctx, struct ipc_namespace *ipc_ns,
			   int ipc_ind, int ipc_type,
			   int (*func)(struct ckpt_ctx *ctx,
				       struct ipc_namespace *ns))
{
	struct ckpt_hdr_ipc *h;
	int i, ret = -EINVAL;

	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_IPC);
	if (IS_ERR(h))
		return PTR_ERR(h);

	ckpt_debug("ipc-%s: count %d\n", ipc_ind_to_str[ipc_ind], h->ipc_count);

	if (h->ipc_type != ipc_type)
		goto out;

	ret = 0;
	for (i = 0; i < h->ipc_count && ret >= 0; i++)
		ret = (*func)(ctx, ipc_ns);
 out:
	ckpt_debug("ipc-%s: ret %d\n", ipc_ind_to_str[ipc_ind], ret);
	ckpt_hdr_put(ctx, h);
	return ret;
}

/*
 * restore_ipc_ns - restore an entire ipc namespace from the image
 * @ctx: restart context
 *
 * Reads the CKPT_HDR_IPC_NS header, validates the tunables, creates a
 * fresh namespace (or, without CONFIG_IPC_NS, reuses the current one),
 * applies the tunables, then restores the shm, msg and sem object sets
 * in the same fixed order checkpoint_ipc_ns() wrote them.
 *
 * Returns the namespace (caller owns the reference) or ERR_PTR().
 *
 * Fix: the error path unconditionally called put_ipc_ns(ipc_ns), which
 * dereferences NULL whenever we fail before a namespace was created or
 * grabbed (bad tunables, create_ipc_ns() failure, or the -EEXIST case).
 * Guard the put with a NULL check.
 */
static void *restore_ipc_ns(struct ckpt_ctx *ctx)
{
	struct ipc_namespace *ipc_ns = NULL;
	struct ckpt_hdr_ipcns *h;
	int ret;

	h = ckpt_read_obj_type(ctx, sizeof(*h), CKPT_HDR_IPC_NS);
	if (IS_ERR(h))
		return ERR_PTR(PTR_ERR(h));

	/* reject obviously corrupt (negative) tunables */
	ret = -EINVAL;
	if (h->shm_ctlmax < 0 || h->shm_ctlall < 0 || h->shm_ctlmni < 0)
		goto out;
	if (h->msg_ctlmax < 0 || h->msg_ctlmnb < 0 || h->msg_ctlmni < 0)
		goto out;
	if (h->sem_ctl_msl < 0 || h->sem_ctl_mns < 0 ||
	    h->sem_ctl_opm < 0 || h->sem_ctl_mni < 0)
		goto out;

	/*
	 * If !CONFIG_IPC_NS, do not restore the global IPC state, as
	 * it is used by other processes. It is ok to try to restore
	 * the {shm,msg,sem} objects: in the worst case the requested
	 * identifiers will be in use.
	 */
#ifdef CONFIG_IPC_NS
	ret = -ENOMEM;
	ipc_ns = create_ipc_ns();
	if (!ipc_ns)
		goto out;

	/*
	 * The namespace is brand new and only reachable by us, so the
	 * read locks here only serialize against the ids machinery.
	 */
	down_read(&shm_ids(ipc_ns).rw_mutex);
	ipc_ns->shm_ctlmax = h->shm_ctlmax;
	ipc_ns->shm_ctlall = h->shm_ctlall;
	ipc_ns->shm_ctlmni = h->shm_ctlmni;
	up_read(&shm_ids(ipc_ns).rw_mutex);

	down_read(&msg_ids(ipc_ns).rw_mutex);
	ipc_ns->msg_ctlmax = h->msg_ctlmax;
	ipc_ns->msg_ctlmnb = h->msg_ctlmnb;
	ipc_ns->msg_ctlmni = h->msg_ctlmni;
	up_read(&msg_ids(ipc_ns).rw_mutex);

	down_read(&sem_ids(ipc_ns).rw_mutex);
	ipc_ns->sem_ctls[0] = h->sem_ctl_msl;
	ipc_ns->sem_ctls[1] = h->sem_ctl_mns;
	ipc_ns->sem_ctls[2] = h->sem_ctl_opm;
	ipc_ns->sem_ctls[3] = h->sem_ctl_mni;
	up_read(&sem_ids(ipc_ns).rw_mutex);
#else
	ret = -EEXIST;
	/* complain if image contains multiple namespaces */
	if (ctx->stats.ipc_ns)
		goto out;
	ipc_ns = current->nsproxy->ipc_ns;
	get_ipc_ns(ipc_ns);
#endif

	ret = restore_ipc_any(ctx, ipc_ns, IPC_SHM_IDS,
			      CKPT_HDR_IPC_SHM, restore_ipc_shm);
	if (ret < 0)
		goto out;
	ret = restore_ipc_any(ctx, ipc_ns, IPC_MSG_IDS,
			      CKPT_HDR_IPC_MSG, restore_ipc_msg);
	if (ret < 0)
		goto out;
	ret = restore_ipc_any(ctx, ipc_ns, IPC_SEM_IDS,
			      CKPT_HDR_IPC_SEM, restore_ipc_sem);
	if (ret < 0)
		goto out;

	ctx->stats.ipc_ns++;
 out:
	ckpt_hdr_put(ctx, h);
	if (ret < 0) {
		/* ipc_ns is NULL if we failed before creating/grabbing it */
		if (ipc_ns)
			put_ipc_ns(ipc_ns);
		ipc_ns = ERR_PTR(ret);
	}
	return (void *)ipc_ns;
}

/*
 * ipc-related checkpoint objects
 */

/* Take a reference on the ipc namespace behind @ptr; cannot fail. */
static int obj_ipc_ns_grab(void *ptr)
{
	struct ipc_namespace *ipc_ns = ptr;

	get_ipc_ns(ipc_ns);
	return 0;
}

/* Drop one reference on the ipc namespace behind @ptr. */
static void obj_ipc_ns_drop(void *ptr, int lastref)
{
	struct ipc_namespace *ipc_ns = ptr;

	put_ipc_ns(ipc_ns);
}

/* Report the current reference count of the ipc namespace behind @ptr. */
static int obj_ipc_ns_users(void *ptr)
{
	struct ipc_namespace *ipc_ns = ptr;

	return atomic_read(&ipc_ns->count);
}

/* checkpoint-object operations for ipc namespaces (CKPT_OBJ_IPC_NS) */
static struct ckpt_obj_ops ckpt_obj_ipcns_ops = {
	.obj_name = "IPC_NS",
	.obj_type = CKPT_OBJ_IPC_NS,
	.ref_drop = obj_ipc_ns_drop,
	.ref_grab = obj_ipc_ns_grab,
	.ref_users = obj_ipc_ns_users,
	.checkpoint = checkpoint_ipc_ns,
	.restore = restore_ipc_ns,
};

/* Register the ipc-namespace checkpoint object type at boot. */
static int __init checkpoint_register_ipcns(void)
{
	return register_checkpoint_obj(&ckpt_obj_ipcns_ops);
}
module_init(checkpoint_register_ipcns);
