// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio QTBox device.
 *
 * Copyright 2022 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/anon_inodes.h>
#include "uapi/virtio_qtbox.h"
#include "virtio_qtbox_comm.h"

/* Delay (milliseconds) between retries of the delayed release worker. */
#define RELEASE_WORK_TIME_SET 1000

/* Total number of instances in the system */
static atomic_t total_instances = ATOMIC_INIT(0);

/* Protects the sandbox_cpu_pool_* cpumasks below. */
static DEFINE_MUTEX(sandbox_cpu_pool_mutex);

/* Maximum length of the "sandbox_cpus" module-parameter string. */
#define SANDBOX_CPUS_SIZE 512
static char sandbox_cpus[SANDBOX_CPUS_SIZE];
static cpumask_var_t sandbox_cpu_pool_total;
static cpumask_var_t sandbox_cpu_pool_avail; /* available currently */
/* Per-CPU SMT sibling snapshot, taken before each pool CPU is offlined. */
static cpumask_var_t *sandbox_cpu_sibling_map;

/*
 * Bitmap of vsock context ids in use by started instances.
 * NOTE(review): BITS_TO_LONGS(UINT_MAX) longs is roughly 512 MiB of
 * static storage — confirm this size is intended.
 */
static unsigned long cid_bits[BITS_TO_LONGS(UINT_MAX)];

static int
sandbox_set_kernel_param(const char *val, const struct kernel_param *kp);

/* "sandbox_cpus" module parameter: read back as a string, set via
 * sandbox_set_kernel_param() which rebuilds the CPU pool. */
static const struct kernel_param_ops sandbox_cpu_pool_ops = {
    .get = param_get_string,
    .set = sandbox_set_kernel_param,
};

static struct kparam_string sandbox_cpus_arg = {
    .maxlen = sizeof(sandbox_cpus),
    .string = sandbox_cpus,
};

module_param_cb(sandbox_cpus, &sandbox_cpu_pool_ops, &sandbox_cpus_arg, 0644);

/* One entry of an ioctl dispatch table (see sandbox_ioctl and
 * sandbox_instance_ioctl). */
struct sandbox_cmd {
	char *name;
	unsigned long cmd;
	long (*func)(void *priv, void __user *arg);
};

static inline bool sandbox_cpu_pool_seted(void)
{
	bool ret;

	mutex_lock(&sandbox_cpu_pool_mutex);
	ret = !!sandbox_cpu_pool_total && !!sandbox_cpu_pool_avail;
	mutex_unlock(&sandbox_cpu_pool_mutex);

	return ret;
}

/*
 * Undo sandbox_setup_cpu_pool(): bring every pool CPU back online, free
 * the per-CPU sibling snapshots and release the pool cpumasks.
 *
 * Must hold sandbox_cpu_pool_mutex.
 */
static void sandbox_teardown_cpu_pool(void)
{
	int cpu, rc;

	/* Pool never configured (or already torn down): nothing to do. */
	if (!sandbox_cpu_pool_total)
		return;

	for_each_cpu(cpu, sandbox_cpu_pool_total) {
		rc = os_compat_add_cpu(cpu);
		/* Should continue even failed */
		if (rc < 0)
			pr_err("cpu %d is not onlined, rc=%d\n", cpu, rc);

		/* free_cpumask_var(NULL) is a no-op, so entries left
		 * unallocated by a failed setup are safe to free. */
		free_cpumask_var(sandbox_cpu_sibling_map[cpu]);
		sandbox_cpu_sibling_map[cpu] = NULL;
	}

	free_cpumask_var(sandbox_cpu_pool_total);
	sandbox_cpu_pool_total = NULL;
	free_cpumask_var(sandbox_cpu_pool_avail);
	sandbox_cpu_pool_avail = NULL;
}

/*
 * Ensure CPU 0 (the housekeeping CPU) and all of its SMT siblings stay
 * outside the sandbox CPU pool.
 *
 * Returns 0 when the pool is acceptable, -EINVAL otherwise.
 */
static int sandbox_check_housekeeping_cpu(void)
{
	int cpu;

	if (cpumask_test_cpu(0, sandbox_cpu_pool_total)) {
		pr_err("cpu 0 is in cpu pool\n");
		return -EINVAL;
	}

	for_each_cpu(cpu, topology_sibling_cpumask(0)) {
		if (!cpumask_test_cpu(cpu, sandbox_cpu_pool_total))
			continue;
		pr_err("sibling (%d) of cpu 0 is in cpu pool\n", cpu);
		return -EINVAL;
	}

	return 0;
}

/*
 * Verify that every recorded SMT sibling of @cpu is itself part of the
 * sandbox CPU pool.  Relies on the loop variable keeping its final value
 * after for_each_cpu() finishes.
 */
static int check_all_siblings_in_pool(int cpu)
{
	int sibling;

	for_each_cpu(sibling, sandbox_cpu_sibling_map[cpu]) {
		if (!cpumask_test_cpu(sibling, sandbox_cpu_pool_total))
			break;
	}

	/*
	 * sibling >= nr_cpu_ids: all siblings of cpu are in pool
	 * sibling < nr_cpu_ids: the sibling cpu is not in pool
	 */
	return sibling;
}

/*
 * Offline every CPU in sandbox_cpu_pool_total and snapshot its SMT
 * sibling mask, then verify the pool only contains full cores (every
 * sibling of a pool CPU is itself in the pool).
 *
 * Returns 0 on success or a negative errno.  On failure the caller runs
 * sandbox_teardown_cpu_pool() to undo partial work.
 */
static int sandbox_add_cpu_to_pool(void)
{
	int cpu, rc;

	for_each_cpu(cpu, sandbox_cpu_pool_total) {
		if (!zalloc_cpumask_var(&sandbox_cpu_sibling_map[cpu], GFP_KERNEL)) {
			pr_err("alloc sibling cpumap failed\n");
			return -ENOMEM;
		}

		/* Snapshot siblings before the CPU goes offline. */
		cpumask_copy(sandbox_cpu_sibling_map[cpu], topology_sibling_cpumask(cpu));
		cpumask_clear_cpu(cpu, sandbox_cpu_sibling_map[cpu]);

		rc = os_compat_remove_cpu(cpu);
		if (rc < 0) {
			pr_err("cpu %d is not offlined, rc=%d\n", cpu, rc);
			/*
			 * Fix: propagate the error.  The old "break" fell
			 * through to the sibling check below, which
			 * overwrote rc and could report success despite the
			 * failed offline.
			 */
			return rc;
		}
	}

	for_each_cpu(cpu, sandbox_cpu_pool_total) {
		rc = check_all_siblings_in_pool(cpu);
		if (rc < nr_cpu_ids) {
			pr_err("sibling (%d) of cpu (%d) is not in pool\n", rc, cpu);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Parse the "sandbox_cpus" cpulist string @val and (re)build the sandbox
 * CPU pool: allocate the total/available masks, validate that CPU 0 and
 * its siblings are excluded, and offline every pool CPU.
 *
 * Returns 0 on success or a negative errno; any previous pool is torn
 * down first, and a failed rebuild leaves no pool configured.
 */
static int sandbox_setup_cpu_pool(const char *val)
{
	int rc = -ENOENT;

	/* sibling map array is allocated when the device probes. */
	if (!sandbox_cpu_sibling_map) {
		pr_err("sandbox service not ready, please make sure the qtbox "
		       "is configured and support sandbox service\n");
		return rc;
	}

	mutex_lock(&sandbox_cpu_pool_mutex);

	rc = -ENOMEM;

	/* Replace any previously configured pool. */
	sandbox_teardown_cpu_pool();

	if (!zalloc_cpumask_var(&sandbox_cpu_pool_total, GFP_KERNEL)) {
		pr_err("alloc cpu_pool_total failed\n");
		goto out;
	}

	if (!zalloc_cpumask_var(&sandbox_cpu_pool_avail, GFP_KERNEL)) {
		pr_err("alloc cpu_pool_avail failed\n");
		goto free_total;
	}

	rc = cpulist_parse(val, sandbox_cpu_pool_total);
	if (rc < 0) {
		pr_err("parse string(%s) failed, rc=%d\n", val, rc);
		goto free_avail;
	}

	/* NOTE(review): -ENOMEM for an empty list looks odd; -EINVAL would
	 * describe the condition better — confirm before changing. */
	if (cpumask_empty(sandbox_cpu_pool_total)) {
		rc = -ENOMEM;
		pr_info("cpu pool is empty now\n");
		goto free_avail;
	}

	rc = sandbox_check_housekeeping_cpu();
	if (rc < 0)
		goto free_avail;

	rc = sandbox_add_cpu_to_pool();
	if (rc < 0) {
		/* Partial offline/sibling state must be fully unwound. */
		sandbox_teardown_cpu_pool();
		goto out;
	}

	/* Initially every pool CPU is available for instances. */
	cpumask_copy(sandbox_cpu_pool_avail, sandbox_cpu_pool_total);
	mutex_unlock(&sandbox_cpu_pool_mutex);

	return 0;

free_avail:
	free_cpumask_var(sandbox_cpu_pool_avail);
	sandbox_cpu_pool_avail = NULL;
free_total:
	free_cpumask_var(sandbox_cpu_pool_total);
	sandbox_cpu_pool_total = NULL;
out:
	mutex_unlock(&sandbox_cpu_pool_mutex);
	return rc;
}

/*
 * .set handler for the "sandbox_cpus" module parameter.  Requires
 * CAP_SYS_ADMIN and refuses to change the pool while any instance
 * exists.  On a failed pool setup the stored parameter string is reset
 * to empty so sysfs reflects the actual (unconfigured) state.
 */
static int
sandbox_set_kernel_param(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (atomic_read(&total_instances)) {
		pr_err("there're %d instances in system, cannot adjust the cpu pool\n",
		       atomic_read(&total_instances));
		return -EPERM;
	}

	rc = param_set_copystring(val, kp);
	if (rc < 0) {
		pr_err("set param string(%s) failed, ret=%d\n", val, rc);
		return rc;
	}

	rc = sandbox_setup_cpu_pool(val);
	if (rc < 0)
		param_set_copystring("", kp);

	return rc;
}

/* A nonzero max_instances means the capability query already ran. */
static inline bool sandbox_cap_inited(struct virtio_qtbox_sandbox *sandbox)
{
	return sandbox->cap.max_instances != 0;
}

/* Dump every field of the cached capability structure at debug level. */
static void sandbox_list_cap(struct virtio_qtbox_sandbox *sandbox)
{
	struct vqtbox_sandbox_vm_cap *cap = &sandbox->cap;
	struct device *dev = sandbox->misc_dev.this_device;

	dev_dbg(dev, "Sandbox Service Capability Info:\n");
	dev_dbg(dev, "\t max_instances: %llu\n", cap->max_instances);
	dev_dbg(dev, "\t max_vcpus_per_inst: %llu\n", cap->max_vcpus_per_inst);
	dev_dbg(dev, "\t max_mem_regions_per_inst: %llu\n", cap->max_mem_regions_per_inst);
	dev_dbg(dev, "\t max_pdevs_per_inst: %llu\n", cap->max_pdevs_per_inst);
	dev_dbg(dev, "\t mem_addr_align: 0x%llx\n", cap->mem_addr_align);
	dev_dbg(dev, "\t mem_size_align: 0x%llx\n", cap->mem_size_align);
	dev_dbg(dev, "\t supported_launch_mode: 0x%llx\n", cap->supported_launch_mode);
	dev_dbg(dev, "\t supported_image_format: 0x%llx\n", cap->supported_image_format);
	dev_dbg(dev, "\t image_offset: 0x%llx\n", cap->image_offset);
}

/*
 * Query the device for its sandbox VM capability set and cache the
 * little-endian fields, converted to host order, in sandbox->cap.
 *
 * Returns 0 on success, -EINVAL when the device command fails.
 */
static long sandbox_get_cap_ioctl(struct virtio_qtbox_sandbox *sandbox)
{
	struct virtio_qtbox *qtbox = sandbox2qtbox(sandbox);
	struct virtio_qtbox_sandbox_vm_instance_cap_flf cap_flf = {};
	struct virtio_qtbox_sandbox_vm_instance_cap_result result = {};
	int ret;

	ret = virtio_qtbox_do_cmd(qtbox, VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				  VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_CAP,
				  &cap_flf, sizeof(cap_flf), NULL, 0,
				  &result, sizeof(result));
	if (ret != 0) {
		dev_err(sandbox->misc_dev.this_device,
			"Error in get cap [ret=%d]\n", ret);
		return -EINVAL;
	}

	/* Device replies are little-endian; store them in host order. */
	sandbox->cap.max_instances = le64_to_cpu(result.max_instances);
	sandbox->cap.max_vcpus_per_inst = le64_to_cpu(result.max_vcpus_num);
	sandbox->cap.max_mem_regions_per_inst = le64_to_cpu(result.max_mem_regions_num);
	sandbox->cap.max_pdevs_per_inst = le64_to_cpu(result.max_pdevs_num);
	sandbox->cap.mem_addr_align = le64_to_cpu(result.mem_addr_align);
	sandbox->cap.mem_size_align = le64_to_cpu(result.mem_size_align);
	sandbox->cap.supported_launch_mode = le64_to_cpu(result.supported_launch_mode);
	sandbox->cap.supported_image_format = le64_to_cpu(result.supported_image_format);
	sandbox->cap.image_offset = le64_to_cpu(result.image_offset);

	sandbox_list_cap(sandbox);
	return 0;
}

/*
 * Validate a user-supplied define request against the device
 * capabilities: exactly one launch mode and one image format must be
 * set, and both must be advertised in sandbox->cap.
 *
 * Returns 0 when valid, -EINVAL otherwise.  (Fix: log message typos —
 * "specificed"/"unsupport" — corrected.)
 */
static inline long define_param_check(struct virtio_qtbox_sandbox *sandbox,
				      struct vqtbox_sandbox_define *define)
{
	if (!define->launch_mode || !define->image_format) {
		dev_err(sandbox->misc_dev.this_device,
			"no mode or format specified (0x%llx,0x%llx)\n",
			define->launch_mode, define->image_format);
		return -EINVAL;
	}

	if (define->launch_mode & ~sandbox->cap.supported_launch_mode) {
		dev_err(sandbox->misc_dev.this_device,
			"unsupported launch mode (0x%llx vs 0x%llx)\n",
			define->launch_mode, sandbox->cap.supported_launch_mode);
		return -EINVAL;
	}

	if (define->image_format & ~sandbox->cap.supported_image_format) {
		dev_err(sandbox->misc_dev.this_device,
			"unsupported image format (0x%llx vs 0x%llx)\n",
			define->image_format, sandbox->cap.supported_image_format);
		return -EINVAL;
	}

	/* A power of two means exactly one bit is set. */
	if (!is_power_of_2(define->launch_mode) || !is_power_of_2(define->image_format)) {
		dev_err(sandbox->misc_dev.this_device,
			"only one mode or format can be specified (0x%llx, 0x%llx)\n",
			define->launch_mode, define->image_format);
		return -EINVAL;
	}

	return 0;
}

/* Logging helpers that prefix messages with the instance id (sid). */
#define instance_dev_err(instance, format, ...)				\
	dev_err(instance->sandbox->misc_dev.this_device,		\
		"instance %llu: "format,				\
		instance->sid, ##__VA_ARGS__)

#define instance_dev_info(instance, format, ...)			\
	dev_info(instance->sandbox->misc_dev.this_device,		\
		 "instance %llu: "format,				\
		 instance->sid, ##__VA_ARGS__)

/*
 * Translate a device command status into a return value.  A maintenance
 * status (VIRTIO_QTBOX_SANDBOX_VM_ERR_OPS_STATUS) is negated and passed
 * through so callers can schedule a retry; anything else is logged with
 * the supplied message and collapsed to -EINVAL.
 */
static long log_if_maintenance(struct virtio_qtbox_sandbox_instance *instance,
			       const char *err, long ret)
{
	if (ret == VIRTIO_QTBOX_SANDBOX_VM_ERR_OPS_STATUS)
		return -ret;

	dev_err(instance->sandbox->misc_dev.this_device,
		"instance %llu: %s, ret = %ld\n", instance->sid, err, ret);
	return -EINVAL;
}

/*
 * Claim @cpu from the available pool for @instance: the CPU itself goes
 * into instance->cpus, and all of its siblings are pulled out of the
 * pool and parked in instance->siblings until the instance claims them
 * too (instances always own full cores).
 *
 * Must hold sandbox_cpu_pool_mutex.
 */
static inline void
instance_set_cpu(struct virtio_qtbox_sandbox_instance *instance, uint64_t cpu)
{
	int sibling;

	cpumask_clear_cpu(cpu, sandbox_cpu_pool_avail);
	for_each_cpu(sibling, sandbox_cpu_sibling_map[cpu]) {
		/*
		 * Note:
		 *   The userspce MUST crave out all threads of one core to the
		 *   cpu pool when setup.
		 */
		cpumask_clear_cpu(sibling, sandbox_cpu_pool_avail);
		cpumask_set_cpu(sibling, instance->siblings);
	}
	cpumask_set_cpu(cpu, instance->cpus);
}

/*
 * Try to give the specific @cpu to @instance.  The CPU must either be a
 * sibling already reserved for this instance, or currently available in
 * the pool.
 *
 * Returns true when the CPU was added, false when it is out of range or
 * not available.
 *
 * Fixes:
 *  - the pool path used to claim the CPU unconditionally and then return
 *    false, so the caller reported an error while the CPU was silently
 *    consumed from the pool;
 *  - an unvalidated user-supplied cpu id could index cpumasks out of
 *    bounds; reject ids >= nr_cpu_ids up front.
 */
static bool
instance_add_specific_cpu(struct virtio_qtbox_sandbox_instance *instance,
			  uint64_t cpu)
{
	bool added = false;

	if (cpu >= nr_cpu_ids)
		return false;

	if (cpumask_test_cpu(cpu, instance->siblings)) {
		cpumask_clear_cpu(cpu, instance->siblings);
		cpumask_set_cpu(cpu, instance->cpus);
		return true;
	}

	mutex_lock(&sandbox_cpu_pool_mutex);
	if (cpumask_test_cpu(cpu, sandbox_cpu_pool_avail)) {
		instance_set_cpu(instance, cpu);
		added = true;
	}
	mutex_unlock(&sandbox_cpu_pool_mutex);

	return added;
}

static long
instance_add_dyn_cpu(struct virtio_qtbox_sandbox_instance *instance)
{
	int cpu;

	cpu = cpumask_first(instance->siblings);
	if (cpu < nr_cpu_ids) {
		cpumask_clear_cpu(cpu, instance->siblings);
		cpumask_set_cpu(cpu, instance->cpus);
		return cpu;
	}

	mutex_lock(&sandbox_cpu_pool_mutex);

	cpu = cpumask_first(sandbox_cpu_pool_avail);
	if (cpu >= nr_cpu_ids) {
		instance_dev_err(instance, "no available cpu found\n");
		cpu = -1;
		goto out;
	}

	instance_set_cpu(instance, cpu);

out:
	mutex_unlock(&sandbox_cpu_pool_mutex);
	return cpu;
}

/*
 * Cache one vCPU for @instance.  @cpu == 0 requests dynamic selection
 * from the pool; a nonzero @cpu requests that specific CPU.
 *
 * Returns the cpu id actually cached (>= 0) or a negative errno.
 */
static long
instance_add_cpu(struct virtio_qtbox_sandbox_instance *instance, uint64_t cpu)
{
	long ret;

	if (!sandbox_cpu_pool_seted()) {
		instance_dev_err(instance, "setup cpu_pool first\n");
		return -EFAULT;
	}

	if (!cpu) {
		ret = instance_add_dyn_cpu(instance);
		if (ret < 0)
			return ret;
		cpu = ret;
	} else if (!instance_add_specific_cpu(instance, cpu)) {
		/* Fix: old message was the double negative "is not invalid". */
		instance_dev_err(instance, "cpu %lld is not valid\n", cpu);
		return -EINVAL;
	}

	instance->nr_cpus++;
	instance_dev_info(instance, "cpu %lld cached success\n", cpu);
	return cpu;
}

/*
 * Submit all cached vCPUs of @instance to the device as an
 * ATTACH_RESOURCE command.  Requires at least one cached CPU, no more
 * than SANDBOX_CPUS_SIZE, and that only full cores are attached (no
 * leftover reserved siblings).
 *
 * Returns 0 on success or a negative errno.
 */
static long instance_submit_cpus(struct virtio_qtbox_sandbox_instance *instance)
{
	struct virtio_qtbox_sandbox_vm_instance_attach_resources_flf res_flf = {};
	struct virtio_qtbox_sandbox_vm_cmd_result_comm result = {};
	struct virtio_qtbox_sandbox_vm_add_vcpu *cpus;
	int i = 0, cpu;
	long ret;

	if (!instance->nr_cpus) {
		instance_dev_err(instance, "no cpu added\n");
		return -EINVAL;
	}

	if (instance->nr_cpus > SANDBOX_CPUS_SIZE) {
		instance_dev_err(instance, "maximum number (%d) of CPUs exceeded: (%lld)\n",
				            SANDBOX_CPUS_SIZE, instance->nr_cpus);
		return -EINVAL;
	}

	/* Reserved siblings still pending means partial cores. */
	if (!cpumask_empty(instance->siblings)) {
		instance_dev_err(instance, "not full cores\n");
		return -EINVAL;
	}

	cpus = kcalloc(instance->nr_cpus, sizeof(*cpus), GFP_KERNEL);
	if (!cpus) {
		instance_dev_err(instance, "alloc cpus failed\n");
		return -ENOMEM;
	}

	for_each_cpu(cpu, instance->cpus)
		cpus[i++].vcpu_id = cpu_to_le32(cpu);

	res_flf.sid = cpu_to_le32(instance->sid);
	res_flf.num_vcpus = cpu_to_le32(instance->nr_cpus);

	ret = virtio_qtbox_do_cmd(sandbox2qtbox(instance->sandbox),
				  VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				  VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_ATTACH_RESOURCE,
				  &res_flf, sizeof(res_flf),
				  cpus, instance->nr_cpus * sizeof(*cpus),
				  &result, sizeof(result));
	if (ret != 0) {
		/* Fix: "reosurces" typo in the log message. */
		instance_dev_err(instance, "attach resources failed. ret = %ld\n", ret);
		ret = -EINVAL;
		goto out;
	}

	ret = le32_to_cpu(result.rv);
	if (ret != 0)
		ret = log_if_maintenance(instance, "cmd: attach cpu fail", ret);
out:
	kfree(cpus);
	return ret;
}

/*
 * Return an instance's CPUs to the available pool.
 *
 * Fix: siblings that were reserved by instance_set_cpu() but never
 * promoted to instance->cpus were previously never returned, so a
 * half-configured instance leaked them from the pool forever.
 */
static void instance_del_cpus(struct virtio_qtbox_sandbox_instance *instance)
{
	int cpu;

	mutex_lock(&sandbox_cpu_pool_mutex);

	for_each_cpu(cpu, instance->cpus)
		cpumask_set_cpu(cpu, sandbox_cpu_pool_avail);

	for_each_cpu(cpu, instance->siblings)
		cpumask_set_cpu(cpu, sandbox_cpu_pool_avail);

	mutex_unlock(&sandbox_cpu_pool_mutex);
}

/*
 * Validate @range against the device's address and size alignment
 * capabilities.  Returns true when start/len are acceptable.
 *
 * NOTE(review): the `len & (align - 1)` test assumes mem_size_align is a
 * power of two — confirm the device guarantees that.
 */
static inline bool check_range_align(struct virtio_qtbox_sandbox_instance *instance,
				     struct vqtbox_sandbox_mem_range *range)
{
	struct vqtbox_sandbox_vm_cap *cap = &instance->sandbox->cap;

	if (!IS_ALIGNED(range->start, cap->mem_addr_align)) {
		instance_dev_err(instance, "start 0x%llx is not aligned\n", range->start);
		return false;
	}

	/* A zero-length range is meaningless. */
	if (!range->len) {
		instance_dev_err(instance, "len 0x%llx is invalid\n", range->len);
		return false;
	}

	if (range->len & (cap->mem_size_align - 1)) {
		instance_dev_err(instance, "len 0x%llx is not aligned\n", range->len);
		return false;
	}

	return true;
}

/*
 * Scratch list of physically contiguous regions built while pinning
 * user memory; handed to the device by instance_submit_mem().
 */
struct phys_contig_mem_range {
	uint64_t num;			/* valid entries in regions[] */
	struct range regions[];		/* C99 flexible array member (was [0]) */
};

/*
 * Append the physical @range to @phys_range, merging it with the last
 * region when the two are physically adjacent.  Returns 0 or -EINVAL
 * when the range fails alignment checks.
 */
static long add_phys_contig_mem(struct virtio_qtbox_sandbox_instance *instance,
				struct phys_contig_mem_range *phys_range,
				struct vqtbox_sandbox_mem_range *range)
{
	if (!check_range_align(instance, range)) {
		instance_dev_err(instance, "physical mem range is invalid\n");
		return -EINVAL;
	}

	/* Adjacent to the previous region: extend it instead of adding. */
	if (phys_range->num &&
	    (phys_range->regions[phys_range->num - 1].end + 1) == range->start) {
		phys_range->regions[phys_range->num - 1].end += range->len;
	} else {
		phys_range->regions[phys_range->num].start = range->start;
		phys_range->regions[phys_range->num].end = range->start + range->len - 1;
		phys_range->num++;
	}

	return 0;
}

/*
 * Convert the coalesced physical regions in @phys_ranges into the
 * device's add_mem descriptors and submit them via ATTACH_RESOURCE.
 *
 * Returns 0 on success or a negative errno.
 */
static long instance_submit_mem(struct virtio_qtbox_sandbox_instance *instance,
				struct phys_contig_mem_range *phys_ranges)
{
	struct virtio_qtbox_sandbox_vm_instance_attach_resources_flf res_flf = {};
	struct virtio_qtbox_sandbox_vm_cmd_result_comm result = {};
	struct virtio_qtbox_sandbox_vm_add_mem *mems;
	long ret;
	uint64_t i;

	mems = kcalloc(phys_ranges->num, sizeof(*mems), GFP_KERNEL);
	if (!mems) {
		instance_dev_err(instance, "alloc mems failed\n");
		return -ENOMEM;
	}

	/* Device descriptors are little-endian. */
	for (i = 0; i < phys_ranges->num; i++) {
		mems[i].paddr = cpu_to_le64(phys_ranges->regions[i].start);
		mems[i].size = cpu_to_le64(range_len(&phys_ranges->regions[i]));
	}

	res_flf.sid = cpu_to_le32(instance->sid);
	res_flf.num_mems_region = cpu_to_le32(phys_ranges->num);

	ret = virtio_qtbox_do_cmd(sandbox2qtbox(instance->sandbox),
				  VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				  VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_ATTACH_RESOURCE,
				  &res_flf, sizeof(res_flf),
				  mems, phys_ranges->num * sizeof(*mems),
				  &result, sizeof(result));
	if (ret != 0) {
		instance_dev_err(instance, "attach resources failed, ret = %ld\n", ret);
		ret = -EINVAL;
		goto out;
	}

	ret = le32_to_cpu(result.rv);
	if (ret != 0) {
		ret = log_if_maintenance(instance,
					 "cmd: attach memory fail", ret);
	}
out:
	kfree(mems);
	return ret;
}

static long instance_add_mem(struct virtio_qtbox_sandbox_instance *instance,
			     struct vqtbox_sandbox_mem_range *range)
{
	struct virtio_qtbox_mem_range *mem_range;
	struct phys_contig_mem_range *phys_ranges;
	uint64_t i, nr_pages, mem_size;
	long ret;

	if (!check_range_align(instance, range))
		return -EINVAL;

	ret = -ENOMEM;

	/*  If use abnormal parameter, it may cause out of memory, which
	 *  is the behavior of the user and affects itself.
	 */
	nr_pages = range->len / instance->sandbox->cap.mem_size_align;

	phys_ranges = kzalloc(sizeof(*phys_ranges) + nr_pages * sizeof(struct range), GFP_KERNEL);
	if (!phys_ranges) {
		instance_dev_err(instance, "alloc phys_ranges failed\n");
		return ret;
	}

	mem_range = kzalloc(sizeof(*mem_range), GFP_KERNEL);
	if (!mem_range) {
		instance_dev_err(instance, "alloc mem_range failed\n");
		goto free_phys;
	}

	mem_range->pages = kcalloc(nr_pages, sizeof(*mem_range->pages), GFP_KERNEL);
	if (!mem_range->pages) {
		instance_dev_err(instance, "alloc pages array failed\n");
		goto free_mem;
	}

	mem_size = 0;
	for (i = 0; i < nr_pages; i++) {
		struct vqtbox_sandbox_mem_range tmp;

		ret = get_user_pages_unlocked(range->start + mem_size, 1,
					      mem_range->pages + i, FOLL_GET);
		if (ret < 0) {
			instance_dev_err(instance, "get pages failed\n");
			goto free_pages;
		}

		mem_range->nr_pages++;
		tmp.len = page_size(mem_range->pages[i]);
		tmp.start = page_to_phys(mem_range->pages[i]);
		ret = add_phys_contig_mem(instance, phys_ranges, &tmp);
		if (ret < 0)
			goto free_pages;

		mem_size += tmp.len;
		if (mem_size >= range->len)
			break;
	}

	mem_range->virt_mem.start = range->start;
	mem_range->virt_mem.end = range->start + range->len - 1;
	list_add(&mem_range->node, &instance->mem_list);

	ret = instance_submit_mem(instance, phys_ranges);
	if (ret < 0) {
		kfree(phys_ranges);
		/* free other resources during the undefine operation */
		return ret;
	}

	instance->mem_size += mem_size;
	instance->nr_phys_ranges += phys_ranges->num;
	kfree(phys_ranges);

	return ret;

free_pages:
	for (i = 0; i < mem_range->nr_pages; i++)
		put_page(mem_range->pages[i]);
	kfree(mem_range->pages);
free_mem:
	kfree(mem_range);
free_phys:
	kfree(phys_ranges);
	return ret;
}

/*
 * Drop every pinned memory range attached to the instance: unpin the
 * pages, then free the bookkeeping structures.
 */
static void
instance_del_mem_ranges(struct virtio_qtbox_sandbox_instance *instance)
{
	struct virtio_qtbox_mem_range *range, *next;
	int idx;

	list_for_each_entry_safe(range, next, &instance->mem_list, node) {
		list_del(&range->node);
		for (idx = 0; idx < range->nr_pages; idx++)
			put_page(range->pages[idx]);
		kfree(range->pages);
		kfree(range);
	}
}

static long instance_start(struct virtio_qtbox_sandbox_instance *instance,
				struct vqtbox_sandbox_start *start)
{
	struct virtio_qtbox_sandbox_vm_instance_start_flf start_flf = {};
	struct virtio_qtbox_sandbox_vm_cmd_result_comm result = {};
	long ret;

	ret = instance_submit_cpus(instance);
	if (ret < 0)
		return ret;

	start_flf.cid = cpu_to_le64(start->cid);
	start_flf.sid = cpu_to_le64(instance->sid);

	if (test_bit(start->cid, cid_bits)) {
		instance_dev_err(instance, "start failed. vsock id %lld has been started !\n",
					    start->cid);
		return -EINVAL;
	}
	set_bit(start->cid, cid_bits);

	ret = virtio_qtbox_do_cmd(sandbox2qtbox(instance->sandbox),
				  VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				  VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_START,
				  &start_flf, sizeof(start_flf), NULL, 0,
				  &result, sizeof(result));
	if (ret != 0) {
		instance_dev_err(instance, "start failed. ret = %ld\n", ret);
		clear_bit(start->cid, cid_bits);
		return -EINVAL;
	}

	ret = le32_to_cpu(result.rv);
	if (ret != 0) {
		ret = log_if_maintenance(instance, "cmd: start fail", ret);
		clear_bit(start->cid, cid_bits);
	}

	instance->cid = start->cid;
	return ret;
}

/*
 * VQTBOX_SANDBOX_ADD_CPU handler: read the requested cpu id from @arg,
 * cache a vCPU for the instance and write the assigned id back.
 */
static long instance_ioctl_add_cpu(void *priv, void __user *arg)
{
	struct virtio_qtbox_sandbox_instance *instance = priv;
	uint64_t requested;
	long assigned;

	if (copy_from_user(&requested, arg, sizeof(requested))) {
		instance_dev_err(instance, "add_cpu: copy args from user failed\n");
		return -EFAULT;
	}

	assigned = instance_add_cpu(instance, requested);
	if (assigned < 0)
		return assigned;

	if (copy_to_user(arg, &assigned, sizeof(assigned))) {
		instance_dev_err(instance, "add_cpu: copy args to user failed\n");
		return -EFAULT;
	}

	return assigned;
}

/*
 * VQTBOX_SANDBOX_ADD_MEM handler: copy the requested range descriptor
 * from userspace and attach the memory to the instance.
 */
static long instance_ioctl_add_mem(void *priv, void __user *arg)
{
	struct virtio_qtbox_sandbox_instance *instance = priv;
	struct vqtbox_sandbox_mem_range req;

	if (copy_from_user(&req, arg, sizeof(req))) {
		instance_dev_err(instance, "add_mem: copy args from user failed\n");
		return -EFAULT;
	}

	return instance_add_mem(instance, &req);
}

/*
 * VQTBOX_SANDBOX_START handler: copy the start arguments from userspace
 * and launch the instance.
 */
static long instance_ioctl_start(void *priv, void __user *arg)
{
	struct virtio_qtbox_sandbox_instance *instance = priv;
	struct vqtbox_sandbox_start req;

	if (copy_from_user(&req, arg, sizeof(req))) {
		instance_dev_err(instance, "start: copy args from user failed\n");
		return -EFAULT;
	}

	return instance_start(instance, &req);
}

/* ioctl dispatch table for the per-instance fd (sandbox_instance_ioctl). */
static struct sandbox_cmd insn_cmds[] = {
	{ "add_cpu", VQTBOX_SANDBOX_ADD_CPU, instance_ioctl_add_cpu },
	{ "add_mem", VQTBOX_SANDBOX_ADD_MEM, instance_ioctl_add_mem },
	{ "start", VQTBOX_SANDBOX_START, instance_ioctl_start },
};

static long
sandbox_instance_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct virtio_qtbox_sandbox_instance *instance = file->private_data;
	long ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(insn_cmds); i++) {
		if (insn_cmds[i].cmd == cmd)
			break;
	}

	if (i == ARRAY_SIZE(insn_cmds)) {
		instance_dev_err(instance, "unsupport cmd:0x%x\n", cmd);
		return -ENOENT;
	}

	instance_dev_info(instance, "Begin %s\n", insn_cmds[i].name);
	ret = insn_cmds[i].func(instance, (void __user *)arg);
	instance_dev_info(instance, "Finish %s, ret=%ld\n", insn_cmds[i].name, ret);

	return ret;
}

/*
 * Send the SHUTDOWN command for @instance to the device.  Returns 0 on
 * success, a negative errno, or the negated maintenance status (via
 * log_if_maintenance()) when the device asks for a retry.
 */
static long instance_submit_stop(struct virtio_qtbox_sandbox_instance *instance)
{
	struct virtio_qtbox_sandbox_vm_instance_stop_flf stop_flf = {};
	struct virtio_qtbox_sandbox_vm_cmd_result_comm result = {};
	long rc;

	stop_flf.sid = cpu_to_le64(instance->sid);

	rc = virtio_qtbox_do_cmd(sandbox2qtbox(instance->sandbox),
				 VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				 VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_SHUTDOWN,
				 &stop_flf, sizeof(stop_flf), NULL, 0,
				 &result, sizeof(result));
	if (rc != 0) {
		instance_dev_err(instance, "stop failed, ret = %ld\n", rc);
		return -EINVAL;
	}

	rc = le32_to_cpu(result.rv);
	if (rc == 0)
		return 0;

	return log_if_maintenance(instance, "cmd: stop fail", rc);
}

/*
 * Tear down the instance's console (if any) and send the UNDEFINE
 * command to the device.  Returns 0 on success, a negative errno, or
 * the negated maintenance status when the device asks for a retry.
 */
static long instance_submit_undefine(struct virtio_qtbox_sandbox_instance *instance)
{
	struct virtio_qtbox_sandbox_vm_instance_undefine_flf undefine_flf = {};
	struct virtio_qtbox_sandbox_vm_cmd_result_comm result = {};
	long rc;

	if (instance->console_dev)
		virtio_qtbox_sandbox_console_uninit(instance);

	undefine_flf.sid = cpu_to_le64(instance->sid);

	rc = virtio_qtbox_do_cmd(sandbox2qtbox(instance->sandbox),
				 VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				 VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_UNDEFINE,
				 &undefine_flf, sizeof(undefine_flf), NULL, 0,
				 &result, sizeof(result));
	if (rc != 0) {
		instance_dev_err(instance, "undefine failed, ret = %ld\n", rc);
		return -EINVAL;
	}

	rc = le32_to_cpu(result.rv);
	if (rc == 0)
		return 0;

	return log_if_maintenance(instance, "cmd: undefine fail", rc);
}

/*
 * Final host-side teardown of an instance: unlink it from the sandbox
 * list, unpin its memory, return its CPUs to the pool, release its cid
 * and free all allocations.  Called only after the device-side stop and
 * undefine commands have succeeded.
 */
static void
sandbox_instance_clear(struct virtio_qtbox_sandbox_instance *instance)
{
	mutex_lock(&instance->sandbox->instances_mutex);
	list_del(&instance->node);
	mutex_unlock(&instance->sandbox->instances_mutex);

	instance_del_mem_ranges(instance);
	instance_del_cpus(instance);

	atomic_dec(&total_instances);
	/* cid is 0 for a never-started instance; clearing bit 0 is benign
	 * as long as cid 0 is never assigned — TODO confirm. */
	clear_bit(instance->cid, cid_bits);
	free_cpumask_var(instance->siblings);
	free_cpumask_var(instance->cpus);
	kfree(instance);
}

/*
 * Delayed-release step 1: retry the device STOP command for an instance
 * whose release previously hit the maintenance status.  On success the
 * release state advances to UNDEFINE; on another maintenance status the
 * work is re-queued.  Returns the stop command's result (0 when this
 * step is already complete).
 */
static int
instance_handle_stop_task(struct virtio_qtbox *qtbox,
			  struct virtio_qtbox_sandbox_instance *instance)
{
	int rc;

	if (instance->release_state != SANDBOX_INTANCE_TODO_STOP)
		return 0;

	rc = instance_submit_stop(instance);
	if (rc == 0) {
		instance->release_state = SANDBOX_INTANCE_TODO_UNDEFINE;
		instance_dev_info(instance, "release wq: STOP task done\n");
	} else if (rc == -VIRTIO_QTBOX_SANDBOX_VM_ERR_OPS_STATUS) {
		/* Device still in maintenance: try again later. */
		queue_delayed_work(qtbox->release_wq, &instance->release_work,
				   msecs_to_jiffies(RELEASE_WORK_TIME_SET));
	} else {
		instance_dev_info(instance,
			"release wq: STOP task return other ERR %d\n", rc);
	}

	return rc;
}

/*
 * Delayed-release step 2: retry the device UNDEFINE command.  On
 * another maintenance status the work is re-queued.  Returns the
 * command's result (0 when this step is not pending).
 */
static int
instance_handle_undefine_task(struct virtio_qtbox *qtbox,
			      struct virtio_qtbox_sandbox_instance *instance)
{
	int rc;

	if (instance->release_state != SANDBOX_INTANCE_TODO_UNDEFINE)
		return 0;

	rc = instance_submit_undefine(instance);
	if (rc == 0) {
		instance_dev_info(instance, "release wq: UNDEFINE task done\n");
	} else if (rc == -VIRTIO_QTBOX_SANDBOX_VM_ERR_OPS_STATUS) {
		/* Device still in maintenance: try again later. */
		queue_delayed_work(qtbox->release_wq, &instance->release_work,
				   msecs_to_jiffies(RELEASE_WORK_TIME_SET));
	} else {
		instance_dev_info(instance,
			"release wq: UNDEFINE task return other ERROR %d\n", rc);
	}

	return rc;
}

/*
 * Delayed work handler that retries the stop/undefine sequence for an
 * instance whose release hit the maintenance status.  Once both steps
 * succeed the host-side resources are cleared.
 *
 * Fix: dropped the dead `work == NULL` check — the workqueue core never
 * invokes a handler with a NULL work pointer.
 */
static void
sandbox_instance_retry_stop_work_handler(struct work_struct *work)
{
	struct virtio_qtbox_sandbox_instance *instance;
	struct virtio_qtbox *qtbox;
	int ret;

	instance = container_of(work, struct virtio_qtbox_sandbox_instance,
				release_work.work);
	qtbox = sandbox2qtbox(instance->sandbox);

	ret = instance_handle_stop_task(qtbox, instance);
	if (ret != 0)
		return;

	ret = instance_handle_undefine_task(qtbox, instance);
	if (ret != 0)
		return;

	cancel_delayed_work(&instance->release_work);
	instance_dev_info(instance,
			"resource cleared successfully after maintenance\n");
	sandbox_instance_clear(instance);
}

/*
 * If a stop/undefine command failed with the maintenance status, record
 * which step to retry (@state) and schedule the delayed release worker.
 * Any other error is left for the caller to handle.
 */
static void release_cmd_ret_check(int ret, int state,
				struct virtio_qtbox *qtbox,
				struct virtio_qtbox_sandbox_instance *instance)
{
	if (ret == -VIRTIO_QTBOX_SANDBOX_VM_ERR_OPS_STATUS) {
		instance->release_state = state;
		instance_dev_info(instance,
				  "release failed, try it again later\n");
		INIT_DELAYED_WORK(&instance->release_work,
				  sandbox_instance_retry_stop_work_handler);
		queue_delayed_work(qtbox->release_wq, &instance->release_work,
				   msecs_to_jiffies(RELEASE_WORK_TIME_SET));
	}
}
/*
 * .release() for the per-instance fd: stop the VM on the device,
 * undefine it, then free the host-side resources.  When the device
 * reports the maintenance status, release_cmd_ret_check() queues a
 * delayed worker that retries the remaining steps later.
 */
static int sandbox_instance_release(struct inode *inode, struct file *file)
{
	struct virtio_qtbox_sandbox_instance *instance = file->private_data;
	struct virtio_qtbox *qtbox;
	int ret;

	/* private_data is cleared on aborted define paths. */
	if (!instance) {
		pr_info("instance is NULL, no need to release again\n");
		return 0;
	}

	qtbox = sandbox2qtbox(instance->sandbox);

	ret = instance_submit_stop(instance);
	if (ret < 0) {
		/* NOTE(review): for errors other than the maintenance status
		 * the instance is neither retried nor freed here — confirm
		 * this leak is intentional. */
		release_cmd_ret_check(ret, SANDBOX_INTANCE_TODO_STOP, qtbox,
				      instance);
		return ret;
	}

	ret = instance_submit_undefine(instance);
	if (ret < 0) {
		release_cmd_ret_check(ret, SANDBOX_INTANCE_TODO_UNDEFINE, qtbox,
				      instance);
		return ret;
	}

	sandbox_instance_clear(instance);

	return 0;
}

/*
 * .poll() for the per-instance fd: signal EPOLLHUP once the instance
 * has a pending event.
 */
static __poll_t sandbox_instance_poll(struct file *file, poll_table *wait)
{
	struct virtio_qtbox_sandbox_instance *instance = file->private_data;

	poll_wait(file, &instance->eventq, wait);

	return instance->has_event ? EPOLLHUP : 0;
}

/* File operations backing the anon-inode fd returned per instance. */
static const struct file_operations instance_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.poll = sandbox_instance_poll,
	.unlocked_ioctl = sandbox_instance_ioctl,
	.release = sandbox_instance_release,
};

static long sandbox_define_instance(struct virtio_qtbox_sandbox *sandbox,
				    struct vqtbox_sandbox_define *define)
{
	struct virtio_qtbox *qtbox = sandbox2qtbox(sandbox);
	struct virtio_qtbox_sandbox_instance *instance;
	struct virtio_qtbox_sandbox_vm_instance_define_flf define_flf = {};
	struct virtio_qtbox_sandbox_vm_cmd_result_comm result = {};
	int instance_fd;
	struct file *instance_file;
	int rc;

	rc = define_param_check(sandbox, define);
	if (rc < 0)
		return rc;

	rc = -ENOMEM;
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		dev_err(sandbox->misc_dev.this_device, "alloc sandbox_instance failed\n");
		return rc;
	}

	if (!zalloc_cpumask_var(&instance->siblings, GFP_KERNEL)) {
		dev_err(sandbox->misc_dev.this_device, "alloc siblings map failed\n");
		goto free_insn;
	}

	if (!zalloc_cpumask_var(&instance->cpus, GFP_KERNEL)) {
		dev_err(sandbox->misc_dev.this_device, "alloc cpus map failed\n");
		goto free_siblings;
	}

	rc = get_unused_fd_flags(O_CLOEXEC);
	if (rc < 0) {
		dev_err(sandbox->misc_dev.this_device, "failed to get an unused fd\n");
		goto free_cpus;
	}
	instance_fd = rc;

	instance_file = anon_inode_getfile("instance", &instance_fops, instance, O_RDWR);
	if (IS_ERR(instance_file)) {
		dev_err(sandbox->misc_dev.this_device, "failed to alloc anon inode\n");
		rc = PTR_ERR(instance_file);
		goto free_fd;
	}

	define_flf.image_format = cpu_to_le64(define->image_format);
	define_flf.launch_mode = cpu_to_le64(define->launch_mode);

	rc = virtio_qtbox_do_cmd(qtbox, VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
				 VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_DEFINE,
				 &define_flf, sizeof(define_flf), NULL, 0,
				 &result, sizeof(result));
	if (rc != 0) {
		dev_err(sandbox->misc_dev.this_device,
			"Error in define_instance, rc=%d\n", rc);
		rc = -EINVAL;
		goto free_file;
	}

	rc = le32_to_cpu(result.rv);
	if (rc != 0) {
		if (rc == VIRTIO_QTBOX_SANDBOX_VM_ERR_OPS_STATUS) {
			dev_info(sandbox->misc_dev.this_device,
				"maintenance state: shield enclave operations");
		} else {
			dev_err(sandbox->misc_dev.this_device,
				"cmd : Error in define_instance, rc=%d\n", rc);
		}
		rc = -rc;
		goto free_file;
	}

	atomic_inc(&total_instances);

	INIT_LIST_HEAD(&instance->mem_list);
	instance->mm = current->mm;
	instance->has_event = false;
	init_waitqueue_head(&instance->eventq);
	instance->sid = le64_to_cpu(result.sid);
	if ((define->launch_mode == VQTBOX_SANDBOX_VM_DEBUG_MODE) &&
		virtio_qtbox_sandbox_console_init(instance) < 0) {
		goto free_file;
	}

	mutex_lock(&sandbox->instances_mutex);
	list_add(&instance->node, &sandbox->instances_list);
	mutex_unlock(&sandbox->instances_mutex);
	instance->sandbox = sandbox;
	define->sid = instance->sid;

	fd_install(instance_fd, instance_file);

	return instance_fd;

free_file:
	instance_file->private_data = NULL;
	fput(instance_file);
free_fd:
	put_unused_fd(instance_fd);
free_cpus:
	free_cpumask_var(instance->cpus);
free_siblings:
	free_cpumask_var(instance->siblings);
free_insn:
	kfree(instance);
	return rc;
}

/*
 * VQTBOX_SANDBOX_GET_CAP handler: lazily query the device capability
 * set on first use, then copy the cached capabilities to userspace.
 */
static long sandbox_ioctl_get_cap(void *priv, void __user *arg)
{
	struct virtio_qtbox_sandbox *sandbox = priv;
	long ret;

	/* Not to get cap yet */
	if (!sandbox_cap_inited(sandbox)) {
		ret = sandbox_get_cap_ioctl(sandbox);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(arg, &sandbox->cap, sizeof(sandbox->cap)))
		return -EFAULT;

	return 0;
}

/*
 * Define a new sandbox instance from userspace-supplied parameters.
 * On success returns the new instance fd (already installed in the
 * caller's fd table by sandbox_define_instance()); on failure returns
 * a negative errno.
 */
static long sandbox_ioctl_define_instance(void *priv, void __user *arg)
{
	struct virtio_qtbox_sandbox *sandbox = priv;
	struct vqtbox_sandbox_define define = {};
	long ret;

	/* Capability exchange is a prerequisite for defining instances. */
	if (!sandbox_cap_inited(sandbox)) {
		dev_err(sandbox->misc_dev.this_device,
			"define_instance: Need to get_cap first\n");
		return -EFAULT;
	}

	if (copy_from_user(&define, arg, sizeof(define))) {
		dev_err(sandbox->misc_dev.this_device,
			"define_instance: copy args from user failed\n");
		return -EFAULT;
	}

	ret = sandbox_define_instance(sandbox, &define);
	if (ret < 0)
		return ret;

	/* Copy the result (including the assigned sid) back to userspace. */
	if (copy_to_user(arg, &define, sizeof(define))) {
		dev_err(sandbox->misc_dev.this_device,
			"define_instance: copy args to user failed\n");
		/*
		 * The fd was already published via fd_install() inside
		 * sandbox_define_instance(), so the fd table owns the only
		 * file reference.  close_fd() detaches the entry and drops
		 * that reference, which triggers .release().
		 *
		 * The previous fget()/double-fput()/put_unused_fd() sequence
		 * was wrong: it over-dropped the file reference while leaving
		 * a dangling fd-table entry (use-after-free), and
		 * put_unused_fd() must never be used on an installed fd.
		 */
		close_fd(ret);
		return -EFAULT;
	}

	return ret;
}

/* ioctl dispatch table: human-readable name, ioctl number, handler. */
static struct sandbox_cmd dev_cmds[] = {
	{ "get_sandbox_cap", VQTBOX_SANDBOX_GET_CAP, sandbox_ioctl_get_cap },
	{ "define_instance", VQTBOX_SANDBOX_DEFINE, sandbox_ioctl_define_instance },
};

static long sandbox_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct virtio_qtbox_sandbox *sandbox = filp2sandbox(file);
	long ret;
	int i, array_size;

	array_size = ARRAY_SIZE(dev_cmds);
	for (i = 0; i < array_size; i++) {
		if (dev_cmds[i].cmd == cmd)
			break;
	}

	if (i == array_size) {
		dev_err(sandbox->misc_dev.this_device, "unsupport cmd:0x%x\n", cmd);
		return -ENOENT;
	}

	dev_info(sandbox->misc_dev.this_device, "Begin %s\n", dev_cmds[i].name);
	ret = dev_cmds[i].func(sandbox, (void __user *)arg);
	dev_info(sandbox->misc_dev.this_device, "Finish %s, ret=%ld\n", dev_cmds[i].name, ret);

	return ret;
}

/* File operations for the qtbox_service%d misc device (ioctl-only). */
static const struct file_operations sandbox_fops = {
	.owner          = THIS_MODULE,
	.llseek         = noop_llseek,
	.unlocked_ioctl = sandbox_ioctl,
};

/*
 * Waiters only need waking once the instance has left the "normal"
 * states (DEFINED or RUNNING); any other state is a notable event.
 */
static inline bool needto_wakeup_event_poller(uint64_t instance_state)
{
	switch (instance_state) {
	case VIRTIO_QTBOX_SANDBOX_VM_STATE_RUNNING:
	case VIRTIO_QTBOX_SANDBOX_VM_STATE_DEFINED:
		return false;
	default:
		return true;
	}
}
void sandbox_handle_event(struct virtio_qtbox *qtbox, uint32_t event)
{
	switch (event) {
		case VIRTIO_QTBOX_SANDBOX_VM_E_CHECK_STATE: {
			struct virtio_qtbox_sandbox_vm_instance_info_flf info_flf = {};
			struct virtio_qtbox_sandbox_vm_instance_info_result result = {};
			struct virtio_qtbox_sandbox_instance *instance;
			int ret;

			mutex_lock(&qtbox->sandbox.instances_mutex);

			list_for_each_entry(instance, &qtbox->sandbox.instances_list, node) {
				info_flf.sid = cpu_to_le64(instance->sid);

				ret = virtio_qtbox_do_cmd(qtbox, VIRTIO_QTBOX_SANDBOX_VM_SERVICE,
							  VIRTIO_QTBOX_SANDBOX_VM_S_INSTANCE_INFO,
							  &info_flf, sizeof(info_flf), NULL, 0,
							  &result, sizeof(result));
				if (ret != 0)
					instance_dev_err(instance, "info failed, ret=%d\n", ret);

				if (ret != 0 || needto_wakeup_event_poller(le64_to_cpu(result.state))) {
					instance->has_event = true;
					wake_up_interruptible(&instance->eventq);
				}
			}

			mutex_unlock(&qtbox->sandbox.instances_mutex);
			break;
		}
		default:
			dev_err(&qtbox->vdev->dev, "unknown event: %u\n", event);
			break;
	}
}

static DEFINE_IDA(qtbox_sandbox_index_ida);

int virtio_qtbox_sandbox_vm_init(struct virtio_qtbox_sandbox *sandbox)
{
	int ret;

	ret = ida_alloc(&qtbox_sandbox_index_ida, GFP_KERNEL);
	if (ret < 0)
		return ret;

	sandbox_cpu_sibling_map = kcalloc(nr_cpu_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!sandbox_cpu_sibling_map) {
		ret = -ENOMEM;
		pr_err("Error in alloc sandbox cpu sibling\n");
		goto err_map;
	}

	mutex_init(&sandbox->instances_mutex);
	INIT_LIST_HEAD(&sandbox->instances_list);

	sandbox->index = ret;
	if (snprintf(sandbox->misc_name, sizeof(sandbox->misc_name),
				 "qtbox_service%d", sandbox->index) < 0)
		goto err_misc;

	memset(&sandbox->misc_dev, 0, sizeof(sandbox->misc_dev));
	sandbox->misc_dev.minor = MISC_DYNAMIC_MINOR,
	sandbox->misc_dev.name = sandbox->misc_name,
	sandbox->misc_dev.fops = &sandbox_fops,
	sandbox->misc_dev.mode = 0660, /* file permission: rw-rw---- */

	ret = misc_register(&sandbox->misc_dev);
	if (ret < 0) {
		pr_err("Error in misc dev (%s) register [ret=%d]\n",
		       sandbox->misc_name, ret);
		goto err_misc;
	}
	pr_info("Init misc dev (%s) success!\n", sandbox->misc_name);

	return 0;

err_misc:
	mutex_destroy(&sandbox->instances_mutex);
	kfree(sandbox_cpu_sibling_map);
	sandbox_cpu_sibling_map = NULL;
err_map:
	ida_free(&qtbox_sandbox_index_ida, sandbox->index);
	return ret;
}

int virtio_qtbox_sandbox_vm_uninit(struct virtio_qtbox_sandbox *sandbox)
{
	misc_deregister(&sandbox->misc_dev);
	ida_free(&qtbox_sandbox_index_ida, sandbox->index);
	sandbox_teardown_cpu_pool();
	kfree(sandbox_cpu_sibling_map);
	sandbox_cpu_sibling_map = NULL;
	pr_info("Uninit misc dev (%s) success!\n", sandbox->misc_name);
	return 0;
}
