/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: provide um vnotify
 * Author: liwugui
 * Create: 2024-04-20
 */

#include "dfx.h"
#include "um_vnotify.h"

#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/task_struct_extend.h>
#include <linux/compiler.h>
#include <linux/miscdevice.h>
#include <linux/sched/clock.h>
#include <linux/rtos/hpvisor.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/cpumask.h>

#include <asm/memory.h>

#include "../../kernel/rtos_fast_svc.h"

#define CHN_NAME_LEN_USER 32 /* max channel name length accepted from user space */
#define CHN_NAME_LEN_KER 37  /* user name + "_chnX" suffix + NUL (see um attach) */

/* All attach flag bits accepted from user space. */
#define HPVISOR_CHN_FLAGS_SET \
	(HPVISOR_ATTACH_CREATE_BIT | HPVISOR_ATTACH_SHARED_BIT | HPVISOR_ATTACH_PERSISTENT_BIT \
	| HPVISOR_ATTACH_FAST_BIT | HPVISOR_ATTACH_RECEIVE_BIT)

/* Layout order of the per-stride shared-memory fields. */
enum {
	VNOTIFY_CHN_STAT_OFFSET,
	VNOTIFY_RQ_OFFSET
};

/*
 * A_SIDE: shared send, no-shared first attach
 * B_SIDE: shared wait, no-shared second attach
 * NO_SIDE: channel not allocated
 */
enum {
	STATUS_A_SIDE,
	STATUS_B_SIDE,
	STATUS_NO_SIDE
};

/* Direction of a task's attachment to a channel. */
#define UM_CHN_DIR_UNDEF (0)
#define UM_CHN_DIR_WAIT (1 << 0)
#define UM_CHN_DIR_SEND (1 << 1)

/* Per-task record binding an eventfd to an attached channel. */
struct task_chn_node {
	int fd;                /* eventfd returned to user space */
	struct chn_data *data; /* the attached channel */
	struct list_head list; /* linked into the task's chns_struct list */

	/* for um mode */
	int um_chn_side; /* attach side: STATUS_A_SIDE or STATUS_B_SIDE */
	int um_chn_dir; /* UM_CHN_DIR_UNDEF / UM_CHN_DIR_WAIT / UM_CHN_DIR_SEND */
};

/* Per-channel bookkeeping wrapped around the shared chn_data_t. */
struct chn_data {
	struct chn_data_t data_t;

	char name[CHN_NAME_LEN_KER];       /* kernel-side channel name */
	struct hpvisor_chn_xid chn_xid;    /* first attach xid */
	struct hpvisor_chn_xid um_chn_xid;  /* second attach xid */
	struct vnotify_stride *stride;     /* owning stride */
	int cpu_id;                        /* pinned notify cpu, -1: use sender's cpu */
	int type;                          /* STATUS_A_SIDE / STATUS_B_SIDE / STATUS_NO_SIDE */
};

/* One stride: a fixed-size slab of channels plus its shared memory. */
struct vnotify_stride {
	u32 idx;     /* position in vnotify.strides_addr_arr */
	u32 chn_nr;  /* channels in this stride (capped by rq capacity) */
	u32 page_nr; /* backing pages */
	struct page **pages;
	struct hpvisor_chn_state *chst; /* vmapped channel-state field */
	struct chn_data *chns;          /* channel array */
	uint64_t **rtx_rq;              /* per-cpu ring-queue storage */
};

/* Initialization parameters: channels per stride and stride limit. */
struct init_info {
	u32 stride_chn_nr;
	u32 max_stride_nr;
};

/* Global um-vnotify state; single instance `vnotify` below. */
struct vnotify_data {
	bool inited;
	struct vnotify_data_t vdata_t;

	unsigned int max_cpus; /* cpus covered by per-cpu ring queues */
	u32 stride_chn_nr;     /* channels per stride */
	u32 max_stride_nr;     /* upper bound on stride count */
	u32 stride_page_nr;    /* pages backing one stride */

	char *vnotify_ctrl_mem; /* vmapped control memory */
	struct hpvisor_ring_queue *rq_headers; /* per (stride, cpu) rq headers */

	u32 ctrl_mem_page_nr;
	struct page **ctrl_mem_pages;

	unsigned long *stride_rq_bitmap; /* one pending bit per (stride, cpu) */
	struct vnotify_stride **strides_addr_arr; /* published strides */
	u32 effective_stride_count; /* number of published strides */

	struct mutex stride_lock; /* guards stride/channel allocation and attach */
};

/* Forward declaration; presumably defined later in this file. */
static const struct vnotify_ioctl_operations um_vnotify_oper;

/* The single global vnotify instance; stride_lock is statically initialized. */
static struct vnotify_data vnotify = {
	.stride_lock = __MUTEX_INITIALIZER(vnotify.stride_lock)
};

/* True when vnotify is configured in UM mode. */
bool vnotify_um_mode(void)
{
	return vnotify.vdata_t.mode == UM_MODE;
}

/* Take the stride lock for external callers mutating channel tables. */
void um_vnotify_chns_lock(void)
{
	mutex_lock(&vnotify.stride_lock);
}

/* Release the stride lock taken by um_vnotify_chns_lock(). */
void um_vnotify_chns_unlock(void)
{
	mutex_unlock(&vnotify.stride_lock);
}

/*
 * Return the stride at @stride_idx, or NULL when the index is not yet
 * effective. Lock-free: the smp_rmb() pairs with the smp_wmb() in
 * vnotify_stride_alloc() so a published count implies a non-NULL slot.
 */
static struct vnotify_stride *get_vnotify_stride(u32 stride_idx)
{
	if (stride_idx >= READ_ONCE(vnotify.effective_stride_count))
		return NULL;
	/*
	 * Ensure that the value of stride_idx is less than the value of
	 * vnotify.effective_stride_count, access to the strides_addr_arr
	 * need done after this operation
	 */
	smp_rmb();
	return vnotify.strides_addr_arr[stride_idx];
}

/* Map a global channel id to the stride that owns it (NULL if absent). */
static inline struct vnotify_stride *id_to_stride(u32 id)
{
	u32 stride_idx = id / vnotify.stride_chn_nr;

	return get_vnotify_stride(stride_idx);
}

/* Map a global channel id to its chn_data within a known @stride. */
static inline struct chn_data *id_to_data_stride(struct vnotify_stride *stride, u32 id)
{
	return stride->chns + (id % vnotify.stride_chn_nr);
}

/*
 * Map a global channel id to its chn_data, or NULL when the id falls
 * in a stride that is not (yet) effective.
 */
static inline struct chn_data *id_to_data(u32 id)
{
	struct vnotify_stride *stride;

	stride = id_to_stride(id);
	if (unlikely(!stride))
		return NULL;
	/* reuse the single in-stride mapping helper instead of duplicating it */
	return id_to_data_stride(stride, id);
}

/* In-stride index of @data (inverse of id_to_data_stride). */
static inline u32 data_to_id_stride(struct vnotify_stride *stride, struct chn_data *data)
{
	return data - stride->chns;
}

/* Global channel id of @data: stride base plus in-stride index. */
static inline u32 data_to_id(struct chn_data *data)
{
	u32 data_idx = data - data->stride->chns;

	return data->stride->idx * vnotify.stride_chn_nr + data_idx;
}

/* A-side (host field) attach count encoded in @status; negative means free. */
static inline s16 status_a_side_cnt(u64 status)
{
	return (status >> STATUS_HOST_SHIFT) & CHN_CNT_MASK;
}

/* B-side (guest field) attach count encoded in @status; negative means free. */
static inline s16 status_b_side_cnt(u64 status)
{
	return (status >> STATUS_GUEST_SHIFT) & CHN_CNT_MASK;
}

/*
 * Drop one attach reference of side @type from @data's status word.
 * Returns the new status, -ENOENT when that side has no attachments,
 * or -EINVAL for an unknown @type. Performs a non-atomic
 * read-modify-write, so the caller must hold the chn_data lock.
 */
static s64 chn_detach(struct chn_data *data, int type)
{
	s64 status;

	status = chn_status_get(&data->data_t);
	switch (type) {
	case STATUS_A_SIDE: {
		if (status_a_side_cnt(status) <= 0)
			return -ENOENT;
		status -= (1 << STATUS_HOST_SHIFT);
		break;
	}
	case STATUS_B_SIDE: {
		if (status_b_side_cnt(status) <= 0)
			return -ENOENT;
		status -= (1 << STATUS_GUEST_SHIFT);
		break;
	}
	default: {
		hpvisor_err("detach: type %d\n", type);
		return -EINVAL;
	}
	}
	chn_status_set(&data->data_t, status);
	return status;
}

/*
 * Add one attach reference of side @type to @data's status word. A
 * negative side count (the LLONG_MAX "free" pattern set by
 * chn_data_free) is replaced with 1 on first attach. Returns the new
 * status, -ERANGE when the side counter is saturated, or -EINVAL for
 * an unknown @type. Caller must hold the chn_data lock.
 */
static s64 chn_attach(struct chn_data *data, int type)
{
	s64 status;

	status = chn_status_get(&data->data_t);
	switch (type) {
	case STATUS_A_SIDE: {
		if (unlikely(status_a_side_cnt(status) == CHN_CNT_MAX)) {
			hpvisor_err("out of range, status:0x%llx\n", status);
			return -ERANGE;
		}
		if (status_a_side_cnt(status) < 0)
			status = (status & ~STATUS_HOST_MASK) | (1 << STATUS_HOST_SHIFT);
		else
			status += (1 << STATUS_HOST_SHIFT);
		break;
	}
	case STATUS_B_SIDE: {
		if (unlikely(status_b_side_cnt(status) == CHN_CNT_MAX)) {
			hpvisor_err("out of range, status:0x%llx\n", status);
			return -ERANGE;
		}
		if (status_b_side_cnt(status) < 0)
			status = (status & ~STATUS_GUEST_MASK) | (1 << STATUS_GUEST_SHIFT);
		else
			status += (1 << STATUS_GUEST_SHIFT);
		break;
	}
	default: {
		hpvisor_err("attach: Unsupport status type %d\n", type);
		return -EINVAL;
	}
	}
	chn_status_set(&data->data_t, status);
	return status;
}

/*
 * Wake pollers of @data's eventfd with @event. A temporary file
 * reference is taken under file_lock because the file may be detached
 * concurrently. Returns -ENOENT when no file is attached, otherwise
 * the result of hpvisor_file_wake_up().
 */
static int chn_data_wake_up(struct chn_data *data, __poll_t event)
{
	int ret;
	unsigned long flags;
	struct file *filp = NULL;

	/* must lock file for it maybe detached in host/guest detach */
	spin_lock_irqsave(&data->data_t.file_lock, flags);
	if (IS_ERR_OR_NULL(data->data_t.file)) {
		spin_unlock_irqrestore(&data->data_t.file_lock, flags);
		return -ENOENT;
	}
	filp = get_file(data->data_t.file);
	spin_unlock_irqrestore(&data->data_t.file_lock, flags);

	ret = hpvisor_file_wake_up(filp, event);
	fput(filp);

	hpvisor_vnotify_debug_notify("wake up channel %s\n", data->name);
	return ret;
}

/*
 * Release all normal channels of @stride: wake any still-attached
 * user (either side) with EPOLLHUP, drop the held file reference,
 * then free the channel array.
 */
static void chn_data_destroy(struct vnotify_stride *stride)
{
	int ret;
	struct chn_data *data = NULL;
	struct chn_data *end = NULL;

	if (!stride->chns)
		return;

	if (stride->idx == 0)
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_FIRST_STRIDE);
	else
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_OTHER_STRIDE);

	end = id_to_data_stride(stride, stride->chn_nr - 1);
	for (; data <= end; data++) {
		/* fix: second test checked the A side twice; it must check the B side */
		if (unlikely(data->data_t.file) &&
			((status_a_side_cnt(chn_status_get(&data->data_t)) > 0) ||
			(status_b_side_cnt(chn_status_get(&data->data_t)) > 0))) {
			ret = chn_data_wake_up(data, EPOLLHUP);
			if (ret != 0)
				hpvisor_err("um channel wake up failed, ret %d\n", ret);
			fput(data->data_t.file);
		}
	}
	kvfree(stride->chns);
	stride->chns = NULL;
}

/* Release a stride's vmapped state mapping, backing pages and rq pointer array. */
static void chn_page_destroy(struct vnotify_stride *stride)
{
	int i;

	if (stride->chst) {
		/* chst points at the base of the vmapped shared area */
		vunmap(stride->chst);
		stride->chst = NULL;
	}

	if (stride->pages) {
		for (i = 0; i < stride->page_nr; i++) {
			if (stride->pages[i])
				__free_page(stride->pages[i]);
		}

		kfree(stride->pages);
		stride->pages = NULL;
	}

	kfree(stride->rtx_rq);
	stride->rtx_rq = NULL;
}

/* Tear down one stride: channels first, then the backing pages. */
static void vnotify_stride_destroy(struct vnotify_stride *stride)
{
	chn_data_destroy(stride);
	chn_page_destroy(stride);
}

/* Unmap and free the control memory (rq bitmap, rq headers, backing pages). */
void vnotify_ctrl_mem_destroy(void)
{
	int i;

	if (vnotify.vnotify_ctrl_mem) {
		vunmap(vnotify.vnotify_ctrl_mem);
		vnotify.vnotify_ctrl_mem = NULL;
		/* both pointers aliased into the vmapped region above */
		vnotify.stride_rq_bitmap = NULL;
		vnotify.rq_headers = NULL;
	}

	if (vnotify.ctrl_mem_pages) {
		for (i = 0; i < vnotify.ctrl_mem_page_nr; i++) {
			if (vnotify.ctrl_mem_pages[i])
				__free_page(vnotify.ctrl_mem_pages[i]);
		}
		kfree(vnotify.ctrl_mem_pages);
		vnotify.ctrl_mem_pages = NULL;
	}
}

/* Free every published stride, the control memory, and mark vnotify uninitialized. */
static void vnotify_destroy(void)
{
	struct vnotify_stride *node = NULL;
	int i = 0;

	if (vnotify.strides_addr_arr) {
		for (; i < vnotify.effective_stride_count; i++) {
			node = vnotify.strides_addr_arr[i];
			if (!node) {
				hpvisor_err("can't get stride, i:%d\n", i);
				continue;
			}

			vnotify_stride_destroy(node);
			kfree(node);
		}
		kfree(vnotify.strides_addr_arr);
		vnotify.strides_addr_arr = NULL;
	}

	vnotify_ctrl_mem_destroy();

	vnotify.inited = false;
	vnotify.effective_stride_count = 0;
	vnotify.stride_page_nr = 0;
	hpvisor_vnotify_debug_common("um vnotify destroyed!\n");
}

/*
 * For external interfaces, must get_vnotify for protecting
 * vnotify has been freed by others.
 * Returns the live vdata_t, or NULL when the refcount already hit zero.
 */
static struct vnotify_data_t *get_vnotify(void)
{
	if (refcount_inc_not_zero(&vnotify.vdata_t.ref_count))
		return &vnotify.vdata_t;
	return NULL;
}

/* Drop a reference; the final put tears the whole vnotify state down. */
static void put_vnotify(void)
{
	if (refcount_dec_and_test(&vnotify.vdata_t.ref_count))
		vnotify_destroy();
}

/* @id is valid iff it falls inside an already-effective stride. */
static inline bool check_chn_id_valid(u32 id)
{
	return id < vnotify.effective_stride_count * vnotify.stride_chn_nr;
}

/* Shared-memory notify state slot of channel @id inside @stride. */
static inline struct hpvisor_chn_state *get_chn_state(struct vnotify_stride *stride, u32 id)
{
	return &stride->chst[id % vnotify.stride_chn_nr];
}

/* Zero the shared notify state of channel @id (clears pending/closed bits). */
static inline void chn_state_reset(struct vnotify_stride *stride, u32 id)
{
	memset(get_chn_state(stride, id), 0, sizeof(struct hpvisor_chn_state));
}

/* Deliver EPOLLIN to a channel whose pending bit was just consumed. */
static void handle_pending_chn(struct vnotify_stride *stride, u32 id)
{
	int ret;

	ret = chn_data_wake_up(id_to_data_stride(stride, id), EPOLLIN);
	if (ret != 0)
		hpvisor_err("channel wake up failed, ret %d\n", ret);
}

/*
 * IPI handler for um-mode vnotify. For every stride whose per-cpu
 * request bit is set on this cpu, drain the stride's ring queue and
 * wake each channel whose state transitions PENDING -> 0 (the CAS
 * guarantees a single wake-up per pending notification).
 */
void um_vnotify_handler(void)
{
	int id;
	u32 new_state;
	struct hpvisor_chn_state *chn_state = NULL;
	struct vnotify_data_t *vdata_t = get_vnotify();
	struct vnotify_stride *stride = NULL;
	u32 i = 0;
	int val;
	struct hpvisor_ring_queue *rq;
	struct vnotify_data *vdata;
	int cpu_id;
	int rq_bit;

	/* vnotify may already be torn down */
	if (!vdata_t)
		return;
	vdata = container_of(vdata_t, struct vnotify_data, vdata_t);

	cpu_id = smp_processor_id();
	hpvisor_vnotify_debug_notify("enter handle vnotify ipi request\n");
	while (i < vdata->effective_stride_count) {
		/* one request bit per (stride, cpu) pair */
		rq_bit = vnotify.max_cpus * i + cpu_id;
		val = test_and_clear_bit(rq_bit, vdata->stride_rq_bitmap);
		if (val == 0) {
			i++;
			continue;
		}

		stride = get_vnotify_stride(i);
		if (unlikely(!stride)) {
			hpvisor_err("can't get stride, i:%u\n", i);
			put_vnotify();
			return;
		}

		while (1) {
			rq = &vnotify.rq_headers[vnotify.max_cpus * stride->idx + cpu_id];
			id = ring_queue_dequeue(rq, stride->rtx_rq[cpu_id]);
			if (id < 0)
				break;
			/* skip ids that fall outside this stride's channel range */
			if (id - i * vdata->stride_chn_nr >= stride->chn_nr)
				continue;

			chn_state = get_chn_state(stride, id);
			new_state = 0;
			if (__sync_bool_compare_and_swap(&chn_state->state,
				HPVISOR_STATE_PENDING_BIT, new_state))
				handle_pending_chn(stride, id);
		}
		i++;
	}
	hpvisor_vnotify_debug_notify("exit handle vnotify ipi request\n");

	put_vnotify();
}

/*
 * Initialize the ring queue of @stride for cpu @cpu_idx, backed by a
 * field of @field_size bytes. Capacity is the largest power of two of
 * u64 slots that fits; all slots start as the empty marker (u64)-1.
 * Returns the capacity in entries.
 */
static uint32_t ring_queue_init(struct vnotify_stride *stride, size_t field_size,
	unsigned int cpu_idx)
{
	int i;
	uint16_t count_order = 0;
	unsigned long count_cur;
	uint32_t capacity;
	struct hpvisor_ring_queue *rq_header;

	/*
	 * The size of each channel in the ring queue is sizeof(uint64_t),
	 * Calculate the number of channels here.
	 */
	count_cur = field_size / sizeof(uint64_t);
	while ((count_cur >>= 1) != 0)
		count_order++;

	rq_header = &vnotify.rq_headers[stride->idx * vnotify.max_cpus + cpu_idx];
	rq_header_init(rq_header, count_order);

	capacity = 1U << count_order;
	for (i = 0; i < capacity; i++)
		stride->rtx_rq[cpu_idx][i] = (uint64_t)(-1);

	return capacity;
}

/*
 * Mark channel @id pending and kick the target cpu with an IPI.
 * Returns 0 when queued (or already pending, so no duplicate enqueue),
 * -EINVAL for an unknown id, -EPIPE when the channel is closed.
 */
static int um_vnotify_send(unsigned long id)
{
	u32 old_state;
	struct hpvisor_chn_state *chn_state = NULL;
	struct chn_data *data;
	int cpu_id, send_cpu_id;
	struct vnotify_stride *stride;
	struct hpvisor_ring_queue *rq;

	data = id_to_data(id);
	if (unlikely(!data)) {
		hpvisor_err("chn id %lu is invalid\n", id);
		return -EINVAL;
	}

	/*
	 * NOTE(review): preemption is re-enabled right away, so the task
	 * may migrate before cpu_id is used; it only serves as a fallback
	 * target when the channel has no pinned cpu — confirm intended.
	 */
	preempt_disable();
	cpu_id = smp_processor_id();
	preempt_enable();

	stride = data->stride;
	chn_state = get_chn_state(stride, id);
	if (unlikely(chn_state->state & HPVISOR_STATE_CLOSED_BIT))
		return -EPIPE;

	/* atomically set the pending bit; only the first setter enqueues */
	old_state = __sync_fetch_and_or(&chn_state->state, HPVISOR_STATE_PENDING_BIT);
	if ((old_state & HPVISOR_STATE_PENDING_BIT) != 0)
		return 0;

	send_cpu_id = data->cpu_id;
	if (send_cpu_id == -1)
		send_cpu_id = cpu_id;

	rq = &vnotify.rq_headers[stride->idx * vnotify.max_cpus + send_cpu_id];
	ring_queue_enqueue(rq, stride->rtx_rq[send_cpu_id], id);
	set_bit(stride->idx * vnotify.max_cpus + send_cpu_id, vnotify.stride_rq_bitmap);
	um_vnotify_send_ipi(send_cpu_id);

	return 0;
}

typedef bool (*match_fn)(const struct task_chn_node *, const void *);

/*
 * Walk @head and return the first node @match accepts, or NULL.
 * The walk never deletes entries, so the plain iterator is sufficient
 * (the _safe variant was unnecessary).
 */
static struct task_chn_node *find_task_chn_node(const struct list_head *head, match_fn match, const void *target)
{
	struct task_chn_node *node = NULL;

	list_for_each_entry(node, head, list) {
		if (match(node, target))
			return node;
	}
	return NULL;
}

/* Match on fd; callers pass the address of an int (see find_task_chn_node_by_fd). */
static inline bool chn_fd_match(const struct task_chn_node *node, const void *fd)
{
	/* fix: read through the type actually passed (int), not u32 */
	return node->fd == *(const int *)fd;
}

/* Match on global channel id; @id points at a u32. */
static inline bool chn_id_match(const struct task_chn_node *node, const void *id)
{
	return data_to_id(node->data) == *(u32 *)id;
}

/* Match on channel name; a NULL target never matches. */
static inline bool chn_name_match(const struct task_chn_node *node, const void *name)
{
	return name && strncmp(node->data->name, (const char *)name, CHN_NAME_LEN_KER) == 0;
}

/* Look up the current task's attachment node by eventfd. */
static inline struct task_chn_node *find_task_chn_node_by_fd(const struct list_head *head, int fd)
{
	return find_task_chn_node(head, chn_fd_match, &fd);
}

/* Look up the current task's attachment node by channel name. */
static inline struct task_chn_node *find_task_chn_node_by_name(const struct list_head *head, const char *name)
{
	return find_task_chn_node(head, chn_name_match, name);
}

/* Resolve @fd to the channel the current task attached it to, or NULL. */
static struct chn_data *fd_to_data(int fd)
{
	struct chn_data *data = NULL;
	struct task_chn_node *node = NULL;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;

	mutex_lock(&chns->lock);
	node = find_task_chn_node_by_fd(&chns->head, fd);
	if (node)
		data = node->data;
	mutex_unlock(&chns->lock);

	return data;
}

static int task_chn_node_add(struct list_head *head, struct chn_data *data, int fd,
							 int type, int flags)
{
	struct task_chn_node *node = NULL;

	node = kzalloc(sizeof(struct task_chn_node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->fd = fd;
	node->data = data;
	if (data->type != STATUS_NO_SIDE)
		node->um_chn_side = data->type;
	else
		node->um_chn_side = type;
	node->um_chn_dir = 0;

	if ((flags & HPVISOR_ATTACH_SHARED_BIT) &&
		(flags & HPVISOR_ATTACH_RECEIVE_BIT))
		node->um_chn_dir = UM_CHN_DIR_WAIT;
	if ((flags & HPVISOR_ATTACH_SHARED_BIT) &&
		!(flags & HPVISOR_ATTACH_RECEIVE_BIT))
		node->um_chn_dir = UM_CHN_DIR_SEND;

	/*
	 * shared chn: received attach should clear close bit
	 * no-shared chn: clear self chn close bit, opposite chn unchanged
	 */
	if (data->data_t.flags & HPVISOR_ATTACH_SHARED_BIT) {
		if (data->data_t.flags & HPVISOR_ATTACH_RECEIVE_BIT)
			__sync_fetch_and_and(&get_chn_state(data->stride, data_to_id(data))->state,
								~HPVISOR_STATE_CLOSED_BIT);
	} else {
		__sync_fetch_and_and(&get_chn_state(data->stride, data_to_id(data))->state,
							~HPVISOR_STATE_CLOSED_BIT);
	}

	list_add(&node->list, head);
	return 0;
}

/*
 * Reset @data to the unused state: status LLONG_MAX marks both side
 * counters as negative (free), xids and cpu pinning are cleared, and
 * the file pointer is detached under file_lock. Caller must hold the
 * chn_data lock.
 */
static void chn_data_free(struct chn_data *data)
{
	unsigned long flags;

	data->name[0] = 0;
	data->data_t.flags = 0;
	chn_status_set(&data->data_t, LLONG_MAX);
	data->chn_xid.pid = 0;
	data->chn_xid.uid = 0;
	data->chn_xid.gid = 0;
	data->um_chn_xid.pid = 0;
	data->um_chn_xid.uid = 0;
	data->um_chn_xid.gid = 0;
	data->cpu_id = -1;
	data->type = STATUS_NO_SIDE;

	/* must release old file after calling this func */
	spin_lock_irqsave(&data->data_t.file_lock, flags);
	data->data_t.file = NULL;
	spin_unlock_irqrestore(&data->data_t.file_lock, flags);

	/* last: mark the slot reusable for get_unused_chn_data() */
	data->data_t.alloced = 0;
}

/* Wake the waiting peer with EPOLLHUP once all senders have detached. */
static inline void um_side_close_chn(struct chn_data *data)
{
	int ret;

	/* notify guest, host app all detached */
	ret = chn_data_wake_up(data, EPOLLHUP);
	if (ret != 0)
		hpvisor_err("channel wake up failed, ret %d\n", ret);
}

/*
 * Drop one attach of @side from @data and run the close protocol:
 * free the channel entirely once both sides are gone; otherwise, for
 * non-persistent channels, set the close bit and/or wake the peer so
 * blocked users observe EPIPE / EPOLLHUP. Caller holds the chn_data
 * lock.
 */
static void um_put_chn_data(struct chn_data *data, int side, int dir)
{
	s64 status;

	status = chn_detach(data, side);
	if ((status_a_side_cnt(status) <= 0) && (status_b_side_cnt(status) <= 0)) {
		chn_data_free(data);
		return;
	}

	/* persistent channels survive until explicitly freed */
	if (data->data_t.flags & HPVISOR_ATTACH_PERSISTENT_BIT)
		return;

	/*
	 * shared chn: if receive detach, set close bit, opposite send will epipe;
	 *             if send all detach, wake up opposite, wait will epipe/EPOLLHUP.
	 */
	if ((data->data_t.flags & HPVISOR_ATTACH_SHARED_BIT)) {
		if (data->data_t.flags & HPVISOR_ATTACH_RECEIVE_BIT &&
			(status_b_side_cnt(status) <= 0))
			__sync_fetch_and_or(&get_chn_state(data->stride, data_to_id(data))->state,
								HPVISOR_STATE_CLOSED_BIT);
		if (!(data->data_t.flags & HPVISOR_ATTACH_RECEIVE_BIT) &&
			(status_a_side_cnt(status) <= 0))
			um_side_close_chn(data);
		return;
	}

	/* no-shared and no-persistent chns */
	if (((side == STATUS_A_SIDE) && (status_a_side_cnt(status) <= 0)) ||
		((side == STATUS_B_SIDE) && (status_b_side_cnt(status) <= 0))) {
		__sync_fetch_and_or(&get_chn_state(data->stride, data_to_id(data))->state,
			HPVISOR_STATE_CLOSED_BIT);
		if (dir == UM_CHN_DIR_SEND)
			um_side_close_chn(data);
	}
}

/* Detach @node's channel (side/dir recorded at attach) and close its fd. */
static void chn_data_um_detach(struct task_chn_node *node)
{
	struct chn_data *data = node->data;

	chn_data_lock(&data->data_t);
	um_put_chn_data(data, node->um_chn_side, node->um_chn_dir);
	chn_data_unlock(&data->data_t);

	close_fd(node->fd);
	hpvisor_vnotify_debug_syscall("id %u, name %s, flags:0x%x, status:0x%llx\n",
		data_to_id(data), data->name, data->data_t.flags, chn_status_get(&data->data_t));
}

/*
 * Initialize @stride: allocate and vmap its shared pages (one
 * channel-state field plus one ring-queue field per cpu), init the
 * per-cpu ring queues, then allocate the channel array (capped by the
 * ring-queue capacity). Returns 0 or -ENOMEM; on failure the page
 * resources are released via chn_page_destroy().
 */
static int vnotify_stride_init(struct vnotify_stride *stride)
{
	char *shm_addr = NULL;
	unsigned int field_pages;
	uint32_t rq_capacity;
	int ret = 0;
	unsigned int i;

	/* the stride's pages split into (max_cpus + 1) equal fields */
	field_pages = vnotify.stride_page_nr / (vnotify.max_cpus + 1);
	stride->page_nr = vnotify.stride_page_nr;

	stride->pages = kcalloc(stride->page_nr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!stride->pages))
		return -ENOMEM;

	for (i = 0; i < stride->page_nr; i++) {
		stride->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (unlikely(!stride->pages[i])) {
			ret = -ENOMEM;
			goto err;
		}
	}

	shm_addr = vmap(stride->pages, stride->page_nr, VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (unlikely(!shm_addr)) {
		ret = -ENOMEM;
		goto err;
	}

	stride->chst = (struct hpvisor_chn_state *)&shm_addr[0];
	stride->rtx_rq = kcalloc(vnotify.max_cpus, sizeof(uint64_t *), GFP_KERNEL);
	if (unlikely(!stride->rtx_rq)) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < vnotify.max_cpus; i++) {
		stride->rtx_rq[i] = (uint64_t *)&shm_addr[(i + VNOTIFY_RQ_OFFSET) *
			(field_pages << PAGE_SHIFT)];
		rq_capacity = ring_queue_init(stride, field_pages << PAGE_SHIFT, i);
	}

	stride->chn_nr = vnotify.stride_chn_nr;
	/* The chn_nr must be smaller than the capacity of rq. Otherwise, queue may overflow. */
	/* NOTE(review): assumes max_cpus >= 1, otherwise rq_capacity is uninitialized here */
	if (stride->chn_nr > rq_capacity)
		stride->chn_nr = rq_capacity;

	stride->chns = vzalloc(stride->chn_nr * sizeof(struct chn_data));
	if (unlikely(!stride->chns)) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < stride->chn_nr; i++) {
		/* LLONG_MAX status marks every channel as free (negative side counts) */
		chn_status_set(&stride->chns[i].data_t, LLONG_MAX);
		spin_lock_init(&stride->chns[i].data_t.lock);
		spin_lock_init(&stride->chns[i].data_t.file_lock);
		stride->chns[i].stride = stride;
		stride->chns[i].cpu_id = -1;
		stride->chns[i].type = STATUS_NO_SIDE;
	}

	return ret;
err:
	chn_page_destroy(stride);
	return ret;
}

/*
 * Except hpvisor_vnotify_init, the stride_lock needs to be locked
 * when this function is called in other scenarios.
 *
 * Allocate, initialize and publish a new stride. Publication pairs
 * smp_wmb() with the smp_rmb() in get_vnotify_stride() so lock-free
 * readers never observe a NULL slot. Returns 0 or a negative errno.
 */
static int vnotify_stride_alloc(struct vnotify_stride **stride_ret)
{
	struct vnotify_stride *stride;
	int ret;

	if (vnotify.effective_stride_count >= vnotify.max_stride_nr) {
		hpvisor_err("The number of strides has reached the maximum, max:%u\n",
			vnotify.max_stride_nr);
		return -E2BIG;
	}

	stride = kzalloc(sizeof(struct vnotify_stride), GFP_KERNEL);
	if (unlikely(!stride))
		return -ENOMEM;

	stride->idx = vnotify.effective_stride_count;
	ret = vnotify_stride_init(stride);
	if (ret != 0) {
		kfree(stride);
		return ret;
	}

	vnotify.strides_addr_arr[vnotify.effective_stride_count] = stride;
	/*
	 * Ensure that strides_addr_arr has been added to the node, and then
	 * add 1 to effective_stride_count. Otherwise, accessing the array in
	 * get_vnotify_stride () may get an NULL element.
	 */
	smp_wmb();
	WRITE_ONCE(vnotify.effective_stride_count, vnotify.effective_stride_count + 1);

	if (stride_ret)
		*stride_ret = stride;

	return 0;
}

/*
 * Claim the first free channel slot in @stride, initialize it with
 * @name/@flags/@type and take the first attach reference. Returns the
 * slot, ERR_PTR(-ENOSPC) when the stride has no free slot, or the
 * chn_attach error. Caller holds the stride lock.
 */
static struct chn_data *get_unused_chn_data(struct vnotify_stride *stride,
	const char *name, int flags, int type)
{
	s64 ret;
	struct chn_data *data;
	struct chn_data *end = id_to_data_stride(stride, stride->chn_nr - 1);

	if (stride->idx == 0)
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_FIRST_STRIDE);
	else
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_OTHER_STRIDE);
	while (data <= end) {
		if (!data->data_t.alloced) {
			data->data_t.alloced = 1;
			/* NOTE(review): isb() is an instruction barrier; a data
			 * barrier (smp_wmb/dmb) may be intended here — confirm */
			isb();
			data->data_t.flags = flags;
			data->type = type;
			chn_state_reset(stride, data_to_id_stride(stride, data));
			strncpy(data->name, name, CHN_NAME_LEN_KER - 1);

			ret = chn_attach(data, type);
			if (IS_ERR_VALUE(ret))
				return ERR_PTR(ret);

			if (type == STATUS_A_SIDE || type == STATUS_B_SIDE) {
				chn_data_lock(&data->data_t);
				data->chn_xid.pid = task_tgid_vnr(current);
				data->chn_xid.uid = from_kuid_munged(current_user_ns(), current_uid());
				data->chn_xid.gid = from_kgid_munged(current_user_ns(), current_gid());
				chn_data_unlock(&data->data_t);
			}
			return data;
		}
		data++;
	}
	return ERR_PTR(-ENOSPC);
}

/*
 * Find a free channel slot across all effective strides, allocating a
 * new stride when every existing one is full. Returns the claimed
 * slot or an ERR_PTR. Caller holds the stride lock.
 */
static struct chn_data *find_unused_chn_data(const char *name, int flags, int type)
{
	struct vnotify_stride *node = NULL;
	struct chn_data *data = NULL;
	int ret;
	int i = 0;

	for (; i < vnotify.effective_stride_count; i++) {
		node = vnotify.strides_addr_arr[i];
		if (!node) {
			hpvisor_err("can't get stride, i:%d\n", i);
			continue;
		}

		data = get_unused_chn_data(node, name, flags, type);
		/* only "stride full" keeps the search going; other errors propagate */
		if (IS_ERR(data) && data == ERR_PTR(-ENOSPC))
			continue;
		return data;
	}

	/* can't find a unused chn_data in all stride, we will alloc a new stride */
	ret = vnotify_stride_alloc(&node);
	if (ret != 0) {
		hpvisor_err("vnotify_stride_alloc fail, ret:%d\n", ret);
		return ERR_PTR(-ENOSPC);
	}

	return get_unused_chn_data(node, name, flags, type);
}

/*
 * Allocate a fresh channel for @name. Only valid when the caller
 * requested creation via HPVISOR_ATTACH_CREATE_BIT.
 */
static struct chn_data *alloc_new_chn_data(const char *name, int flags, int type)
{
	if (flags & HPVISOR_ATTACH_CREATE_BIT)
		return find_unused_chn_data(name, flags, type);

	return ERR_PTR(-EINVAL);
}

/*
 * Validate that an attach with @flags is compatible with existing
 * channel @data: all flags except the receive bit must match, a
 * shared channel admits only one waiter, and a no-shared channel at
 * most two processes. Returns 0 or -EINVAL.
 */
static int check_chn_data_flags(const struct chn_data *data, u32 flags)
{
	if (unlikely((flags & ~HPVISOR_ATTACH_RECEIVE_BIT) !=
		(data->data_t.flags & ~HPVISOR_ATTACH_RECEIVE_BIT))) {
		hpvisor_err("flags:0x%x is not equal, name:%s flags:0x%x status:0x%llx\n",
			flags, data->name, data->data_t.flags, chn_status_get(&data->data_t));
		return -EINVAL;
	}

	if ((flags & HPVISOR_ATTACH_SHARED_BIT) && (flags & HPVISOR_ATTACH_RECEIVE_BIT) &&
		(status_b_side_cnt(chn_status_get(&data->data_t))) > 0) {
		hpvisor_err("shared mode allows only one wait, name:%s flags:0x%x status:0x%llx\n",
			data->name, data->data_t.flags, chn_status_get(&data->data_t));
		return -EINVAL;
	}

	/* no-shared chn only permit two process */
	if ((!(flags & HPVISOR_ATTACH_SHARED_BIT)) &&
		(status_a_side_cnt(chn_status_get(&data->data_t)) > 0) &&
		(status_b_side_cnt(chn_status_get(&data->data_t)) > 0)) {
		hpvisor_err("no-shared chn only permit two process, name:%s flags:0x%x status:0x%llx\n",
			data->name, data->data_t.flags, chn_status_get(&data->data_t));
		return -EINVAL;
	}

	return 0;
}

/*
 * Find an already-allocated channel named @name in @stride and take
 * an additional attach reference on it. For no-shared channels the
 * side is inferred from which side is already attached. Returns the
 * channel, NULL when no slot in this stride matches, or an ERR_PTR
 * on flag-check / attach failure. Caller holds the stride lock.
 */
static struct chn_data *get_used_chn_data(struct vnotify_stride *stride,
	const char *name, int flags, int type)
{
	s64 ret;
	struct chn_data *data;
	struct chn_data *end = id_to_data_stride(stride, stride->chn_nr - 1);

	if (stride->idx == 0)
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_FIRST_STRIDE);
	else
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_OTHER_STRIDE);

	/* fix: include the last slot (<=); `<` made the final channel unfindable,
	 * inconsistent with get_unused_chn_data() and chn_data_destroy() */
	while (data <= end) {
		chn_data_lock(&data->data_t);
		if (!data->data_t.alloced || (strncmp(data->name, name, CHN_NAME_LEN_KER) != 0)) {
			chn_data_unlock(&data->data_t);
			data++;
			continue;
		}

		ret = check_chn_data_flags(data, flags);
		if (ret != 0) {
			chn_data_unlock(&data->data_t);
			return ERR_PTR(ret);
		}

		data->type = type;
		if (!(data->data_t.flags & HPVISOR_ATTACH_SHARED_BIT)) {
			/* second attach of a no-shared channel takes the opposite side */
			if (status_a_side_cnt(chn_status_get(&data->data_t)) > 0)
				data->type = STATUS_B_SIDE;
			if (status_b_side_cnt(chn_status_get(&data->data_t)) > 0)
				data->type = STATUS_A_SIDE;
		}
		ret = chn_attach(data, data->type);
		if (IS_ERR_VALUE(ret)) {
			chn_data_unlock(&data->data_t);
			return ERR_PTR(ret);
		}

		/* if guest has been attached and host is first attached */
		data->um_chn_xid.pid = task_tgid_vnr(current);
		data->um_chn_xid.uid = from_kuid_munged(current_user_ns(), current_uid());
		data->um_chn_xid.gid = from_kgid_munged(current_user_ns(), current_gid());

		chn_data_unlock(&data->data_t);
		return data;
	}
	return NULL;
}

/* Look up an allocated channel by name across all effective strides. */
static struct chn_data *find_used_chn_data(const char *name, int flags, int type)
{
	struct vnotify_stride *node;
	struct chn_data *data;
	int i = 0;

	for (; i < vnotify.effective_stride_count; i++) {
		node = vnotify.strides_addr_arr[i];
		if (!node) {
			hpvisor_err("can't get stride, i:%d\n", i);
			continue;
		}

		data = get_used_chn_data(node, name, flags, type);
		/* non-NULL is either the channel or an ERR_PTR; both end the search */
		if (data)
			return data;
	}
	return NULL;
}

/*
 * Pin channel @id's notification target to the cpu the caller is
 * running on. Silently does nothing when vnotify is gone or @id is
 * invalid.
 */
void vnotify_chn_set_cpu(u32 id)
{
	struct chn_data *data = NULL;

	if (unlikely(!get_vnotify()))
		return;
	if (unlikely(!check_chn_id_valid(id))) {
		put_vnotify();
		return;
	}

	data = id_to_data(id);
	if (unlikely(!data)) {
		hpvisor_err("chn id %u is invalid\n", id);
		put_vnotify();
		return;
	}

	chn_data_lock(&data->data_t);
	preempt_disable();
	data->cpu_id = smp_processor_id();
	preempt_enable();
	chn_data_unlock(&data->data_t);

	put_vnotify();
}

/*
 * host: vnotify stride_lock and process chns lock held, both mutex locks
 * guest: vnotify stride_lock held, mutex lock
 *
 * Allocate a new channel plus its eventfd; on success *data is set and
 * the fd is returned, otherwise the channel is freed and a negative
 * errno is returned.
 */
static int alloc_new_chn_data_and_file(struct chn_data **data, const char *name, int flags, int type)
{
	int fd, ret;
	struct chn_data *new_data;

	new_data = alloc_new_chn_data(name, flags, type);
	if (IS_ERR(new_data)) {
		ret = PTR_ERR(new_data);
		hpvisor_err("alloc new chn_data failed, ret:%d\n", ret);
		return ret;
	}

	fd = create_new_eventfd(data_to_id(new_data), get_efd_flags(flags), flags);
	if (unlikely(fd < 0)) {
		hpvisor_err("alloc fd failed, ret %d\n", fd);
		chn_data_free(new_data);
		return fd;
	}

	ret = new_chn_data_efd_attach(&new_data->data_t, fd);
	if (ret != 0) {
		chn_data_free(new_data);
		close_fd(fd);
		return ret;
	}

	*data = new_data;
	return fd;
}

/* Return the status of channel @id, or a negative errno for a bad id. */
static int vnotify_ioctl_query(u32 id)
{
	struct chn_data *data;

	if (unlikely(!check_chn_id_valid(id)))
		return -ENOENT;

	data = id_to_data(id);
	if (unlikely(!data)) {
		hpvisor_err("chn id %u is invalid\n", id);
		return -EINVAL;
	}

	return get_data_status(&data->data_t);
}

/* Translate an attached eventfd of the current task to its channel id. */
static int vnotify_ioctl_um_find(int efd)
{
	struct chn_data *data = fd_to_data(efd);

	if (unlikely(!data))
		return -EBADFD;
	return data_to_id(data);
}

/*
 * Attach the current task to channel @uname, creating the channel
 * when no matching one exists. Returns the attachment's eventfd or a
 * negative errno. Caller holds vnotify.stride_lock; chns->lock is
 * taken here.
 */
static int vnotify_ioctl_host_attach(int flags, char *uname)
{
	int fd;
	struct chn_data *data = NULL;
	struct task_chn_node *node = NULL;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;
	int type;

	if ((flags & ~HPVISOR_CHN_FLAGS_SET)) {
		hpvisor_vnotify_debug_syscall("flags is over range, flags:0x%x\n", flags);
		return -EINVAL;
	}

	mutex_lock(&chns->lock);
	/* if already attached, just return self fd */
	node = find_task_chn_node_by_name(&chns->head, uname);
	if (node) {
		fd = node->fd;
		goto unlock;
	}

	/* shared send */
	if ((flags & HPVISOR_ATTACH_SHARED_BIT) && !(flags & HPVISOR_ATTACH_RECEIVE_BIT))
		type = STATUS_A_SIDE;
	/* no-shared second attach, and shared receive */
	else
		type = STATUS_B_SIDE;

	data = find_used_chn_data(uname, flags, type);
	/* result 1: data flags check failed or chn_attach failed */
	if (IS_ERR(data)) {
		hpvisor_vnotify_debug_syscall("flags check failed or chn_attach failed! uname:%s\n",
			uname);
		fd = PTR_ERR(data);
		goto unlock;
	}
	/* result 2: data exists */
	if (data) {
		fd = get_unused_fd_flags(get_efd_flags(flags));
		if (unlikely(fd < 0)) {
			/* roll back the attach reference taken in find_used_chn_data */
			chn_data_lock(&data->data_t);
			um_put_chn_data(data, data->type, UM_CHN_DIR_UNDEF);
			chn_data_unlock(&data->data_t);

			hpvisor_vnotify_debug_syscall("get unsed fd failed, ret %d\n", fd);
			goto unlock;
		}
		chn_data_efd_attach(&data->data_t, fd);
	/* result 3: need to alloc new one */
	} else {
		/* shared receive */
		if ((flags & HPVISOR_ATTACH_SHARED_BIT) && (flags & HPVISOR_ATTACH_RECEIVE_BIT))
			type = STATUS_B_SIDE;
		/* no-shared first attach, and shared send */
		else
			type = STATUS_A_SIDE;

		fd = alloc_new_chn_data_and_file(&data, uname, flags, type);
		if (unlikely(fd < 0)) {
			hpvisor_vnotify_debug_syscall("alloc new chn_data and file failed, ret %d\n", fd);
			goto unlock;
		}
	}

	if (unlikely(task_chn_node_add(&chns->head, data, fd, type, flags))) {
		chn_data_lock(&data->data_t);
		um_put_chn_data(data, data->type, UM_CHN_DIR_UNDEF);
		chn_data_unlock(&data->data_t);

		close_fd(fd);
		hpvisor_vnotify_debug_syscall("add task chn node failed, fd:%d, id:%u, name:%s, flags:0x%x, status:0x%llx\n",
			fd, data_to_id(data), data->name, data->data_t.flags,
			chn_status_get(&data->data_t));
		fd = -ENOMEM;
		goto unlock;
	}

	hpvisor_vnotify_debug_syscall("fd:%d, id:%u, name:%s, flags:0x%x, status:0x%llx\n",
		fd, data_to_id(data), data->name, data->data_t.flags,
		chn_status_get(&data->data_t));
unlock:
	mutex_unlock(&chns->lock);

	return fd;
}

static int vnotify_ioctl_host_detach(int efd)
{
	struct task_chn_node *node = NULL;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;

	mutex_lock(&vnotify.stride_lock);
	mutex_lock(&chns->lock);
	node = find_task_chn_node_by_fd(&chns->head, efd);
	if (unlikely(!node)) {
		mutex_unlock(&chns->lock);
		mutex_unlock(&vnotify.stride_lock);
		hpvisor_vnotify_debug_syscall("find task chn node failed! fd:%d\n", efd);
		return -EBADFD;
	}

	chn_data_um_detach(node);
	list_del(&node->list);
	mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);
	kfree(node);

	return 0;
}

/*
 * Detach fd0, and for no-shared channel pairs also fd1, from the
 * current task. Shared channels delegate to vnotify_ioctl_host_detach.
 * NOTE(review): if fd1 lookup fails, fd0 has already been detached and
 * is not rolled back — confirm this partial failure is intended.
 */
static int vnotify_ioctl_um_detach(int fd0, int fd1, int flags)
{
	struct task_chn_node *node0, *node1;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;

	if (flags & HPVISOR_ATTACH_SHARED_BIT)
		return vnotify_ioctl_host_detach(fd0);

	mutex_lock(&vnotify.stride_lock);
	mutex_lock(&chns->lock);
	node0 = find_task_chn_node_by_fd(&chns->head, fd0);
	if (unlikely(!node0)) {
		mutex_unlock(&chns->lock);
		mutex_unlock(&vnotify.stride_lock);
		hpvisor_vnotify_debug_syscall("find task chn node0 failed! fd:%d\n", fd0);
		return -EBADFD;
	}

	chn_data_um_detach(node0);
	list_del(&node0->list);
	kfree(node0);

	/* fd1 == 0 means the caller only wants the first half detached */
	if (fd1 == 0) {
		mutex_unlock(&chns->lock);
		mutex_unlock(&vnotify.stride_lock);
		return 0;
	}
	node1 = find_task_chn_node_by_fd(&chns->head, fd1);
	if (unlikely(!node1)) {
		mutex_unlock(&chns->lock);
		mutex_unlock(&vnotify.stride_lock);
		hpvisor_vnotify_debug_syscall("find task chn node1 failed! fd:%d\n", fd1);
		return -EBADFD;
	}

	chn_data_um_detach(node1);
	list_del(&node1->list);
	kfree(node1);
	mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);

	return 0;
}

/*
 * Attach ioctl entry for um mode.
 *
 * Shared channels (HPVISOR_ATTACH_SHARED_BIT) map to a single host attach
 * whose fd is returned directly. A non-shared channel is backed by a PAIR
 * of host channels named "<name>_chn0"/"<name>_chn1": both are attached,
 * their wait/send directions are assigned according to which side (A/B)
 * this task landed on, and the fd pair is copied back through @chn_fds.
 *
 * Returns the fd (shared case) or 0 on success, negative errno otherwise.
 * Every failure after a successful host attach rolls the attach(es) back.
 */
static int vnotify_ioctl_um_attach(int flags, uintptr_t uname, unsigned long chn_fds)
{
	int fd0, fd1;
	int ret;
	void __user *chn_fds_p = (void __user *)chn_fds;
	struct hpvisor_chn_fd chn_fd_tmp;
	char kname0[CHN_NAME_LEN_KER] = {0}, kname1[CHN_NAME_LEN_KER] = {0};
	char kname[CHN_NAME_LEN_KER] = {0};
	struct chns_struct *chns = task_to_rtos_task(current)->chns;
	struct task_chn_node *node = NULL;

	/* the fd pair is only reported back for non-shared (paired) channels */
	if (!(flags & HPVISOR_ATTACH_SHARED_BIT) && (chn_fds_p == NULL)) {
		hpvisor_err("chn_fds is NULL\n");
		return -EINVAL;
	}

	if (unlikely(copy_name_from_user(kname, CHN_NAME_LEN_USER, (void __user *)uname))) {
		hpvisor_vnotify_debug_syscall("copy chn name form user failed\n");
		return -EFAULT;
	}

	/* shared channel: single host attach, no direction bookkeeping needed */
	if (flags & HPVISOR_ATTACH_SHARED_BIT) {
		mutex_lock(&vnotify.stride_lock);
		fd0 = vnotify_ioctl_host_attach(flags, kname);
		mutex_unlock(&vnotify.stride_lock);
		return fd0;
	}

	/* defensive NUL-termination before deriving the pair names */
	kname[CHN_NAME_LEN_USER - 1] = 0;
	/* The suffix length cannot exceed (CHN_NAME_LEN_KER - CHN_NAME_LEN_USER) */
	ret = snprintf(kname0, sizeof(kname0), "%s_chn0", kname);
	if (ret < 0)
		return -EFAULT;
	ret = snprintf(kname1, sizeof(kname1), "%s_chn1", kname);
	if (ret < 0)
		return -EFAULT;

	mutex_lock(&vnotify.stride_lock);
	fd0 = vnotify_ioctl_host_attach(flags, kname0);
	if (fd0 < 0) {
		mutex_unlock(&vnotify.stride_lock);
		return fd0;
	}

	fd1 = vnotify_ioctl_host_attach(flags, kname1);
	if (fd1 < 0) {
		/* roll back the first attach before reporting the error */
		mutex_unlock(&vnotify.stride_lock);
		vnotify_ioctl_um_detach(fd0, 0, 0);
		return fd1;
	}

	mutex_lock(&chns->lock);
	/* locate the node host_attach just recorded to learn our side (A/B) */
	node = find_task_chn_node_by_name(&chns->head, kname0);
	if (!node)
		goto unlock_out;

	/*
	 * Order the fds by side so both endpoints agree on which fd is
	 * which: the A side reports (fd0, fd1), the B side swaps them.
	 * NOTE(review): inferred from the direction assignment below —
	 * confirm against the userspace contract for hpvisor_chn_fd.
	 */
	if (node->um_chn_side == STATUS_A_SIDE) {
		chn_fd_tmp.fd0 = fd0;
		chn_fd_tmp.fd1 = fd1;
	} else {
		chn_fd_tmp.fd0 = fd1;
		chn_fd_tmp.fd1 = fd0;
	}

	/* chn0 direction: A side waits, B side sends */
	if (node->um_chn_side == STATUS_A_SIDE)
		node->um_chn_dir = UM_CHN_DIR_WAIT;
	else
		node->um_chn_dir = UM_CHN_DIR_SEND;

	/* chn1 direction is the mirror image: A side sends, B side waits */
	node = find_task_chn_node_by_name(&chns->head, kname1);
	if (!node)
		goto unlock_out;
	if (node->um_chn_side == STATUS_A_SIDE)
		node->um_chn_dir = UM_CHN_DIR_SEND;
	else
		node->um_chn_dir = UM_CHN_DIR_WAIT;

	mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);

	if (copy_to_user(chn_fds_p, &chn_fd_tmp, sizeof(struct hpvisor_chn_fd))) {
		/* the caller never learned the fds: undo both attaches */
		vnotify_ioctl_um_detach(fd0, fd1, 0);
		hpvisor_err("copy_to_user err, may be chn_fds_p is not user address\n");
		return -EFAULT;
	}

	return 0;

unlock_out:
	mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);
	vnotify_ioctl_um_detach(fd0, fd1, 0);
	hpvisor_err("find name failed\n");
	return -EFAULT;
}

/*
 * Copy the credentials (pid/uid/gid) associated with channel @id into
 * @chn_xid_p. If the second-attach (um) side does not belong to the
 * calling process, its credentials are reported; otherwise the first
 * attacher's credentials are used.
 * Returns 0 on success, -EINVAL for an invalid or unallocated channel.
 */
static int vnotify_get_xid(u32 id, struct hpvisor_chn_xid *chn_xid_p)
{
	struct chn_data *data = id_to_data(id);
	const struct hpvisor_chn_xid *src;

	if (unlikely(!data)) {
		hpvisor_err("chn id %u is invalid\n", id);
		return -EINVAL;
	}

	chn_data_lock(&data->data_t);
	if (!data->data_t.alloced) {
		chn_data_unlock(&data->data_t);
		hpvisor_err("chn id %u is not alloced\n", id);
		return -EINVAL;
	}

	/* opposite no attach return 0 */
	src = (data->um_chn_xid.pid != task_tgid_vnr(current)) ?
		&data->um_chn_xid : &data->chn_xid;
	chn_xid_p->pid = src->pid;
	chn_xid_p->uid = src->uid;
	chn_xid_p->gid = src->gid;
	chn_data_unlock(&data->data_t);

	return 0;
}

/*
 * ioctl handler: fetch channel @id's credentials and copy them to the
 * user buffer at @chn_xid.
 * Returns 0 on success, -EINVAL/-ERANGE for bad arguments, -EFAULT if
 * the user copy fails, or the error from vnotify_get_xid().
 */
static long vnotify_ioctl_get_chn_xid(u32 id, unsigned long chn_xid)
{
	struct hpvisor_chn_xid xid_buf;
	void __user *uptr = (void __user *)chn_xid;
	int err;

	if (uptr == NULL) {
		hpvisor_err("chn_xid_p is NULL\n");
		return -EINVAL;
	}

	if (unlikely(!check_chn_id_valid(id))) {
		hpvisor_err("chn id %u over range\n", id);
		return -ERANGE;
	}

	err = vnotify_get_xid(id, &xid_buf);
	if (err < 0)
		return err;

	if (copy_to_user(uptr, &xid_buf, sizeof(struct hpvisor_chn_xid))) {
		hpvisor_err("copy_to_user err, may be chn_xid_p is not user address\n");
		return -EFAULT;
	}
	return 0;
}

/*
 * vnotify um mode: channel resources are per-process and are NOT inherited
 * across fork. The child simply keeps its freshly allocated (empty)
 * chns_struct; @oldc and @use_chn are intentionally ignored.
 */
static struct chns_struct *um_dup_chns(struct chns_struct *oldc, struct chns_struct *newc,
	bool *use_chn)
{
	return newc;
}

/*
 * Free every channel node on a task's chns list at process teardown.
 *
 * Returns true if the task had any channel attached. In that case
 * vnotify.stride_lock is deliberately LEFT HELD on return — the caller
 * contract (see the comment at the unlock below) is that it is released
 * later in do_exit(). If vnotify itself is already gone (get_vnotify()
 * fails), the nodes are freed without detaching the underlying channels.
 */
bool um_chns_struct_free_list(struct chns_struct *chns)
{
	bool need_detach = true;
	struct task_chn_node *tmp = NULL;
	struct task_chn_node *node = NULL;
	struct vnotify_data_t *vdata_t = NULL;
	bool use_chn = false;

	/* if used vnotify, list is not empty */
	vdata_t = get_vnotify();
	if (!vdata_t)
		need_detach = false; /* vnotify torn down: only free the list nodes */

	mutex_lock(&vnotify.stride_lock);
	mutex_lock(&chns->lock);
	list_for_each_entry_safe(node, tmp, &chns->head, list) {
		use_chn = true;
		if (need_detach) {
			/* drop the channel reference for this side/direction */
			chn_data_lock(&node->data->data_t);
			um_put_chn_data(node->data, node->um_chn_side, node->um_chn_dir);
			chn_data_unlock(&node->data->data_t);
			hpvisor_vnotify_debug_exit("abort exited: fd:%d id:%u detached\n",
				node->fd, data_to_id(node->data));
		}
		list_del(&node->list);
		kfree(node);
	}
	mutex_unlock(&chns->lock);
	/* if use_chn is true, this lock will be unlocked in do_exit() */
	if (!use_chn)
		mutex_unlock(&vnotify.stride_lock);
	if (vdata_t)
		put_vnotify();
	return use_chn;
}

/*
 * Release the channel bound to @filp when its backing file is closed.
 *
 * This can be reached while the current task already owns
 * vnotify.stride_lock and/or chns->lock (e.g. the exit path keeps
 * stride_lock held — see um_chns_struct_free_list()), so each lock is
 * taken only after an owner check to avoid self-deadlock.
 * NOTE(review): when stride_lock is already owned by current, the
 * function bails out without scanning the list — presumably the owning
 * path performs the cleanup itself; confirm against the exit path.
 */
void um_close_chn(struct file *filp)
{
	struct chns_struct *chns = task_to_rtos_task(current)->chns;
	struct task_chn_node *node = NULL;
	struct task_chn_node *tmp = NULL;
	bool do_chns_lock = false;

	/* pin vnotify for the duration; bail if it is already gone */
	if (unlikely(!get_vnotify()))
		return;

	if (check_vnotify_file(filp) < 0)
		goto vnotify_out;

	if (!chns)
		goto vnotify_out;

	if ((unsigned long)hpvisor_mutex_owner(&vnotify.stride_lock) != (unsigned long)current)
		mutex_lock(&vnotify.stride_lock);
	else
		goto vnotify_out;

	if ((unsigned long)hpvisor_mutex_owner(&chns->lock) != (unsigned long)current) {
		do_chns_lock = true;
		mutex_lock(&chns->lock);
	}

	/* at most one node is bound to a given file, hence the break */
	list_for_each_entry_safe(node, tmp, &chns->head, list) {
		if (node->data->data_t.file == filp) {
			/*
			 * NOTE(review): takes data_t.lock directly while other
			 * paths use chn_data_lock() — verify both follow the
			 * same lock discipline.
			 */
			spin_lock(&node->data->data_t.lock);
			um_put_chn_data(node->data, node->um_chn_side, node->um_chn_dir);
			spin_unlock(&node->data->data_t.lock);
			list_del(&node->list);
			kfree(node);
			break;
		}
	}
	if (do_chns_lock)
		mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);

vnotify_out:
	put_vnotify();
}

/*
 * Lazily allocate the per-task channel bookkeeping on a task's first
 * vnotify call. Returns 0 on success, -ENOMEM if allocation fails.
 */
static int task_first_call(void)
{
	struct rtos_task_struct *rtsk = task_to_rtos_task(current);

	/* Generally, task->chns should never equal to NULL */
	if (likely(rtsk->chns))
		return 0;

	rtsk->chns = chns_alloc_and_init(NULL);
	return rtsk->chns ? 0 : -ENOMEM;
}

/*
 * Allocate and vmap the vnotify control memory: a stride/cpu allocation
 * bitmap (8-byte aligned) followed by one ring-queue header per
 * stride/cpu pair. The pages are mapped VM_USERMAP so the region can
 * later be remapped into user space.
 * Returns 0 on success, -ENOMEM on allocation failure (partially
 * allocated pages are released via vnotify_ctrl_mem_destroy()).
 */
static int vnotify_ctrl_mem_init(unsigned int max_stride_nr)
{
	char *ctrl_mem;
	int i;
	int ret = 0;
	int bitmap_size;
	int rq_headers_total_size;
	unsigned int bitmap_bits = max_stride_nr * vnotify.max_cpus;

	/* bitmap_size should be 8 bytes alignment */
	bitmap_size = ALIGN(BITS_TO_LONGS(bitmap_bits) * sizeof(unsigned long), 8);
	/* Each CPU (including offline CPUs) has a ring queue */
	rq_headers_total_size = max_stride_nr * vnotify.max_cpus *
		sizeof(struct hpvisor_ring_queue);

	vnotify.ctrl_mem_page_nr = DIV_ROUND_UP(bitmap_size + rq_headers_total_size, PAGE_SIZE);
	vnotify.ctrl_mem_pages = kcalloc(vnotify.ctrl_mem_page_nr, sizeof(struct page *),
		GFP_KERNEL);
	if (unlikely(!vnotify.ctrl_mem_pages))
		return -ENOMEM;

	for (i = 0; i < vnotify.ctrl_mem_page_nr; i++) {
		vnotify.ctrl_mem_pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (unlikely(!vnotify.ctrl_mem_pages[i])) {
			ret = -ENOMEM;
			goto err;
		}
	}

	ctrl_mem = vmap(vnotify.ctrl_mem_pages, vnotify.ctrl_mem_page_nr,
		VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (unlikely(!ctrl_mem)) {
		ret = -ENOMEM;
		goto err;
	}

	vnotify.stride_rq_bitmap = (unsigned long *)ctrl_mem;
	/*
	 * bitmap_zero() takes a BIT count, not a byte count. Passing
	 * bitmap_size (bytes) under-cleared the map; it was only masked by
	 * the pages being __GFP_ZERO. Pass the real number of bits.
	 */
	bitmap_zero(vnotify.stride_rq_bitmap, bitmap_bits);

	vnotify.rq_headers = (struct hpvisor_ring_queue *)&ctrl_mem[bitmap_size];
	vnotify.vnotify_ctrl_mem = ctrl_mem;
	return ret;
err:
	vnotify_ctrl_mem_destroy();
	return ret;
}

/*
 * Allocate field_pages and vnotify.chns.
 * If the allocation fails, uses vnotify_destroy to release resources.
 */
int um_vnotify_mem_init(unsigned int stride_chn_nr, unsigned int max_stride_nr)
{
	int ret;
	u64 max_chn_nr;

	if (stride_chn_nr == 0 || (max_stride_nr == 0))
		return -EINVAL;

	max_chn_nr = (u64)max_stride_nr * stride_chn_nr;
	if (max_chn_nr > CHN_NR_LIMIT) {
		hpvisor_err("vnotify don't support that many channels:%lld\n", max_chn_nr);
		return -EINVAL;
	}

	if (!__sync_bool_compare_and_swap(&vnotify.inited, false, true)) {
		hpvisor_err("vnotify has been initialized\n");
		return -EEXIST;
	}

	vnotify.max_cpus = num_possible_cpus();

	ret = vnotify_ctrl_mem_init(max_stride_nr);
	if (ret != 0)
		goto err;

	vnotify.effective_stride_count = 0;
	vnotify.max_stride_nr = max_stride_nr;
	vnotify.stride_chn_nr = stride_chn_nr;

	/*
	 * The size of a chn_state array is the same as the size of a ring queue
	 * There are "max_cpus" ring queues
	 */
	vnotify.stride_page_nr = DIV_ROUND_UP(stride_chn_nr * sizeof(struct hpvisor_chn_state),
		PAGE_SIZE) + DIV_ROUND_UP(stride_chn_nr * sizeof(uint64_t), PAGE_SIZE) *
		vnotify.max_cpus;

	vnotify.strides_addr_arr = kcalloc(vnotify.max_stride_nr, sizeof(struct vnotify_stride *),
		GFP_KERNEL);
	if (unlikely(!vnotify.strides_addr_arr))
		goto err;

	ret = vnotify_stride_alloc(NULL);
	if (ret != 0)
		goto err;

	ret = register_rtos_fast_svc_handler_ctx(um_vnotify_send);
	if (ret != 0) {
		hpvisor_err("register_rtos_fast_svc_handler_ctx failed! ret is %d\n", ret);
		goto err;
	}

	vnotify.vdata_t.get_vnotify = get_vnotify;
	vnotify.vdata_t.put_vnotify = put_vnotify;
	vnotify.vdata_t.task_first_call = task_first_call;
	vnotify.vdata_t.vnotify_close_chn = um_close_chn;
	vnotify.vdata_t.vnotify_chns_unlock = um_vnotify_chns_unlock;
	vnotify.vdata_t.dup_chns = um_dup_chns;
	vnotify.vdata_t.chns_struct_free_list = um_chns_struct_free_list;
	vnotify.vdata_t.ipi_vnotify_handler = um_vnotify_handler;

	hpvisor_vnotify_debug_common("vnotify init data: stride_page_nr:%u ctrl_mem_page_nr:%u\n",
		vnotify.stride_page_nr, vnotify.ctrl_mem_page_nr);

	vnotify.vdata_t.mode = UM_MODE;
	set_vnotify_operations(&um_vnotify_oper);
	set_vdata_t(&vnotify.vdata_t);
	refcount_set(&vnotify.vdata_t.ref_count, 1);
	return 0;
err:
	vnotify_destroy();

	return ret;
}
EXPORT_SYMBOL_NS(um_vnotify_mem_init, HW_RTOS_NS);

/*
 * ioctl wrapper: read an init_info {stride_chn_nr, max_stride_nr} from
 * user space and initialize the um vnotify subsystem with it.
 * Returns 0 on success, -EFAULT on a bad user pointer, or the error
 * from um_vnotify_mem_init().
 */
int um_ioctl_vnotify_mem_init_stride(unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct init_info init;

	/*
	 * Fixed the malformed cast ((void *__user)arg): the __user
	 * annotation belongs before the '*', and argp already carries it.
	 * copy_from_user() re-checks access, but the explicit access_ok()
	 * is kept so an invalid pointer is logged with its value.
	 */
	if (unlikely(!access_ok(argp, sizeof(struct init_info)))) {
		hpvisor_err("arg:0x%lx error\n", arg);
		return -EFAULT;
	}
	if (copy_from_user(&init, argp, sizeof(struct init_info)))
		return -EFAULT;

	return um_vnotify_mem_init(init.stride_chn_nr, init.max_stride_nr);
}

/*
 * Legacy single-stride init: @chn_nr channels packed into exactly one
 * stride. Thin wrapper around um_vnotify_mem_init().
 */
int um_ioctl_vnotify_mem_init(unsigned int chn_nr)
{
	const unsigned int single_stride = 1;

	return um_vnotify_mem_init(chn_nr, single_stride);
}
EXPORT_SYMBOL_NS(um_ioctl_vnotify_mem_init, HW_RTOS_NS);

/*
 * ioctl dispatch table for um mode, installed by um_vnotify_mem_init()
 * via set_vnotify_operations().
 */
static const struct vnotify_ioctl_operations um_vnotify_oper = {
	.query = vnotify_ioctl_query,
	.host_find = vnotify_ioctl_um_find,
	.host_attach = vnotify_ioctl_um_attach,   /* shared or paired attach */
	.host_detach = vnotify_ioctl_um_detach,   /* releases one fd or the pair */
	.get_chn_xid = vnotify_ioctl_get_chn_xid,
};
