/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022.
 * Description: fast notify for hpvisor
 * Author: lilinjie8 <lilinjie8@huawei.com>
 * Create: 2022-02-10
 */

#include "dfx.h"
#include "mmu.h"
#include "irqbypass.h"
#include "vm_vnotify.h"

#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/task_struct_extend.h>
#include <linux/compiler.h>
#include <linux/sched/clock.h>
#include <linux/rtos/hpvisor.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <asm/memory.h>

#define CHN_NAME_LEN 32

/* Union of all attach flags a caller may legally set on a channel. */
#define HPVISOR_CHN_FLAGS_SET \
	(HPVISOR_ATTACH_CREATE_BIT | HPVISOR_ATTACH_SHARED_BIT | HPVISOR_ATTACH_PERSISTENT_BIT | HPVISOR_ATTACH_FAST_BIT)

/* ctrl channel data need a page for using */
#define CTRL_CHN_DATA_SIZE (1 * PAGE_SIZE)

/*
 * Layout order of the four equal-sized fields inside one stride's shared
 * memory: rx/tx channel-state arrays, then the rx/tx ring-queue slots.
 * See vnotify_stride_alloc() for how these offsets are applied.
 */
enum {
	HPVISOR_RX_OFFSET,
	HPVISOR_TX_OFFSET,
	HPVISOR_RX_RQ_OFFSET,
	HPVISOR_TX_RQ_OFFSET,
	HPVISOR_SHARE_MEM_FIELD_NR
};

/* Which side of a channel an attach/detach operation targets. */
enum {
	STATUS_HOST,
	STATUS_GUEST
};

/* Per-task bookkeeping: one node per channel fd attached by the task. */
struct task_chn_node {
	int fd;                   /* eventfd returned to userspace for this channel */
	struct chn_data *data;    /* backing channel; owned by its stride, not this node */
	struct list_head list;    /* linked into chns_struct->head */
};

/* Parameters gathered during vnotify initialization. */
struct init_info {
	bool vmmod;

	u32 stride_chn_nr;        /* channels per stride */
	u32 max_stride_nr;        /* upper bound on stride count */

	gpa_t vnotify_stride_gpa; /* guest base address of the stride shared memory */
	gpa_t vnotify_ctrl_gpa;   /* guest base address of the control memory */
};

/* One debug-trace entry recorded per channel event. */
struct record_ctx {
	pid_t tid;
	pid_t pid;
	u32 index; /* record packet index, need guest store it in hpvisor_chn_state->mask */
	u32 efd_count : 24; /* eventfd_ctx->efd_count */
	u8 event; /* read/poll/wake ... */
	u64 clock; /* local_clock() timestamp; 0 marks an unused slot */
};

/*
 * Fixed-size circular trace buffer; tail_idx is the next write position.
 * NOTE(review): array[0] is the old GNU zero-length-array idiom; a C99
 * flexible array member (array[]) would be the modern form — confirm the
 * tree's minimum compiler before changing.
 */
struct vnotify_record {
	u32 tail_idx; /* tail index of array */
	u32 size; /* size of record array */
	struct record_ctx array[0];
};

/* One stride: a chunk of channels plus the shared memory backing them. */
struct vnotify_stride {
	u32 all_chn_used;        /* set when an allocation scan found no free slot */
	u32 idx;                 /* position in vnotify.strides_addr_arr */
	/* vnotify share memory with guest */
	u32 chn_nr;              /* usable channels (may be clamped by rq capacity) */
	u32 page_nr;
	struct page **pages;     /* backing pages; rx..tx_rq are views into one vmap */
	struct hpvisor_chn_state *rx;
	struct hpvisor_chn_state *tx;
	uint64_t *rx_rq;
	uint64_t *tx_rq;

	/* vnotify channel data, managed by host kernel */
	struct chn_data *chns;
};

/* Host-side per-channel state wrapping the shared chn_data_t. */
struct chn_data {
	struct chn_data_t data_t;

	char name[CHN_NAME_LEN];
	struct vnotify_record *record;  /* optional debug trace; NULL when disabled */
	struct hpvisor_chn_xid chn_xid; /* pid/uid/gid of the first host attacher */
	struct vnotify_stride *stride;  /* back-pointer to the owning stride */
};

/* Singleton state for the one vnotify instance (one VM at a time). */
struct vnotify_data {
	bool inited;
	struct vnotify_data_t vdata_t;  /* embeds the refcount guarding teardown */
	u32 stride_chn_nr;
	u32 max_stride_nr;
	u32 stride_page_nr;

	/* vnotify is only be associated with one vm */
	int ctrl_chn_fd;
	pid_t qemu_tgid;
	struct kvm *kvm;

	u64 vnotify_stride_gpa;

	char *vnotify_ctrl_mem;         /* vmap of ctrl_mem_pages; bitmaps/rq headers live here */
	struct hpvisor_ring_queue *rq_headers;

	u32 ctrl_mem_page_nr;
	struct page **ctrl_mem_pages;
	unsigned long *guest_to_host_stride_bitmap; /* guest sets bits; host handler clears */
	unsigned long *host_to_guest_stride_bitmap; /* host sets bits before IPI to guest */
	struct vnotify_stride **strides_addr_arr;

	/* published with smp_wmb(); readers pair with smp_rmb() in get_vnotify_stride() */
	u32 effective_stride_count;

	struct mutex stride_lock;       /* serializes stride allocation/growth */
};

/* The single module-wide vnotify instance; qemu_tgid == -1 means unbound. */
static struct vnotify_data vnotify = {
	.qemu_tgid = -1,
	.stride_lock = __MUTEX_INITIALIZER(vnotify.stride_lock)
};

/* Event-code to display-name table for record printing; see event_to_str(). */
static const char *g_event_str[HPVISOR_EVENT_MAX_NR + 1] = {
	[HPVISOR_EVENT_POLL] = "poll",
	[HPVISOR_EVENT_WAKE] = "wake",
	[HPVISOR_EVENT_READ] = "read",
	[HPVISOR_EVENT_MAX_NR] = "null",
};

/* Serialize stride allocation/growth; pairs with vnotify_stride_unlock(). */
void vnotify_stride_lock(void)
{
	mutex_lock(&vnotify.stride_lock);
}

/* Release the stride lock taken by vnotify_stride_lock(). */
void vnotify_stride_unlock(void)
{
	mutex_unlock(&vnotify.stride_lock);
}

/*
 * Look up a stride by index, or NULL if the index is not yet published.
 * Lock-free reader side: pairs with the smp_wmb() in
 * vnotify_stride_alloc_and_map() so that a visible count implies a
 * visible array element.
 */
static struct vnotify_stride *get_vnotify_stride(u32 stride_idx)
{
	if (stride_idx >= READ_ONCE(vnotify.effective_stride_count))
		return NULL;
	/*
	 * Ensure that the value of stride_idx is less than the value of
	 * vnotify.effective_stride_count, access to the strides_addr_arr
	 * need done after this operation
	 */
	smp_rmb();
	return vnotify.strides_addr_arr[stride_idx];
}

/* Map a global channel id to the stride that owns it (NULL if unpublished). */
static inline struct vnotify_stride *id_to_stride(u32 id)
{
	return get_vnotify_stride(id / vnotify.stride_chn_nr);
}

/* Resolve a global channel id to its chn_data within a known stride. */
static inline struct chn_data *id_to_data_stride(struct vnotify_stride *stride, u32 id)
{
	u32 slot = id % vnotify.stride_chn_nr;

	return &stride->chns[slot];
}

/* Resolve a global channel id to its chn_data; NULL when the stride is absent. */
static inline struct chn_data *id_to_data(u32 id)
{
	struct vnotify_stride *stride = id_to_stride(id);

	if (unlikely(!stride))
		return NULL;
	return &stride->chns[id % vnotify.stride_chn_nr];
}

static inline u32 data_to_id_stride(struct vnotify_stride *stride, struct chn_data *data)
{
	return data - stride->chns;
}

static inline u32 data_to_id(struct chn_data *data)
{
	u32 data_idx = data - data->stride->chns;

	return data->stride->idx * vnotify.stride_chn_nr + data_idx;
}

/*
 * The reference counting of the channel and
 * the reference count of the file must be in the final state
 * before the check
 */
/*
 * Sanity check: the channel's host attach count must never exceed the
 * struct file refcount, since each host attach holds one file reference.
 * Returns 0 on pass (or when the channel is too new to check), -ERANGE
 * on a count mismatch.
 */
static int check_chn_count(struct chn_data *data)
{
	long f_count;
	s16 chn_count;
	s64 status;

	/*
	 * When the channel is just initialized,
	 * data or data->data_t.file may be NULL,
	 * and no subsequent check is performed.
	 */
	if (!data || !data->data_t.file)
		return 0;

	/* LLONG_MAX is the "never attached" sentinel set at stride init. */
	status = chn_status_get(&data->data_t);
	if (status == LLONG_MAX)
		return 0;

	f_count = atomic_long_read(&data->data_t.file->f_count);
	chn_count = status_host_cnt(status);
	if (chn_count > f_count) {
		hpvisor_err("chn_count:%d is larger than f_count:%ld\n", chn_count, f_count);
		return -ERANGE;
	}
	hpvisor_vnotify_debug_notify("chn_count:%d, f_count:%ld\n", chn_count, f_count);
	return 0;
}

static s64 chn_detach(struct chn_data *data, int type)
{
	s64 status;

	status = chn_status_get(&data->data_t);
	switch (type) {
	case STATUS_HOST: {
		status -= (1 << STATUS_HOST_SHIFT);
		break;
	}
	case STATUS_GUEST: {
		if (status_guest_cnt(status) <= 0)
			return -ENOENT;
		status -= (1 << STATUS_GUEST_SHIFT);
		break;
	}
	default: {
		hpvisor_err("detach: type %d\n", type);
		return -EINVAL;
	}
	}
	chn_status_set(&data->data_t, status);
	return status;
}

static s64 chn_attach(struct chn_data *data, int type)
{
	s64 status;

	status = chn_status_get(&data->data_t);
	switch (type) {
	case STATUS_HOST: {
		if (unlikely(status_host_cnt(status) == CHN_CNT_MAX)) {
			hpvisor_err("out of range, status:0x%llx\n", status);
			return -ERANGE;
		}
		if (status_host_cnt(status) < 0)
			status = (status & ~STATUS_HOST_MASK) | (1 << STATUS_HOST_SHIFT);
		else
			status += (1 << STATUS_HOST_SHIFT);
		break;
	}
	case STATUS_GUEST: {
		if (unlikely(status_guest_cnt(status) > 0)) {
			hpvisor_err("channel has been attached, status:0x%llx\n", status);
			return -EEXIST;
		}
		status = (status & ~STATUS_GUEST_MASK) | (1 << STATUS_GUEST_SHIFT);
		break;
	}
	default: {
		hpvisor_err("attach: Unsupport status type %d\n", type);
		return -EINVAL;
	}
	}
	chn_status_set(&data->data_t, status);
	return status;
}

/*
 * Wake waiters on the channel's eventfd with the given poll event.
 * Takes a temporary file reference under file_lock so the wakeup is safe
 * against a concurrent detach clearing data_t.file. Returns -ENOENT when
 * the file is already gone, otherwise the wakeup result.
 */
static int chn_data_wake_up(struct chn_data *data, __poll_t event)
{
	int ret;
	unsigned long flags;
	struct file *filp = NULL;

	/* must lock file for it maybe detached in host/guest detach */
	spin_lock_irqsave(&data->data_t.file_lock, flags);
	if (IS_ERR_OR_NULL(data->data_t.file)) {
		spin_unlock_irqrestore(&data->data_t.file_lock, flags);
		return -ENOENT;
	}
	filp = get_file(data->data_t.file);
	spin_unlock_irqrestore(&data->data_t.file_lock, flags);

	/* Wakeup happens outside the spinlock; filp ref keeps the file alive. */
	ret = hpvisor_file_wake_up(filp, event);
	fput(filp);

	hpvisor_vnotify_debug_notify("wake up channel %s\n", data->name);
	return ret;
}

/*
 * Tear down all normal channels of a stride: wake any guest-attached
 * channel with EPOLLHUP, drop its file reference, free the optional
 * record buffer, then release the channel array itself.
 * Reserved control channels at the start of the stride are skipped.
 */
static void chn_data_destroy(struct vnotify_stride *stride)
{
	int ret;
	struct chn_data *data = NULL;
	struct chn_data *end = NULL;

	if (!stride->chns)
		return;

	/* The first stride reserves more leading slots for control channels. */
	if (stride->idx == 0)
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_FIRST_STRIDE);
	else
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_OTHER_STRIDE);

	end = id_to_data_stride(stride, stride->chn_nr - 1);
	for (; data <= end; data++) {
		/*
		 * Only guest-attached channels still hold a file reference
		 * here; NOTE(review): a file with guest_cnt <= 0 is not
		 * fput() on this path — confirm callers have dropped it.
		 */
		if (unlikely(data->data_t.file) &&
			(status_guest_cnt(chn_status_get(&data->data_t)) > 0)) {
			ret = chn_data_wake_up(data, EPOLLHUP);
			if (ret != 0)
				hpvisor_err("channel wake up failed, ret %d\n", ret);
			fput(data->data_t.file);
		}
		if (unlikely(data->record))
			kfree(data->record);
	}
	kvfree(stride->chns);
	stride->chns = NULL;
}

/*
 * Release a stride's shared-memory mapping and its backing pages.
 * rx is the base of one contiguous vmap covering rx/tx/rx_rq/tx_rq, so a
 * single vunmap releases all four views.
 */
static void chn_page_destroy(struct vnotify_stride *stride)
{
	int i;

	if (stride->rx) {
		vunmap(stride->rx);
		stride->rx = NULL;
		/* tx/rx_rq/tx_rq are views into the same mapping — just clear them */
		stride->tx = NULL;
		stride->rx_rq = NULL;
		stride->tx_rq = NULL;
	}

	if (!stride->pages)
		return;

	for (i = 0; i < stride->page_nr; i++) {
		if (stride->pages[i])
			__free_page(stride->pages[i]);
	}
	kfree(stride->pages);
	stride->pages = NULL;
}

/* Full stride teardown: channels first (may wake waiters), then their pages. */
static void vnotify_stride_destroy(struct vnotify_stride *stride)
{
	chn_data_destroy(stride);
	chn_page_destroy(stride);
}

/*
 * Unmap the control memory and free its pages. The bitmaps and rq headers
 * are views into the same vmap, so one vunmap covers them all.
 */
static void vnotify_ctrl_mem_destroy(void)
{
	int i;

	if (vnotify.vnotify_ctrl_mem) {
		vunmap(vnotify.vnotify_ctrl_mem);
		vnotify.vnotify_ctrl_mem = NULL;
		vnotify.guest_to_host_stride_bitmap = NULL;
		vnotify.host_to_guest_stride_bitmap = NULL;
		vnotify.rq_headers = NULL;
	}

	if (!vnotify.ctrl_mem_pages)
		return;

	for (i = 0; i < vnotify.ctrl_mem_page_nr; i++) {
		if (vnotify.ctrl_mem_pages[i])
			__free_page(vnotify.ctrl_mem_pages[i]);
	}
	kfree(vnotify.ctrl_mem_pages);
	vnotify.ctrl_mem_pages = NULL;
}

/*
 * Final teardown, invoked when the last reference is dropped: destroy
 * every published stride, release the control memory, and reset the
 * singleton back to its unbound state.
 */
static void vnotify_destroy(void)
{
	int i;

	if (vnotify.strides_addr_arr) {
		for (i = 0; i < vnotify.effective_stride_count; i++) {
			struct vnotify_stride *stride = vnotify.strides_addr_arr[i];

			if (!stride) {
				hpvisor_err("can't get stride, i:%d\n", i);
				continue;
			}
			vnotify_stride_destroy(stride);
			kfree(stride);
		}
		kfree(vnotify.strides_addr_arr);
		vnotify.strides_addr_arr = NULL;
	}

	vnotify_ctrl_mem_destroy();

	/* Back to the unbound state so a new VM can initialize later. */
	vnotify.qemu_tgid = -1;
	vnotify.inited = false;
	vnotify.effective_stride_count = 0;
	vnotify.stride_page_nr = 0;
	hpvisor_vnotify_debug_common("kvm hpe vnotify destroyed!\n");
}

/*
 * For external interfaces, must get_vnotify for protecting
 * vnotify has been freed by others.
 */
/*
 * Take a reference on the vnotify instance, or NULL if it is already
 * being torn down (refcount hit zero). Every successful call must be
 * balanced by put_vnotify().
 */
static struct vnotify_data_t *get_vnotify(void)
{
	if (refcount_inc_not_zero(&vnotify.vdata_t.ref_count))
		return &vnotify.vdata_t;
	return NULL;
}

static void put_vnotify(void)
{
	if (refcount_dec_and_test(&vnotify.vdata_t.ref_count))
		vnotify_destroy();
}

/* A channel id is valid when it falls inside the published strides. */
static inline bool check_chn_id_valid(u32 id)
{
	u32 total = vnotify.effective_stride_count * vnotify.stride_chn_nr;

	return id < total;
}

/* Shared rx (guest->host) state slot for a channel id within its stride. */
static inline struct hpvisor_chn_state *get_rx_chn_state(struct vnotify_stride *stride, u32 id)
{
	u32 slot = id % vnotify.stride_chn_nr;

	return stride->rx + slot;
}

/* Shared tx (host->guest) state slot for a channel id within its stride. */
static inline struct hpvisor_chn_state *get_tx_chn_state(struct vnotify_stride *stride, u32 id)
{
	u32 slot = id % vnotify.stride_chn_nr;

	return stride->tx + slot;
}

/* Clear both shared-state directions so a recycled channel starts clean. */
static inline void chn_state_reset(struct vnotify_stride *stride, u32 id)
{
	struct hpvisor_chn_state *rx = get_rx_chn_state(stride, id);
	struct hpvisor_chn_state *tx = get_tx_chn_state(stride, id);

	memset(rx, 0, sizeof(*rx));
	memset(tx, 0, sizeof(*tx));
}

/*
 * Service one pending channel id dequeued from a guest->host ring queue.
 * CTRL_CHN_QEMU_WAKE_UP_GUEST only travels host->guest; seeing it here is
 * a protocol error. Everything else wakes the host-side waiter.
 */
static void handle_pending_chn(struct vnotify_stride *stride, u32 id)
{
	int ret;

	if (id == CTRL_CHN_QEMU_WAKE_UP_GUEST) {
		hpvisor_err("data %d only for send to guest\n", CTRL_CHN_QEMU_WAKE_UP_GUEST);
		return;
	}

	/* CTRL_CHN_GUEST_WAKE_UP_QEMU and all normal channels: wake the reader. */
	ret = chn_data_wake_up(id_to_data_stride(stride, id), EPOLLIN);
	if (ret != 0)
		hpvisor_err("channel wake up failed, ret %d\n", ret);
}

/*
 * IPI handler: drain every stride's guest->host ring queue.
 * For each stride whose bit is set in guest_to_host_stride_bitmap,
 * dequeue pending channel ids and wake the matching host waiters.
 * Holds a vnotify reference for the whole walk.
 */
void hpvisor_vnotify_handler(void)
{
	int id;
	struct hpvisor_chn_state *rx = NULL;
	struct vnotify_data_t *vdata_t = get_vnotify();
	struct vnotify_stride *stride = NULL;
	u32 i = 0;
	int val;
	struct hpvisor_ring_queue *rq;
	struct vnotify_data *vdata;

	if (!vdata_t)
		return;
	vdata = container_of(vdata_t, struct vnotify_data, vdata_t);

	hpvisor_vnotify_debug_notify("enter handle vnotify ipi request\n");
	while (i < vdata->effective_stride_count) {
		/* Atomically consume the stride's "has pending work" bit. */
		val = test_and_clear_bit(i, vdata->guest_to_host_stride_bitmap);
		if (val == 0) {
			i++;
			continue;
		}

		stride = get_vnotify_stride(i);
		if (unlikely(!stride)) {
			hpvisor_err("can't get stride, i:%u\n", i);
			put_vnotify();
			return;
		}

		/* NOTE(review): vnotify.rq_headers == vdata->rq_headers (same singleton). */
		rq = &vnotify.rq_headers[RX_RQ_HEADER_OFFSET(stride->idx)];
		while (1) {
			id = ring_queue_dequeue(rq, stride->rx_rq);
			if (id < 0)
				break;
			/* Drop ids outside this stride's usable channel range. */
			if (id - i * vdata->stride_chn_nr >= stride->chn_nr)
				continue;

			/*
			 * READY -> NOTIFYING transition must be atomic: only the
			 * winner of the CAS delivers the wakeup, so each pending
			 * notification is handled exactly once.
			 */
			rx = get_rx_chn_state(stride, id);
			if (__sync_bool_compare_and_swap(&rx->state, HPVISOR_STATE_IS_READY,
				HPVISOR_STATE_NOTIFYING_BIT))
				handle_pending_chn(stride, id);
		}
		i++;
	}
	hpvisor_vnotify_debug_notify("exit handle vnotify ipi request\n");

	put_vnotify();
}

/*
 * Initialize the stride's rx/tx ring-queue headers and mark every slot
 * empty. Capacity is the largest power of two that fits in field_size
 * (each slot is one uint64_t). Returns that capacity.
 */
static uint32_t ring_queue_init(struct vnotify_stride *stride, size_t field_size)
{
	unsigned long entries = field_size / sizeof(uint64_t);
	uint16_t order = 0;
	uint32_t capacity;
	uint32_t i;
	struct hpvisor_ring_queue *rx_hdr;
	struct hpvisor_ring_queue *tx_hdr;

	/* order = floor(log2(entries)) */
	while ((entries >>= 1) != 0)
		order++;

	rx_hdr = &vnotify.rq_headers[RX_RQ_HEADER_OFFSET(stride->idx)];
	tx_hdr = &vnotify.rq_headers[TX_RQ_HEADER_OFFSET(stride->idx)];
	rq_header_init(rx_hdr, order);
	rq_header_init(tx_hdr, order);

	capacity = 1U << order;
	/* (uint64_t)-1 is the "slot empty" sentinel. */
	for (i = 0; i < capacity; i++) {
		stride->rx_rq[i] = (uint64_t)(-1);
		stride->tx_rq[i] = (uint64_t)(-1);
	}

	return capacity;
}

/*
 * Allocate and wire up one stride: backing pages, a contiguous vmap split
 * into four equal fields (rx/tx channel state, rx/tx ring queues), the
 * ring-queue headers, and the host-side chn_data array. Returns 0 or a
 * negative errno; on failure all page resources are released, and the
 * caller frees the stride struct itself.
 */
static int vnotify_stride_alloc(struct vnotify_stride *stride)
{
	char *shm_addr = NULL;
	unsigned int field_pages;
	uint32_t rq_capacity;
	int ret = 0;
	int i;

	/* Shared memory is split into HPVISOR_SHARE_MEM_FIELD_NR equal fields. */
	field_pages = vnotify.stride_page_nr / HPVISOR_SHARE_MEM_FIELD_NR;
	stride->page_nr = vnotify.stride_page_nr;

	stride->pages = kcalloc(stride->page_nr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!stride->pages))
		return -ENOMEM;

	for (i = 0; i < stride->page_nr; i++) {
		stride->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (unlikely(!stride->pages[i])) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/* VM_USERMAP: the same pages are later mapped into guest/user space. */
	shm_addr = vmap(stride->pages, stride->page_nr, VM_MAP | VM_USERMAP, __pgprot(PROT_NORMAL));
	if (unlikely(!shm_addr)) {
		ret = -ENOMEM;
		goto err;
	}

	/* Field views carved out of the single mapping, in layout-enum order. */
	stride->rx = (struct hpvisor_chn_state *)&shm_addr[HPVISOR_RX_OFFSET *
		(field_pages << PAGE_SHIFT)];
	stride->tx = (struct hpvisor_chn_state *)&shm_addr[HPVISOR_TX_OFFSET *
		(field_pages << PAGE_SHIFT)];
	stride->rx_rq = (uint64_t *)&shm_addr[HPVISOR_RX_RQ_OFFSET * (field_pages << PAGE_SHIFT)];
	stride->tx_rq = (uint64_t *)&shm_addr[HPVISOR_TX_RQ_OFFSET * (field_pages << PAGE_SHIFT)];

	rq_capacity = ring_queue_init(stride, field_pages << PAGE_SHIFT);

	stride->chn_nr = vnotify.stride_chn_nr;
	/* The chn_nr must be smaller than the capacity of rq. Otherwise, queue may overflow. */
	if (stride->chn_nr > rq_capacity)
		stride->chn_nr = rq_capacity;

	stride->chns = vzalloc(stride->chn_nr * sizeof(struct chn_data));
	if (unlikely(!stride->chns)) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < stride->chn_nr; i++) {
		/* LLONG_MAX status = "never attached" sentinel, see check_chn_count(). */
		chn_status_set(&stride->chns[i].data_t, LLONG_MAX);
		spin_lock_init(&stride->chns[i].data_t.lock);
		spin_lock_init(&stride->chns[i].data_t.file_lock);
		stride->chns[i].stride = stride;
	}
	stride->all_chn_used = 0;
	return ret;
err:
	chn_page_destroy(stride);
	return ret;
}

/* Drop the vnotify reference held on behalf of the bound VM, if it matches. */
void hpvisor_vnotify_release(const struct kvm *kvm)
{
	if (!kvm || kvm != vnotify.kvm)
		return;

	vnotify.kvm = NULL;
	put_vnotify();
}

static inline struct page *get_vnotify_ctrl_mem_page(unsigned long pgoff)
{
	return vnotify.ctrl_mem_pages[pgoff];
}

/*
 * Fault handler backend: resolve a control-memory page for a user mapping.
 * Returns NULL when vnotify is down or the faulting VM is not the bound
 * one. Holds a vnotify reference only for the duration of the lookup.
 */
struct page *hpvisor_vnotify_get_ctrl_mem_page(const struct kvm *kvm, struct vm_fault *vmf)
{
	struct page *page = NULL;
	struct vnotify_data_t *vdata_t = get_vnotify();
	unsigned long pgoff;
	struct vnotify_data *vdata;

	if (unlikely(!vdata_t))
		return NULL;

	vdata = container_of(vdata_t, struct vnotify_data, vdata_t);
	if (unlikely(kvm != vdata->kvm))
		goto out;

	/* Page offset of the fault within the mapped control region. */
	pgoff = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	page = get_vnotify_ctrl_mem_page(pgoff);
out:
	put_vnotify();
	return page;
}

/* Map a host range into the guest's stage-2 tables, populated immediately. */
static int do_vnotify_stage2_map(struct kvm *kvm, u64 gpa, hva_t hva, u32 size)
{
	struct hpvisor_s2_map s2m = {0};

	s2m.gpa = gpa;
	s2m.hva = hva;
	s2m.size = size;
	s2m.flags = HPVISOR_MEM_SETUP_NOW;

	return hpvisor_arch_do_stage2_map(kvm, &s2m);
}

/*
 * Except hpvisor_vnotify_init, the stride_lock needs to be locked
 * when this function is called in other scenarios
 */
/*
 * Allocate a new stride, map it into the guest at the next slot of the
 * stride region, and publish it to lock-free readers. On success,
 * optionally returns the new stride via stride_ret.
 * Caller holds stride_lock except during hpvisor_vnotify_init.
 */
static int vnotify_stride_alloc_and_map(struct vnotify_stride **stride_ret)
{
	struct vnotify_stride *stride;
	u64 map_gpa;
	int ret;

	if (vnotify.effective_stride_count >= vnotify.max_stride_nr) {
		hpvisor_err("The number of strides has reached the maximum, max:%u\n",
			vnotify.max_stride_nr);
		return -E2BIG;
	}

	stride = kzalloc(sizeof(struct vnotify_stride), GFP_KERNEL);
	if (unlikely(!stride))
		return -ENOMEM;

	stride->idx = vnotify.effective_stride_count;
	ret = vnotify_stride_alloc(stride);
	if (ret != 0) {
		kfree(stride);
		return ret;
	}

	/* Strides occupy consecutive fixed-size windows in guest address space. */
	map_gpa = vnotify.vnotify_stride_gpa + vnotify.effective_stride_count *
		stride->page_nr * PAGE_SIZE;

	ret = do_vnotify_stage2_map(vnotify.kvm, map_gpa, (hva_t)stride->rx,
		stride->page_nr * PAGE_SIZE);
	if (ret != 0) {
		/* Undo vnotify_stride_alloc(): channel array first, then pages. */
		kvfree(stride->chns);
		stride->chns = NULL;
		chn_page_destroy(stride);
		kfree(stride);
		return ret;
	}

	vnotify.strides_addr_arr[vnotify.effective_stride_count] = stride;
	/*
	 * Ensure that strides_addr_arr has been added to the node, and then
	 * add 1 to effective_stride_count. Otherwise, accessing the array in
	 * get_vnotify_stride () may get an NULL element.
	 */
	smp_wmb();
	WRITE_ONCE(vnotify.effective_stride_count, vnotify.effective_stride_count + 1);

	if (stride_ret)
		*stride_ret = stride;

	return 0;
}

/*
 * Allocate and map the control memory: one page of control-channel data,
 * the two stride notification bitmaps, and the per-stride ring-queue
 * headers, all in one contiguous vmap shared with the guest.
 *
 * Fix: bitmap_zero() takes a count of BITS, not bytes. The old code
 * passed bitmap_size (bytes), which under-clears the bitmaps once
 * max_stride_nr exceeds one long's worth of bits — masked so far only
 * because the pages come from __GFP_ZERO. Pass the real bit count.
 */
static int vnotify_ctrl_mem_init(struct kvm *kvm, struct init_info *init)
{
	char *ctrl_mem;
	int i;
	int ret;
	int bitmap_size;
	int rq_headers_total_size;

	/* One bit per stride, rounded up to whole bytes. */
	bitmap_size = DIV_ROUND_UP(init->max_stride_nr, BITS_PER_BYTE);
	/* need align to 8 in arm64 */
	bitmap_size = ALIGN(bitmap_size, 8);

	rq_headers_total_size = init->max_stride_nr * TRANSIT_DIRECTION *
		sizeof(struct hpvisor_ring_queue);

	vnotify.ctrl_mem_page_nr = DIV_ROUND_UP(bitmap_size * TRANSIT_DIRECTION +
		rq_headers_total_size + CTRL_CHN_DATA_SIZE, PAGE_SIZE);

	vnotify.ctrl_mem_pages = kcalloc(vnotify.ctrl_mem_page_nr, sizeof(struct page *),
					GFP_KERNEL);
	if (unlikely(!vnotify.ctrl_mem_pages))
		return -ENOMEM;

	for (i = 0; i < vnotify.ctrl_mem_page_nr; i++) {
		vnotify.ctrl_mem_pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (unlikely(!vnotify.ctrl_mem_pages[i])) {
			ret = -ENOMEM;
			goto err;
		}
	}

	ctrl_mem = vmap(vnotify.ctrl_mem_pages, vnotify.ctrl_mem_page_nr,
		VM_MAP | VM_USERMAP, __pgprot(PROT_NORMAL));
	if (unlikely(!ctrl_mem)) {
		ret = -ENOMEM;
		goto err;
	}

	/* Layout: [ctrl chn page][g2h bitmap][h2g bitmap][rq headers...]. */
	vnotify.guest_to_host_stride_bitmap = (unsigned long *)&ctrl_mem[PAGE_SIZE];
	bitmap_zero(vnotify.guest_to_host_stride_bitmap, init->max_stride_nr);
	vnotify.host_to_guest_stride_bitmap = (unsigned long *)&ctrl_mem[PAGE_SIZE + bitmap_size];
	bitmap_zero(vnotify.host_to_guest_stride_bitmap, init->max_stride_nr);

	vnotify.rq_headers = (struct hpvisor_ring_queue *)&ctrl_mem[PAGE_SIZE +
		bitmap_size * TRANSIT_DIRECTION];
	vnotify.vnotify_ctrl_mem = ctrl_mem;

	ret = do_vnotify_stage2_map(kvm, init->vnotify_ctrl_gpa, (hva_t)vnotify.vnotify_ctrl_mem,
		vnotify.ctrl_mem_page_nr * PAGE_SIZE);
	if (ret != 0)
		goto err;

	return ret;
err:
	vnotify_ctrl_mem_destroy();
	return ret;
}

bool check_task_perm(void)
{
	struct rtos_task_struct *task = task_to_rtos_task(current);

	return task->chns && task->chns->kvm == vnotify.kvm;
}

typedef bool (*match_fn)(const struct task_chn_node *, const void *);

/*
 * Linear search of a task's channel list for the first node matching
 * `target` under `match`; NULL if none.
 *
 * Fix: the loop used list_for_each_entry_safe, whose extra "tmp" cursor
 * is only needed when entries are removed during iteration — nothing is
 * removed here, so use the plain iterator.
 */
static struct task_chn_node *find_task_chn_node(const struct list_head *head, match_fn match, const void *target)
{
	struct task_chn_node *node = NULL;

	list_for_each_entry(node, head, list) {
		if (match(node, target))
			return node;
	}
	return NULL;
}

static inline bool chn_fd_match(const struct task_chn_node *node, const void *fd)
{
	return node->fd == *(u32 *)fd;
}

static inline bool chn_id_match(const struct task_chn_node *node, const void *id)
{
	return data_to_id(node->data) == *(u32 *)id;
}

static inline bool chn_name_match(const struct task_chn_node *node, const void *name)
{
	return name && strncmp(node->data->name, (const char *)name, CHN_NAME_LEN) == 0;
}

static inline bool chn_file_match(const struct task_chn_node *node, const void *filp)
{
	return filp && node->data->data_t.file == filp;
}

/* Look up a task channel node by eventfd number. */
static inline struct task_chn_node *find_task_chn_node_by_fd(const struct list_head *head, int fd)
{
	/* fd is copied to the stack so the match callback gets a stable pointer */
	return find_task_chn_node(head, chn_fd_match, &fd);
}

/* Look up a task channel node by global channel id. */
static inline struct task_chn_node *find_task_chn_node_by_id(const struct list_head *head, u32 id)
{
	return find_task_chn_node(head, chn_id_match, &id);
}

/* Look up a task channel node by channel name. */
static inline struct task_chn_node *find_task_chn_node_by_name(const struct list_head *head, const char *name)
{
	return find_task_chn_node(head, chn_name_match, name);
}

/*
 * Resolve the channel behind an eventfd attached by the current task;
 * NULL when the fd is unknown or the task has no channel context.
 *
 * Fix: task->chns can be NULL for a task that never attached a channel
 * (check_task_perm() guards against exactly that); without this check
 * the mutex_lock below would dereference NULL.
 */
static struct chn_data *fd_to_data(int fd)
{
	struct chn_data *data = NULL;
	struct task_chn_node *node = NULL;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;

	if (unlikely(!chns))
		return NULL;

	mutex_lock(&chns->lock);
	node = find_task_chn_node_by_fd(&chns->head, fd);
	if (node)
		data = node->data;
	mutex_unlock(&chns->lock);

	return data;
}

/* Record (fd -> channel) in the task's channel list; 0 or -ENOMEM. */
static int task_chn_node_add(struct list_head *head, struct chn_data *data, int fd)
{
	struct task_chn_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node) {
		hpvisor_err("alloc chn node failed\n");
		return -ENOMEM;
	}

	node->fd = fd;
	node->data = data;
	list_add(&node->list, head);
	return 0;
}

/*
 * Return a channel slot to the free pool: clear its identity, reset the
 * status word to the "never attached" sentinel, detach the file pointer
 * under file_lock, and clear the stride's all-used marker so allocation
 * scans revisit this stride.
 */
static void chn_data_free(struct chn_data *data)
{
	unsigned long flags;

	data->name[0] = 0;
	data->data_t.flags = 0;
	chn_status_set(&data->data_t, LLONG_MAX);
	data->chn_xid.pid = 0;
	data->chn_xid.uid = 0;
	data->chn_xid.gid = 0;

	/* must release old file after calling this func */
	spin_lock_irqsave(&data->data_t.file_lock, flags);
	data->data_t.file = NULL;
	spin_unlock_irqrestore(&data->data_t.file_lock, flags);

	/* alloced cleared last so concurrent scans never see a half-reset slot. */
	data->data_t.alloced = 0;
	__sync_fetch_and_and(&data->stride->all_chn_used, 0);
}

/*
 * Kick the guest's vnotify IPI handler on the pcpu hosting vcpu 0.
 * NOTE(review): the `pcpu > 0` test silently skips delivery when
 * hpvisor_arch_find_pcpu() returns 0 — confirm 0 is an error/invalid
 * value in that API rather than a valid physical CPU number.
 */
static void send_ipi_to_guest(u32 id)
{
	int pcpu;

	pcpu = hpvisor_arch_find_pcpu(0);
	if (pcpu > 0) {
		hpvisor_vnotify_send_ipi(pcpu);
		hpvisor_vnotify_debug_notify("id:%u send ipi to guest\n", id);
	}
}

/*
 * Tell the guest that all host users of a channel are gone: mark the tx
 * state CLOSED and, unless a notification is already in flight (READY
 * set), enqueue the id and kick the guest with an IPI.
 */
static void host_close_chn(u32 id)
{
	u32 old_state;
	struct vnotify_stride *stride;
	struct hpvisor_ring_queue *rq;

	stride = id_to_stride(id);
	if (unlikely(!stride)) {
		hpvisor_vnotify_debug_syscall("chn id %u is invlaid\n", id);
		return;
	}

	/* notify guest, host app all detached */
	old_state = __sync_fetch_and_or(&get_tx_chn_state(stride, id)->state,
		HPVISOR_STATE_CLOSED_BIT);
	/* Already queued/notifying: the in-flight event will carry the CLOSED bit. */
	if ((old_state & HPVISOR_STATE_IS_READY) != 0)
		return;

	rq = &vnotify.rq_headers[TX_RQ_HEADER_OFFSET(stride->idx)];
	ring_queue_enqueue(rq, stride->tx_rq, id);
	set_bit(stride->idx, vnotify.host_to_guest_stride_bitmap);
	send_ipi_to_guest(id);
}

/*
 * Guest side has detached: mark the rx state CLOSED and wake every host
 * waiter with EPOLLHUP so blocked readers/pollers see the hangup.
 * Returns 0 on success or a negative errno.
 */
static int guest_close_chn(u32 id)
{
	int ret;
	struct vnotify_stride *stride;

	stride = id_to_stride(id);
	if (unlikely(!stride)) {
		hpvisor_err("chn id %u is invlaid\n", id);
		return -EINVAL;
	}
	/* if guest detached, need wake up all host app */
	__sync_fetch_and_or(&get_rx_chn_state(stride, id)->state, HPVISOR_STATE_CLOSED_BIT);
	ret = chn_data_wake_up(id_to_data_stride(stride, id), EPOLLHUP);
	if (ret != 0)
		hpvisor_err("channel wake up failed, ret %d\n", ret);
	return ret;
}

/*
 * Drop one host attach. When the last host user leaves:
 * - if the guest is also gone, recycle the slot immediately;
 * - otherwise tell the guest (unless the channel is PERSISTENT, which
 *   survives host detaches).
 * Caller holds the channel's data lock.
 */
static void host_put_chn_data(struct chn_data *data)
{
	s64 status;

	status = chn_detach(data, STATUS_HOST);
	if (status_host_cnt(status) > 0)
		return;

	if (status_guest_cnt(status) <= 0)
		chn_data_free(data);
	else if (!(data->data_t.flags & HPVISOR_ATTACH_PERSISTENT_BIT))
		host_close_chn(data_to_id(data));
}

/*
 * Detach the current task from a channel: drop the host attach under the
 * channel lock, then close the task's eventfd outside it.
 */
static void chn_data_host_detach(struct chn_data *data, int fd)
{
	chn_data_lock(&data->data_t);
	host_put_chn_data(data);
	chn_data_unlock(&data->data_t);

	close_fd(fd);
	hpvisor_vnotify_debug_syscall("id %u, name %s, flags:0x%x, status:0x%llx\n",
		data_to_id(data), data->name, data->data_t.flags, chn_status_get(&data->data_t));
}

/*
 * Claim the first free channel slot in a stride, initialize it with the
 * given name/flags, and attach as `type`. Returns the channel,
 * ERR_PTR(-ENOSPC) when the stride is full (also sets all_chn_used so
 * later scans skip it), or another ERR_PTR from chn_attach().
 */
static struct chn_data *get_unused_chn_data(struct vnotify_stride *stride,
	const char *name, int flags, int type)
{
	s64 ret;
	struct chn_data *data;
	struct chn_data *end = id_to_data_stride(stride, stride->chn_nr - 1);

	/* Skip the reserved control-channel slots at the front of the stride. */
	if (stride->idx == 0)
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_FIRST_STRIDE);
	else
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_OTHER_STRIDE);

	while (data <= end) {
		if (!data->data_t.alloced) {
			/* isb: publish alloced before the fields written below. */
			data->data_t.alloced = 1;
			isb();
			data->data_t.flags = flags;
			chn_state_reset(stride, data_to_id_stride(stride, data));
			strncpy(data->name, name, CHN_NAME_LEN - 1);

			/*
			 * NOTE(review): on chn_attach() failure the slot stays
			 * marked alloced with name/flags set — confirm a caller
			 * recycles it, otherwise the slot leaks.
			 */
			ret = chn_attach(data, type);
			if (IS_ERR_VALUE(ret))
				return ERR_PTR(ret);

			/* First host attacher becomes the channel's owner identity. */
			if (type == STATUS_HOST) {
				chn_data_lock(&data->data_t);
				data->chn_xid.pid = task_tgid_vnr(current);
				data->chn_xid.uid = from_kuid_munged(current_user_ns(), current_uid());
				data->chn_xid.gid = from_kgid_munged(current_user_ns(), current_gid());
				chn_data_unlock(&data->data_t);
			}
			return data;
		}
		data++;
	}
	__sync_fetch_and_or(&stride->all_chn_used, 1);
	return ERR_PTR(-ENOSPC);
}

/*
 * Find a free channel slot across all strides; if every stride is full,
 * grow by allocating and mapping one more stride and allocate from it.
 * Returns the channel or an ERR_PTR. Caller serializes via stride_lock
 * (required by vnotify_stride_alloc_and_map()).
 *
 * Fix: the per-stride error test was `IS_ERR(data) && data ==
 * ERR_PTR(-ENOSPC)` — the IS_ERR() is redundant when comparing against a
 * specific ERR_PTR constant.
 */
static struct chn_data *find_unused_chn_data(const char *name, int flags, int type)
{
	struct vnotify_stride *node = NULL;
	struct chn_data *data = NULL;
	int ret;
	int i = 0;

	for (; i < vnotify.effective_stride_count; i++) {
		node = vnotify.strides_addr_arr[i];
		if (!node) {
			hpvisor_err("can't get stride, i:%d\n", i);
			continue;
		}

		if (node->all_chn_used == 1)
			continue;

		data = get_unused_chn_data(node, name, flags, type);
		/* Only "stride full" keeps the search going; other errors are final. */
		if (data == ERR_PTR(-ENOSPC))
			continue;
		return data;
	}

	/* can't find a unused chn_data in all stride, we will alloc a new stride */
	ret = vnotify_stride_alloc_and_map(&node);
	if (ret != 0) {
		hpvisor_err("vnotify_stride_alloc_and_map fail, ret:%d\n", ret);
		return ERR_PTR(-ENOSPC);
	}

	return get_unused_chn_data(node, name, flags, type);
}

/* Creating a channel requires the CREATE flag; then claim a free slot. */
static struct chn_data *alloc_new_chn_data(const char *name, int flags, int type)
{
	if (flags & HPVISOR_ATTACH_CREATE_BIT)
		return find_unused_chn_data(name, flags, type);

	return ERR_PTR(-EINVAL);
}

/*
 * Validate an attach against an existing channel: the flags must match
 * exactly, and a second host attach is only allowed on SHARED channels.
 * Returns 0, -EINVAL on a flag mismatch, or -EMLINK on a non-shared
 * double host attach.
 */
static int check_chn_data_flags(const struct chn_data *data, u32 flags, int type)
{
	bool shared = (data->data_t.flags & HPVISOR_ATTACH_SHARED_BIT) != 0;

	if (unlikely(flags != data->data_t.flags)) {
		hpvisor_err("flags:0x%x is not equal, name:%s flags:0x%x status:0x%llx\n",
			flags, data->name, data->data_t.flags, chn_status_get(&data->data_t));
		return -EINVAL;
	}

	/* only host attach need check SHARED bit */
	if (type == STATUS_HOST && !shared &&
		(status_host_cnt(chn_status_get(&data->data_t)) > 0)) {
		hpvisor_err("not support shared, name:%s flags:0x%x status:0x%llx\n",
			data->name, data->data_t.flags, chn_status_get(&data->data_t));
		return -EMLINK;
	}
	return 0;
}

/* True when either direction of the channel carries the CLOSED bit. */
static inline bool chn_state_closed(struct chn_data *data)
{
	struct vnotify_stride *stride = data->stride;
	u32 id = data_to_id_stride(stride, data);
	u32 states = get_rx_chn_state(stride, id)->state |
		get_tx_chn_state(stride, id)->state;

	return (states & HPVISOR_STATE_CLOSED_BIT) != 0;
}

/*
 * Attach to an existing channel in a stride by name. Scans the normal
 * channels, validates flags under the per-channel lock, and bumps the
 * attach count. Returns the channel, an ERR_PTR on flag/attach failure,
 * or NULL when no live channel with that name exists in the stride.
 */
static struct chn_data *get_used_chn_data(struct vnotify_stride *stride,
	const char *name, int flags, int type)
{
	s64 ret;
	struct chn_data *data;
	struct chn_data *end = id_to_data_stride(stride, stride->chn_nr - 1);

	/* Skip the reserved control-channel slots at the front of the stride. */
	if (stride->idx == 0)
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_FIRST_STRIDE);
	else
		data = id_to_data_stride(stride, NORMAL_CHN_START_IN_OTHER_STRIDE);

	while (data <= end) {
		/* All checks and the attach happen under the channel's lock. */
		chn_data_lock(&data->data_t);
		if (!data->data_t.alloced || chn_state_closed(data) ||
			(strncmp(data->name, name, CHN_NAME_LEN) != 0)) {
			chn_data_unlock(&data->data_t);
			data++;
			continue;
		}

		ret = check_chn_data_flags(data, flags, type);
		if (ret != 0) {
			chn_data_unlock(&data->data_t);
			return ERR_PTR(ret);
		}

		ret = chn_attach(data, type);
		if (IS_ERR_VALUE(ret)) {
			chn_data_unlock(&data->data_t);
			return ERR_PTR(ret);
		}

		/* if guest has been attached and host is first attached */
		if ((type == STATUS_HOST) &&
			(status_host_cnt(chn_status_get(&data->data_t)) == 1)) {
			data->chn_xid.pid = task_tgid_vnr(current);
			data->chn_xid.uid = from_kuid_munged(current_user_ns(), current_uid());
			data->chn_xid.gid = from_kgid_munged(current_user_ns(), current_gid());
		}
		chn_data_unlock(&data->data_t);
		return data;
	}
	return NULL;
}

/*
 * Search every published stride for an existing channel with this name.
 * Returns the first match (or its ERR_PTR), NULL if none is found.
 */
static struct chn_data *find_used_chn_data(const char *name, int flags, int type)
{
	int i;

	for (i = 0; i < vnotify.effective_stride_count; i++) {
		struct vnotify_stride *stride = vnotify.strides_addr_arr[i];
		struct chn_data *data;

		if (!stride) {
			hpvisor_err("can't get stride, i:%d\n", i);
			continue;
		}

		data = get_used_chn_data(stride, name, flags, type);
		if (data)
			return data;
	}
	return NULL;
}

/*
 * Notify the guest that a channel has data: atomically set READY on the
 * tx state and, if no notification was already pending, enqueue the id,
 * flag the stride, and send an IPI. Returns 0, or -EPIPE when the guest
 * side has closed the channel.
 */
static int chn_id_send(struct vnotify_stride *stride, u32 id)
{
	u32 old_state;
	struct hpvisor_chn_state *tx = NULL;
	struct hpvisor_ring_queue *rq;

	tx = get_tx_chn_state(stride, id);
	if (unlikely(tx->state & HPVISOR_STATE_CLOSED_BIT)) {
		hpvisor_err("chn %u epiped\n", id);
		return -EPIPE;
	}

	old_state = __sync_fetch_and_or(&tx->state, HPVISOR_STATE_IS_READY);
	/* chn has been setted HPVISOR_STATE_PENDING_BIT or HPVISOR_STATE_NOTIFYING_BIT */
	if ((old_state & HPVISOR_STATE_IS_READY) != 0)
		return 0;

	rq = &vnotify.rq_headers[TX_RQ_HEADER_OFFSET(stride->idx)];
	ring_queue_enqueue(rq, stride->tx_rq, id);
	set_bit(stride->idx, vnotify.host_to_guest_stride_bitmap);

	send_ipi_to_guest(id);
	return 0;
}

/*
 * (Re)create the per-channel debug trace buffer. Sized for a full
 * poll->wake->read cycle per configured record; a configured value of 0
 * disables tracing (record stays NULL). Returns 0 or -ENOMEM.
 */
static int vnotify_record_init(struct chn_data *data)
{
	int num = hpvisor_vnotify_get_record_val() * HPVISOR_EVENT_MAX_NR;

	if (num <= 0) {
		data->record = NULL;
		return 0;
	}

	/* Drop any record buffer left over from a previous user of this slot. */
	kfree(data->record);

	data->record = kzalloc(sizeof(struct vnotify_record) + sizeof(struct record_ctx) * num, GFP_KERNEL);
	if (!data->record) {
		hpvisor_err("alloc record memory failed\n");
		return -ENOMEM;
	}

	data->record->size = num;
	return 0;
}

/* must be locked by eventfd_ctx->wqh.lock, or record maybe disordered */
/*
 * Append one trace entry (event/pid/tid/count/clock) to the channel's
 * circular record buffer. Silently returns when tracing is disabled or
 * the id is invalid. Takes a vnotify reference for the duration.
 */
void hpvisor_vnotify_record_write(u8 event, u32 id, u32 count)
{
	struct chn_data *data = NULL;
	struct record_ctx *ctx = NULL;
	struct vnotify_stride *stride;

	if (unlikely(!get_vnotify()))
		return;
	if (unlikely(!check_chn_id_valid(id))) {
		put_vnotify();
		return;
	}

	stride = id_to_stride(id);
	if (unlikely(!stride)) {
		hpvisor_vnotify_debug_syscall("chn id %u is invlaid\n", id);
		put_vnotify();
		return;
	}

	data = id_to_data_stride(stride, id);
	/* Tracing disabled for this channel. */
	if (data->record == NULL) {
		put_vnotify();
		return;
	}

	/* Claim the next slot; the buffer wraps and overwrites the oldest entry. */
	ctx = &data->record->array[data->record->tail_idx];
	data->record->tail_idx = (data->record->tail_idx + 1) % data->record->size;

	ctx->pid = current->tgid;
	ctx->tid = current->pid;
	ctx->index = get_rx_chn_state(stride, id)->mask;
	ctx->efd_count = count;
	ctx->event = event;
	ctx->clock = local_clock();
	put_vnotify();
}

/* Human-readable name for an event code; out-of-range maps to "null". */
static inline const char *event_to_str(u8 event)
{
	if (event >= HPVISOR_EVENT_MAX_NR)
		return g_event_str[HPVISOR_EVENT_MAX_NR];
	return g_event_str[event];
}

/* Emit one trace entry; columns match the header in vnotify_record_print(). */
static inline void print_record_ctx(const struct record_ctx *ctx)
{
	hpvisor_info("%4d, %4d, %s, %u, %u, %-20llu\n",
		ctx->pid, ctx->tid, event_to_str(ctx->event), ctx->efd_count, ctx->index,
		ctx->clock);
}

static void vnotify_record_print(struct chn_data *data)
{
	int i;
	struct record_ctx *ctx_array = NULL;

	if (!data->record)
		return;

	ctx_array = data->record->array;

	hpvisor_info("chn_id:%u\n", data_to_id(data));
	hpvisor_info("PID, TID, EVT, CNT, IDX, %-20s\n", "CLK");
	if (ctx_array[data->record->tail_idx].clock != 0) {
		for (i = data->record->tail_idx; i < data->record->size; i++)
			print_record_ctx(&ctx_array[i]);
	}
	for (i = 0; i < data->record->tail_idx; i++)
		print_record_ctx(&ctx_array[i]);
}

/*
 * host: vnotify chns_lock and process chns lock held, both mutex locks
 * guest: vnotify chns_lock held, mutex lock
 */
/*
 * Allocate a fresh chn_data plus its backing eventfd.
 * On success *data is set and the new fd is returned; on any failure every
 * partially-built object is torn down and a negative errno is returned.
 */
static int alloc_new_chn_data_and_file(struct chn_data **data, const char *name, int flags, int type)
{
	int fd, ret;
	struct chn_data *new_data;

	new_data = alloc_new_chn_data(name, flags, type);
	if (IS_ERR(new_data)) {
		ret = PTR_ERR(new_data);
		hpvisor_err("alloc new chn_data failed, ret:%d\n", ret);
		return ret;
	}

	/* optional trace ring; presumably released by chn_data_free() -- confirm */
	if (vnotify_record_init(new_data) != 0) {
		chn_data_free(new_data);
		return -ENOMEM;
	}

	fd = create_new_eventfd(data_to_id(new_data), get_efd_flags(flags), flags);
	if (unlikely(fd < 0)) {
		hpvisor_err("alloc fd failed, ret %d\n", fd);
		chn_data_free(new_data);
		return fd;
	}

	/* bind the eventfd to the channel; undo both on failure */
	ret = new_chn_data_efd_attach(&new_data->data_t, fd);
	if (ret) {
		chn_data_free(new_data);
		close_fd(fd);
		return ret;
	}

	*data = new_data;
	return fd;
}

/* Report whether the vnotify backend is still alive (refcount non-zero). */
static int vnotify_ioctl_check(void)
{
	return refcount_read(&vnotify.vdata_t.ref_count) ? 0 : -EHOSTDOWN;
}

/*
 * Query the status word of channel @id.
 * Returns the status on success, -ENOENT for an out-of-range id or
 * -EINVAL when the id does not resolve to a channel.
 */
static int vnotify_ioctl_query(u32 id)
{
	struct chn_data *data;

	if (unlikely(!check_chn_id_valid(id)))
		return -ENOENT;

	data = id_to_data(id);
	if (unlikely(!data)) {
		hpvisor_err("chn id %u is invalid\n", id); /* fixed typo "invlaid" */
		return -EINVAL;
	}
	return get_data_status(&data->data_t);
}

/* Send a notification on the channel bound to eventfd @efd. */
static int vnotify_ioctl_host_send(int efd)
{
	struct chn_data *data;

	data = fd_to_data(efd);
	if (unlikely(!data))
		return -EBADFD;

	return chn_id_send(data->stride, data_to_id(data));
}

/* Wake local pollers of the channel bound to eventfd @efd with EPOLLIN. */
static int vnotify_ioctl_host_wake(int efd)
{
	struct chn_data *data = fd_to_data(efd);

	return data ? chn_data_wake_up(data, EPOLLIN) : -EBADFD;
}

/* Translate an attached eventfd back to its channel id. */
static int vnotify_ioctl_host_find(int efd)
{
	struct chn_data *data = fd_to_data(efd);

	return data ? data_to_id(data) : -EBADFD;
}

/*
 * Attach the calling host task to the named channel.
 *
 * Holds vnotify.stride_lock, then the per-process chns->lock, for the whole
 * operation.  Three outcomes:
 *   1. this task already attached the channel -> return its existing fd
 *   2. the channel exists                     -> bind a fresh fd to it
 *   3. the channel does not exist             -> allocate data + eventfd
 * Returns the fd on success or a negative errno.
 */
static int vnotify_ioctl_host_attach(int flags, uintptr_t uname, unsigned long chn_fds)
{
	int fd;
	char kname[CHN_NAME_LEN] = {0};
	struct chn_data *data = NULL;
	struct task_chn_node *node = NULL;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;
	int ret;

	if ((flags & ~HPVISOR_CHN_FLAGS_SET)) {
		hpvisor_vnotify_debug_syscall("flags is over range, flags:0x%x\n", flags);
		return -EINVAL;
	}

	if (unlikely(copy_name_from_user(kname, CHN_NAME_LEN, (void __user *)uname))) {
		/* fixed typo: "form user" -> "from user" */
		hpvisor_vnotify_debug_syscall("copy chn name from user failed\n");
		return -EFAULT;
	}

	mutex_lock(&vnotify.stride_lock);
	mutex_lock(&chns->lock);
	/* if already attached, just return self fd */
	node = find_task_chn_node_by_name(&chns->head, kname);
	if (node) {
		fd = node->fd;
		goto unlock;
	}

	data = find_used_chn_data(kname, flags, STATUS_HOST);
	/* result 1: data flags check failed or chn_attach failed */
	if (IS_ERR(data)) {
		hpvisor_vnotify_debug_syscall("flags check failed or chn_attach failed! kname:%s\n", kname);
		fd = PTR_ERR(data);
		goto unlock;
	}
	/* result 2: data exists */
	if (data) {
		fd = get_unused_fd_flags(get_efd_flags(flags));
		if (unlikely(fd < 0)) {
			/* undo the attach ref taken by find_used_chn_data() */
			chn_data_lock(&data->data_t);
			host_put_chn_data(data);
			chn_data_unlock(&data->data_t);

			/* fixed typo: "unsed" -> "unused" */
			hpvisor_vnotify_debug_syscall("get unused fd failed, ret %d\n", fd);
			goto unlock;
		}
		chn_data_efd_attach(&data->data_t, fd);
	/* result 3: need to alloc new one */
	} else {
		fd = alloc_new_chn_data_and_file(&data, kname, flags, STATUS_HOST);
		if (unlikely(fd < 0)) {
			hpvisor_vnotify_debug_syscall("alloc new chn_data and file failed, ret %d\n", fd);
			goto unlock;
		}
	}

	/* record this attachment in the task's per-process list */
	if (unlikely(task_chn_node_add(&chns->head, data, fd))) {
		chn_data_lock(&data->data_t);
		host_put_chn_data(data);
		chn_data_unlock(&data->data_t);

		close_fd(fd);
		hpvisor_vnotify_debug_syscall("add task chn node failed, fd:%d, id:%u, name:%s, flags:0x%x, status:0x%llx\n",
			fd, data_to_id(data), data->name, data->data_t.flags,
			chn_status_get(&data->data_t));
		fd = -ENOMEM;
		goto unlock;
	}

	hpvisor_vnotify_debug_syscall("fd:%d, id:%u, name:%s, flags:0x%x, status:0x%llx\n",
		fd, data_to_id(data), data->name, data->data_t.flags,
		chn_status_get(&data->data_t));
	ret = check_chn_count(data);
	if (ret != 0)
		fd = ret;
unlock:
	mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);

	return fd;
}

/*
 * Detach @efd from its channel for the calling task.
 * fd1 and flags are part of the ioctl ABI but unused here.
 */
static int vnotify_ioctl_host_detach(int efd, int fd1, int flags)
{
	int ret = -EBADFD;
	struct task_chn_node *node;
	struct chns_struct *chns = task_to_rtos_task(current)->chns;

	mutex_lock(&vnotify.stride_lock);
	mutex_lock(&chns->lock);

	node = find_task_chn_node_by_fd(&chns->head, efd);
	if (unlikely(!node)) {
		mutex_unlock(&chns->lock);
		mutex_unlock(&vnotify.stride_lock);
		hpvisor_vnotify_debug_syscall("find task chn node failed! fd:%d\n", efd);
		return ret;
	}

	chn_data_host_detach(node->data, node->fd);
	ret = check_chn_count(node->data);
	list_del(&node->list);

	mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);
	kfree(node);
	return ret;
}

/* Dump the trace ring of the channel bound to @fd (debug-only ioctl). */
static int vnotify_ioctl_print_record(int fd)
{
	struct chn_data *data;

	if (!hpvisor_vnotify_record_debug_on())
		return -ENOSYS;

	data = fd_to_data(fd);
	if (!data)
		return -EBADFD;

	vnotify_record_print(data);
	return 0;
}

/*
 * Send on the control channel; @efd must be the fd returned by ctrl attach.
 * The fd check runs under the channel lock, which also guards ctrl_chn_fd.
 */
static int vnotify_ioctl_ctrl_send(int efd)
{
	struct vnotify_stride *stride = get_vnotify_stride(0);
	struct chn_data *data;
	int ret;

	if (unlikely(!stride)) {
		hpvisor_err("can't get stride\n");
		return -ENODATA;
	}

	data = id_to_data_stride(stride, CTRL_CHN_GUEST_WAKE_UP_QEMU);

	chn_data_lock(&data->data_t);
	if (unlikely(efd < 0 || efd != vnotify.ctrl_chn_fd)) {
		ret = -EBADFD;
		chn_data_unlock(&data->data_t);
		hpvisor_err("fd %d is not control channel\n", efd);
	} else {
		ret = chn_id_send(stride, CTRL_CHN_GUEST_WAKE_UP_QEMU);
		chn_data_unlock(&data->data_t);
	}
	return ret;
}

/*
 * Attach the control channel (CTRL_CHN_GUEST_WAKE_UP_QEMU) and record the
 * attaching task's pid/uid/gid.  Only one attachment may exist at a time.
 * Returns the new eventfd on success or a negative errno.
 */
static int vnotify_ioctl_ctrl_attach(void)
{
	int fd;
	struct chn_data *data;
	struct vnotify_stride *stride;

	stride = get_vnotify_stride(0);
	if (unlikely(!stride)) {
		hpvisor_err("can't get stride\n");
		return -ENODATA;
	}

	data = id_to_data_stride(stride, CTRL_CHN_GUEST_WAKE_UP_QEMU);

	/* allocate the eventfd before taking the channel lock */
	fd = create_new_eventfd(CTRL_CHN_GUEST_WAKE_UP_QEMU, EFD_SEMAPHORE, HPVISOR_ATTACH_SHARED_BIT);
	if (unlikely(fd < 0)) {
		hpvisor_err("alloc file failed for control channel\n");
		return fd;
	}

	chn_data_lock(&data->data_t);
	/* the control channel is exclusive: reject a second attach */
	if (data->data_t.alloced) {
		hpvisor_err("control channel has been attached\n");
		close_fd(fd);
		chn_data_unlock(&data->data_t);
		return -EEXIST;
	}

	if (unlikely(new_chn_data_efd_attach(&data->data_t, fd) < 0)) {
		hpvisor_err("control channel attached failed\n");
		close_fd(fd);
		chn_data_unlock(&data->data_t);
		return -EBADFD;
	}

	data->data_t.alloced = 1;
	vnotify.ctrl_chn_fd = fd;
	/* remember the owner's credentials for later xid queries */
	data->chn_xid.pid = task_tgid_vnr(current);
	data->chn_xid.uid = from_kuid_munged(current_user_ns(), current_uid());
	data->chn_xid.gid = from_kgid_munged(current_user_ns(), current_gid());
	chn_data_unlock(&data->data_t);

	return fd;
}

/*
 * Detach the control channel: close its fd and clear owner credentials.
 * Returns close_fd()'s result, or a negative errno for an unknown fd.
 */
static int vnotify_ioctl_ctrl_detach(int efd)
{
	int ret;
	struct chn_data *data;
	struct vnotify_stride *stride;

	stride = get_vnotify_stride(0);
	if (unlikely(!stride)) {
		hpvisor_err("can't get stride\n");
		return -ENODATA;
	}

	data = id_to_data_stride(stride, CTRL_CHN_GUEST_WAKE_UP_QEMU);

	chn_data_lock(&data->data_t);
	/* ctrl_chn_fd is guarded by the channel lock, same as in ctrl attach/send */
	if (unlikely(efd < 0 || efd != vnotify.ctrl_chn_fd)) {
		chn_data_unlock(&data->data_t);
		hpvisor_err("fd %d is not control channel\n", efd);
		return -EBADFD;
	}

	ret = close_fd(efd);
	/* NOTE(review): isb() presumably orders close_fd() against the state reset below -- confirm the intent */
	isb();
	data->chn_xid.pid = 0;
	data->chn_xid.uid = 0;
	data->chn_xid.gid = 0;
	data->data_t.alloced = 0;
	vnotify.ctrl_chn_fd = -1;
	chn_data_unlock(&data->data_t);

	return ret;
}

/*
 * Attach the guest side to channel @kname, creating the channel if needed.
 * Holds vnotify.stride_lock for the duration.  Returns the channel id on
 * success or a negative errno.
 */
static int vnotify_ioctl_guest_attach(int flags, uintptr_t uname)
{
	int fd, ret;
	struct chn_data *data = NULL;
	char kname[CHN_NAME_LEN] = {0};

	if ((flags & ~HPVISOR_CHN_FLAGS_SET)) {
		hpvisor_vnotify_debug_syscall("check flags failed, flags:0x%x\n", flags);
		return -EINVAL;
	}
	if (copy_name_from_user(kname, CHN_NAME_LEN, (void __user *)uname)) {
		hpvisor_vnotify_debug_syscall("copy from user failed, flags:0x%x\n", flags);
		return -EFAULT;
	}

	mutex_lock(&vnotify.stride_lock);
	data = find_used_chn_data(kname, flags, STATUS_GUEST);
	/* result 1: data flags check failed or guest has been attached */
	if (IS_ERR(data)) {
		mutex_unlock(&vnotify.stride_lock);
		hpvisor_vnotify_debug_syscall("flags check failed or guest has been attached! kname:%s\n", kname);
		return PTR_ERR(data);
	}
	/* result 2: data exists */
	if (data) {
		/* count of file equal to host_count + guest_count */
		chn_data_lock(&data->data_t);
		get_file(data->data_t.file);
		chn_data_unlock(&data->data_t);
	/* result 3: need to alloc new one */
	} else {
		fd = alloc_new_chn_data_and_file(&data, kname, flags, STATUS_GUEST);
		if (unlikely(fd < 0)) {
			mutex_unlock(&vnotify.stride_lock);
			hpvisor_vnotify_debug_syscall("guest alloc new chn_data and file failed, ret %d\n", fd);
			return fd;
		}

		/*
		 * if guest creates chn_data first, we want to create
		 * file now, but we don't want to possess fd since we
		 * don't need it in guest, so we get file and close fd.
		 */
		get_file(data->data_t.file);
		ret = close_fd(fd);
		if (ret != 0) {
			struct file *filp = data->data_t.file;

			chn_data_free(data);
			fput(filp);
			mutex_unlock(&vnotify.stride_lock);
			/* NOTE(review): message says "ret %d" but prints fd -- likely meant ret */
			hpvisor_vnotify_debug_syscall("guest close fd failed, ret %d\n", fd);
			return ret;
		}
	}
	hpvisor_vnotify_debug_syscall("id:%u, name:%s, flags:0x%x, status:0x%llx\n",
		data_to_id(data), data->name, data->data_t.flags, chn_status_get(&data->data_t));
	mutex_unlock(&vnotify.stride_lock);

	return data_to_id(data);
}

/*
 * Detach the guest side of channel @id.  If no host attachments remain the
 * channel data is freed; otherwise a non-persistent channel is closed on the
 * guest side.  Returns 0 or a negative errno.
 */
static int vnotify_ioctl_guest_detach(u32 id)
{
	int ret = 0;
	s64 status;
	struct chn_data *data = NULL;
	struct file *filp = NULL;

	if (unlikely(!check_chn_id_valid(id))) {
		hpvisor_vnotify_debug_syscall("chn id %u over range\n", id);
		return -ERANGE;
	}

	data = id_to_data(id);
	if (unlikely(!data)) {
		hpvisor_vnotify_debug_syscall("chn id %u is invlaid\n", id);
		return -EINVAL;
	}

	mutex_lock(&vnotify.stride_lock);
	chn_data_lock(&data->data_t);
	status = chn_detach(data, STATUS_GUEST);
	if (IS_ERR_VALUE(status)) {
		chn_data_unlock(&data->data_t);
		mutex_unlock(&vnotify.stride_lock);
		hpvisor_vnotify_debug_syscall("guest not attached with id %u\n", id);
		return status;
	}

	/* grab the file pointer before the data may be freed below */
	filp = data->data_t.file;

	/*
	 * NOTE(review): data is still dereferenced below (chn_data_unlock and
	 * the debug print) after chn_data_free().  Confirm chn_data_free()
	 * only resets the preallocated stride slot and does not release the
	 * memory or reinitialize the lock -- otherwise this is a
	 * use-after-free and the unlock/print must be reordered before it.
	 */
	if (status_host_cnt(status) <= 0)
		chn_data_free(data);
	else if (!(data->data_t.flags & HPVISOR_ATTACH_PERSISTENT_BIT))
		ret = guest_close_chn(id);
	chn_data_unlock(&data->data_t);

	/* only need fput, fd has been closed in guest attach */
	if (filp != NULL)
		fput(filp);

	hpvisor_vnotify_debug_syscall("id:%u, name:%s, flags:0x%x, status:0x%llx\n",
		id, data->name, data->data_t.flags, chn_status_get(&data->data_t));
	mutex_unlock(&vnotify.stride_lock);
	return ret;
}

/*
 * Copy the owner credentials (pid/uid/gid) of channel @id into @chn_xid_p.
 * Non-control channels must currently have at least one host attachment.
 * Returns 0 on success, -EINVAL on a bad or unattached channel.
 */
static int vnotify_get_xid(u32 id, struct hpvisor_chn_xid *chn_xid_p)
{
	struct chn_data *data = NULL;

	data = id_to_data(id);
	if (unlikely(!data)) {
		hpvisor_err("chn id %u is invalid\n", id); /* fixed typo "invlaid" */
		return -EINVAL;
	}

	chn_data_lock(&data->data_t);
	if (!data->data_t.alloced) {
		chn_data_unlock(&data->data_t);
		hpvisor_err("chn id %u is not alloced\n", id);
		return -EINVAL;
	}

	/* ordinary channels (ids above the control channel) need a live host attach */
	if ((id > CTRL_CHN_GUEST_WAKE_UP_QEMU) &&
		(status_host_cnt(chn_status_get(&data->data_t)) <= 0)) {
		chn_data_unlock(&data->data_t);
		hpvisor_err("chn id %u is not attached by host\n", id);
		return -EINVAL;
	}

	chn_xid_p->pid = data->chn_xid.pid;
	chn_xid_p->uid = data->chn_xid.uid;
	chn_xid_p->gid = data->chn_xid.gid;
	chn_data_unlock(&data->data_t);

	return 0;
}

/*
 * Fetch the pid/uid/gid of the control-channel owner.
 * Returns 0 on success, -ENODATA if vnotify is down, -EINVAL on a NULL arg.
 */
int vnotify_get_vmctrl_xid(struct hpvisor_chn_xid *chn_xid_p)
{
	struct vnotify_data_t *vdata_t = get_vnotify();
	int ret = -EINVAL;

	if (unlikely(!vdata_t))
		return -ENODATA;

	if (chn_xid_p != NULL)
		ret = vnotify_get_xid(CTRL_CHN_GUEST_WAKE_UP_QEMU, chn_xid_p);

	put_vnotify();
	return ret;
}
EXPORT_SYMBOL_NS(vnotify_get_vmctrl_xid, HW_RTOS_NS);

/*
 * Copy channel @id's owner credentials out to the user buffer @chn_xid.
 * Only ids at or above the control channel are accepted.
 */
static long vnotify_ioctl_get_chn_xid(u32 id, unsigned long chn_xid)
{
	struct hpvisor_chn_xid xid;
	void __user *uptr = (void __user *)chn_xid;
	int ret;

	if (uptr == NULL)
		return -EINVAL;

	if (unlikely(!check_chn_id_valid(id)) || (id < CTRL_CHN_GUEST_WAKE_UP_QEMU))
		return -ERANGE;

	ret = vnotify_get_xid(id, &xid);
	if (ret < 0)
		return ret;

	if (copy_to_user(uptr, &xid, sizeof(struct hpvisor_chn_xid))) {
		hpvisor_err("copy_to_user err, may be chn_xid_p is not user address\n");
		return -EFAULT;
	}
	return 0;
}

static long vnotify_ioctl_get_vnotify_setting(unsigned long vnotify_setting)
{
	void __user *vnotify_setting_p = (void __user *)vnotify_setting;
	struct hpvisor_vnotify_setting vnotify_setting_tmp;

	if (!vnotify_setting_p) {
		hpvisor_err("vnotify_setting_p is NULL\n");
		return -EINVAL;
	}

	if (!vnotify.inited) {
		hpvisor_err("vnotify has not inited\n");
		return -ENODATA;
	}

	vnotify_setting_tmp.max_stride_nr = vnotify.max_stride_nr;
	vnotify_setting_tmp.stride_chn_nr = vnotify.stride_chn_nr;
	vnotify_setting_tmp.ctrl_mem_size = vnotify.ctrl_mem_page_nr << PAGE_SHIFT;
	vnotify_setting_tmp.strides_mem_size = (vnotify.stride_page_nr * vnotify.max_stride_nr)
		<< PAGE_SHIFT;

	if (copy_to_user(vnotify_setting_p, &vnotify_setting_tmp,
		sizeof(struct hpvisor_vnotify_setting))) {
		hpvisor_err("copy_to_user err, may be vnotify_setting_p is not user address\n");
		return -EFAULT;
	}
	return 0;
}

/* use_chn is used to mark whether this task has used at least one chn */
/*
 * Duplicate the parent's channel list into @newc on fork.
 * Each copied node takes an extra host attach ref on its channel.
 * On allocation/attach failure the whole new list is discarded and
 * ERR_PTR(-ENOMEM) is returned.
 */
struct chns_struct *vm_dup_chns(struct chns_struct *oldc, struct chns_struct *newc, bool *use_chn)
{
	s64 ret;
	struct task_chn_node *old = NULL;
	struct task_chn_node *new = NULL;
	struct task_chn_node *tmp = NULL;

	if (!get_vnotify())
		return newc;

	vnotify_stride_lock();
	mutex_lock(&oldc->lock);
	list_for_each_entry(old, &oldc->head, list) {
		*use_chn = true;
		new = kmalloc(sizeof(struct task_chn_node), GFP_KERNEL);
		if (unlikely(!new))
			goto err;

		new->fd = old->fd;
		new->data = old->data;
		list_add_tail(&new->list, &newc->head);
		chn_data_lock(&new->data->data_t);
		ret = chn_attach(new->data, STATUS_HOST);
		chn_data_unlock(&new->data->data_t);
		if (IS_ERR_VALUE(ret))
			goto err;
	}
	mutex_unlock(&oldc->lock);
	/* if *use_chn is true, this lock will be unlocked in copy_process() */
	if (!*use_chn)
		vnotify_stride_unlock();
	put_vnotify();
	return newc;

err:
	mutex_unlock(&oldc->lock);
	vnotify_stride_unlock();
	put_vnotify();
	/*
	 * Bug fix: the plain list_for_each_entry variant dereferenced
	 * new->list.next AFTER kfree(new) (use-after-free); the _safe
	 * variant caches the next pointer before freeing.
	 */
	list_for_each_entry_safe(new, tmp, &newc->head, list)
		kfree(new);
	/* NOTE(review): nodes already chn_attach()ed are not detached here -- confirm refcount handling */
	kfree(newc);
	return ERR_PTR(-ENOMEM);
}

/*
 * Detach and free every task_chn_node on @chns at task exit.
 *
 * Returns true when at least one channel was in use; in that case
 * vnotify_stride_lock() is intentionally left HELD and released later in
 * do_exit() -- callers must honor this lock handoff.
 */
bool vm_chns_struct_free_list(struct chns_struct *chns)
{
	bool need_detach = true;
	struct task_chn_node *tmp = NULL;
	struct task_chn_node *node = NULL;
	struct vnotify_data_t *vdata_t = NULL;
	bool use_chn = false;

	/* if used vnotify, list is not empty */
	if (chns->kvm) {
		vdata_t = get_vnotify();
		/* skip the detach step when vnotify is gone or belongs to another VM */
		if (!vdata_t || chns->kvm != vnotify.kvm)
			need_detach = false;

		vnotify_stride_lock();
		mutex_lock(&chns->lock);
		list_for_each_entry_safe(node, tmp, &chns->head, list) {
			use_chn = true;
			if (need_detach) {
				chn_data_lock(&node->data->data_t);
				host_put_chn_data(node->data);
				chn_data_unlock(&node->data->data_t);
				hpvisor_vnotify_debug_exit("abort exited: fd:%d id:%u detached\n",
					node->fd, data_to_id(node->data));
			}
			list_del(&node->list);
			kfree(node);
		}
		mutex_unlock(&chns->lock);
		/* if use_chn is true, this lock will be unlocked in do_exit() */
		if (!use_chn)
			vnotify_stride_unlock();
		if (vdata_t)
			put_vnotify();
	}
	return use_chn;
}

/*
 * Drop the calling task's node for @filp when the channel file is closed.
 *
 * This can be reached while the current task already holds stride_lock or
 * chns->lock (close issued from inside another vnotify path), so each lock
 * is only taken when this task is not already its owner.
 */
void hpvisor_close_chn(struct file *filp)
{
	struct chns_struct *chns = task_to_rtos_task(current)->chns;
	struct task_chn_node *node = NULL;
	struct task_chn_node *tmp = NULL;
	bool do_chns_lock = false;

	if (unlikely(!get_vnotify()))
		return;

	/* ignore files that do not belong to vnotify */
	if (check_vnotify_file(filp) < 0)
		goto vnotify_out;

	if (!chns)
		goto vnotify_out;

	/* already owning stride_lock means a vnotify path is active: bail out */
	if ((unsigned long)hpvisor_mutex_owner(&vnotify.stride_lock) != (unsigned long)current)
		mutex_lock(&vnotify.stride_lock);
	else
		goto vnotify_out;

	if ((unsigned long)hpvisor_mutex_owner(&chns->lock) != (unsigned long)current) {
		do_chns_lock = true;
		mutex_lock(&chns->lock);
	}

	/* drop the (single) node bound to this file, if any */
	list_for_each_entry_safe(node, tmp, &chns->head, list) {
		if (node->data->data_t.file == filp) {
			spin_lock(&node->data->data_t.lock);
			host_put_chn_data(node->data);
			spin_unlock(&node->data->data_t.lock);
			list_del(&node->list);
			kfree(node);
			break;
		}
	}
	if (do_chns_lock)
		mutex_unlock(&chns->lock);
	mutex_unlock(&vnotify.stride_lock);

vnotify_out:
	put_vnotify();
}

/* Total bytes of stride shared memory across all strides. */
static long vnotify_ioctl_get_strides_mem_size(void)
{
	/* widen before multiply/shift: u32 * u32 << PAGE_SHIFT can overflow 32 bits */
	return ((long)vnotify.stride_page_nr * vnotify.max_stride_nr) << PAGE_SHIFT;
}

/* Resolve a page offset within the strides region to its backing page. */
static struct page *get_vnotify_stride_mem_page(unsigned long pgoff)
{
	struct vnotify_stride *stride;
	u32 idx = pgoff / vnotify.stride_page_nr;
	u32 off = pgoff % vnotify.stride_page_nr;

	stride = get_vnotify_stride(idx);
	if (!stride) {
		hpvisor_err("can't get stride, idx:%u\n", idx);
		return NULL;
	}

	return stride->pages[off];
}

/*
 * Page-fault handler for vnotify mmaps.  Resolves the faulting offset to a
 * preallocated ctrl-mem or stride-mem page and installs it via vmf->page.
 */
static vm_fault_t hpvisor_vnotify_page_fault(struct vm_fault *vmf)
{
	/* vm_fault_t accumulator (was int): VM_FAULT_* are vm_fault_t bits */
	vm_fault_t ret = 0;
	struct page *page;
	struct vnotify_data_t *vdata_t = get_vnotify();
	unsigned long pgoff_in_vma;

	if (unlikely(!vdata_t))
		return VM_FAULT_NOPAGE;

	/* offset of the faulting page within this VMA */
	pgoff_in_vma = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	if (vmf->vma->vm_pgoff == HPVISOR_VNOTIFY_CTRL_MEM_PAGE_OFFSET) {
		page = get_vnotify_ctrl_mem_page(pgoff_in_vma);
	} else if (vmf->vma->vm_pgoff == HPVISOR_VNOTIFY_STRIDE_MEM_PAGE_OFFSET) {
		page = get_vnotify_stride_mem_page(pgoff_in_vma);
	} else {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (!page) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/* take an extra reference; the fault path transfers it to the PTE */
	get_page(page);
	vmf->page = page;
out:
	put_vnotify();
	return ret;
}

/* Fault-in handler backing vnotify_mmap(); pages are mapped lazily on fault. */
static const struct vm_operations_struct hpvisor_vnotify_vm_ops = {
	.fault = hpvisor_vnotify_page_fault,
};

/*
 * mmap handler for the vnotify char device: no pages are mapped up front,
 * they are populated on demand through hpvisor_vnotify_page_fault().
 */
static int vnotify_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &hpvisor_vnotify_vm_ops;
	return 0;
}

/* ioctl dispatch table registered with the common vnotify layer (VM mode). */
static const struct vnotify_ioctl_operations vm_vnotify_oper = {
	.check = vnotify_ioctl_check,
	.query = vnotify_ioctl_query,
	.ctrl_attach = vnotify_ioctl_ctrl_attach,
	.ctrl_detach = vnotify_ioctl_ctrl_detach,
	.ctrl_send = vnotify_ioctl_ctrl_send,
	.guest_attach = vnotify_ioctl_guest_attach,
	.guest_detach = vnotify_ioctl_guest_detach,
	.host_send = vnotify_ioctl_host_send,
	.host_wake = vnotify_ioctl_host_wake,
	.host_find = vnotify_ioctl_host_find,
	.host_attach = vnotify_ioctl_host_attach,
	.host_detach = vnotify_ioctl_host_detach,
	.print_record = vnotify_ioctl_print_record,
	.get_strides_mem_size = vnotify_ioctl_get_strides_mem_size,
	.get_chn_xid = vnotify_ioctl_get_chn_xid,
	.get_vnotify_setting = vnotify_ioctl_get_vnotify_setting,
};

/* Lazily set up the calling task's chns bookkeeping on its first vnotify call. */
static int task_first_call(void)
{
	struct rtos_task_struct *rtsk = task_to_rtos_task(current);

	if (rtsk->chns) {
		/* Generally, task->chns should never equal to NULL */
		if (unlikely(!rtsk->chns->kvm))
			rtsk->chns->kvm = vnotify.kvm;
		return 0;
	}

	rtsk->chns = chns_alloc_and_init(vnotify.kvm);
	return rtsk->chns ? 0 : -ENOMEM;
}

int hpvisor_ioctl_vnotify_init(struct kvm *kvm, unsigned long arg)
{
	int ret;
	void __user *argp = (void __user *)arg;
	struct init_info init;
	u64 max_chn_nr;

	if (unlikely(!access_ok(((void *__user)arg), sizeof(struct init_info)))) {
		hpvisor_err("arg:0x%lx error\n", arg);
		return -EFAULT;
	}
	if (copy_from_user(&init, argp, sizeof(struct init_info)))
		return -EFAULT;

	if (init.stride_chn_nr <= NORMAL_CHN_START_IN_FIRST_STRIDE || (init.max_stride_nr <= 0))
		return -EINVAL;

	max_chn_nr = (u64)init.max_stride_nr * init.stride_chn_nr;
	if (max_chn_nr > CHN_NR_LIMIT) {
		hpvisor_err("vnotify don't support that many channels:%d\n",
			init.max_stride_nr * init.stride_chn_nr);
		return -EINVAL;
	}

	if (!__sync_bool_compare_and_swap(&vnotify.inited, false, true)) {
		hpvisor_err("vnotify only support one vm\n");
		return -EEXIST;
	}

	ret = vnotify_ctrl_mem_init(kvm, &init);
	if (ret != 0)
		goto err;

	vnotify.effective_stride_count = 0;
	vnotify.max_stride_nr = init.max_stride_nr;
	vnotify.stride_chn_nr = init.stride_chn_nr;
	vnotify.stride_page_nr = DIV_ROUND_UP(init.stride_chn_nr * sizeof(struct hpvisor_chn_state),
		PAGE_SIZE) * HPVISOR_SHARE_MEM_FIELD_NR;
	vnotify.vnotify_stride_gpa = init.vnotify_stride_gpa;

	vnotify.strides_addr_arr = kcalloc(vnotify.max_stride_nr, sizeof(struct vnotify_stride *),
		GFP_KERNEL);
	if (unlikely(!vnotify.strides_addr_arr))
		goto err;

	vnotify.kvm = kvm;
	ret = vnotify_stride_alloc_and_map(NULL);
	if (ret != 0)
		goto err;

	vnotify.ctrl_chn_fd = -1;
	vnotify.qemu_tgid = task_tgid_vnr(current);
	vnotify.vdata_t.get_vnotify = get_vnotify;
	vnotify.vdata_t.put_vnotify = put_vnotify;
	vnotify.vdata_t.task_first_call = task_first_call;
	vnotify.vdata_t.check_task_perm = check_task_perm;
	vnotify.vdata_t.vnotify_close_chn = hpvisor_close_chn;
	vnotify.vdata_t.vnotify_chns_unlock = vnotify_stride_unlock;
	vnotify.vdata_t.dup_chns = vm_dup_chns;
	vnotify.vdata_t.chns_struct_free_list = vm_chns_struct_free_list;
	vnotify.vdata_t.ipi_vnotify_handler = hpvisor_vnotify_handler;
	vnotify.vdata_t.mode = VM_MODE;

	get_vnotify_cdev_ops()->mmap = vnotify_mmap;
	set_vnotify_operations(&vm_vnotify_oper);
	set_vdata_t(&vnotify.vdata_t);
	refcount_set(&vnotify.vdata_t.ref_count, 1);
	return 0;
err:
	vnotify_destroy();
	return ret;
}
