/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: fast notify for hpvisor
 * Author: wanglinhui <wanglinhui@huawei.com>
 * Create: 2024-06-22
 */

#include "dfx.h"
#include "vnotify.h"

#include <linux/slab.h>
#include <linux/compiler.h>

static struct vnotify_data_t *g_vdata_t;

/*
 * Register the vnotify backend data table.  A NULL argument clears the
 * registration; readers obtain the current table via get_vdata_t().
 */
void set_vdata_t(struct vnotify_data_t *vdata_t)
{
	g_vdata_t = vdata_t;
}

/*
 * Fetch the currently registered vnotify backend data table, or NULL if
 * no backend has been registered via set_vdata_t().
 */
struct vnotify_data_t *get_vdata_t(void)
{
	return g_vdata_t;
}

/*
 * Statically initialized channel set holding one reference.
 * exit_vnotify_chns() deliberately never kfree()s this instance.
 */
struct chns_struct vnotify_init_chns = {
	.count		= ATOMIC_INIT(1),
	.kvm		= NULL,
	.lock		= __MUTEX_INITIALIZER(vnotify_init_chns.lock),
	.head		= LIST_HEAD_INIT(vnotify_init_chns.head)
};

/*
 * Build a slot value that represents an already-consumed entry: only the
 * consumed flag is set; cycle and channel id fields are zero.
 */
static inline uint64_t create_consumed_rq_node(void)
{
	return HPVISOR_RQ_CONSUMED_BIT;
}

/* Test whether a slot value carries the consumed flag. */
static inline bool rq_node_is_consumed(uint64_t node)
{
	return (node & HPVISOR_RQ_CONSUMED_BIT) != 0;
}

/*
 * Extract the cycle (ring lap) number stored in the high bits of @data by
 * shifting out the low @order payload bits.
 *
 * The all-ones value (-1 when reinterpreted as signed) marks an
 * uninitialized slot and is passed through unchanged rather than shifted.
 */
static inline int64_t hpvisor_get_cycles(uint64_t data, uint16_t order)
{
	int64_t raw = (int64_t)data;

	return (raw == -1) ? -1 : (int64_t)(data >> order);
}

/*
 * Reset a ring-queue header for a queue of 2^@count_order slots.
 * Only the header is touched; the slot array itself is not cleared here.
 */
void rq_header_init(struct hpvisor_ring_queue *ring, uint32_t count_order)
{
	/* Empty queue: producer and consumer cursors coincide at zero. */
	ring->head = 0;
	ring->tail = 0;

	/* Capacity is a power of two so "pos & (count - 1)" indexes slots. */
	ring->conf.count_order = count_order;
	ring->conf.count = 1U << count_order;
	ring->conf.data_width = HPVISOR_RQ_CHN_ID_BITS;
}

/*
 * Lock-free multi-producer enqueue of channel id @id into ring @rq.
 *
 * Each 64-bit slot in @rq_array packs [ cycle | consumed flag | id ]: the
 * id occupies the low conf.data_width bits, the consumed flag sits just
 * above it, and the remaining high bits hold the cycle (ring lap) number.
 *
 * A producer claims a monotonically increasing ticket from rq->tail; the
 * ticket's low count_order bits select the slot and its high bits give the
 * ticket's cycle.  A slot may only be overwritten when it holds an older
 * cycle AND has already been consumed; the CAS arbitrates between racing
 * producers.  On any failure the producer takes a fresh ticket and retries.
 *
 * NOTE(review): the retry loop is unbounded — if the ring stays full this
 * spins forever; presumably the ring is sized so that cannot happen — confirm.
 */
void ring_queue_enqueue(struct hpvisor_ring_queue *rq, uint64_t *rq_array, unsigned int id)
{
	uint64_t tail, index;
	uint64_t ent, new_ent;
	int64_t e_cycle, t_cycle;

	/* Reject ids that do not fit in the slot's id field. */
	if (rq == NULL || id > ((1U << rq->conf.data_width) - 1))
		return;

	while (1) {
		/* Claim a ticket; index is ticket mod ring size. */
		tail = __sync_fetch_and_add(&rq->tail, 1);
		index = tail & (rq->conf.count - 1);
		ent = READ_ONCE(rq_array[index]);

		/* Cycle stored in the slot vs cycle implied by the ticket. */
		e_cycle = hpvisor_get_cycles(ent, rq->conf.data_width + CONSUMED_BIT);
		t_cycle = hpvisor_get_cycles(tail, rq->conf.count_order);
		if ((e_cycle < t_cycle) && rq_node_is_consumed(ent)) {
			new_ent = ((uint64_t)t_cycle << (rq->conf.data_width + CONSUMED_BIT)) | id;
			if (__sync_bool_compare_and_swap(&rq_array[index], ent, new_ent))
				break;
		}
	}
}

/*
 * Dequeue one channel id from ring @rq.
 *
 * Returns the id (low conf.data_width bits of the head slot) on success,
 * or -ENODATA when the ring is empty or no entry is ready at head.
 *
 * The head slot is "ready" when the cycle stored in it equals the cycle
 * implied by rq->head; it is then marked consumed (so producers may reuse
 * it) and the head cursor advances.  A slot carrying a stale cycle is
 * overwritten with a consumed placeholder via CAS and skipped.
 *
 * NOTE(review): rq->head is advanced with a plain WRITE_ONCE, which looks
 * like it assumes a single consumer (or externally serialized consumers)
 * — confirm against callers.
 */
int ring_queue_dequeue(struct hpvisor_ring_queue *rq, uint64_t *rq_array)
{
	uint64_t head, tail, index;
	uint64_t ent, new_ent;
	int64_t h_cycle, e_cycle;

	if (rq == NULL)
		return -ENODATA;

	while (1) {
		head = READ_ONCE(rq->head);
		tail = READ_ONCE(rq->tail);
		/* ring queue empty */
		if (head == tail)
			return -ENODATA;

		index = head & (rq->conf.count - 1);
		ent = READ_ONCE(rq_array[index]);
		e_cycle = hpvisor_get_cycles(ent, rq->conf.data_width + CONSUMED_BIT);
		h_cycle = hpvisor_get_cycles(head, rq->conf.count_order);
		if (e_cycle == h_cycle) {
			/* Ready entry: flag it consumed, advance head, return id. */
			ent = __sync_fetch_and_or(&rq_array[index], create_consumed_rq_node());
			WRITE_ONCE(rq->head, head + 1);
			return ent & ((1U << rq->conf.data_width) - 1);
		}

		/* no valid data */
		if (head + 1 == tail)
			return -ENODATA;

		if (e_cycle < h_cycle) {
			/*
			 * Stale slot: stamp it with the current cycle plus the
			 * consumed flag so producers can reuse it, then skip it.
			 */
			new_ent = ((uint64_t)h_cycle << (rq->conf.data_width + CONSUMED_BIT)) |
				create_consumed_rq_node();
			if (!__sync_bool_compare_and_swap(&rq_array[index], ent, new_ent))
				continue;
		}
		WRITE_ONCE(rq->head, head + 1);
	}
}

/*
 * Resolve @fd in the current task's fd table and stash the struct file
 * pointer in @data_t->file.
 *
 * Returns 0 on success, -EBADFD if @fd does not resolve to an open file.
 *
 * NOTE(review): the looked-up file is stored without taking a reference
 * (no get_file()) before file_lock is dropped, so data_t->file may dangle
 * if the fd is closed afterwards — confirm the caller pins the file's
 * lifetime by other means.
 */
int new_chn_data_efd_attach(struct chn_data_t *data_t, int fd)
{
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	data_t->file = files_lookup_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (unlikely(!data_t->file)) {
		hpvisor_err("get file failed from fd %d\n", fd);
		return -EBADFD;
	}

	return 0;
}

/*
 * Allocate a channel-set container bound to @kvm, starting with a single
 * reference and an empty channel list.  Returns NULL on allocation failure;
 * the caller owns the result and releases it via the count in
 * exit_vnotify_chns().
 */
struct chns_struct *chns_alloc_and_init(struct kvm *kvm)
{
	struct chns_struct *chns = kmalloc(sizeof(*chns), GFP_KERNEL);

	if (unlikely(chns == NULL))
		return NULL;

	atomic_set(&chns->count, 1);
	mutex_init(&chns->lock);
	INIT_LIST_HEAD(&chns->head);
	chns->kvm = kvm;
	return chns;
}

/*
 * Duplicate (or share) the parent's vnotify channel set for new task @tsk
 * during clone/fork.
 *
 * With CLONE_FILES the child shares the parent's chns_struct (reference
 * count bumped).  Otherwise a fresh chns_struct is allocated and, when a
 * vnotify backend is registered, the parent's channel list is copied into
 * it via the backend's dup_chns().
 *
 * Returns 0 on success or a negative errno.
 */
int copy_vnotify_chns(unsigned long flags, struct rtos_task_struct *tsk, bool *use_chn)
{
	struct chns_struct *newc = NULL;
	struct chns_struct *oldc = task_to_rtos_task(current)->chns;

	/* Parent has no channel set: nothing to copy. */
	if (!oldc)
		return 0;

	/* Shared files => shared channel set; just take a reference. */
	if ((flags & CLONE_FILES) != 0) {
		tsk->chns = oldc;
		atomic_inc(&oldc->count);
		return 0;
	}

	newc = chns_alloc_and_init(oldc->kvm);
	if (unlikely(!newc))
		return -ENOMEM;

	if (get_vdata_t()) {
		/*
		 * NOTE(review): if dup_chns() returns an ERR_PTR, the newc
		 * allocated above appears to leak unless dup_chns frees it
		 * on failure — confirm the dup_chns contract.
		 */
		newc = get_vdata_t()->dup_chns(oldc, newc, use_chn);
		if (IS_ERR(newc))
			return PTR_ERR(newc);
	}

	tsk->chns = newc;
	return 0;
}

/*
 * called by do_exit: if process exited, should detach all channel resources
 * 1. if is not the last thread exiting, only need to decrease reference
 * 2. if is the last thread exiting, need free channel list and detach all channels
 * 3. return val: whether this task is using at least one vnotify chn
 */
bool exit_vnotify_chns(struct rtos_task_struct *task)
{
	struct chns_struct *chns = task->chns;
	bool use_chn = false;

	/* Task never attached any vnotify channels. */
	if (!chns)
		return use_chn;

	/* Only the last reference holder tears the set down. */
	if (atomic_dec_and_test(&chns->count)) {
		if (get_vdata_t())
			use_chn = get_vdata_t()->chns_struct_free_list(chns);

		/*
		 * The static vnotify_init_chns is never freed; note that in
		 * that case task->chns is also left pointing at it after the
		 * count reaches zero — NOTE(review): confirm no later path
		 * dereferences task->chns.
		 */
		if (chns != &vnotify_init_chns) {
			kfree(chns);
			task->chns = NULL;
		}
	}
	return use_chn;
}
