/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022.
 * Description: irq bypass
 * Author: lilinjie8 <lilinjie8@huawei.com>
 * Create: 2022-02-07
 */

#include "../../virt/hpvisor/dfx.h"
#include "mmu.h"
#include "irqbypass.h"

#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/kvm_host.h>
#include <linux/of_address.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/esr.h>
#include <asm/uaccess.h>
#include <asm/arch_gicv3.h>
#include <asm/kvm_emulate.h>

#ifdef CONFIG_FIQ_GLUE
#define FIQ_MEM_REG 28
#define FIQ_VCPU_REG 29
#endif

#define HVC_ARG_NR 6
#define HOST_IPI_MAX_NUM 7
#define IRQ_MAX_NR (GENMASK(23, 0) + 1)

#define PTIMER_IRQ 30
#define GIC_NAME_LEN 32

#define HVC_INTERRUPT 1024
#define HVC_USERSPACE 1025

#define EOI_MASK 0x480001FF
#define STATE_PENDING_BIT BIT(0)

/* Per-interrupt bookkeeping attached to a vcpu (see kvm_vcpu->irq_data). */
struct irq_state {
	u32 hwirq;	/* physical interrupt number */
	u32 state;	/* pending flags; only STATE_PENDING_BIT is used */
};

/*
 * Userspace-supplied init descriptor for HPVISOR_IRQ_BYPASS
 * (copied in by get_init_info()). Pointer members point to
 * user arrays of vcpu_num / irq_num u32 elements respectively.
 */
struct init_info {
	u32 vcpu_num;		/* number of vcpus in the guest */
	u32 *vcpu_to_cpu;	/* vcpu index -> pcpu number binding */
	u32 irq_num;		/* number of hwirqs owned by the guest */
	u32 *guest_irqs;	/* list of guest-owned hwirq numbers */
};

/*
 * Per-VM irq-bypass state. Only one VM is supported at a time
 * (see hpvisor_vm_data and the CAS on 'inited').
 */
struct vm_irq_data {
	bool inited;		/* set once by the init ioctl; guards single-VM use */
	u32 irq_num;		/* length of guest_irqs[] */
	u32 *guest_irqs;	/* kernel copy of the guest-owned hwirq list */
	struct kvm *kvm;	/* owning VM, NULL once released */
	refcount_t ref_count;	/* lifetime; dropping to zero tears everything down */
};

/* Index of each GIC region in gic_data[]; also the stage-2 map 'type'. */
enum {
	/* The user-mode input parameter is u32, The enumeration must start from 0 */
	GIC_DISTRIBUTOR = 0,
	GIC_REDISTRIBUTOR,
	GIC_CPU_INTERFACE,
	GIC_TYPE_MAX_NR
};

/* Userspace request to map one GIC region into the guest at stage 2. */
struct gic_s2_map {
	u64 gpa;	/* guest physical address to map at */
	u32 size;	/* bytes to map; must not exceed the region size */
	u32 type;	/* one of GIC_DISTRIBUTOR/GIC_REDISTRIBUTOR/GIC_CPU_INTERFACE */
};

/*
 * Host-physical location of each GIC region, discovered from the
 * device tree by hpvisor_gic_of_init(). hpa == 0 means "not found".
 */
static struct {
	u64 hpa;
	u64 size;
	char name[GIC_NAME_LEN];
} gic_data[GIC_TYPE_MAX_NR] = {
	[GIC_DISTRIBUTOR] = {
		.hpa = 0,
		.size = 0,
		.name = "Distributor"
	},
	[GIC_REDISTRIBUTOR] = {
		.hpa = 0,
		.size = 0,
		.name = "Redistributor"
	},
	[GIC_CPU_INTERFACE] = {
		.hpa = 0,
		.size = 0,
		.name = "CPU Interface"
	}
};

/* Singleton per-VM state: irq bypass supports exactly one guest VM. */
static struct vm_irq_data hpvisor_vm_data;
/* pcpu number -> vcpu index; -1 means no vcpu is bound to that pcpu */
static int cpu_to_vcpu[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = -1
};

/*
 * Check whether the guest can register the interrupt.
 * 1. PTIMER_IRQ used by host, guest only can use VTIMER_IRQ.
 * 2. Guest cannot register ipi, which used by host.
 * 3. hwirq must be less than IRQ_MAX_NR.
 */
static inline bool check_guest_irq_range(u32 hwirq)
{
	if (hwirq == PTIMER_IRQ)
		return false;
	if (hwirq <= HOST_IPI_MAX_NUM)
		return false;
	return hwirq < IRQ_MAX_NR;
}

static void hpvisor_vm_data_free(void)
{
	int i;
	u32 mask = EOI_MASK;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_vcpu[i] != -1) {
			/* this operation need guest interrupt priority is lower than host */
			smp_call_function_single(i, hpvisor_gic_eoi_activated, &mask, false);
			cpu_to_vcpu[i] = -1;
		}
	}

	if (hpvisor_vm_data.guest_irqs) {
		kfree(hpvisor_vm_data.guest_irqs);
		hpvisor_vm_data.guest_irqs = NULL;
		hpvisor_vm_data.irq_num = 0;
	}

	hpvisor_vm_data.inited = false;
}

/* Final teardown, called when the VM refcount drops to zero (put_vm_data). */
static void irq_bypass_destroy(void)
{
	hpvisor_vm_data_free();
	hpvisor_irq_debug_common("irq bypass destroyed!\n");
}

static inline struct vm_irq_data *get_vm_data(void)
{
	if (refcount_inc_not_zero(&hpvisor_vm_data.ref_count))
		return &hpvisor_vm_data;
	return NULL;
}

/* Drop a reference taken by get_vm_data(); last put destroys everything. */
static inline void put_vm_data(void)
{
	if (!refcount_dec_and_test(&hpvisor_vm_data.ref_count))
		return;
	irq_bypass_destroy();
}

/*
 * Populate the global VM data from a validated init_info: bind each vcpu
 * to its pcpu and record the guest-owned hwirq list.
 * On error the caller is expected to run hpvisor_vm_data_free() to undo
 * any partial cpu_to_vcpu[] updates and free guest_irqs.
 * Returns 0, -ENOMEM or -ERANGE.
 */
static int hpvisor_vm_data_init(const struct init_info *init)
{
	int i;

	/* kmalloc_array checks irq_num * sizeof(u32) for overflow */
	hpvisor_vm_data.guest_irqs = kmalloc_array(init->irq_num, sizeof(u32), GFP_KERNEL);
	if (!hpvisor_vm_data.guest_irqs)
		return -ENOMEM;

	for (i = 0; i < init->vcpu_num; i++) {
		if (init->vcpu_to_cpu[i] >= NR_CPUS) {
			hpvisor_err("vcpu %d cannot bind to pcpu%u, max nr is %u\n",
				i, init->vcpu_to_cpu[i], NR_CPUS);
			return -ERANGE;
		}
		cpu_to_vcpu[init->vcpu_to_cpu[i]] = i;
	}

	for (i = 0; i < init->irq_num; i++) {
		if (!check_guest_irq_range(init->guest_irqs[i])) {
			hpvisor_err("irqnr %u invalid or out of range [%u, %lu]\n",
				init->guest_irqs[i], HOST_IPI_MAX_NUM, IRQ_MAX_NR);
			return -ERANGE;
		}
		hpvisor_vm_data.guest_irqs[i] = init->guest_irqs[i];
	}
	hpvisor_vm_data.irq_num = init->irq_num;
	return 0;
}

/*
 * Allocate a zeroed kernel buffer of @size bytes and fill it from the
 * user pointer @from. On success *@to owns the buffer (caller kfrees);
 * on failure *@to is left untouched.
 * Returns 0, -ENOMEM or -EFAULT.
 */
static int alloc_and_copy_data(void **to, void __user *from, size_t size)
{
	void *tmp = kzalloc(size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	/* copy_from_user performs the access_ok() validation internally,
	 * so a separate access_ok check beforehand is redundant.
	 */
	if (copy_from_user(tmp, from, size)) {
		kfree(tmp);
		return -EFAULT;
	}
	*to = tmp;
	return 0;
}

/* Dump the vcpu binding and guest irq list to the debug log. */
static void show_init_info(const struct init_info *init)
{
	int idx;

	hpvisor_irq_debug_common("cpu info:\n");
	for (idx = 0; idx < init->vcpu_num; idx++)
		hpvisor_irq_debug_common("vcpu[%d] = %u\n", idx, init->vcpu_to_cpu[idx]);

	hpvisor_irq_debug_common("irq info:\n");
	for (idx = 0; idx < init->irq_num; idx++)
		hpvisor_irq_debug_common("irqs[%d] = %u\n", idx, init->guest_irqs[idx]);
}

/* Release an init_info and both of its member arrays. NULL is allowed. */
static void free_init_info(struct init_info *init)
{
	if (!init)
		return;
	/* kfree(NULL) is a no-op, so the members need no guards,
	 * and NULLing them right before freeing the container is pointless.
	 */
	kfree(init->vcpu_to_cpu);
	kfree(init->guest_irqs);
	kfree(init);
}

/*
 * Range-check the user-supplied counts before they are used to size
 * copies and allocations. At least one pcpu must stay with the host,
 * hence vcpu_num < num_online_cpus(). Returns 0 or -ERANGE.
 */
static int check_init_info(const struct init_info *init)
{
	if (init->vcpu_num < 1 || init->vcpu_num >= num_online_cpus()) {
		/* both bounds are exclusive of num_online_cpus(); log the
		 * actual inclusive maximum instead of the exclusive limit
		 */
		hpvisor_err("cpu num %u out range of [1 %u]\n",
			init->vcpu_num, num_online_cpus() - 1);
		return -ERANGE;
	}
	if (init->irq_num < 1 || init->irq_num >= IRQ_MAX_NR - HOST_IPI_MAX_NUM) {
		hpvisor_err("irq num %u out range of [1 %lu]\n",
			init->irq_num, IRQ_MAX_NR - HOST_IPI_MAX_NUM - 1);
		return -ERANGE;
	}
	return 0;
}

/*
 * Copy the init_info descriptor and its two member arrays from userspace.
 * Returns a kernel-owned init_info (free with free_init_info()) or NULL.
 */
static void *get_init_info(u64 arg)
{
	int ret;
	struct init_info *u_init = NULL;
	struct init_info *init = NULL;
	void __user *vcpus = NULL;
	void __user *irqs = NULL;

	ret = alloc_and_copy_data((void **)&u_init, (void __user *)arg, sizeof(struct init_info));
	if (ret != 0) {
		hpvisor_err("get init info failed, arg:%llx ret:%d\n", arg, ret);
		goto err;
	}
	/* validate counts before they size the array copies below */
	ret = check_init_info(u_init);
	if (ret != 0)
		goto err;

	init = kzalloc(sizeof(struct init_info), GFP_KERNEL);
	if (unlikely(!init)) {
		hpvisor_err("kzalloc failed for kernel init\n");
		goto err;
	}

	init->irq_num = u_init->irq_num;
	init->vcpu_num = u_init->vcpu_num;
	vcpus = u_init->vcpu_to_cpu;
	irqs = u_init->guest_irqs;

	/* the arrays are declared u32; size the copies by the declared type */
	ret = alloc_and_copy_data((void **)&init->vcpu_to_cpu, vcpus, init->vcpu_num * sizeof(u32));
	if (ret != 0) {
		hpvisor_err("copy vcpu to cpu failed, vcpus:0x%llx ret:%d\n", (u64)vcpus, ret);
		goto err;
	}
	ret = alloc_and_copy_data((void **)&init->guest_irqs, irqs, init->irq_num * sizeof(u32));
	if (ret != 0) {
		hpvisor_err("copy guest irqs failed\n");
		goto err;
	}
	kfree(u_init);
	return init;
err:
	/* kfree(NULL) is a no-op, no guard needed */
	kfree(u_init);
	free_init_info(init);
	return NULL;
}

/*
 * Ioctl entry: initialize irq bypass for a VM from a userspace init_info.
 * Only one VM may own the bypass at a time; the 'inited' CAS arbitrates.
 * Returns 0, or -ENODEV/-EINVAL/-EFAULT/-EEXIST/-ENOMEM/-ERANGE.
 */
int hpvisor_arch_ioctl_irq_bypass_init(struct kvm *kvm, unsigned long arg)
{
	int ret;
	struct init_info *init = NULL;

	if (!gic_data[GIC_DISTRIBUTOR].hpa) {
		hpvisor_err("Gic data does not init successful\n");
		return -ENODEV;
	}
	if (!kvm || kvm->gic_mode != HPVISOR_IRQ_BYPASS) {
		hpvisor_err("kvm may be not support irq bypass\n");
		return -EINVAL;
	}

	init = get_init_info(arg);
	if (!init)
		return -EFAULT;
	show_init_info(init);

	if (!__sync_bool_compare_and_swap(&hpvisor_vm_data.inited, false, true)) {
		hpvisor_err("irq bypass only support one vm\n");
		/* Another VM already owns the bypass: we must NOT fall through
		 * to hpvisor_vm_data_free() here, or we would tear down the
		 * live VM's state. Just drop our local copy.
		 */
		free_init_info(init);
		return -EEXIST;
	}

	ret = hpvisor_vm_data_init(init);
	if (ret != 0)
		goto out_free_vm;

	hpvisor_vm_data.kvm = kvm;
	refcount_set(&hpvisor_vm_data.ref_count, 1);
	free_init_info(init);
	return 0;

out_free_vm:
	free_init_info(init);
	/* we won the CAS above, so this state is ours to roll back */
	hpvisor_vm_data_free();
	return ret;
}

/* Reverse-lookup the pcpu a vcpu index is bound to; -ENODATA if none. */
int hpvisor_arch_find_pcpu(int vcpu)
{
	int cpu;
	const int limit = num_possible_cpus();

	for (cpu = 0; cpu < limit; cpu++) {
		if (cpu_to_vcpu[cpu] == vcpu)
			return cpu;
	}
	return -ENODATA;
}

/* Drop the VM's ownership of the bypass when its kvm is torn down. */
void hpvisor_arch_irq_bypass_release(const struct kvm *kvm)
{
	if (!kvm || kvm != hpvisor_vm_data.kvm)
		return;
	hpvisor_vm_data.kvm = NULL;
	put_vm_data();
}

/*
 * Called in host irq context when a guest-owned hwirq fires on a pcpu
 * that hosts a vcpu: mask + EOI the irq, mark it pending in the vcpu's
 * irq_data and wake the vcpu so it re-injects on entry.
 * Returns 0 on success, -ENODATA if no vcpu/VM is bound here, or
 * -EFAULT if the hwirq does not belong to the guest.
 */
int hpvisor_gic_save_guest_irq(u32 hwirq)
{
	int idx;
	int ret = -EFAULT;
	int cpu = smp_processor_id();
	struct irq_state *irqs = NULL;
	struct kvm_vcpu *vcpu = NULL;
	struct kvm *kvm = hpvisor_vm_data.kvm;

	if (cpu_to_vcpu[cpu] < 0 || !kvm || !refcount_inc_not_zero(&kvm->users_count))
		return -ENODATA;

	vcpu = kvm->vcpus[cpu_to_vcpu[cpu]];
	if (!vcpu || !vcpu->irq_data.irqs) {
		ret = -ENODATA;
		goto out_put;
	}

	irqs = vcpu->irq_data.irqs;
	for (idx = 0; idx < vcpu->irq_data.irq_num; idx++) {
		if (irqs[idx].hwirq != hwirq)
			continue;
		hpvisor_gic_set_state(hwirq, IRQCHIP_STATE_MASKED, true);
		gic_write_eoir(hwirq);
		gic_write_dir(hwirq);
		irqs[idx].state = STATE_PENDING_BIT;
		vcpu->irq_data.received = true;
		kvm_vcpu_wake_up(vcpu);
		hpvisor_irq_debug_inject("cpu %d: save guest irq %u and wake up vcpu %d\n",
			cpu, hwirq, vcpu->vcpu_idx);
		ret = 0;
		break;
	}
out_put:
	/* balance the refcount_inc_not_zero above */
	kvm_put_kvm(kvm);
	return ret;
}

/*
 * Re-inject every irq that hpvisor_gic_save_guest_irq() parked as pending:
 * unmask it, then set it pending at the irqchip so the guest takes it on
 * entry. The unmask-then-pend order mirrors the mask+EOI done at save time.
 * NOTE(review): the pending-bit clear and the final 'received = false' are
 * not synchronized against a concurrent save on another pcpu — presumably
 * the caller guarantees this runs on the vcpu's own pcpu; confirm.
 */
void hpvisor_vcpu_inject_irq(struct kvm_vcpu *vcpu)
{
	int i;
	u32 hwirq;
	struct irq_state *irqs = NULL;

	irqs = vcpu->irq_data.irqs;
	for (i = 0; i < vcpu->irq_data.irq_num; i++) {
		if (irqs[i].state & STATE_PENDING_BIT) {
			hwirq = irqs[i].hwirq;
			hpvisor_gic_set_state(hwirq, IRQCHIP_STATE_MASKED, false);
			hpvisor_gic_set_state(hwirq, IRQCHIP_STATE_PENDING, true);
			hpvisor_irq_debug_inject("vcpu %d: inject hwirq %u\n", vcpu->vcpu_idx, hwirq);
			irqs[i].state &= ~STATE_PENDING_BIT;
		}
	}
	/* all parked irqs delivered; clear the wakeup hint */
	vcpu->irq_data.received = false;
}

/* Free a vcpu's irq_state array and reset the whole irq_data struct. */
void hpvisor_vcpu_irq_data_destroy(struct kvm_vcpu *vcpu)
{
	if (!vcpu->irq_data.irqs)
		return;
	kfree(vcpu->irq_data.irqs);
	memset(&vcpu->irq_data, 0, sizeof(vcpu->irq_data));
}

/*
 * Give a vcpu its own irq_state array, one slot per guest-owned hwirq,
 * copied from the global VM data (referenced for the duration).
 * Under CONFIG_FIQ_GLUE also seeds the FIQ trampoline registers.
 * Returns 0, -EINVAL, -EFAULT (VM data not initialized) or -ENOMEM.
 */
int hpvisor_vcpu_irq_data_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct irq_state *irqs = NULL;
	struct vm_irq_data *data = NULL;

	if (unlikely(!vcpu))
		return -EINVAL;

	data = get_vm_data();
	if (unlikely(!data)) {
		hpvisor_err("irq bypass data need init before vcpu running\n");
		return -EFAULT;
	}
	/* kmalloc_array checks irq_num * sizeof(struct irq_state) for overflow */
	irqs = kmalloc_array(data->irq_num, sizeof(struct irq_state), GFP_KERNEL);
	if (!irqs) {
		put_vm_data();
		return -ENOMEM;
	}

	for (i = 0; i < data->irq_num; i++) {
		irqs[i].hwirq = data->guest_irqs[i];
		irqs[i].state = 0;
	}
	vcpu->irq_data.irqs = irqs;
	vcpu->irq_data.irq_num = data->irq_num;
	vcpu->irq_data.received = false;
	put_vm_data();
#ifdef CONFIG_FIQ_GLUE
	vcpu_set_reg(vcpu, FIQ_VCPU_REG, (u64)fiq_el1_trigger);
	vcpu_set_reg(vcpu, FIQ_MEM_REG, (u64)virt_to_phys((void *)nmi_virt_ctx_base));
#endif
	return 0;
}

/*
 * Early HVC handling on vcpu exit, before the normal hvc path:
 * - with CONFIG_FIQ_GLUE, an x0 matching the fiq_el1_trigger address means
 *   the guest is bouncing an FIQ back to the host trampoline;
 * - an x0 of HVC_INTERRUPT means the guest received a host-owned hwirq
 *   (in x1) and is handing it back for the host to service.
 * Anything else is left for hpvisor_vcpu_handle_hvc_call().
 */
void hpvisor_vcpu_handle_hvc_early(const struct kvm_vcpu *vcpu)
{
	int i;
	u32 hwirq;
	struct irq_state *irqs = NULL;
	u64 vcpu_r0;

	vcpu_r0 = vcpu_get_reg(vcpu, 0);
#ifdef CONFIG_FIQ_GLUE
	if (vcpu_r0 == (__u64)fiq_el1_trigger) {
		fiq_el1_trigger();
		return;
	}
#endif
	if (vcpu_r0 != HVC_INTERRUPT)
		return;

	hwirq = vcpu_get_reg(vcpu, 1);
	irqs = vcpu->irq_data.irqs;
	/* sanity check: a guest-owned irq should never come back this way */
	for (i = 0; i < vcpu->irq_data.irq_num; i++)
		WARN(hwirq == irqs[i].hwirq, "hwirq %u belong to guest\n", hwirq);

	hpvisor_gic_handle_irq(hwirq);

	if (hwirq == PTIMER_IRQ)
		hpvisor_irq_debug_ptimer("guest send back ptimer irq\n");
	else
		hpvisor_irq_debug_hvc("guest send back irq %u to host\n", hwirq);
}

/*
 * Dispatch a guest HVC: interrupts (already handled early) return 1 to
 * resume the guest; HVC_USERSPACE forwards x1..x6 as hypercall args and
 * returns 0 to exit to userspace; anything else is -EINVAL.
 */
int hpvisor_vcpu_handle_hvc_call(const struct kvm_vcpu *vcpu)
{
	const u64 hvc_type = vcpu_get_reg(vcpu, 0);

	switch (hvc_type) {
	case HVC_INTERRUPT:
		return 1;
	case HVC_USERSPACE:
		break;
	default:
		return -EINVAL;
	}
	memcpy(&vcpu->run->hypercall.args[0], &vcpu_gp_regs(vcpu)->regs[1], sizeof(__u64) * HVC_ARG_NR);
	vcpu->run->exit_reason = HVC_USERSPACE;
	return 0;
}

/* Propagate the userspace hypercall result back to the guest's x0. */
void hpvisor_vcpu_set_userspace_retval(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (run->exit_reason != HVC_USERSPACE)
		return;
	vcpu_set_reg(vcpu, 0, run->hypercall.ret);
}

static int check_gic_s2_map(const struct gic_s2_map *map)
{
	if (map->type >= GIC_TYPE_MAX_NR) {
		hpvisor_err("gic map failed for type:%u\n", map->type);
		return -EINVAL;
	}
	if ((u64)map->size > gic_data[map->type].size) {
		hpvisor_err("map size:0x%x cannot be larger than gic size:0x%llx\n",
			map->size, gic_data[map->type].size);
		return -EINVAL;
	}
	if (!gic_data[map->type].hpa) {
		hpvisor_err("no data for type:%s\n", gic_data[map->type].name);
		return -ENODATA;
	}
	return 0;
}

int hpvisor_arch_ioctl_intr_stage2_map(struct kvm *kvm, unsigned long arg)
{
	int ret;
	struct gic_s2_map map;
	struct hpvisor_s2_map sm;

	if (unlikely(!access_ok(((void *__user)arg), sizeof(struct gic_s2_map)))) {
		hpvisor_err("arg:0x%lx error\n", arg);
		return -EFAULT;
	}
	if (copy_from_user(&map, (void *__user)arg, sizeof(struct gic_s2_map)))
		return -EFAULT;

	ret = check_gic_s2_map(&map);
	if (ret != 0)
		return ret;

	sm.hva = (hva_t)ioremap(gic_data[map.type].hpa, map.size);
	if (!sm.hva) {
		hpvisor_err("ioremap for %s failed, hpa:0x%px size:0x%x\n",
			gic_data[map.type].name, (void *)gic_data[map.type].hpa, map.size);
		return -EFAULT;
	}

	sm.gpa = map.gpa;
	sm.size = map.size;
	sm.flags = HPVISOR_MEM_SETUP_NOW;
	hpvisor_irq_debug_common("map %s, hva:0x%px gpa:0x%llx size:0x%x, flags:0x%x\n",
		gic_data[map.type].name, (void *)sm.hva, sm.gpa, sm.size, sm.flags);
	ret = hpvisor_arch_do_stage2_map(kvm, &sm);
	/* After the stage2 pte has been created, no need ioremap addr,
	 * whether it's successful or not.
	 */
	iounmap((void *)sm.hva);
	return ret;
}

/*
 * Record the host-physical address and size of each GIC region from the
 * device tree node. Only a single redistributor region is supported.
 */
void hpvisor_gic_of_init(struct device_node *node, int nr_redist_regions)
{
	int type;
	struct resource res;

	if (nr_redist_regions != 1) {
		hpvisor_err("irq bypass only support nr_redist_regions == 1\n");
		return;
	}
	for (type = GIC_DISTRIBUTOR; type < GIC_TYPE_MAX_NR; type++) {
		if (of_address_to_resource(node, type, &res) != 0)
			continue;
		gic_data[type].hpa = res.start;
		gic_data[type].size = resource_size(&res);
	}
}

void hpvisor_vcpu_set_wfx_not_trap(struct kvm_vcpu *vcpu)
{
	/* We Don't trap WFE and WFI to EL2 */
	*vcpu_hcr(vcpu) &= ~(HCR_TWE | HCR_TWI);
}
