/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019.
 * Description: support irq priority level partition and dpe fast irq
 * Author: yanbo <joey.yanbo@huawei.com>
 * Create: 2018-09-27
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <../../../kernel/irq/internals.h>
#include <asm/cacheflush.h>
#include <linux/seq_file.h>
#ifdef CONFIG_OUTER_CACHE
#include <asm/outercache.h>
#endif
#ifdef CONFIG_RTOS_HAL_SET_IRQPRIORITY
#include "irq-gic-common.h"
#endif

#define  HI1382_VIRQ_MAX	65536
#define  HI1382_VIRQ_MIN	5
#define  MAX_IPI	15

#ifdef CONFIG_RTOS_HAL_IRQ_BYPASS

/*
 * Validate the arguments of request_fast_uirq().
 *
 * @irq:      hwirq number (must lie strictly inside the HI1382 virq window)
 * @mask_val: requested CPU affinity mask; must be non-NULL (an empty mask is
 *            legal and means "hook the handler only" — see the caller)
 * @handler:  fast-path flow handler; must be non-NULL
 *
 * Returns 0 on success, -EINVAL on any invalid argument.
 */
static inline int fast_uirq_sanity_check(unsigned int irq, cpumask_var_t mask_val, irq_flow_fast_handler_t handler)
{
	unsigned int cpu;

	if (unlikely(!mask_val)) {
		pr_err("uirq: cpu mask null\n");
		return -EINVAL;
	}

	/*
	 * Warn (but do not fail) when the first CPU in the mask is CPU0:
	 * routing a fast irq to the boot CPU is usually a misconfiguration.
	 * Note: an empty mask makes cpumask_first() return >= nr_cpu_ids,
	 * so it never triggers this warning.
	 */
	cpu = cpumask_first(mask_val);
	if (unlikely(cpu == 0))
		pr_err("uirq warn: affinity set to cpu 0\n");

	if (unlikely((irq <= HI1382_VIRQ_MIN) || (irq >= HI1382_VIRQ_MAX))) {
		pr_err("uirq: irq(%u) nr out of restriction\n", irq);
		return -EINVAL;
	}
	if (unlikely(!handler)) {
		pr_err("uirq: handler illegal\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * Look up the irq descriptor for @irq.
 *
 * Returns the descriptor, or NULL (after logging) when the irq number has
 * no descriptor in the linear map.
 */
static inline struct irq_desc *fast_uirq_get_irq_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (unlikely(!desc)) {
		pr_err("uirq: irq%u desc null\n", irq);
		return NULL;
	}
	return desc;
}


/**
 * @irq: represent the hwirq
 *	In DPE scenario, RTOS_HAL_LINEAR_MAP is enabled, so virq is consistent with hwirq,
 *	that is to say, virq = hwirq = @irq.
 */
/**
 * request_fast_uirq - install a fast (bypass) flow handler for a hardware irq
 * @irq: represent the hwirq
 *	In DPE scenario, RTOS_HAL_LINEAR_MAP is enabled, so virq is consistent with hwirq,
 *	that is to say, virq = hwirq = @irq.
 * @mask_val: CPU affinity to program into the irqchip; an empty mask means
 *	"only hook the handler" — no mask/unmask or affinity programming.
 * @handler: the fast flow handler to install in desc->fast_handler_irq.
 *
 * Returns 0 on success, -EINVAL on invalid arguments or missing descriptor.
 *
 * The sequence under the descriptor lock is order-sensitive:
 * activate -> mask -> set affinity -> hook handler -> unmask.
 */
int request_fast_uirq(unsigned int irq, cpumask_var_t mask_val, irq_flow_fast_handler_t handler)
{
	unsigned long flags;
	struct irq_desc *desc = NULL;
	unsigned int flag;
	bool only_hook_handler = false;
	int ret;

	if (fast_uirq_sanity_check(irq, mask_val, handler))
		return -EINVAL;

	desc = fast_uirq_get_irq_desc(irq);
	if (!desc)
		return -EINVAL;

#ifdef CONFIG_RTOS_HAL_SET_IRQPRIORITY
	irq_set_priority(irq, MIN_IRQ_PRI_NUM);
#endif

	/* Re-program the trigger type only for non-IPI irqs that have one set. */
	flag = irqd_get_trigger_type(&desc->irq_data);
	if (irq > MAX_IPI && (flag & IRQF_TRIGGER_MASK)) {
		/*
		 * Don't return error here.
		 * Some core like 1381, can't write gic register in data core.
		 * If return error, all interrupts will fail to register in some ways.
		 */
		ret = __irq_set_trigger(desc, flag & IRQF_TRIGGER_MASK);
		if (ret)
			pr_warn("uirq: irq%u set trigger fail\n", irq);
	} else {
		/* IPI irq or no trigger type configured — nothing to program. */
		pr_debug("uirq: irq%u trigger not configured\n", irq);
	}
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_domain_activate_irq(&desc->irq_data, false);
	only_hook_handler = cpumask_empty(mask_val);
	/* NOTE(review): desc->irq_data.chip is assumed non-NULL here — confirm
	 * all callers only pass irqs with an irqchip installed. */
	if (desc->irq_data.chip->irq_mask && !only_hook_handler) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	}
	if (!only_hook_handler) {
		if (desc->irq_data.chip->irq_set_affinity)
			desc->irq_data.chip->irq_set_affinity(&desc->irq_data, mask_val, false);
	}
	if (desc->fast_handler_irq)
		pr_warn("request uirq: already registered?\n");
	desc->fast_handler_irq = handler;
	if (desc->irq_data.chip->irq_unmask && !only_hook_handler) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	return 0;
}
EXPORT_SYMBOL_NS(request_fast_uirq, HW_RTOS_NS);
EXPORT_SYMBOL_NS(request_fast_uirq, HW_RTOS_NS);

/**
 * @irq: represent the hwirq
 *	In DPE scenario, RTOS_HAL_LINEAR_MAP is enabled, so virq is consistent with hwirq,
 *	that is to say, virq = hwirq = @irq.
 */
/**
 * free_fast_uirq - remove a previously installed fast flow handler
 * @irq: represent the hwirq
 *	In DPE scenario, RTOS_HAL_LINEAR_MAP is enabled, so virq is consistent with hwirq,
 *	that is to say, virq = hwirq = @irq.
 *
 * Masks the irq, deactivates it in the domain and clears the fast handler.
 * Returns 0 on success, -EINVAL on bad irq number, missing descriptor, or
 * when no fast handler was installed.
 */
int free_fast_uirq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = NULL;

	if (unlikely((irq <= HI1382_VIRQ_MIN) || (irq >= HI1382_VIRQ_MAX))) {
		pr_err("uirq: irq nr out of restriction\n");
		return -EINVAL;
	}

	desc = irq_to_desc(irq);
	if (!desc) {
		pr_err("IRQ%u's desc is NULL.\n", irq);
		return -EINVAL;
	}

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	if (!desc->fast_handler_irq) {
		pr_warn("free uirq: handler is null\n");
		goto err;
	}
	/* NOTE(review): desc->irq_data.chip is assumed non-NULL, mirroring
	 * request_fast_uirq() — confirm callers guarantee this. */
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	}
	irq_domain_deactivate_irq(&desc->irq_data);
	desc->fast_handler_irq = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	return 0;
err:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	/* Use a proper negative errno: bare -1 would read as -EPERM to callers. */
	return -EINVAL;
}
EXPORT_SYMBOL_NS(free_fast_uirq, HW_RTOS_NS);
#endif
