// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <seminix/cpumask.h>
#include <seminix/slab.h>
#include <seminix/bug.h>
#include <seminix/mutex.h>
#include <seminix/irqdomain.h>
#include <seminix/irq.h>
#include <seminix/irqdesc.h>
#include <seminix/interrupt.h>
#include <seminix/irq/irq_regs.h>
#include <seminix/irq/chip.h>
#include <seminix/irq/handle.h>
#include <seminix/irq/dummychip.h>

/* Base number of interrupt lines; the table may grow up to IRQ_BITMAP_BITS. */
#define NR_IRQ  64
/*
 * NOTE(review): the "+ 8196" slack mirrors Linux's irqdesc.c, where it is
 * widely believed to be a historical typo for 8192 -- confirm whether 8192
 * was intended here (harmless either way, the bitmap is merely larger).
 */
#define IRQ_BITMAP_BITS (NR_IRQ + 8196)

/* Counter for erroneous interrupts; presumably bumped by bad-irq handling
 * elsewhere (not visible in this file). */
u64 irq_err_count = 0;

/* Serializes descriptor slot allocation/free and table expansion. */
static DEFINE_MUTEX(sparse_irq_lock);
/* One bit per irq number: set while a descriptor is allocated for it. */
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

/* Current capacity of irq_desc_array; grown by irq_expand_nr_irqs(). */
static int nr_irqs_max = NR_IRQ;
/* Table mapping irq number -> descriptor; allocated in early_init_IRQ(). */
static struct irq_desc **irq_desc_array = NULL;

/*
 * early_init_IRQ() - allocate the global descriptor pointer table.
 *
 * Called once during boot before any descriptor can be created.
 * Panics (BUG) on allocation failure: the system cannot handle
 * interrupts without this table.
 */
void __init early_init_IRQ(void)
{
    struct irq_desc **array;

    array = kcalloc(nr_irqs_max, sizeof(*array), GFP_KERNEL);
    BUG_ON(!array);
    irq_desc_array = array;
}

/*
 * irq_insert_desc() - publish @desc in the descriptor table at slot @irq.
 *
 * The slot must be in range and currently empty, and @desc must be
 * non-NULL; violations are programming errors and trigger BUG().
 */
static void irq_insert_desc(int irq, struct irq_desc *desc)
{
    BUG_ON(irq < 0 || irq >= nr_irqs_max);
    BUG_ON(irq_desc_array[irq]);
    BUG_ON(!desc);

    irq_desc_array[irq] = desc;
}

/*
 * delete_irq_desc() - clear the descriptor table slot for @irq.
 *
 * The slot must be in range and currently occupied; violations are
 * programming errors and trigger BUG().
 */
static void delete_irq_desc(int irq)
{
    BUG_ON(irq < 0 || irq >= nr_irqs_max);
    BUG_ON(!irq_desc_array[irq]);

    irq_desc_array[irq] = NULL;
}

struct irq_desc *irq_to_desc(int virq)
{
    BUG_ON(virq < 0 || virq >= nr_irqs_max);

    return irq_desc_array[virq];
}

/*
 * alloc_masks() - initialize the descriptor's embedded affinity mask.
 *
 * The mask lives inside the descriptor, so no allocation is needed;
 * simply clear it. Always returns 0 (kept int for the error-checked
 * call site in alloc_desc()).
 */
static inline int alloc_masks(struct irq_desc *desc)
{
    cpumask_clear(&desc->irq_data.affinity);

    return 0;
}

/*
 * free_masks() - counterpart to alloc_masks().
 *
 * The affinity mask is embedded in the descriptor, so there is nothing
 * to release; kept as an empty hook for symmetry with alloc_masks().
 */
static inline void free_masks(struct irq_desc *desc)
{
}

/*
 * desc_smp_init() - set the initial affinity of @desc.
 *
 * A NULL @affinity means "no preference" and defaults to all possible
 * CPUs.
 */
static void desc_smp_init(struct irq_desc *desc, const struct cpumask *affinity)
{
    const struct cpumask *mask = affinity ? affinity : &__cpu_possible_mask;

    cpumask_copy(&desc->irq_data.affinity, mask);
}

/*
 * alloc_desc() - allocate and initialize one interrupt descriptor.
 * @irq:	descriptor slot / linux irq number
 * @flags:	initial value for desc->status_use_accessors
 * @affinity:	initial affinity mask, or NULL for all possible CPUs
 *
 * On success the descriptor is registered in irq_desc_array[] and
 * returned; on allocation failure NULL is returned and nothing is
 * registered. The descriptor starts masked, disabled (depth == 1) and
 * with handle_bad_irq() installed until a real handler is requested.
 */
static struct irq_desc *alloc_desc(int irq, u32 flags,
                const struct cpumask *affinity)
{
    struct irq_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

    if (!desc)
        return NULL;

    if (alloc_masks(desc))
        goto err_desc;

    /* Link the irq_data back to its slot and descriptor. */
    desc->irq_data.irq = irq;
    desc->irq_data.msi_desc = NULL;
    desc->irq_data.irq_desc = desc;

    /* No chip attached yet: route everything through no_irq_chip. */
    desc->irq_data.chip = &no_irq_chip;
    desc->irq_data.chip_data = NULL;
    desc->irq_data.handler_data = NULL;

    desc->status_use_accessors = flags;

    /* Start out masked and disabled until a handler is installed. */
    __irqd_set(&desc->irq_data, IRQ_MASKED);
    desc->handle_irq = handle_bad_irq;
    desc->depth = 1;
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;

    desc_smp_init(desc, affinity);

    if (irqd_has_set(&desc->irq_data, IRQ_PER_CPU)) {
        int cpu;

        /*
         * NOTE(review): GFP_ZERO here vs GFP_KERNEL elsewhere in this
         * file -- presumably intentional so the array is zeroed before
         * the |= below; confirm against seminix's kcalloc/gfp semantics.
         */
        desc->status_percpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ZERO);
        if (!desc->status_percpu)
            goto err_mask;
        for_each_possible_cpu(cpu)
            desc->status_percpu[cpu] |= IRQ_MASKED;
    }

    atomic_set(&desc->threads_active, 0);
    raw_spin_lock_init(&desc->lock);
    mutex_init(&desc->request_mutex);

    irq_insert_desc(irq, desc);

    return desc;

err_mask:
    free_masks(desc);
err_desc:
    kfree(desc);
    return NULL;
}

static void free_desc(int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    delete_irq_desc(irq);
    free_masks(desc);
    if (irqd_has_set(&desc->irq_data, IRQ_PER_CPU))
        kfree(desc->status_percpu);
    kfree(desc);
}

/*
 * alloc_descs() - allocate @cnt consecutive descriptors starting at @start.
 * @affinity:	optional array of @cnt affinity masks (one per irq), or NULL
 *
 * Returns @start on success. Returns -EINVAL if any supplied affinity
 * mask is empty, or -ENOMEM if a descriptor allocation fails, in which
 * case all descriptors allocated so far are rolled back.
 */
static int alloc_descs(int start, int cnt, const struct cpumask *affinity)
{
    int i;

    /* Reject empty affinity masks before allocating anything. */
    if (affinity) {
        for (i = 0; i < cnt; i++)
            if (cpumask_empty(&affinity[i]))
                return -EINVAL;
    }

    for (i = 0; i < cnt; i++) {
        const struct cpumask *mask = affinity ? &affinity[i] : NULL;

        if (!alloc_desc(start + i, 0, mask))
            goto rollback;
    }

    bitmap_set(allocated_irqs, start, cnt);
    return start;

rollback:
    while (--i >= 0)
        free_desc(start + i);
    return -ENOMEM;
}

/*
 * irq_expand_nr_irqs() - grow the descriptor pointer table to @nr slots.
 *
 * Called with sparse_irq_lock held. Allocates a larger (zeroed) table,
 * copies the existing descriptor pointers across and installs it.
 *
 * Returns 0 on success, -EINVAL if @nr exceeds the bitmap capacity,
 * or -ENOMEM if the new table cannot be allocated.
 */
static int irq_expand_nr_irqs(unsigned int nr)
{
    struct irq_desc **array;

    if (nr > IRQ_BITMAP_BITS)
        return -EINVAL;

    array = kcalloc(nr, sizeof(*array), GFP_KERNEL);
    if (!array)
        return -ENOMEM;

    /*
     * Copy nr_irqs_max POINTER entries, not nr_irqs_max bytes: the old
     * code copied only nr_irqs_max bytes, silently dropping every
     * descriptor pointer past index nr_irqs_max / sizeof(void *).
     */
    memcpy(array, irq_desc_array, nr_irqs_max * sizeof(*array));
    kfree(irq_desc_array);
    /* Install the bigger table before raising the bound, so a racing
     * irq_to_desc() can never index the old, smaller array with the
     * new, larger limit. */
    irq_desc_array = array;
    nr_irqs_max = nr;

    return 0;
}

/**
 * __irq_alloc_descs - allocate a contiguous range of irq descriptors
 * @irq:	Requested starting irq number; a negative value lets the
 *		allocator pick any free range (search starts at irq 1)
 * @cnt:	Number of consecutive descriptors to allocate
 * @affinity:	Optional array of @cnt affinity masks, or NULL
 *
 * Returns the first irq number of the range on success, -EINVAL for a
 * zero @cnt, -EEXIST if a requested fixed range is not free, or a
 * negative errno from table expansion / descriptor allocation.
 *
 * NOTE(review): for @irq == 0 the search is forced to start at 1 but
 * the "start != irq" check below still compares against 0, so a request
 * for irq 0 always fails with -EEXIST -- presumably irq 0 is reserved
 * as "invalid" (see __handle_domain_irq); confirm this is intended.
 */
int
__irq_alloc_descs(int irq, int cnt, const struct cpumask *affinity)
{
    int start, ret, from;

    if (!cnt)
        return -EINVAL;

    mutex_lock(&sparse_irq_lock);

    /* Never hand out irq 0: dynamic searches start at 1. */
    from = irq;
    if (irq <= 0)
        from = 1;

    start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                    from, cnt, 0);
    ret = -EEXIST;
	/* A fixed request must get exactly the slot it asked for. */
	if (irq >= 0 && start != irq)
		goto unlock;

	/* Grow the descriptor table if the range ends beyond its capacity. */
	if (start + cnt > nr_irqs_max) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, affinity);
unlock:
    mutex_unlock(&sparse_irq_lock);
    return ret;
}

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 *
 * Silently ignores ranges that fall (even partly) outside the valid
 * descriptor table, including negative @from and non-positive @cnt.
 */
void irq_free_descs(int from, int cnt)
{
    int i;

    /*
     * Reject negative/empty ranges explicitly: the original bound
     * check let e.g. from = -5, cnt = 3 through (both comparisons
     * false), reaching free_desc() with a negative index.
     */
    if (from < 0 || cnt <= 0)
        return;
    if (from >= nr_irqs_max || (from + cnt) > nr_irqs_max)
        return;

    mutex_lock(&sparse_irq_lock);
    for (i = 0; i < cnt; i++)
        free_desc(from + i);

    bitmap_clear(allocated_irqs, from, cnt);
    mutex_unlock(&sparse_irq_lock);
}

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns 0 on success, -EINVAL if no descriptor exists for @irq.
 */
int generic_handle_irq(int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (desc) {
        generic_handle_irq_desc(desc);
        return 0;
    }

    return -EINVAL;
}

/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @regs:	Register file coming from the low-level handling code
 *
 * Runs the handler inside the irq_enter()/irq_exit() bracket and
 * temporarily makes @regs the current irq registers.
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, int hwirq, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);
    int ret = 0, irq;

    irq_enter();

	/* Translate the hardware irq number into a linux irq number. */
	irq = irq_find_mapping(domain, hwirq);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 *
	 * NOTE(review): a negative return from irq_find_mapping() would
	 * slip past this check -- presumably it returns 0 for "no
	 * mapping"; confirm, or test irq <= 0 here instead.
	 */
	if (unlikely(!irq || irq >= nr_irqs_max)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}

/*
 * __irq_get_desc_lock() - look up @irq's descriptor and lock it.
 * @irq:	irq number
 * @flags:	out: saved interrupt flags for the paired unlock
 * @bus:	whether to take the chip's slow-bus lock as well
 * @check:	_IRQ_DESC_CHECK / _IRQ_DESC_PERCPU matching constraints
 *
 * Returns the locked descriptor, or NULL if the irq has no descriptor
 * or its per-cpu property does not match what @check demands.
 * Must be paired with __irq_put_desc_unlock().
 */
struct irq_desc *
__irq_get_desc_lock(int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	if (check & _IRQ_DESC_CHECK) {
		bool percpu = irqd_is_percpu(&desc->irq_data);

		/* Caller demanded one flavor; reject the other. */
		if (percpu != !!(check & _IRQ_DESC_PERCPU))
			return NULL;
	}

	if (bus)
		chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, *flags);

	return desc;
}

/*
 * __irq_put_desc_unlock() - release a descriptor taken by __irq_get_desc_lock().
 * @desc:	the locked descriptor
 * @flags:	the interrupt flags saved by the matching lock call
 * @bus:	must match the @bus value passed to __irq_get_desc_lock()
 *
 * Drops the descriptor spinlock first, then the chip bus lock -- the
 * reverse of the acquisition order in __irq_get_desc_lock().
 */
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(int irq)
{
    int cpu;
    struct irq_desc *desc = irq_to_desc(irq);

    if (irqd_is_percpu(&desc->irq_data))
        return 0;

    __irqd_set(&desc->irq_data, IRQ_PER_CPU);
    BUG_ON(desc->status_percpu);
    desc->status_percpu = kcalloc(nr_cpu_ids, sizeof (u32), GFP_ZERO);
    if (!desc->status_percpu)
        return -ENOMEM;

    for_each_possible_cpu(cpu) {
        desc->status_percpu[cpu] |= IRQ_MASKED;
    }

    return 0;
}
