// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt
#include <seminix/errno.h>
#include <seminix/cpumask.h>
#include <seminix/bug.h>
#include <seminix/smp.h>
#include <seminix/slab.h>
#include <seminix/irq.h>
#include <seminix/irqdesc.h>
#include <seminix/irq/dummychip.h>
#include <seminix/irq/chip.h>
#include <seminix/irq/manage.h>
#include "internal.h"

static void __synchronize_hardirq(struct irq_desc *desc)
{
    bool inprogress;

    do {
        unsigned long flags;

        /*
         * Wait until we're out of the critical section.  This might
         * give the wrong answer due to the lack of memory barriers.
         */
        while (irqd_irq_inprogress(&desc->irq_data))
            cpu_relax();

        /* Ok, that indicated we're done: double-check carefully. */
        raw_spin_lock_irqsave(&desc->lock, flags);
        inprogress = irqd_irq_inprogress(&desc->irq_data);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /* Oops, that failed? */
    } while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	Spins until no hard IRQ handler for this interrupt is running
 *	anymore. If you call this while holding a resource the handler
 *	may need, you will deadlock. Threaded handlers are NOT waited
 *	for; the return value only reports whether any are still active.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    /* Unknown irq: nothing can be pending. */
    if (!desc)
        return true;

    __synchronize_hardirq(desc);
    return atomic_read(&desc->threads_active) == 0;
}

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	Waits for any pending hard IRQ handlers for this interrupt to
 *	complete before returning. If you call this while holding a
 *	resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc)
        return;

    __synchronize_hardirq(desc);
    /*
     * No hardirq handler is running anymore. A threaded handler
     * still being active at this point is a fatal inconsistency.
     */
    BUG_ON(atomic_read(&desc->threads_active));
}

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
    if (!desc || !irqd_can_balance(&desc->irq_data) ||
        !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
        return false;
    return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 *	Returns nonzero when the irq exists, is balanceable and its chip
 *	implements irq_set_affinity.
 */
int irq_can_set_affinity(int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    return __irq_can_set_affinity(desc);
}

/*
 * Program the chip-level affinity and, on success, cache the mask in
 * the descriptor. Caller holds desc->lock.
 */
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
            bool force)
{
    struct irq_chip *chip = irq_data_get_irq_chip(data);
    struct irq_desc *desc = irq_data_to_desc(data);
    int ret;

    if (!chip || !chip->irq_set_affinity)
        return -EINVAL;

    ret = chip->irq_set_affinity(data, mask, force);
    /* Only cache the new mask when the chip accepted it. */
    if (ret == IRQ_SET_MASK_OK || ret == IRQ_SET_MASK_OK_DONE)
        cpumask_copy(&desc->irq_data.affinity, mask);

    return ret;
}

/* Locked wrapper around irq_do_set_affinity() for a linux irq number. */
int __irq_set_affinity(int irq, const struct cpumask *mask, bool force)
{
    struct irq_desc *desc = irq_to_desc(irq);
    unsigned long flags;
    int ret = -EINVAL;

    if (desc) {
        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = irq_do_set_affinity(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
    }
    return ret;
}

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(int irq, unsigned long irqflags)
{
    unsigned long flags;
    int ok = 0;
    struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

    if (!desc)
        return 0;

    /*
     * Requestable when nothing is installed yet, or when both the
     * existing action and the requester agree on sharing.
     */
    if (irqd_can_request(&desc->irq_data) &&
        (!desc->action || (irqflags & desc->action->flags & IRQF_SHARED)))
        ok = 1;

    irq_put_desc_unlock(desc, flags);
    return ok;
}

/*
 * Program the trigger mode (edge/level, polarity) for an interrupt.
 * Caller holds desc->lock. Returns 0 on success or when the chip has
 * no irq_set_type callback, otherwise the chip's error code.
 */
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
    struct irq_chip *chip = desc->irq_data.chip;
    int ret, unmask = 0;

    if (!chip || !chip->irq_set_type) {
        /*
         * IRQF_TRIGGER_* but the PIC does not support multiple
         * flow-types?
         */
        pr_debug("No set_type function for IRQ %d (%s)\n",
             irq_desc_get_irq(desc),
             chip ? (chip->name ? : "unknown") : "unknown");
        return 0;
    }

    if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
        if (!irqd_irq_masked(&desc->irq_data)) {
            mask_irq(desc);
            /*
             * Bug fix: remember that WE masked the line, so it
             * gets unmasked again below. Previously 'unmask'
             * was never set and the irq stayed masked forever
             * after a trigger change on such chips.
             */
            unmask = 1;
        }
    }

    /* Mask all flags except trigger mode */
    flags &= IRQ_TYPE_SENSE_MASK;
    ret = chip->irq_set_type(&desc->irq_data, flags);

    switch (ret) {
    case IRQ_SET_MASK_OK:
    case IRQ_SET_MASK_OK_DONE:
        /* Record the newly programmed trigger type in the status. */
        __irqd_clear(&desc->irq_data, IRQ_TYPE_DEFAULT);
        __irqd_set(&desc->irq_data, flags);
        break;
    default:
        pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
               flags, irq_desc_get_irq(desc), chip->irq_set_type);
    }
    if (unmask)
        unmask_irq(desc);
    return ret;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(int irq, struct irq_desc *desc, struct irqaction *new)
{
    struct irqaction *old, **old_ptr;
    unsigned long flags;
    int ret, shared = 0;

    if (!desc)
        return -EINVAL;

    /* A descriptor still carrying no_irq_chip was never wired to hardware. */
    if (desc->irq_data.chip == &no_irq_chip)
        return -ENOSYS;

    new->irq = irq;

    /*
     * If the trigger type is not specified by the caller,
     * then use the default for this interrupt.
     */
    if (!(new->flags & IRQF_TRIGGER_MASK))
        new->flags |= irqd_get_trigger_type(&desc->irq_data);

    /*
     * Protects against a concurrent __free_irq() call which might wait
     * for synchronize_hardirq() to complete without holding the optional
     * chip bus lock and desc->lock.
     */
    mutex_lock(&desc->request_mutex);

    /*
     * Acquire bus lock as chip callbacks taken below might rely on the
     * serialization provided by a slow bus (I2C/SPI) irq chip.
     */
    chip_bus_lock(desc);

    /*
     * The following block of code has to be executed atomically
     * protected against a concurrent interrupt and any of the other
     * management calls which are not serialized via
     * desc->request_mutex or the optional bus lock.
     */
    raw_spin_lock_irqsave(&desc->lock, flags);
    old_ptr = &desc->action;
    old = *old_ptr;
    if (old) {
        /*
         * Can't share interrupts unless both agree to and are
         * the same type (level, edge, polarity). So both flag
         * fields must have IRQF_SHARED set and the bits which
         * set the trigger type must match. Also all must
         * agree on ONESHOT.
         */
        unsigned int oldtype;

        /*
         * If nobody did set the configuration before, inherit
         * the one provided by the requester.
         */
        if (irqd_has_set(&desc->irq_data, IRQ_TYPE_DEFAULT)) {
            oldtype = irqd_get_trigger_type(&desc->irq_data);
        } else {
            oldtype = new->flags & IRQF_TRIGGER_MASK;
            irqd_set_trigger_type(&desc->irq_data, oldtype);
        }

        if (!((old->flags & new->flags) & IRQF_SHARED) ||
            (oldtype != (new->flags & IRQF_TRIGGER_MASK)))
            goto mismatch;

        /* All handlers must agree on per-cpuness */
        if ((old->flags & IRQF_PERCPU) !=
            (new->flags & IRQF_PERCPU))
            goto mismatch;

        /* add new interrupt at end of irq queue */
        do {
            /*
             * Walk to the tail of the action list so the new
             * action is appended after all existing ones.
             */
            old_ptr = &old->next;
            old = *old_ptr;
        } while (old);
        shared = 1;
    }

    if (new->flags & IRQF_TRIGGER_MASK) {
        unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
        unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

        if (nmsk != omsk)
            /* hope the handler works with current  trigger mode */
            pr_warn("irq %d uses trigger mode %u; requested %u\n",
                irq, omsk, nmsk);
    }

    /*
     * Publish the new action. NOTE(review): unlike mainline genirq,
     * there is no irq_startup() here for the first handler - the irq
     * is presumably enabled elsewhere; confirm against callers.
     */
    *old_ptr = new;

    /* Reset broken irq detection when installing new handler */
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;

    /*
     * Check whether we disabled the irq via the spurious handler
     * before. Reenable it and give it another chance.
     */
    if (shared && irqd_irq_masked(&desc->irq_data)) {
        __irqd_clear(&desc->irq_data, IRQ_MASKED);
        unmask_irq(desc);
    }

    /* Unlock in reverse acquisition order. */
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    chip_bus_sync_unlock(desc);
    mutex_unlock(&desc->request_mutex);

    return 0;

mismatch:
    pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
            irq, new->flags, new->name, old->flags, old->name);

    ret = -EBUSY;
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    chip_bus_sync_unlock(desc);
    mutex_unlock(&desc->request_mutex);
    return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 * Rejects unknown and per-cpu interrupt lines with -EINVAL.
 */
int setup_irq(int irq, struct irqaction *act)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc || WARN_ON(irqd_is_percpu(&desc->irq_data)))
        return -EINVAL;

    return __setup_irq(irq, desc, act);
}

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
    int irq = desc->irq_data.irq;
    struct irqaction *action, **action_ptr;
    unsigned long flags;

    /* Freeing sleeps (mutex, synchronize) - illegal from IRQ context. */
    WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

    /* Lock order: request_mutex -> chip bus lock -> desc->lock. */
    mutex_lock(&desc->request_mutex);
    chip_bus_lock(desc);
    raw_spin_lock_irqsave(&desc->lock, flags);

    /*
     * There can be multiple actions per IRQ descriptor, find the right
     * one based on the dev_id:
     */
    action_ptr = &desc->action;
    for (;;) {
        action = *action_ptr;

        if (!action) {
            WARN(1, "Trying to free already-free IRQ %d\n", irq);
            raw_spin_unlock_irqrestore(&desc->lock, flags);
            chip_bus_sync_unlock(desc);
            mutex_unlock(&desc->request_mutex);
            return NULL;
        }

        if (action->dev_id == dev_id)
            break;
        action_ptr = &action->next;
    }

    /* Found it - now remove it from the list of entries: */
    *action_ptr = action->next;

    /* If this was the last handler, shut down the IRQ line: */
    if (!desc->action) {
        irq_shutdown(desc);
    }

    raw_spin_unlock_irqrestore(&desc->lock, flags);
    /*
     * Drop bus_lock here so the changes which were done in the chip
     * callbacks above are synced out to the irq chips which hang
     * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
     *
     * The still held desc->request_mutex protects against a
     * concurrent request_irq() of this irq so the release of resources
     * and timing data is properly serialized.
     */
    chip_bus_sync_unlock(desc);

    /* Make sure it's not being used on another CPU: */
    synchronize_hardirq(irq);

    /* Last action releases resources */
    if (!desc->action) {
        /*
         * Cycle the bus lock so slow-bus chips can sync the shutdown
         * done above. NOTE(review): the lock/unlock pair is empty -
         * mainline calls irq_release_resources() in between; confirm
         * this omission is intentional in this kernel.
         */
        chip_bus_lock(desc);
        chip_bus_sync_unlock(desc);
    }

    mutex_unlock(&desc->request_mutex);

    /* Ownership of the removed action transfers to the caller. */
    return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 * Per-cpu interrupt lines are rejected (with a warning).
 */
void remove_irq(int irq, struct irqaction *act)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc)
        return;
    if (WARN_ON(irqd_is_percpu(&desc->irq_data)))
        return;

    __free_irq(desc, act->dev_id);
}

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(int irq, void *dev_id)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irqaction *action;
    const char *name = NULL;

    if (!desc || WARN_ON(irqd_is_percpu(&desc->irq_data)))
        return NULL;

    action = __free_irq(desc, dev_id);
    if (action) {
        /* Grab the name before releasing the action's memory. */
        name = action->name;
        kfree(action);
    }
    return name;
}

/*
 * Allocate and install an irqaction for @irq. Returns 0 on success or a
 * negative errno. On failure the allocated action is freed again.
 */
int __request_irq(int irq, irq_handler_t handler,
             unsigned long irqflags,
             const char *devname, void *dev_id)
{
    struct irq_desc *desc;
    struct irqaction *act;
    int ret;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if ((irqflags & IRQF_SHARED) && !dev_id)
        return -EINVAL;

    desc = irq_to_desc(irq);
    if (!desc)
        return -EINVAL;

    /* The line must be requestable and not a per-cpu interrupt. */
    if (!irqd_can_request(&desc->irq_data) ||
        WARN_ON(irqd_is_percpu(&desc->irq_data)))
        return -EINVAL;

    if (!handler)
        return -EINVAL;

    act = kzalloc(sizeof(*act), GFP_KERNEL);
    if (!act)
        return -ENOMEM;

    act->handler = handler;
    act->flags = irqflags;
    act->name = devname;
    act->dev_id = dev_id;

    ret = __setup_irq(irq, desc, act);
    if (ret)
        kfree(act);

    return ret;
}

/*
 * Enable a per-cpu interrupt on the calling CPU, optionally programming
 * the trigger @type first. Must run in non-migratable context.
 */
void enable_percpu_irq(int irq, unsigned int type)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

    if (!desc)
        return;

    /* Fall back to the descriptor's default trigger when unspecified. */
    type &= IRQ_TYPE_SENSE_MASK;
    if (type == IRQ_TYPE_NONE)
        type = irqd_get_trigger_type(&desc->irq_data);

    if (type != IRQ_TYPE_NONE && __irq_set_trigger(desc, type)) {
        WARN(1, "failed to set type for IRQ%d\n", irq);
        goto out;
    }

    irq_percpu_enable(desc, cpu);
out:
    irq_put_desc_unlock(desc, flags);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(int irq)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    bool enabled = false;
    struct irq_desc *desc;

    desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
    if (!desc)
        return false;

    /* Enabled means the per-cpu MASKED bit for this CPU is clear. */
    if (irqd_is_percpu(&desc->irq_data) && desc->status_percpu)
        enabled = !(desc->status_percpu[cpu] & IRQ_MASKED);

    irq_put_desc_unlock(desc, flags);
    return enabled;
}

/* Disable a per-cpu interrupt on the calling CPU. */
void disable_percpu_irq(int irq)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

    if (desc) {
        irq_percpu_disable(desc, cpu);
        irq_put_desc_unlock(desc, flags);
    }
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(int irq, void __percpu *dev_id)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irqaction *action;
    unsigned long flags;
    int cpu;

    /* Freeing is not permitted from interrupt context. */
    WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

    if (!desc)
        return NULL;

    raw_spin_lock_irqsave(&desc->lock, flags);

    /* The single installed action must match the given cookie. */
    action = desc->action;
    if (!action || action->percpu_dev_id != dev_id) {
        WARN(1, "Trying to free already-free IRQ %d\n", irq);
        action = NULL;
        goto out;
    }

    /* Refuse to free while the irq is still enabled on any CPU. */
    if (desc->status_percpu) {
        for_each_possible_cpu(cpu) {
            if (desc->status_percpu[cpu] & IRQ_MASKED)
                continue;
            WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
                 irq, cpu);
            action = NULL;
            goto out;
        }
    }

    /* Detach the action; ownership moves to the caller. */
    desc->action = NULL;
out:
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    return action;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 * Silently ignores lines that do not exist or are not per-cpu.
 */
void remove_percpu_irq(int irq, struct irqaction *act)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc || !irqd_is_percpu(&desc->irq_data))
        return;

    __free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 * Returns -EINVAL for unknown or non-per-cpu lines.
 */
int setup_percpu_irq(int irq, struct irqaction *act)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc || !irqd_is_percpu(&desc->irq_data))
        return -EINVAL;

    return __setup_irq(irq, desc, act);
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(int irq, irq_handler_t handler,
             unsigned long flags, const char *devname,
             void __percpu *dev_id)
{
    struct irqaction *action;
    struct irq_desc *desc;
    int retval;

    if (!dev_id)
        return -EINVAL;

    desc = irq_to_desc(irq);
    if (!desc || !irqd_can_request(&desc->irq_data) ||
        !irqd_is_percpu(&desc->irq_data))
        return -EINVAL;

    if (flags && flags != IRQF_TIMER)
        return -EINVAL;

    /*
     * Bug fix: reject a NULL handler, consistent with __request_irq().
     * Previously a NULL handler was installed and would crash the
     * first time the interrupt fired.
     */
    if (!handler)
        return -EINVAL;

    action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->flags = flags | IRQF_PERCPU;
    action->name = devname;
    action->percpu_dev_id = dev_id;

    retval = __setup_irq(irq, desc, action);
    if (retval) {
        kfree(action);
    }

    return retval;
}

/**
 *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 *
 *	Returns 0 on success, -EINVAL if no chip in the hierarchy
 *	implements the callback, -ENODEV on a corrupt hierarchy.
 */
int irq_get_irqchip_state(int irq, enum irqchip_irq_state which,
              bool *state)
{
    struct irq_desc *desc;
    struct irq_data *data;
    struct irq_chip *chip;
    unsigned long flags;
    int err = -EINVAL;

    desc = irq_get_desc_buslock(irq, &flags, 0);
    if (!desc)
        return err;

    data = irq_desc_get_irq_data(desc);

    /* Walk up the domain hierarchy to a chip providing the callback. */
    do {
        chip = irq_data_get_irq_chip(data);
        /*
         * Bug fix: a hierarchy level without a chip would previously
         * be dereferenced blindly. Bail out instead.
         */
        if (WARN_ON(!chip)) {
            err = -ENODEV;
            goto out_unlock;
        }
        if (chip->irq_get_irqchip_state)
            break;
        data = data->parent_data;
    } while (data);

    if (data)
        err = chip->irq_get_irqchip_state(data, which, state);

out_unlock:
    irq_put_desc_busunlock(desc, flags);
    return err;
}

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 *
 *	Returns 0 on success, -EINVAL if no chip in the hierarchy
 *	implements the callback, -ENODEV on a corrupt hierarchy.
 */
int irq_set_irqchip_state(int irq, enum irqchip_irq_state which,
              bool val)
{
    struct irq_desc *desc;
    struct irq_data *data;
    struct irq_chip *chip;
    unsigned long flags;
    int err = -EINVAL;

    desc = irq_get_desc_buslock(irq, &flags, 0);
    if (!desc)
        return err;

    data = irq_desc_get_irq_data(desc);

    /* Walk up the domain hierarchy to a chip providing the callback. */
    do {
        chip = irq_data_get_irq_chip(data);
        /*
         * Bug fix: a hierarchy level without a chip would previously
         * be dereferenced blindly. Bail out instead.
         */
        if (WARN_ON(!chip)) {
            err = -ENODEV;
            goto out_unlock;
        }
        if (chip->irq_set_irqchip_state)
            break;
        data = data->parent_data;
    } while (data);

    if (data)
        err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
    irq_put_desc_busunlock(desc, flags);
    return err;
}
