#ifndef SEMINIX_IRQDESC_H
#define SEMINIX_IRQDESC_H

#include <utils/types.h>
#include <seminix/irq.h>
#include <seminix/spinlock.h>
#include <seminix/mutex.h>
#include <asm/ptrace.h>

struct irqaction;

/*
 * struct irq_desc - per-interrupt descriptor; one instance per virtual irq.
 * All mutable state is protected by @lock unless noted otherwise.
 */
struct irq_desc {
    struct irq_data     irq_data;   /* embedded per-irq chip/domain data */

    irq_flow_handler_t  handle_irq; /* highlevel flow handler for this irq */
    struct irqaction    *action;    /* linked list of registered actions */

    u32         status_use_accessors;   /* IRQ_* status bits; use irqd_* helpers */
    u32         *status_percpu;     /* per-cpu status words — presumably for percpu irqs; TODO confirm */

    unsigned int		depth;		/* nested irq disables */
    unsigned int		irq_count;	/* For detecting broken IRQs */
    unsigned long		last_unhandled;	/* Aging timer for unhandled count */
    unsigned int		irqs_unhandled;	/* number of spurious/unhandled events */

    atomic_t		    threads_active;	/* threaded handlers currently running */

    raw_spinlock_t      lock;       /* protects descriptor state */
    struct mutex		request_mutex;	/* serializes request/free operations */

    const char          *name;      /* descriptive name, see irq_set_chip_handler_name_locked() */
} ____cacheline_internodealigned_in_smp;

/* Return the descriptor owning @data via the back-pointer stored in irq_data. */
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
    return data->irq_desc;
}

static inline int irq_desc_get_irq(struct irq_desc *desc)
{
    return desc->irq_data.irq;
}

/* Return a pointer to the irq_data embedded in @desc. */
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
    return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
    return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
    return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
    return desc->irq_data.handler_data;
}

extern u64 irq_err_count;

/*
 * ack_bad_irq - Account a spurious/unhandled interrupt.
 * @virq: virtual irq number that misfired (accepted for API symmetry,
 *        currently not recorded anywhere)
 *
 * Bumps the global error counter. Note: the increment is not atomic;
 * presumably racy updates are acceptable for a diagnostics counter —
 * TODO confirm this matches the rest of the project's conventions.
 */
static inline void ack_bad_irq(int virq)
{
    (void)virq;     /* silence -Wunused-parameter; intentionally unused */
    irq_err_count++;
}

struct irq_desc *irq_to_desc(int irq);

int __irq_alloc_descs(int irq, int cnt, const struct cpumask *affinity);
void irq_free_descs(int from, int cnt);

int generic_handle_irq(int irq);

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt.
 */
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
    /* Dispatch to whatever flow handler is currently installed. */
    (*desc->handle_irq)(desc);
}

int __handle_domain_irq(struct irq_domain *domain, int hwirq, struct pt_regs *regs);

/*
 * Translate a hardware irq number within @domain and run its handler.
 * Thin wrapper around __irq_handle_domain_irq machinery.
 *
 * NOTE(review): @hwirq is unsigned int here but the __handle_domain_irq
 * prototype above takes int — harmless for small values, but worth
 * unifying; confirm against the definition.
 */
static inline int handle_domain_irq(struct irq_domain *domain,
                    unsigned int hwirq, struct pt_regs *regs)
{
    return __handle_domain_irq(domain, hwirq, regs);
}

#define for_each_action_of_desc(desc, act)			\
    for (act = desc->action; act; act = act->next)

#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

struct irq_desc *
__irq_get_desc_lock(int irq, unsigned long *flags, bool bus,
            unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);

int irq_set_percpu_devid(int irq);


static inline struct cpumask *
irq_desc_get_affinity_mask(struct irq_desc *desc)
{
    return &desc->irq_data.affinity;
}

/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
    return desc->action != NULL;
}

static inline int irq_has_action(int irq)
{
    return irq_desc_has_action(irq_to_desc(irq));
}

/**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:	Pointer to the irq_data structure which identifies the irq
 * @handler:	Flow control handler function for this interrupt
 *
 * Sets the handler in the irq descriptor associated to @data.
 *
 * Must be called with irq_desc locked and valid parameters. Typical
 * call site is the irq_set_type() callback.
 */
static inline void irq_set_handler_locked(struct irq_data *data,
                        irq_flow_handler_t handler)
{
    /* Caller holds the descriptor lock; install the new flow handler. */
    irq_data_to_desc(data)->handle_irq = handler;
}

/**
 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
 * @data:	Pointer to the irq_data structure for which the chip is set
 * @chip:	Pointer to the new irq chip
 * @handler:	Flow control handler function for this interrupt
 * @name:	Name of the interrupt
 *
 * Replace the irq chip at the proper hierarchy level in @data and
 * sets the handler and name in the associated irq descriptor.
 *
 * Must be called with irq_desc locked and valid parameters.
 */
static inline void
irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
            irq_flow_handler_t handler, const char *name)
{
    /* Caller holds the descriptor lock; see kernel-doc comment above. */
    struct irq_desc *desc = irq_data_to_desc(data);

    data->chip = chip;
    desc->name = name;
    desc->handle_irq = handler;
}

/* Look up and lock the descriptor for @irq, also taking the bus lock. */
static inline struct irq_desc *
irq_get_desc_buslock(int irq, unsigned long *flags, unsigned int check)
{
    return __irq_get_desc_lock(irq, flags, true, check);
}

/* Unlock a descriptor obtained via irq_get_desc_buslock(). */
static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
    __irq_put_desc_unlock(desc, flags, true);
}

/* Look up and lock the descriptor for @irq (no bus lock). */
static inline struct irq_desc *
irq_get_desc_lock(int irq, unsigned long *flags, unsigned int check)
{
    return __irq_get_desc_lock(irq, flags, false, check);
}

/* Unlock a descriptor obtained via irq_get_desc_lock(). */
static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
    __irq_put_desc_unlock(desc, flags, false);
}

/*
 * IRQ line status.
 *
 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
 *
 * IRQ_TYPE_NONE		- default, unspecified type
 * IRQ_TYPE_EDGE_RISING		- rising edge triggered
 * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
 * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
 * IRQ_TYPE_LEVEL_HIGH		- high level triggered
 * IRQ_TYPE_LEVEL_LOW		- low level triggered
 * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
 * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
 * IRQ_TYPE_DEFAULT		- For use by some PICs to ask irq_set_type
 *				  to set up the HW to a sane default (used
 *                                by irqdomain map() callbacks to synchronize
 *                                the HW state and SW flags for a newly
 *                                allocated descriptor).
 *
 * Bits which can be modified via irq_set/clear/modify_status_flags()
 * IRQ_LEVEL			- Interrupt is level type. Will be also
 *				  updated in the code when the above trigger
 *				  bits are modified via irq_set_irq_type()
 * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
 *				  it from affinity setting
 * IRQ_NOREQUEST		- Interrupt cannot be requested via
 *				  request_irq()
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 */
enum {
    IRQ_TYPE_NONE		= 0x00000000,
    IRQ_TYPE_EDGE_RISING	= 0x00000001,
    IRQ_TYPE_EDGE_FALLING	= 0x00000002,
    IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
    IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
    IRQ_TYPE_LEVEL_LOW	= 0x00000008,
    IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
    IRQ_TYPE_SENSE_MASK	= 0x0000000f,
    IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,

    IRQ_LEVEL		= (1 <<  8),
    IRQ_PER_CPU		= (1 <<  9),
    IRQ_NOREQUEST		= (1 << 10),
    IRQ_NO_BALANCING    = (1 << 11),

    IRQ_WAITING         = (1 << 13),
    IRQ_PENDING         = (1 << 14),
    IRQ_INIPC           = (1 << 15),

    IRQ_MASKED			= (1 << 16),
    IRQ_INPROGRESS		= (1 << 17),

    IRQ_STARTED         = (1 << 18),
};

/* Clear the status bits in @mask on the descriptor owning @d. */
static inline void __irqd_clear(struct irq_data *d, u32 mask)
{
    struct irq_desc *desc = d->irq_desc;

    desc->status_use_accessors &= ~mask;
}

/* Set the status bits in @mask on the descriptor owning @d. */
static inline void __irqd_set(struct irq_data *d, u32 mask)
{
    struct irq_desc *desc = d->irq_desc;

    desc->status_use_accessors |= mask;
}

/* Test whether any of the status bits in @mask are set. */
static inline bool irqd_has_set(struct irq_data *d, u32 mask)
{
    return (d->irq_desc->status_use_accessors & mask) != 0;
}

/* True if the interrupt is level triggered (IRQ_LEVEL set). */
static inline bool irqd_is_level(struct irq_data *d)
{
    return (d->irq_desc->status_use_accessors & IRQ_LEVEL) != 0;
}

/* True if this is a per-cpu interrupt (IRQ_PER_CPU set). */
static inline bool irqd_is_percpu(struct irq_data *d)
{
    return (d->irq_desc->status_use_accessors & IRQ_PER_CPU) != 0;
}

/* True unless the irq is marked IRQ_NOREQUEST (not requestable). */
static inline bool irqd_can_request(struct irq_data *d)
{
    return (d->irq_desc->status_use_accessors & IRQ_NOREQUEST) == 0;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
    return !(d->irq_desc->status_use_accessors & (IRQ_PER_CPU | IRQ_NO_BALANCING));
}

/*
 * True when the IRQ_WAITING status bit is set.
 *
 * BUGFIX(review): the previous implementation negated the bit test,
 * returning true when the irq was NOT waiting — contradicting both the
 * predicate's name and every other positive irqd_* helper here (e.g.
 * irqd_irq_masked()). The negated form is the idiom of the can_*
 * helpers only. ("waitting" spelling kept to preserve the API.)
 */
static inline bool irqd_is_waitting(struct irq_data *d)
{
    return d->irq_desc->status_use_accessors & IRQ_WAITING;
}

/*
 * True when the IRQ_PENDING status bit is set.
 *
 * BUGFIX(review): the previous implementation negated the bit test,
 * returning true when the irq was NOT pending — inconsistent with the
 * predicate's name and the other positive irqd_* helpers in this file.
 */
static inline bool irqd_is_pending(struct irq_data *d)
{
    return d->irq_desc->status_use_accessors & IRQ_PENDING;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
    return d->irq_desc->status_use_accessors & IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
    return d->irq_desc->status_use_accessors & IRQ_INPROGRESS;
}

static inline bool irqd_irq_inipc(struct irq_data *d)
{
    return d->irq_desc->status_use_accessors & IRQ_INIPC;
}

static inline bool irqd_irq_started(struct irq_data *d)
{
    return d->irq_desc->status_use_accessors & IRQ_STARTED;
}

/* Replace the trigger-type bits (IRQ_TYPE_SENSE_MASK range) with @type. */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
    u32 status = d->irq_desc->status_use_accessors;

    status &= ~IRQ_TYPE_DEFAULT;
    status |= type & IRQ_TYPE_DEFAULT;
    d->irq_desc->status_use_accessors = status;
}

/* Return the current trigger-type bits (IRQ_TYPE_* sense bits). */
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
    const u32 mask = IRQ_TYPE_DEFAULT;

    return d->irq_desc->status_use_accessors & mask;
}

/* Trigger type for @irq, or 0 when no irq_data exists for it. */
static inline u32 irq_get_trigger_type(int irq)
{
    struct irq_data *d = irq_get_irq_data(irq);

    if (!d)
        return 0;
    return irqd_get_trigger_type(d);
}

#endif /* !SEMINIX_IRQDESC_H */
