#include <linux/softirq.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/percpu.h>

/*
 * Per-CPU interrupt statistics (one instance per CPU via irq_stat below).
 */
typedef struct
{
    unsigned int __softirq_pending;  /* bitmask of pending softirqs, bit n == softirq nr n */
#ifdef ARCH_WANTS_NMI_IRQSTAT
    unsigned int __nmi_count;        /* NMI counter, only for archs that opt in */
#endif
} irq_cpustat_t;

/* Registered softirq handlers, indexed by softirq number (see open_softirq()). */
static struct softirq_action softirq_vec[NR_SOFTIRQS];
/* Per-CPU interrupt state; holds the pending-softirq bitmask read/written below. */
static DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);

/*
 * Drain the softirqs that are pending at entry.
 *
 * The pending mask is latched and cleared up front, so softirqs raised
 * while this batch is running are left for a later invocation instead
 * of being looped on here.
 *
 * @ksirqd: true when called on behalf of a ksoftirqd-style thread
 *          (currently unused in this simplified implementation).
 */
static void handle_softirqs(bool ksirqd)
{
    struct softirq_action *h;
    u32 pending;
    int softirq_bit;

    pending = local_softirq_pending();

    set_softirq_pending(0);

    h = softirq_vec;
    while (pending)
    {
        softirq_bit = ffs(pending);
        h += softirq_bit - 1;

        h->action();

        /*
         * Advance past the entry just handled: after the shift below,
         * the next ffs() result is relative to the bit just consumed.
         * Without this increment, two adjacent pending softirqs would
         * both dispatch to the same (wrong) vector entry — e.g.
         * pending == 0b11 would run vec[0] twice and never vec[1].
         */
        h++;
        pending >>= softirq_bit;
    }
}

/* Process pending softirqs immediately, in the current context. */
static inline void invoke_softirq(void)
{
    __do_softirq();
}

/*
 * Common irq-exit path: leave the hardirq context, then run any
 * softirqs raised while the interrupt was being serviced.
 *
 * NOTE(review): softirqs are invoked unconditionally here — there is no
 * pending/nesting check (mainline gates this on !in_interrupt() and
 * local_softirq_pending()); confirm this is intended for this port.
 */
static inline void __irq_exit_rcu(void)
{
    __irq_exit_raw();
    invoke_softirq();
}

/*******************************************************************/
/*
 * Return this CPU's pending-softirq bitmask.
 *
 * NOTE(review): irq_stat is declared per-CPU but is accessed directly
 * here with no per-cpu accessor — the original carried a TODO for this.
 */
unsigned local_softirq_pending(void)
{
    return irq_stat.__softirq_pending;
}

/* Overwrite this CPU's pending-softirq bitmask with @x. */
void set_softirq_pending(unsigned x)
{
    irq_stat.__softirq_pending = x;
}

/* OR the bits of @x into this CPU's pending-softirq bitmask. */
void or_softirq_pending(unsigned x)
{
    irq_stat.__softirq_pending |= x;
}

/* Public entry point for draining pending softirqs (non-ksoftirqd path). */
void __do_softirq(void)
{
    handle_softirqs(false);
}

void open_softirq(int nr, void (*action)(void))
{
    softirq_vec[nr].action = action;
}

/*
 * Mark softirq @nr pending on this CPU.
 *
 * NOTE(review): the _irqoff suffix conventionally means the caller runs
 * with interrupts disabled; nothing in this function enforces that.
 */
void __raise_softirq_irqoff(unsigned int nr)
{
    or_softirq_pending(1UL << nr);
}

/* Enter hardirq context (wraps the raw arch-level entry helper). */
void irq_enter_rcu(void)
{
    __irq_enter_raw();
}

/* Leave hardirq context and run any softirqs raised meanwhile. */
void irq_exit_rcu(void)
{
    __irq_exit_rcu();
}
