#include "linux/err.h"
#include "linux/printk.h"
#include "linux/sched.h"
#include "linux/jiffies.h"
#include "linux/percpu-defs.h"
#include "linux/cpumask.h"
#include "linux/current.h"
#include "linux/bottom_half.h"
#include "linux/preempt.h"
#include "linux/irqflags.h"
#include "linux/cache.h"
#include "linux/compiler.h"
#include "linux/init.h"
#include "asm/ptrace.h"
#include "linux/init.h"
#include "linux/errno.h"
#include "linux/irq.h"
#include "linux/interrupt.h"
#include "linux/compiler.h"
#include "linux/irq_cpustat.h"
#include "linux/bitops.h"

/*
 * Budget for a single __do_softirq() invocation: give up after ~2ms of
 * jiffies time, or after MAX_SOFTIRQ_RESTART full restart passes,
 * whichever comes first, and defer the rest (see wakeup_softirqd()).
 */
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

/* Per-CPU irq statistics; presumably backs the local_softirq_pending()
 * machinery from linux/irq_cpustat.h — confirm against that header. */
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
/* Softirq handler table, one slot per softirq number; populated via
 * open_softirq() and walked by __do_softirq(). */
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

/* Human-readable softirq names, indexed by softirq number; used in the
 * preempt-count imbalance diagnostic in __do_softirq(). */
char *softirq_to_name[NR_SOFTIRQS] =
    {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"};

/*
 * Stub: intentionally does nothing in this implementation.
 * NOTE(review): in the upstream kernel this wakes the per-CPU ksoftirqd
 * thread so deferred softirqs eventually run; with this stub, work deferred
 * by __do_softirq() after its time/restart budget is silently dropped until
 * the next softirq processing opportunity — confirm this is intended.
 */
void wakeup_softirqd(void)
{
}

/*
 * Stub: always reports that no ksoftirqd thread is running, so
 * do_softirq()/invoke_softirq() always process pending softirqs inline.
 */
int ksoftirqd_running(void)
{
    return 0;
}

/*
 * Stub: intentionally empty. NOTE(review): upstream this subtracts @cnt
 * from the preempt count to leave SOFTIRQ context; since it is a no-op
 * here, verify that __local_bh_disable_ip() is matched elsewhere, or the
 * preempt count will stay elevated after __do_softirq().
 */
void __local_bh_enable(unsigned int cnt)
{
}

/*
 * Core softirq processing loop, modeled on the upstream kernel's
 * __do_softirq().
 *
 * Must be entered with local interrupts disabled.  Snapshots the pending
 * bitmask, clears it, then re-enables interrupts while running the
 * handlers (so new softirqs raised meanwhile accumulate in the per-CPU
 * pending word).  Loops back via `restart` while more work arrives, until
 * the time budget (MAX_SOFTIRQ_TIME), the restart budget
 * (MAX_SOFTIRQ_RESTART), or need_resched() says stop; leftover work is
 * handed to wakeup_softirqd().
 */
void __do_softirq(void)
{
    unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
    //unsigned long old_flags = current->flags;
    int max_restart = MAX_SOFTIRQ_RESTART;
    struct softirq_action *h;
    u32 pending;
    int softirq_bit;

    /*
     * Mask out PF_MEMALLOC s current task context is borrowed for the
     * softirq. A softirq handled such as network RX might set PF_MEMALLOC
     * again if the socket is related to swap
     */
    //current->flags &= ~PF_MEMALLOC;

    pending = local_softirq_pending();

    /* Enter SOFTIRQ context: in_interrupt() is true from here on. */
    __local_bh_disable_ip(0, SOFTIRQ_OFFSET);

restart:
    /* Reset the pending bitmask before enabling irqs */
    set_softirq_pending(0);

    local_irq_enable();

    h = softirq_vec;

    /* Walk set bits lowest-first; ffs() returns a 1-based bit index. */
    while ((softirq_bit = ffs(pending)))
    {
        unsigned int vec_nr;
        int prev_count;

        /* Advance to the vector for the next set bit. */
        h += softirq_bit - 1;

        vec_nr = h - softirq_vec;
        prev_count = preempt_count();

        //kstat_incr_softirqs_this_cpu(vec_nr);

        h->action(h);
        /* A handler that leaks a preempt-count change would corrupt the
         * whole CPU's state; complain loudly and repair the count. */
        if (prev_count != preempt_count())
        {
            printk("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                   vec_nr, softirq_to_name[vec_nr], h->action,
                   prev_count, preempt_count());
            preempt_count_set(prev_count);
        }
        h++;
        /* Consume the handled bit (and the zeros below it). */
        pending >>= softirq_bit;
    }

    local_irq_disable();

    /* Anything raised while handlers ran with irqs enabled? */
    pending = local_softirq_pending();
    if (pending)
    {
        /* Keep going only while within both budgets and nobody needs
         * the CPU; otherwise defer to ksoftirqd. */
        if (time_before(jiffies, end) && !need_resched() &&
            --max_restart)
            goto restart;

        wakeup_softirqd();
    }

    /* Leave SOFTIRQ context (stubbed in this file — see __local_bh_enable). */
    __local_bh_enable(SOFTIRQ_OFFSET);
    WARN_ON_ONCE(in_interrupt());
    //tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

/*
 * Run softirqs "on their own stack".  In this implementation there is no
 * separate softirq stack, so this simply calls __do_softirq() on the
 * current stack.
 */
void do_softirq_own_stack(void)
{
    __do_softirq();
}

/*
 * Process any pending softirqs from task context.
 *
 * A no-op when already inside hard/soft irq context — the outer context
 * will run the softirqs on its way out.  Runs with local interrupts
 * disabled around the pending check and the processing itself; skipped
 * when ksoftirqd is already handling the work (never, with this file's
 * stub ksoftirqd_running()).
 */
void do_softirq(void)
{
    unsigned long saved_flags;

    if (in_interrupt())
        return;

    local_irq_save(saved_flags);

    if (local_softirq_pending() && !ksoftirqd_running())
        do_softirq_own_stack();

    local_irq_restore(saved_flags);
}

/*
 * Called from irq_exit() when softirqs are pending: decide where to run
 * them.  Runs them inline (on the irq stack or a dedicated softirq stack,
 * depending on CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK) unless forced-threaded
 * irqs are enabled or ksoftirqd is already active, in which case the work
 * is deferred to ksoftirqd.
 */
void invoke_softirq(void)
{
    if (ksoftirqd_running())
        return;

    if (!force_irqthreads)
    {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
        /*
         * We can safely execute softirq on the current stack if
         * it is the irq stack, because it should be near empty
         * at this stage.
         */
        __do_softirq();
#else
        /*
         * Otherwise, irq_exit() is called on the task stack that can
         * be potentially deep already. So call softirq in its own stack
         * to prevent from any overrun.
         */
        do_softirq_own_stack();
#endif
    }
    else
    {
        wakeup_softirqd();
    }
}

/*
 * Enter hard-irq context: bump the HARDIRQ part of the preempt count so
 * in_interrupt() reports true.  Simplified — no RCU/tick accounting as in
 * the upstream kernel's irq_enter().  Must be paired with irq_exit().
 */
void irq_enter(void)
{
    preempt_count_add(HARDIRQ_OFFSET); 
}

/*
 * Leave hard-irq context (pairs with irq_enter()).
 *
 * After dropping the HARDIRQ count, run any softirqs raised by the
 * handler — but only when this was the outermost irq level and we are
 * not inside a softirq either.
 */
void irq_exit(void)
{
    preempt_count_sub(HARDIRQ_OFFSET);

    if (!in_interrupt() && local_softirq_pending())
    {
        invoke_softirq();
    }
}

/*
 * Set softirq @nr's bit in the per-CPU pending mask.  Caller must have
 * interrupts disabled; does not wake ksoftirqd (use raise_softirq_irqoff
 * for that).
 */
void __raise_softirq_irqoff(unsigned int nr)
{
    or_softirq_pending(1UL << nr);
}

/*
 * Raise softirq @nr with interrupts already disabled.
 *
 * If we are in interrupt context the softirq will run on irq_exit();
 * otherwise nothing would process it promptly, so wake ksoftirqd
 * (a no-op with this file's stub).
 */
void raise_softirq_irqoff(unsigned int nr)
{
    __raise_softirq_irqoff(nr);

    if (!in_interrupt())
        wakeup_softirqd();
}

/*
 * Raise softirq @nr from any context: irq-safe wrapper around
 * raise_softirq_irqoff() that disables local interrupts for the duration.
 */
void raise_softirq(unsigned int nr)
{
    unsigned long irq_flags;

    local_irq_save(irq_flags);
    raise_softirq_irqoff(nr);
    local_irq_restore(irq_flags);
}


/*
 * Register @action as the handler for softirq @nr.
 * NOTE(review): no bounds check on @nr (matches upstream); callers must
 * pass a valid softirq number < NR_SOFTIRQS.
 */
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
    softirq_vec[nr].action = action;
}


/*
 * Singly-linked tasklet list with O(1) append: @tail points at the last
 * node's next pointer (or at @head when the list is empty — see
 * softirq_init()).
 */
struct tasklet_head 
{
    struct tasklet_struct *head;
    struct tasklet_struct **tail;
};

/* Per-CPU pending lists for the TASKLET_SOFTIRQ and HI_SOFTIRQ vectors. */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

/*
 * TASKLET_SOFTIRQ handler — stub, not yet implemented; upstream this
 * would drain this CPU's tasklet_vec list.
 */
static void tasklet_action(struct softirq_action *a)
{

}

/*
 * HI_SOFTIRQ handler — stub, not yet implemented; upstream this would
 * drain this CPU's tasklet_hi_vec list.
 */
static void tasklet_hi_action(struct softirq_action *a)
{

}

/*
 * One-time boot initialization of the softirq layer.
 *
 * Marks every possible CPU's tasklet lists as empty (tail pointing at
 * head), then registers the tasklet handlers for the TASKLET and HI
 * softirq vectors.
 */
void __init softirq_init(void)
{
    int cpu = 0;

    for_each_possible_cpu(cpu)
    {
        /* Empty list: tail points back at the head pointer itself. */
        per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
    }

    open_softirq(TASKLET_SOFTIRQ, tasklet_action);
    open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}