#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/bitmap.h>

#define IRQ_BITMAP_BITS	NR_IRQS
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

/*
 * Statically allocated descriptor table (non-SPARSE_IRQ layout): one
 * entry per possible IRQ number.  Every entry starts out routed to
 * handle_bad_irq() so an interrupt arriving on an unclaimed line is
 * reported instead of jumping through a NULL handler pointer.
 */
struct irq_desc irq_desc[NR_IRQS] = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		// .depth		= 1,
		// .lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

/*
 * irq_to_desc() - map a linear IRQ number to its descriptor.
 *
 * Returns a pointer into the static irq_desc[] table, or NULL when
 * @irq is out of range.
 */
struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq >= NR_IRQS)
		return NULL;

	return &irq_desc[irq];
}

/*
 * desc_set_defaults() - reset one descriptor to its boot-time state.
 *
 * Trimmed down from the upstream version (original signature retained
 * below for reference): the only live assignment is the descriptor's
 * own IRQ number.  Everything else either keeps its static-initializer
 * value or is not supported by this port yet.
 */
// static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
// 			      const struct cpumask *affinity, struct module *owner)
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc)
{
	// int cpu;

	// desc->irq_common_data.handler_data = NULL;
	// desc->irq_common_data.msi_desc = NULL;

	// desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	// desc->irq_data.chip = &no_irq_chip;
	// desc->irq_data.chip_data = NULL;
	// irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	// irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	// irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	// desc->handle_irq = handle_bad_irq;
	// desc->depth = 1;
	// desc->irq_count = 0;
	// desc->irqs_unhandled = 0;
	// desc->tot_count = 0;
	// desc->name = NULL;
	// desc->owner = owner;
	// for_each_possible_cpu(cpu)
		// *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	// desc_smp_init(desc, node, affinity);
}

/*
 * early_irq_init() - boot-time initialisation of the descriptor table.
 *
 * Walks every entry of the static irq_desc[] array and resets it to
 * its default state.  Runs once, before any interrupt can fire.
 * Always returns 0 in this port (upstream would chain to
 * arch_early_irq_init() here).
 */
int early_irq_init(void)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS; i++)
		desc_set_defaults(i, &irq_desc[i]);

	return 0;
}

/*
 * Invoke the flow handler installed in @desc (handle_bad_irq unless a
 * real handler has been set).  @desc must be non-NULL; the caller
 * (handle_irq_desc) validates it.
 */
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	desc->handle_irq(desc);
}

/*
 * handle_irq_desc() - run the flow handler for a resolved descriptor.
 *
 * Returns 0 after dispatching, or -EINVAL when @desc is NULL.  The
 * upstream irq-context enforcement check is compiled out in this port.
 */
int handle_irq_desc(struct irq_desc *desc)
{
	if (desc) {
		generic_handle_irq_desc(desc);
		return 0;
	}

	return -EINVAL;
}

/*
 * handle_domain_irq() - dispatch a hardware interrupt number.
 *
 * Translates @hwirq through @domain into a descriptor and runs its flow
 * handler.  @regs is currently unused (upstream's set_irq_regs() and
 * irq_enter()/irq_exit() bookkeeping is compiled out in this port).
 *
 * Returns 0 when a mapping was found and dispatched, -EINVAL when the
 * domain has no mapping for @hwirq.  The flow handler's own status is
 * intentionally not propagated.
 */
int handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs)
{
	struct irq_desc *desc;

	/* The irqdomain code provides boundary checks */
	desc = irq_resolve_mapping(domain, hwirq);
	if (!desc)
		return -EINVAL;

	handle_irq_desc(desc);
	return 0;
}

/*
 * Minimal stand-in for the kernel's struct module: this port has no
 * module loader, but the irq API keeps its @owner parameters so the
 * function signatures stay source-compatible with upstream.
 */
struct module {
	int test;	/* placeholder member; keeps the struct non-empty */
};

/*
 * alloc_descs() - mark a contiguous range of IRQ descriptors as in use.
 *
 * With the static descriptor table nothing is actually allocated: the
 * range is only recorded in the allocated_irqs bitmap.  @node, @affinity
 * and @owner are accepted for upstream signature compatibility but are
 * currently unused (the owner-assignment loop is compiled out).
 *
 * The caller must ensure @start + @cnt does not exceed IRQ_BITMAP_BITS;
 * bitmap_set() performs no bounds checking of its own.
 *
 * Returns @start, the first IRQ number of the reserved range.
 */
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	// u32 i;

	// for (i = 0; i < cnt; i++) {
	// 	struct irq_desc *desc = irq_to_desc(start + i);

	// 	desc->owner = owner;
	// }
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start, ret;

	// if (!cnt)
	// 	return -EINVAL;

	// if (irq >= 0) {
	// 	if (from > irq)
	// 		return -EINVAL;
	// 	from = irq;
	// } else {
	// 	/*
	// 	 * For interrupts which are freely allocated the
	// 	 * architecture can force a lower bound to the @from
	// 	 * argument. x86 uses this to exclude the GSI space.
	// 	 */
	// 	from = arch_dynirq_lower_bound(from);
	// }

	// mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	// ret = -EEXIST;
	// if (irq >=0 && start != irq)
	// 	goto unlock;

	// if (start + cnt > nr_irqs) {
	// 	ret = irq_expand_nr_irqs(start + cnt);
	// 	if (ret)
	// 		goto unlock;
	// }
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	// mutex_unlock(&sparse_irq_lock);
	return ret;
}
