/*
 * DIM-SUM操作系统 -- RISCV主中断控制器驱动
 *
 * Copyright (C) 2023 国科础石(重庆)软件有限公司
 *
 * 作者: Dong Peng <w-pengdong@kernelsoft.com>
 *
 * License terms: GNU General Public License (GPL) version 3
 *
 */
#include <dim-sum/types.h>
#include <dim-sum/devtree.h>
#include <dim-sum/smp_lock.h>
#include <dim-sum/irq_mapping.h>
#include <dim-sum/irq.h>
#include <dim-sum/init.h>
#include <dim-sum/percpu.h>
#include <asm-generic/bug.h>
#include <asm/exception.h>
#include <asm/irq.h>
#include <asm/io.h>

/*
 * CPU index whose PLIC context handles external interrupts.
 * On the StarFive JH7110 interrupts are taken on hart 1 — presumably
 * because hart 0 is a monitor/management core; TODO confirm.  All
 * other configurations use hart 0.
 */
#if defined(CONFIG_ARCH_JH7110)
#define PLIC_CPU 1
#else
#define PLIC_CPU 0
#endif

/*
 * Hard-coded register window used only by the non-devicetree fallback
 * path (__riscv_cpu_irq_init); the normal path reads these from the DT.
 */
#define PLIC_REG_START (0xc000000)
#define PLIC_REG_SIZE (0x210000)

/* Architectural PLIC limits: up to 1024 sources and 15872 contexts. */
#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define     PRIORITY_PER_ID		4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define     ENABLE_PER_HART		0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define     CONTEXT_PER_HART		0x1000
#define     CONTEXT_THRESHOLD		0x00
#define     CONTEXT_CLAIM		0x04

/* Virtual base of the mapped PLIC register block (set during init). */
static void __iomem *plic_regs;

/*
 * Per-CPU record of the PLIC context that delivers to this hart.
 * present - true once a context wired to this CPU's external-interrupt
 *           line has been found during init
 * ctxid   - index of that context within the PLIC
 */
struct plic_handler {
	bool			present;
	int			ctxid;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

/*
 * Base of the per-context control registers (threshold/claim) for hart
 * context @ctxid.
 */
static inline void __iomem *plic_hart_offset(int ctxid)
{
	unsigned int off = CONTEXT_BASE + ctxid * CONTEXT_PER_HART;

	return plic_regs + off;
}

/* Base of the interrupt-enable bitmap for hart context @ctxid. */
static inline u32 __iomem *plic_enable_base(int ctxid)
{
	void __iomem *base = plic_regs + ENABLE_BASE;

	return base + ctxid * ENABLE_PER_HART;
}

/*
 * Protect mask operations on the registers given that we can't assume that
 * atomic memory operations work on them.
 */
static DEFINE_SMP_LOCK(plic_toggle_lock);

/*
 * Enable or disable delivery of @hwirq to hart context @ctxid by
 * flipping its bit in the context's interrupt-enable bitmap.  The
 * bitmap packs 32 sources per 32-bit word: word = hwirq / 32,
 * bit = hwirq % 32.  Serialized by plic_toggle_lock because the
 * read-modify-write of the MMIO word is not atomic.
 */
static inline void plic_toggle(int ctxid, int hwirq, int enable)
{
	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
	/*
	 * Use an unsigned constant: "1 << 31" left-shifts into the sign
	 * bit of a signed int, which is undefined behavior.
	 */
	u32 hwirq_mask = 1U << (hwirq % 32);

	smp_lock(&plic_toggle_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	smp_unlock(&plic_toggle_lock);
}

/*
 * Set the source priority register of @d->hw_irq (1 when enabling,
 * 0 when disabling — priority is hardwired to one, see the comment at
 * PRIORITY_BASE) and toggle the enable bit in the PLIC_CPU context.
 */
static inline void plic_irq_toggle(struct irq_desc *d, int enable)
{
	struct plic_handler *h = per_cpu_ptr(&plic_handlers, PLIC_CPU);

	writel(enable, plic_regs + PRIORITY_BASE + d->hw_irq * PRIORITY_PER_ID);
	if (h->present)
		plic_toggle(h->ctxid, d->hw_irq, enable);
}

/* irq_controller .unmask hook: enable delivery of @d's hardware IRQ. */
static void plic_irq_enable(struct irq_desc *d)
{
	plic_irq_toggle(d, 1);
}

/* irq_controller .mask hook: disable delivery of @d's hardware IRQ. */
static void plic_irq_disable(struct irq_desc *d)
{
	plic_irq_toggle(d, 0);
}

/*
 * irq_controller .eoi hook: signal completion of @d->hw_irq by writing
 * its ID back to the handling context's claim/complete register.
 * (Original code had a redundant self-assignment in the initializer:
 * "handler = handler = per_cpu_ptr(...)".)
 */
static void plic_irq_eoi(struct irq_desc *d)
{
	struct plic_handler *handler = per_cpu_ptr(&plic_handlers, PLIC_CPU);

	if (handler->present)
		writel(d->hw_irq, plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM);
}

/*
 * Walk up the devicetree from @node looking for the enclosing
 * "riscv" CPU node; return its hart ID, or -1 if none is found.
 */
static int plic_find_hart_id(struct device_node *node)
{
	struct device_node *np = node;

	while (np) {
		if (is_device_compatible(np, "riscv"))
			return riscv_dt_processor_hart(np);
		np = np->parent;
	}

	return -1;
}

/* hwirq<->virq mapping table shared by both initialization paths. */
static struct irq_mapping *plic_irq_mapping;


/*
 * Architecture-level entry point for external interrupts.
 *
 * Repeatedly claims pending sources from this context's claim register
 * (a read returns a pending source ID, or 0 when none remain),
 * dispatches each through the irq mapping, then completes it by
 * writing the same ID back.  External interrupts are masked in CSR_IE
 * around the claim loop.
 */
static void __exception_irq_entry plic_handle_irq(struct exception_spot *regs)
{
	struct plic_handler *handler = per_cpu_ptr(&plic_handlers, PLIC_CPU);
	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;
	/* Should never run before a context was bound at init time. */
	WARN_ONCE(!handler->present);

	csr_clear(CSR_IE, IE_EIE);

	while ((hwirq = readl(claim))) {
		do_hard_irq(plic_irq_mapping, hwirq, regs);

		/* Completion write: lets the PLIC raise this source again. */
		writel(hwirq, claim);
	}
	csr_set(CSR_IE, IE_EIE);

}


/* PLIC controller operations plugged into the generic IRQ layer. */
static struct irq_controller plic_chip = {
	.name		= "SiFive PLIC",
	.unmask	= plic_irq_enable,
	.mask	= plic_irq_disable,
	.eoi  = plic_irq_eoi,
};



/*
 * irq_mapping .init hook: bind @virq to @hwirq on this controller,
 * using the fast-EOI flow handler.  Always succeeds.
 */
static int plic_init(struct irq_mapping *map, unsigned int virq, unsigned int hwirq)
{
	prepare_one_irq(map, true, virq, hwirq, &plic_chip,
			NULL, handle_fasteoi_irq, NULL, NULL);
	return 0;
}

/*
 * irq_mapping .extract hook: translate a devicetree interrupt
 * specifier into a hardware IRQ number and trigger type.  configs[1]
 * carries the source ID and configs[2] the trigger flags; at least
 * three cells must be present or -EINVAL is returned.
 */
static int plic_extract(struct irq_mapping *map, struct device_node *node,
				const u32 *configs, unsigned int config_size,
				unsigned int *hw_irq, unsigned int *type)
{
	if (config_size < 3)
		return -EINVAL;

	*hw_irq = configs[1];
	*type = configs[2] & HWIRQ_TRIGGER_TYPE_MASK;

	return 0;
}

/* Mapping callbacks used when allocating the PLIC irq mapping table. */
static const struct irq_mapping_ops plic_irq_mapping_ops = {
	.init = plic_init,
	.extract = plic_extract,
};

/*
 * Fallback initialization that uses the hard-coded register window
 * (PLIC_REG_START/PLIC_REG_SIZE) instead of the devicetree.  Kept for
 * bring-up on platforms without a usable PLIC node; otherwise unused.
 */
__maybe_unused static void __init __riscv_cpu_irq_init(void)
{
	int irq_size = MAX_DEVICES;
	int irq_base = 0;

	plic_regs = ioremap(PLIC_REG_START, PLIC_REG_SIZE);
	/* Consistent with riscv_cpu_irq_init(): a failed mapping is fatal. */
	BUG_ON(!plic_regs);

	/* Allocate and initialize the hwirq<->virq mapping table. */
	plic_irq_mapping =
			alloc_init_irq_mapping(NULL, irq_size, &plic_irq_mapping_ops, NULL);
	ASSERT(plic_irq_mapping != NULL);

	/* Install the per-descriptor handlers for the whole range. */
	irq_mapping_associate_many(plic_irq_mapping, irq_base, 0, irq_size);

	/* Route architecture external interrupts to this driver. */
	set_chip_irq_handle(plic_handle_irq);
}



/*
 * Secondary-CPU interrupt controller bring-up.  Intentionally empty on
 * this port — presumably only the PLIC_CPU context is used for
 * external interrupt handling; confirm before adding per-hart setup.
 */
void irq_controller_secondary_init(void)
{
	
}

/* Devicetree "compatible" strings this driver binds to. */
static const struct dt_device_id plic_dt_ids[] = {
	{
		.compatible = "sifive,plic-1.0.0",
	},
	{
		.compatible = "riscv,plic0",
	},
	{ }	/* sentinel */
};

/**
 * Probe and initialize the SiFive PLIC from the devicetree.
 *
 * Finds a node matching plic_dt_ids, maps its register block, reads
 * the number of interrupt sources ("riscv,ndev") and contexts, then
 * allocates the irq mapping table.  Every context wired to the hart
 * external-interrupt line (RV_IRQ_EXT) gets its priority threshold set
 * to 0 and all its sources masked, and is recorded in the matching
 * per-cpu plic_handler.  Finally the architecture external-interrupt
 * entry point is set to plic_handle_irq().  Probe failures are fatal
 * (BUG_ON).
 */
void __init riscv_cpu_irq_init(void)
{
	int irq_size, irq_base = 0, nr_mapped = 0;
	struct device_node *dt_node = NULL;
	u32 nr_irqs = 0, ctxnr = 0;
	phys_addr_t addr = 0;
	u64 size = 0;
	int ret = 0;
	int i;

	/* Scan the devicetree for a controller node we recognize. */
	for (i = 0; i < ARRAY_SIZE(plic_dt_ids); i++) {
		dt_node = dt_find_matching_node(NULL, &plic_dt_ids[i]);
		if (dt_node)
			break;
	}

	BUG_ON(!dt_node);

	/* Physical base address and size of the controller registers. */
	addr = dt_get_addr(dt_node, &size, 0);
	BUG_ON(!addr || !size);

	/* Map the register block into the kernel virtual address space. */
	plic_regs = ioremap(addr, size);
	BUG_ON(!plic_regs);

	/* Number of interrupt sources wired to this PLIC. */
	ret = dt_read_u32_index(dt_node, "riscv,ndev", &nr_irqs, 0);
	BUG_ON(ret || !nr_irqs);

	ctxnr = dt_irq_count(dt_node);
	BUG_ON(!ctxnr);

	/* Source IDs are 1-based; reserve an entry for ID 0 as well. */
	irq_size = (unsigned int)(nr_irqs + 1);

	/* Allocate and initialize the hwirq<->virq mapping table. */
	plic_irq_mapping =
			alloc_init_irq_mapping(NULL, irq_size, &plic_irq_mapping_ops, NULL);
	ASSERT(plic_irq_mapping != NULL);

	/* Install the per-descriptor handlers for the whole range. */
	irq_mapping_associate_many(plic_irq_mapping, irq_base, 0, irq_size);

	for (i = 0; i < ctxnr; i++) {
		struct dt_phandle_args parent;
		struct plic_handler *handler;
		irq_hw_number_t hwirq;
		int cpu;

		if (dt_irq_parse_one(dt_node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/* skip context holes */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		cpu = plic_find_hart_id(parent.node);
		if (cpu < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = true;
		handler->ctxid = i;
		/* priority must be > threshold to trigger an interrupt */
		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
		/* Mask every source; they are enabled on demand later. */
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(i, hwirq, 0);
		nr_mapped++;
	}

	/* %u for the u32 counts (was %d: signedness mismatch). */
	pr_info("mapped %u interrupts to %d (out of %u) handlers.\n",
		nr_irqs, nr_mapped, ctxnr);

	/* Route architecture external interrupts to this driver. */
	set_chip_irq_handle(plic_handle_irq);
}
int test_irq_riscv_plic(void);

/*
 * Debug helper: drain all pending external interrupts on the PLIC_CPU
 * context, logging and completing each claimed source.  Returns the
 * last hardware IRQ claimed, or 0 if nothing was pending.
 */
int test_irq_riscv_plic(void)
{
	struct plic_handler *handler = per_cpu_ptr(&plic_handlers, PLIC_CPU);
	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;
	int ret = 0;

	WARN_ONCE(!handler->present);

	/* Mask external interrupts while polling the claim register. */
	csr_clear(CSR_IE, IE_EIE);

	while ((hwirq = readl(claim))) {
		/*
		 * Print with %lu and an explicit cast: irq_hw_number_t may be
		 * wider than int, making the original %d a format/argument
		 * mismatch (undefined behavior).
		 */
		pr_info("%s %d hwirq:%lu\n", __FUNCTION__, __LINE__,
			(unsigned long)hwirq);
		/* Complete the interrupt so the PLIC can raise it again. */
		writel(hwirq, claim);
		ret = (int)hwirq;
	}
	csr_set(CSR_IE, IE_EIE);
	return ret;
}
