/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>

#include <alga/rng_mng.h>
#include <alga/timing.h>
#include <alga/pixel_fmts.h>
#include <alga/amd/dce4/dce4.h>

#include "regs.h"

#include "ih.h"
#include "ba.h"
#include "ucode.h"
#include "cp.h"
#include "drv.h"

/*
 * Enable interrupt delivery: set the global interrupt enable bit in
 * IH_CTL and the ring buffer enable bit in IH_RB_CTL.
 */
void ih_start(struct pci_dev *dev)
{
	u32 ctl;
	u32 rb_ctl;

	/* read both registers first, then write both, preserving order */
	ctl = rr32(dev, IH_CTL);
	rb_ctl = rr32(dev, IH_RB_CTL);

	ctl |= ENA_INTR;
	rb_ctl |= IH_RB_ENA;

	wr32(dev, ctl, IH_CTL);
	wr32(dev, rb_ctl, IH_RB_CTL);
}

/*
 * Disable interrupt delivery: clear the ring buffer enable bit in
 * IH_RB_CTL and the global interrupt enable bit in IH_CTL.
 */
void ih_stop(struct pci_dev *dev)
{
	u32 rb_ctl;
	u32 ctl;

	/* read both registers first, then write both, preserving order */
	rb_ctl = rr32(dev, IH_RB_CTL);
	ctl = rr32(dev, IH_CTL);

	rb_ctl &= ~IH_RB_ENA;
	ctl &= ~ENA_INTR;

	/* works even if the ucode is not loaded */
	wr32(dev, rb_ctl, IH_RB_CTL);
	wr32(dev, ctl, IH_CTL);
}

/* ih ring size is 2^IH_RING_LOG2_DWS(=14) dwords or 4096 vectors of 16 bytes */
#define WB_IH_WPTR_OF 2048
/*
 * Program the IH (interrupt handler) ring: interrupt control, ring
 * base address, write pointer writeback location, and default IH_CTL
 * settings. The ring is left disabled; ih_start() enables it.
 */
void ih_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 intr_ctl;
	u32 ih_rb_ctl;
	u32 ih_ctl;

	dd = pci_get_drvdata(dev);

	/*
	 * setup interrupt control
	 * set dummy read address to ring address
	 * 256 bytes block index
	 */
	wr32(dev, dd->ba.ih_ring_map->gpu_addr >> 8, INTR_CTL2);

	intr_ctl = rr32(dev, INTR_CTL);
	/*
	 * IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled
	 *                          without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_ENA
	 */
	intr_ctl &= ~IH_DUMMY_RD_OVERRIDE; /* GPU should disable dummy read */

	/* IH_REQ_NONSNOOP_ENA=1 if ring is in non-cacheable mem, e.g. vram */
	intr_ctl &= ~IH_REQ_NONSNOOP_ENA; /* we are in bus aperture */
	wr32(dev, intr_ctl, INTR_CTL);

	/* 256 bytes block index */
	wr32(dev, dd->ba.ih_ring_map->gpu_addr >> 8, IH_RB_BASE);

	ih_rb_ctl = (IH_WPTR_OVERFLOW_ENA | IH_WPTR_OVERFLOW_CLR
					| SET(IH_IB_LOG2_DWS, IH_RING_LOG2_DWS)
					| IH_WPTR_WRITEBACK_ENA);

	/* wptr writeback address must be dword aligned, hence the mask */
	wr32(dev, (dd->ba.wb_map->gpu_addr + WB_IH_WPTR_OF) & 0xfffffffc,
							IH_RB_WPTR_ADDR_LO);
	wr32(dev, upper_32_bits(dd->ba.wb_map->gpu_addr + WB_IH_WPTR_OF) & 0xff,
							IH_RB_WPTR_ADDR_HI);
	wr32(dev, ih_rb_ctl, IH_RB_CTL);

	/* start with an empty ring */
	wr32(dev, 0, IH_RB_RPTR);
	wr32(dev, 0, IH_RB_WPTR);

	/* default settings for IH_CTL (disabled at first) */
	ih_ctl = SET(MC_WR_REQ_CREDIT, 0x10) | SET(MC_WR_CLEAN_CNT, 0x10)
								| RPTR_REARM;
	wr32(dev, ih_ctl, IH_CTL);

	dd->ih.rp = 0;
	spin_lock_init(&dd->ih.lock);
}

#define VECTOR_SZ 16
#define VECTOR_ID_D0	1
#define VECTOR_ID_D1	2
#define VECTOR_ID_D2	3
#define VECTOR_ID_D3	4
#define VECTOR_ID_D4	5
#define VECTOR_ID_D5	6
#define		Dx_VBLANK 0
#define VECTOR_ID_HPD	42

/*
 * Dispatch one decoded IH vector to the dce4 handlers. Sets *irq_thd
 * to true when the threaded part of the irq handler must run.
 * Vectors with an unknown id are silently ignored.
 */
static void vector(struct pci_dev *dev, u32 id, u32 data, bool *irq_thd)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);

	if (id == VECTOR_ID_HPD) {
		dce4_hpd_irq(dd->dce, data);
		*irq_thd = true;
		return;
	}

	/* display controller vectors: ids are contiguous, D0..D5 */
	if (id >= VECTOR_ID_D0 && id <= VECTOR_ID_D5
					&& data == Dx_VBLANK) {
		/* only page flipping in vblank */
		dce4_pf_irq(dd->dce, id - 1);
		*irq_thd = true;
	}
}

/*
 * Handle an IH ring buffer overflow flagged in the write pointer.
 * On overflow, resynchronize our read pointer to the first vector
 * not overwritten (wptr + 16) and clear the hw overflow flag.
 * Returns the write pointer with the RB_OVERFLOW bit cleared.
 *
 * Note: the return type is u32 since the write pointer is a 32 bit
 * register value and the caller stores it in a u32 anyway.
 */
static u32 wp_overflow(struct pci_dev *dev, u32 wp)
{
	struct dev_drv_data *dd;
	u32 tmp;

	dd = pci_get_drvdata(dev);

	if ((wp & RB_OVERFLOW) != 0) {
		/*
		 * When a ring buffer overflow happens start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(&dev->dev, "ih ring buffer overflow wp=0x%08x "
				"rp=0x%08x, trying next vector at 0x%08x\n",
					(u32)(wp & (~RB_OVERFLOW)), dd->ih.rp,
					(wp + VECTOR_SZ) & IH_RING_MASK);

		dd->ih.rp = (wp + VECTOR_SZ) & IH_RING_MASK;

		/* ack the overflow so the hw can flag the next one */
		tmp = rr32(dev, IH_RB_CTL);
		tmp |= IH_WPTR_OVERFLOW_CLR;
		wr32(dev, tmp, IH_RB_CTL);

		wp &= ~RB_OVERFLOW;
	}
	return wp;
}

/*
 * Drain the IH ring: read the hw write pointer from the writeback
 * slot, dispatch every pending vector, then publish our read pointer
 * to the hw. Re-reads the write pointer until the ring is empty.
 * Returns true when the threaded part of the irq handler must run.
 * Serialized by dd->ih.lock (irqsave).
 */
bool ih_parse(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	unsigned long flgs;
	u32 wp;
	u32 rp;
	bool irq_thd;

	dd = pci_get_drvdata(dev);
	irq_thd = false;

	spin_lock_irqsave(&dd->ih.lock, flgs);
	while (1) {
		/*
		 * order the writeback wptr read against prior ring reads —
		 * presumably the hw updates the slot via DMA; TODO confirm
		 */
		rmb();

		wp = le32_to_cpup(dd->ba.wb_map->cpu_addr + WB_IH_WPTR_OF);
		wp = wp_overflow(dev, wp); /* resync rp if hw flagged overflow */
		rp = dd->ih.rp;

		if (rp == wp)
			break;

		dce4_irqs_ack(dd->dce); /* must ack dce irqs ourself... */

		do {
			u32 id;
			u32 data;

			/*
			 * vector layout (16 bytes): dword 0 low 8 bits are
			 * the source id, dword 1 low 28 bits the source data
			 */
			id =  le32_to_cpup(dd->ba.ih_ring_map->cpu_addr + rp)
									& 0xff;
			data = le32_to_cpup(dd->ba.ih_ring_map->cpu_addr + rp
						+ sizeof(id)) & 0xfffffff;

			vector(dev, id, data, &irq_thd);

			/* advance with wrap-around on the ring size */
			rp += VECTOR_SZ;
			rp &= IH_RING_MASK;
		} while (rp != wp);
		dd->ih.rp = rp;
	}
	/* tell the hw how far we consumed */
	wr32(dev, dd->ih.rp, IH_RB_RPTR);
	spin_unlock_irqrestore(&dd->ih.lock, flgs);
	return irq_thd;
}
