/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/cdev.h>

#include <alga/rng_mng.h>
#include <alga/timing.h>
#include <alga/pixel_fmts.h>
#include <alga/amd/dce4/dce4.h>

#include "regs.h"

#include "ih.h"
#include "ba.h"
#include "ucode.h"
#include "cp.h"
#include "drv.h"

/*
 * Halt the command processor: stop both the micro engine (ME) and the
 * prefetch parser (PFP), then zero SCRATCH_UMSK — presumably the
 * scratch-register write-back mask that cp_init() sets to 0xff, so the
 * CP stops DMA-ing scratch data to memory.
 */
void cp_stop(struct pci_dev *dev)
{
	wr32(dev, CP_ME_HALT | CP_PFP_HALT, CP_ME_CTL);
	wr32(dev, 0, SCRATCH_UMSK);
}

/* should be at the very beginning of the ring */
/*
 * Emit the micro engine initialization packet (type-3 ME_INIT) and
 * un-halt the CP.  The 6 payload dwords are hw-defined; only the hw
 * context count is computed.  NOTE(review): radeon's equivalent uses a
 * PKT3 count of 5 for 6 payload dwords (count = dwords - 1) — confirm
 * this project's PKT3() macro expects the raw payload count of 6.
 */
void cp_me_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	dd = pci_get_drvdata(dev);

	cp_wr(dev, PKT3(PKT3_ME_INIT, 6));
	cp_wr(dev, 0x1);
	cp_wr(dev, 0x2);
	cp_wr(dev, dd->cfg.gpu_hw_ctxs_n_max - 1); /* last hw ctx index */
	cp_wr(dev, PKT3_ME_INIT_DEV_ID(1));
	cp_wr(dev, 0);
	cp_wr(dev, 0);

	cp_commit(dev);
	/* 0xff leaves the halt bits written by cp_stop() cleared, so this
	   (re)starts the ME/PFP; meaning of the low byte unclear */
	wr32(dev, 0xff, CP_ME_CTL); /* XXX: specific to ME init? */
}

/*
 * o ring size is 2^CP_RING_LOG2_QWS(17) quadwords (256 * 4096 bytes)
 * o block size is gpu page size, namely 2^GPU_PAGE_LOG2_QWS(9) quadwords
 *   (4096 bytes)
 */
/* byte offsets of the scratch and CP read-pointer slots inside the
   write-back buffer (dd->ba.wb_map) */
#define WB_SCRATCH_OF 0
#define WB_CP_RPTR_OF 1024
/*
 * Program the CP ring buffer: soft-reset the CP (and the pipeline
 * blocks tied to it), configure the ring geometry, point the hw at the
 * write-back locations for the read pointer and scratch registers, set
 * the ring base address, and reset the driver-side ring state in dd->cp0.
 * Assumes dd->ba.wb_map and dd->ba.cp_ring_map are already mapped.
 */
void cp_init(struct pci_dev *dev)
{
	u32 cp_rb_ctl;
	u64 cp_rb_rptr_addr;
	u64 wb_scratch_addr;
	struct dev_drv_data *dd;

	/* some command processor base settings */
	wr32(dev, SET(ROQ_IB1_START, 0x16) | SET(ROQ_IB2_START, 0x2b),
							CP_QUEUE_THRESHOLDS);
	wr32(dev, SET(STQ_SPLIT, 0x30), CP_MEQ_THRESHOLDS);
	wr32(dev, 0, CP_PERFMON_CTL);

	/* reset cp; if cp is reset, then pa, sh, vgt also need to be reset */
	wr32(dev, SOFT_RESET_CP | SOFT_RESET_PA | SOFT_RESET_SH
			| SOFT_RESET_VGT | SOFT_RESET_SX, GRBM_SOFT_RESET);
	rr32(dev, GRBM_SOFT_RESET); /* read back, presumably to flush the
				       posted write before delaying */
	mdelay(15);
	wr32(dev, 0, GRBM_SOFT_RESET);
	rr32(dev, GRBM_SOFT_RESET);

	/* set ring buffer size */
	cp_rb_ctl = SET(RB_BLK_LOG2_QWS, GPU_PAGE_LOG2_QWS)
				| SET(RB_BUF_LOG2_QWS, CP_RING_LOG2_QWS);
#ifdef __BIG_ENDIAN
	cp_rb_ctl |= BUF_SWAP_32BIT;
#endif
	wr32(dev, cp_rb_ctl, CP_RB_CTL);
	wr32(dev, 0x4, CP_SEM_WAIT_TIMER);

	wr32(dev, 0, CP_RB_WPTR_DELAY);

	/* temporarily allow host writes to the read pointer (RB_RPTR_WR_ENA)
	   so both pointers can be zeroed; cp_rb_ctl is restored below */
	wr32(dev, cp_rb_ctl | RB_RPTR_WR_ENA, CP_RB_CTL);
	wr32(dev, 0, CP_RB_RPTR_WR);
	wr32(dev, 0, CP_RB_WPTR);
	wr32(dev, 0, CP_RB_RPTR);

	dd = pci_get_drvdata(dev);

	/* set the wb address, 2 lower bits are for endianness */
	cp_rb_rptr_addr = dd->ba.wb_map->gpu_addr + WB_CP_RPTR_OF;
	
	wr32(dev, cp_rb_rptr_addr & 0xfffffffc, CP_RB_RPTR_ADDR);
	wr32(dev, upper_32_bits(cp_rb_rptr_addr) & 0xff, CP_RB_RPTR_ADDR_HI);

	wb_scratch_addr = dd->ba.wb_map->gpu_addr + WB_SCRATCH_OF;
	/*
	 * 256 bytes block index is ok because gpu address and chosen write back
	 * page offset fit properly that required aligment
	 */
	wr32(dev, (wb_scratch_addr >> 8) & 0xffffffff, SCRATCH_ADDR);

	/* enable scratch write-back — presumably one bit per scratch
	   register; cp_stop() clears this mask */
	wr32(dev, 0xff, SCRATCH_UMSK);

	mdelay(1);
	/* drop RB_RPTR_WR_ENA: restore the plain ring control value */
	wr32(dev, cp_rb_ctl, CP_RB_CTL);

	/* 256 bytes aligned ok because it is GPU_PAGE_SZ aligned */
	wr32(dev, dd->ba.cp_ring_map->gpu_addr >> 8, CP_RB_BASE);
	wr32(dev, (1 << 27) | (1 << 28), CP_DEBUG); /* ??? */

	/* reset the driver-side shadow of the ring state */
	dd->cp0.rptr = 0;
	dd->cp0.wptr = 0;
	spin_lock_init(&dd->cp0.lock);
}

/*
 * Append one dword to the CP ring at the current software write pointer
 * and advance it, wrapping at the ring size.  The new pointer is only
 * published to the hw by cp_commit().
 */
inline void cp_wr(struct pci_dev *dev, u32 v)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);
	u32 *ring = dd->ba.cp_ring_map->cpu_addr;

	ring[dd->cp0.wptr] = v;
	dd->cp0.wptr = (dd->cp0.wptr + 1) & CP_RING_DW_MASK;
}

/*
 * Publish the queued ring writes to the CP: pad the ring with PKT2
 * no-ops up to the prefetch parser fetch granularity, order the memory
 * writes before the MMIO doorbell, then write the new write pointer.
 */
inline void cp_commit(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 *r;

	dd = pci_get_drvdata(dev);

	/* match ring fetch size */
	r = dd->ba.cp_ring_map->cpu_addr;
	while (dd->cp0.wptr & CP_RING_PFP_DW_MASK)
		r[dd->cp0.wptr++] = PKT2; /* may step one past the ring
					     mask; wrapped just below */

	wmb();	/* data write operations emitted before dma */

	/* wrap wptr (padding above may have run past the ring end) */
	dd->cp0.wptr &= CP_RING_DW_MASK;
	wr32(dev, dd->cp0.wptr, CP_RB_WPTR);
	rr32(dev, CP_RB_WPTR); /* read back, presumably to flush the
				  posted doorbell write */
}
