/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/
#include <linux/pci.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>

#include <alga/rng_mng.h>
#include <alga/timing.h>
#include <alga/pixel_fmts.h>
#include <alga/amd/dce4/dce4.h>

#include "regs.h"

#include "ih.h"
#include "ba.h"
#include "ucode.h"
#include "cp.h"
#include "drv.h"

/* pte attributes */
#define PTE_VALID	(1 << 0)
#define PTE_SYSTEM	(1 << 1) /* page aperture: system (vram+agp), non-system (pcie) */
#define PTE_SNOOPED	(1 << 2) /* snoop is cache coherent */
#define PTE_READABLE	(1 << 5)
#define PTE_WRITEABLE	(1 << 6)

static const size_t ctx0_ba_range_sz = 512 * 1024 * 1024;	/* 512 MiB */

/*
 * Flush the gpu tlb for VM context 0. Requests the flush through
 * VM_CTX_0_REQ_RESP and polls the same register for up to 100 ms for a
 * response. Warns on an explicit failure response; silently keeps going
 * on timeout.
 */
static void ctx0_tlb_flush(struct pci_dev *dev)
{
	unsigned i;
	u32 tmp;

	/* HDP memory coherency flush — presumably pushes pending host
	 * writes to vram before the tlb flush; verify against hw docs */
	wr32(dev, 0x1, HDP_MEM_COHERENCY_FLUSH_CTL);

	/* request the ctx0 tlb flush */
	wr32(dev, SET(REQ_TYPE, TLB_FLUSH), VM_CTX_0_REQ_RESP);
	for (i = 0; i < 100000; i++) { /* 100000 us = 100 ms */
		/* poll the response field of the request/response register */
		tmp = rr32(dev, VM_CTX_0_REQ_RESP);
		tmp = GET(RESP_TYPE, tmp);
		if (tmp == RESP_FAILED) {
			dev_warn(&dev->dev, "ba: flush TLB failed\n");
			return;
		}
		if (tmp) /* any other non-zero response: flush completed */
			return;
		udelay(1);
	}
	/* try to keep going anyway if timed out */
}

/* flush the tlb of the given ctx; ctx0 is the only supported ctx for now */
static void ctx_tlb_flush(struct ctx *ctx)
{
	struct pci_dev *dev = ctx->dev;

	ctx0_tlb_flush(dev);
}

static int dummy_page(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	dd = pci_get_drvdata(dev);

	dd->ba.dummy_cpu_addr = dma_zalloc_coherent(&dev->dev, GPU_PAGE_SZ,
					&dd->ba.dummy_bus_addr, GFP_KERNEL);
	if (!dd->ba.dummy_cpu_addr) {
		dev_err(&dev->dev, "ba: unable to allocate dummy page\n");
		return -BA_ERR;
	}

	if (!IS_GPU_PAGE_ALIGNED(dd->ba.dummy_bus_addr)) {
		dev_err(&dev->dev,
			"ba: dummy page bus addr not aligned on GPU page\n");
		dma_free_coherent(&dev->dev, GPU_PAGE_SZ, dd->ba.dummy_cpu_addr,
							dd->ba.dummy_bus_addr);
		return -BA_ERR;
	}
	dev_info(&dev->dev, "ba: dummy page mapped cpu_addr=0x%p"
				" bus_addr=0x%016llx\n", dd->ba.dummy_cpu_addr,
							dd->ba.dummy_bus_addr);
	return 0;
}

/*
 * Prepare a VM context: reserve vram for its page table (one u64 pte per
 * gpu page of the aperture), then init its address range manager and its
 * (empty) list of mappings.
 * Returns 0 on success, -BA_ERR if the vram allocation fails.
 */
static int ctx_init(struct pci_dev *dev, struct ctx *ctx, u64 start, u64 sz)
{
	struct dev_drv_data *dd;
	int r;

	ctx->dev = dev;
	dd = pci_get_drvdata(ctx->dev);

	r = rng_alloc_align(&ctx->pt_start, &dd->vram.mng, (sz / GPU_PAGE_SZ)
						* sizeof(u64), GPU_PAGE_SZ);
	if (r != 0) {
		dev_err(&ctx->dev->dev, "ba:ctx: unable to allocate vram for"
						" ctx page table entries\n");
		return -BA_ERR;
	}

	rng_mng_init(&ctx->mng, start, sz);
	/*
	 * BUG FIX: init the maps list of the ctx being set up, not
	 * dd->ba.ctx0 unconditionally — the original hardcoded ctx0, which
	 * would leave any other ctx with an uninitialized maps list.
	 */
	INIT_LIST_HEAD(&ctx->maps);
	return 0;
}

/*
 * Set up the gpu virtual memory for the bus aperture (only ctx0):
 * place the ctx0 aperture right after vram, point every pte at the
 * dummy page, then program the l2 cache, l1 tlbs and ctx0 page table
 * registers. Returns 0 on success, -BA_ERR otherwise.
 */
int ba_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	unsigned i;
	u64 pte;
	u32 tmp;
	int r;
	u64 pt_of;	/* byte offset of the ctx0 page table inside vram */
	u64 ctx0_start;
	u64 ctx0_end;

	dd = pci_get_drvdata(dev);

	if (ctx0_ba_range_sz % GPU_PAGE_SZ) {
		dev_err(&dev->dev, "ba: bus aperture size is not aligned on gpu"
								" page size\n");
		return -BA_ERR;
	}

	/* ctx0 aperture after vram */
	ctx0_start = rng_align(dd->vram.mng.s + dd->vram.mng.sz, GPU_PAGE_SZ);
	r = ctx_init(dev, &dd->ba.ctx0, ctx0_start, ctx0_ba_range_sz);
	if (r != 0)
		return r;
	ctx0_end = dd->ba.ctx0.mng.s + dd->ba.ctx0.mng.sz - 1;

	/* NOTE(review): on failure here the ctx0 page table vram range and
	 * rng mng are not released — presumably fatal-path only; verify */
	r = dummy_page(dev);
	if (r != 0)
		return r;

	/* point every ctx0 pte at the dummy page, writing through bar0 */
	pt_of = dd->ba.ctx0.pt_start - dd->vram.mng.s;
	pte = dd->ba.dummy_bus_addr | PTE_VALID | PTE_SYSTEM
				| PTE_SNOOPED | PTE_READABLE | PTE_WRITEABLE;
	for (i = 0; i < ctx0_ba_range_sz/GPU_PAGE_SZ; ++i)
		writeq(pte, dd->vram.bar0 + pt_of + i * sizeof(pte));

	/* make sure the gpu pte updates were sent over the bus */
	wmb();

	ctx0_tlb_flush(dev);

	/* setup l2 cache */
	wr32(dev, ENA_L2_CACHE | ENA_L2_FRAG_PROCESSING
				| ENA_L2_PTE_CACHE_LRU_UPDATE_BY_WR
				| SET(EFFECTIVE_L2_QUEUE_SZ, 7), VM_L2_CTL_0);
	wr32(dev, 0, VM_L2_CTL_1);
	wr32(dev, SET(BANK_SELECT, 0) | SET(CACHE_UPDATE_MODE, 2), VM_L2_CTL_2);

	/*
	 * setup tlb control/VM read/write clients
	 * MD has "A" and "B", MB has SYS/HDP/DMA/SEM...
	 */
	tmp = ENA_L1_TLB | ENA_L1_FRAG_PROCESSING
		| SET(SYS_ACCESS_MODE, NOT_IN_SYS)
		| SET(SYS_APER_UNMAPPED_ACCESS, PASS_THRU)
		| SET(EFFECTIVE_L1_TLB_SZ, 5) | SET(EFFECTIVE_L1_QUEUE_SZ, 5);

	wr32(dev, tmp, MC_VM_MD_L1_TLB_0_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_1_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_2_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_3_CTL);

	wr32(dev, tmp, MC_VM_MB_L1_TLB_0_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB_1_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB_2_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB_3_CTL);

	/* program the ctx0 aperture range and page table base (page idx) */
	wr32(dev, GPU_PAGE_IDX(dd->ba.ctx0.mng.s), VM_CTX_0_PT_START_ADDR);
	wr32(dev, GPU_PAGE_IDX(ctx0_end), VM_CTX_0_PT_END_ADDR);
	wr32(dev, GPU_PAGE_IDX(dd->ba.ctx0.pt_start), VM_CTX_0_PT_BASE_ADDR);
	wr32(dev, ENA_CTX | SET(PT_DEPTH, 0)
			| RNG_PROTECTION_FAULT_ENA_DEFAULT, VM_CTX_0_CTL);

	/* faulting accesses are redirected to the dummy page */
	wr32(dev, GPU_PAGE_IDX(dd->ba.dummy_bus_addr),
					VM_CTX_0_PROTECTION_FAULT_DEFAULT_ADDR);
	wr32(dev, 0, VM_CTX_1_CTL);

	ctx0_tlb_flush(dev);
	return 0;
}

/*
 * Tear down the gpu virtual memory setup (only ctx0): disable both VM
 * contexts, reprogram l2/l1 with caching and VM disabled, then free the
 * dummy page and the ctx0 range manager.
 */
void ba_shutdown(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 tmp;

	/* disable all tables */
	wr32(dev, 0, VM_CTX_0_CTL);
	wr32(dev, 0, VM_CTX_1_CTL);

	/* setup l2 cache (note: no ENA_L2_CACHE bit, unlike ba_init) */
	wr32(dev, ENA_L2_FRAG_PROCESSING | SET(EFFECTIVE_L2_QUEUE_SZ, 7),
								VM_L2_CTL_0);
	wr32(dev, 0, VM_L2_CTL_1);
	wr32(dev, SET(BANK_SELECT, 0) | SET(CACHE_UPDATE_MODE, 2), VM_L2_CTL_2);

	/* setup tlb control/VM read/write clients (l1 tlbs disabled) */
	tmp = SET(EFFECTIVE_L1_TLB_SZ, 5) | SET(EFFECTIVE_L1_QUEUE_SZ, 5);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_0_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_1_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_2_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB_3_CTL);

	wr32(dev, tmp, MC_VM_MB_L1_TLB_0_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB_1_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB_2_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB_3_CTL);

	dd = pci_get_drvdata(dev);

	dma_free_coherent(&dev->dev, GPU_PAGE_SZ, dd->ba.dummy_cpu_addr,
							dd->ba.dummy_bus_addr);
	/* NOTE(review): the vram range at dd->ba.ctx0.pt_start is not
	 * rng_free()d here — presumably the vram manager is destroyed as a
	 * whole elsewhere; verify against driver teardown path */
	rng_mng_destroy(&dd->ba.ctx0.mng);
}

static void ptes_coherent_contig_restore(struct ba_map *m)
{
	struct dev_drv_data *dd;
	u64 pte;
	unsigned i;

	dd = pci_get_drvdata(m->ctx->dev);

	pte = dd->ba.dummy_bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
						| PTE_READABLE | PTE_WRITEABLE;
	for (i = 0; i < m->gpu_ps_n; ++i) {
		writeq(pte, dd->vram.bar0 + (m->ptes_start - dd->vram.mng.s)
						+ i * sizeof(pte));
	}
}

static void ptes_coherent_contig_install(struct ba_map *m)
{
	struct dev_drv_data *dd;
	unsigned i;
	unsigned pages_of;
	u64 bar0_pt_of;
	u64 __iomem *pte;
	u64 bus_addr;

	dd = pci_get_drvdata(m->ctx->dev);

	pages_of = (m->gpu_addr - m->ctx->mng.s) / GPU_PAGE_SZ;
	m->ptes_start = m->ctx->pt_start + pages_of * sizeof(u64);

	bar0_pt_of = m->ptes_start - dd->vram.mng.s;
	pte = (u64 __iomem *)(dd->vram.bar0 + bar0_pt_of);
	bus_addr = m->bus_addr;

	for (i = 0; i < m->gpu_ps_n; ++i, ++pte, bus_addr += GPU_PAGE_SZ)
		writeq(bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE, pte);
}

/*
 * Map gpu_ps_n gpu pages of coherent, bus-contiguous memory into the ctx
 * aperture: allocate the tracking struct and an aperture range, do the
 * coherent dma allocation, install the ptes and flush the tlb.
 * Returns the new mapping (owned by the ctx maps list), or NULL.
 */
static struct ba_map *ctx_map_coherent_contig(struct ctx *ctx, unsigned gpu_ps_n)
{
	struct ba_map *m;
	size_t sz;
	int r;

	/* BUG FIX: kzalloc takes (size, flags); arguments were swapped */
	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m == NULL) {
		dev_err(&ctx->dev->dev, "ba:ctx: unable to allocate ba"
								" struct\n");
		goto err;
	}

	m->type = BA_MAP_COHERENT_CONTIG;
	m->ctx = ctx;
	m->gpu_ps_n = gpu_ps_n;

	sz = m->gpu_ps_n * GPU_PAGE_SZ;

	/* allocate a range of the aperture */
	r = rng_alloc_align(&m->gpu_addr, &m->ctx->mng, GPU_PAGE_SZ
						* m->gpu_ps_n, GPU_PAGE_SZ);
	if (r != 0) {
		dev_err(&ctx->dev->dev, "ba:ctx: unable to allocate gpu address"
							" space for mapping\n");
		goto err_free_ba_map;
	}

	/* do the coherent mapping */
	m->cpu_addr = dma_zalloc_coherent(&ctx->dev->dev, sz, &m->bus_addr,
								GFP_KERNEL);
	if (m->cpu_addr == NULL) {
		/* BUG FIX: message previously printed "dmamapping" (missing
		 * space between the concatenated string literals) */
		dev_err(&ctx->dev->dev, "ba:ctx: unable to perform coherent dma"
								" mapping\n");
		goto err_free_rng;
	}

	/* install the ptes for this mapping in its owning ctx */
	ptes_coherent_contig_install(m);

	/* account for this new mapping in the ctx */
	list_add(&m->n, &ctx->maps);

	/* be sure the gpu pte updates were sent over the bus */
	wmb();

	/* flush ctx tlb to make live the mapping on the gpu */
	ctx_tlb_flush(ctx);
	return m;

err_free_rng:
	rng_free(&m->ctx->mng, m->gpu_addr);
err_free_ba_map:
	kfree(m);
err:
	return NULL;
}

static void cleanup_coherent_contig(struct ba_map *m)
{
	ptes_coherent_contig_restore(m);
	rng_free(&m->ctx->mng, m->gpu_addr);
	dma_free_coherent(&m->ctx->dev->dev, m->gpu_ps_n * GPU_PAGE_SZ,
						m->cpu_addr, m->bus_addr);
	kfree(m);
}

/* dispatch mapping teardown on the mapping type */
static void cleanup(struct ba_map *m)
{
	if (m->type == BA_MAP_COHERENT_CONTIG) {
		cleanup_coherent_contig(m);
		return;
	}
	dev_err(&m->ctx->dev->dev, "ba: unable to cleanup unknown type"
								" mapping\n");
}

/*
 * Unlink m from its ctx maps list and tear it down, then flush the ctx
 * tlb so the restored dummy ptes take effect on the gpu.
 */
static void unmap(struct ba_map *m)
{
	struct ctx *ctx = m->ctx; /* cache it: m is about to be freed */
	struct ba_map *it;

	list_for_each_entry(it, &ctx->maps, n) {
		if (it != m)
			continue;
		list_del(&it->n);
		cleanup(it);
		break;
	}

	/* make sure the gpu pte updates were sent over the bus */
	wmb();

	/* flush ctx tlb to make live the restored dummy pages */
	ctx_tlb_flush(ctx);
}

/*
 * Helper for the core mappings: check the size is gpu-page aligned, then
 * create a coherent contiguous mapping in the ctx and store it in *m.
 * Returns 0 on success, -BA_ERR otherwise.
 */
static int generic_map(struct ctx *ctx, u64 sz, struct ba_map **m)
{
	if (sz % GPU_PAGE_SZ != 0) {
		dev_err(&ctx->dev->dev, "ba: size not aligned on gpu page"
								" size\n");
		return -BA_ERR;
	}

	*m = ctx_map_coherent_contig(ctx, sz / GPU_PAGE_SZ);
	if (*m == NULL) {
		dev_err(&ctx->dev->dev, "ba: unable to dma map the aperture\n");
		return -BA_ERR;
	}

	dev_info(&ctx->dev->dev, "ba: aperture coherently mapped gpu_addr=0x%016llx"
			" cpu_addr=0x%p\n", (*m)->gpu_addr, (*m)->cpu_addr);
	return 0;
}

/* map the write back page (one gpu page) — default in ctx0 */
static int wb_map(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);

	dev_info(&dev->dev, "ba: mapping write back page bus aperture\n");
	return generic_map(&dd->ba.ctx0, GPU_PAGE_SZ, &dd->ba.wb_map);
}

/* default in ctx0 */
static int ih_ring_map(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u64 ih_ring_sz;

	ih_ring_sz = (1 << IH_RING_LOG2_DWS) * 4;

	dd = pci_get_drvdata(dev);

	dev_info(&dev->dev, "ba: mapping ih ring bus aperture\n");
	return generic_map(&dd->ba.ctx0, ih_ring_sz, &dd->ba.ih_ring_map);
}

/* default in ctx0 */
static int cp_ring_map(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u64 cp_ring_sz;

	cp_ring_sz = (1 << CP_RING_LOG2_QWS) * 8;

	dd = pci_get_drvdata(dev);

	dev_info(&dev->dev, "ba: mapping cp ring bus aperture\n");
	return generic_map(&dd->ba.ctx0, cp_ring_sz, &dd->ba.cp_ring_map);
}

/*
 * Map the default ctx0 apertures: write back page, ih ring and cp ring.
 * On failure, already-mapped apertures are unwound in reverse order.
 * Returns 0 on success, -BA_ERR otherwise.
 */
int ba_map(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);

	if (wb_map(dev) != 0)
		return -BA_ERR;
	if (ih_ring_map(dev) != 0)
		goto err_unmap_wb;
	if (cp_ring_map(dev) != 0)
		goto err_unmap_ih_ring;
	return 0;

err_unmap_ih_ring:
	unmap(dd->ba.ih_ring_map);
err_unmap_wb:
	unmap(dd->ba.wb_map);
	return -BA_ERR;
}

/* unmap the default ctx0 apertures, in reverse order of ba_map */
void ba_unmap(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);

	unmap(dd->ba.cp_ring_map);
	unmap(dd->ba.ih_ring_map);
	unmap(dd->ba.wb_map);
}
