/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/

#include <linux/pci.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/cdev.h>

#include "types.h"
#include "ba.h"
#include "ucode.h"
#include "cp.h"
#include "gpu.h"
#include "irq.h"
#include "drv.h"
#include "regs.h"

/* pte attributes */
#define PTE_VALID	(1 << 0)
#define PTE_SYSTEM	(1 << 1) /* page aperture: system (vram+agp), non-system (pcie) */
#define PTE_SNOOPED	(1 << 2) /* snoop is cache coherent */
#define PTE_READABLE	(1 << 5)
#define PTE_WRITEABLE	(1 << 6)

static const size_t ba_range_sz = 512 * 1024 *1024;	/* 512 MB */

static u64 pt_start;

/*
 * Flush the HDP cache and the vm ctx0 TLB after page table entries in
 * vram have been modified.
 *
 * Polls VM_CTX0_REQUEST_RESPONSE for up to ~100 ms; a response type of
 * 2 is reported by the hardware as a failed flush.  On timeout we fall
 * through and keep going rather than aborting device setup.
 */
static void ba_tlb_flush(struct pci_dev *dev)
{
	unsigned i;
	u32 tmp;

	/* flush the HDP cache so CPU writes to vram reach memory first */
	wr32(dev, 0x1, HDP_MEM_COHERENCY_FLUSH_CTL);

	/* request a ctx0 TLB flush, then poll the response field */
	wr32(dev, REQUEST_TYPE(1), VM_CTX0_REQUEST_RESPONSE);
	for (i = 0; i < 100000; i++) { /* 100000 us = 100 ms */
		tmp = rr32(dev, VM_CTX0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			/* response type 2: the flush failed */
			dev_warn(&dev->dev, "flush TLB failed\n");
			return;
		}
		if (tmp)
			/* any other non-zero response: flush completed */
			return;
		udelay(1);
	}
	/* try to keep going anyway if timed out */
}

/*
 * Set up the gpu bus aperture (vm context 0):
 *  - place the ctx0 page table in vram just after the vga range and the
 *    scratch page (pt_start),
 *  - allocate one dummy system page and point every pte at it,
 *  - program the vm l2 cache and the MD/MB l1 tlbs,
 *  - enable ctx0 over [vram end + 1, vram end + ba_range_sz].
 *
 * Returns 0 on success or a negative errno.
 */
int ba_init(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	unsigned i;
	gpu_addr_t pte;
	gpu_addr_t ctx0_base;
	u32 tmp;

	/* after vga and scratch page, see gpu_address_space.layout */
	pt_start = VGA_RANGE_SZ + GPU_PAGE_SZ;

	drv_data = pci_get_drvdata(dev);

	/* some alignment checks */
	if (ba_range_sz % GPU_PAGE_SZ) {
		dev_err(&dev->dev,
			"bus aperture size is not aligned on gpu page size\n");
		return -EINVAL;
	}
	/* ctx0 gpu addresses start right after the vram range */
	drv_data->ba.ctx0.start = drv_data->vram.range.end + 1;
	if (!IS_GPU_PAGE_ALIGNED(drv_data->ba.ctx0.start)) {
		dev_err(&dev->dev, "start of ctx0 is not gpu page aligned\n");
		return -EINVAL;
	}
	drv_data->ba.ctx0.end = drv_data->ba.ctx0.start + ba_range_sz - 1;

	/* physical vram address of the ctx0 page table */
	ctx0_base = drv_data->vram.range.start + pt_start;
	if (!IS_GPU_PAGE_ALIGNED(ctx0_base)) {
		dev_err(&dev->dev,
			"start of ctx0 page table is not gpu page aligned\n");
		return -EINVAL;
	}

	/*
	 * dummy page: single shared system page backing every aperture
	 * page until something real is mapped over it.
	 * NOTE(review): GFP_ATOMIC here looks like init-path code where
	 * GFP_KERNEL would do — confirm the calling context.
	 */
	drv_data->ba.dummy.cpu_addr = dma_alloc_coherent(&dev->dev,
		GPU_PAGE_SZ, &drv_data->ba.dummy.bus_addr, GFP_ATOMIC);
	if (!drv_data->ba.dummy.cpu_addr) {
		dev_err(&dev->dev, "unable to allocate dummy page\n");
		return -ENOMEM;
	}
	dev_info(&dev->dev, "dummy page bus addr '0x%Lx'\n",
						drv_data->ba.dummy.bus_addr);

	if (!IS_GPU_PAGE_ALIGNED(drv_data->ba.dummy.bus_addr)) {
		dev_err(&dev->dev,
			"dummy page bus addr not aligned on GPU page\n");
		dma_free_coherent(&dev->dev, GPU_PAGE_SZ,
						drv_data->ba.dummy.cpu_addr,
						drv_data->ba.dummy.bus_addr);
		return -ENOMEM;
	}

	/*
	 * FIXME: The first 256kB of the vram could be locked for vga. Then
	 * we have the scratch page. Put the page table entries after, but need to
	 * try to shift the scratch page and page table to 0 since vga was
	 * shutdown in mc_program, must test its removal, when something works.
	 */
	/* fill the whole aperture's page table with dummy page ptes */
	pte = drv_data->ba.dummy.bus_addr | PTE_VALID | PTE_SYSTEM
				| PTE_SNOOPED | PTE_READABLE | PTE_WRITEABLE;
	for (i = 0; i < ba_range_sz/GPU_PAGE_SZ; ++i)
		writeq(pte, drv_data->vram.bar0 + pt_start + i * sizeof(pte));
	wmb();	/* ptes must be in vram before the tlb flush below */

	ba_tlb_flush(dev);

	/* setup l2 cache */
	wr32(dev, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING
		| ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE
				| EFFECTIVE_L2_QUEUE_SIZE(7), VM_L2_CTL);
	wr32(dev, 0, VM_L2_CTL2);
	wr32(dev, BANK_SELECT(0) | CACHE_UPDATE_MODE(2), VM_L2_CTL3);

	/*
	 * setup tlb control/VM read/write clients 
	 * MD has "A" and "B", MB has SYS/HDP/DMA/SEM...
	 */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING
		| SYSTEM_ACCESS_MODE_NOT_IN_SYS
		| SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU
		| EFFECTIVE_L1_TLB_SZ(5) | EFFECTIVE_L1_QUEUE_SZ(5);

	wr32(dev, tmp, MC_VM_MD_L1_TLB0_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB1_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB2_CTL);

	wr32(dev, tmp, MC_VM_MB_L1_TLB0_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB1_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB2_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB3_CTL);

	/* ctx0 range, page table location, single-level page table */
	wr32(dev, drv_data->ba.ctx0.start, VM_CTX0_PT_START_ADDR);
	wr32(dev, drv_data->ba.ctx0.end, VM_CTX0_PT_END_ADDR);
	wr32(dev, ctx0_base, VM_CTX0_PT_BASE_ADDR);
	wr32(dev, ENABLE_CTX | PT_DEPTH(0)
			| RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, VM_CTX0_CTL);

	/* faulting accesses are redirected to the dummy page */
	wr32(dev, drv_data->ba.dummy.bus_addr,
					VM_CTX0_PROTECTION_FAULT_DEFAULT_ADDR);
	wr32(dev, 0, VM_CTX1_CTL);	/* ctx1 stays disabled */

	ba_tlb_flush(dev);
	return 0;
}

/*
 * Undo ba_init(): disable both vm contexts, put the l2 cache and the
 * l1 tlbs back in a non-translating configuration, then free the
 * dummy page.
 */
void ba_cleanup(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	u32 tmp;

	/* disable all tables */
	wr32(dev, 0, VM_CTX0_CTL);
	wr32(dev, 0, VM_CTX1_CTL);

	/* setup l2 cache (ENABLE_L2_CACHE deliberately left out) */
	wr32(dev, ENABLE_L2_FRAGMENT_PROCESSING | EFFECTIVE_L2_QUEUE_SIZE(7),
								VM_L2_CTL);
	wr32(dev, 0, VM_L2_CTL2);
	wr32(dev, BANK_SELECT(0) | CACHE_UPDATE_MODE(2), VM_L2_CTL3);

	/* setup tlb control/VM read/write clients, tlbs disabled */
	tmp = EFFECTIVE_L1_TLB_SZ(5) | EFFECTIVE_L1_QUEUE_SZ(5);
	wr32(dev, tmp, MC_VM_MD_L1_TLB0_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB1_CTL);
	wr32(dev, tmp, MC_VM_MD_L1_TLB2_CTL);

	wr32(dev, tmp, MC_VM_MB_L1_TLB0_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB1_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB2_CTL);
	wr32(dev, tmp, MC_VM_MB_L1_TLB3_CTL);

	drv_data = pci_get_drvdata(dev);

	/* release the dummy page allocated in ba_init() */
	dma_free_coherent(&dev->dev, GPU_PAGE_SZ,
					drv_data->ba.dummy.cpu_addr,
					drv_data->ba.dummy.bus_addr);
}

static int wb_map(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	gpu_addr_t __iomem *wb_pte;

	drv_data = pci_get_drvdata(dev);

	drv_data->ba.wb.cpu_addr = dma_alloc_coherent(&dev->dev, GPU_PAGE_SZ,
					&drv_data->ba.wb.bus_addr, GFP_ATOMIC);
	if (!drv_data->ba.wb.cpu_addr) {
		dev_err(&dev->dev, "unable to allocate write back page\n");
		return -ENOMEM;
	}
	dev_info(&dev->dev, "wb page bus addr '0x%Lx'\n",
						drv_data->ba.wb.bus_addr);
	if (!IS_GPU_PAGE_ALIGNED(drv_data->ba.wb.bus_addr)) {
		dev_err(&dev->dev,
				"wb page bus addr not aligned on gpu page\n");
		dma_free_coherent(&dev->dev, GPU_PAGE_SZ,
						drv_data->ba.wb.cpu_addr,
						drv_data->ba.wb.bus_addr);
		return -ENOMEM;
	}

	memset(drv_data->ba.wb.cpu_addr, 0, GPU_PAGE_SZ);

	wb_pte = (gpu_addr_t __iomem*)(drv_data->vram.bar0 + pt_start);
	writeq(drv_data->ba.wb.bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE, wb_pte);
	return 0;
}

/*
 * Point the wb pte back at the dummy page, then release the write
 * back page.
 */
static void wb_unmap(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);
	gpu_addr_t entry;

	entry = dd->ba.dummy.bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE;
	writeq(entry, (gpu_addr_t __iomem *)(dd->vram.bar0 + pt_start));

	dma_free_coherent(&dev->dev, GPU_PAGE_SZ, dd->ba.wb.cpu_addr,
						dd->ba.wb.bus_addr);
}

static int ih_ring_map(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	gpu_addr_t __iomem *pte;
	gpu_addr_t bus_addr;
	unsigned ptes_n;

	/* check size alignment on gpu page */
	if (((1 << IH_RING_LOG2_DWS) * 4) % GPU_PAGE_SZ) {
		dev_err(&dev->dev,
				"ih ring size not aligned on gpu page size\n");
		return -EINVAL;
	}

	drv_data = pci_get_drvdata(dev);

	drv_data->ba.ih_ring.cpu_addr = dma_alloc_coherent(&dev->dev,
						(1 << IH_RING_LOG2_DWS) * 4,
						&drv_data->ba.ih_ring.bus_addr,
						GFP_ATOMIC);
	if (!drv_data->ba.ih_ring.cpu_addr) {
		dev_err(&dev->dev, "unable to allocate ih ring\n");
		return -ENOMEM;
	}
	dev_info(&dev->dev, "ih ring page bus addr '0x%Lx'\n",
						drv_data->ba.ih_ring.bus_addr);
	if (!IS_GPU_PAGE_ALIGNED(drv_data->ba.ih_ring.bus_addr)) {
		dev_err(&dev->dev,
			"ih ring start bus addr not aligned on gpu page\n");
		dma_free_coherent(&dev->dev, (1 << IH_RING_LOG2_DWS) * 4,
						drv_data->ba.ih_ring.cpu_addr,
						drv_data->ba.ih_ring.bus_addr);
		return -ENOMEM;
	}

	/* skip wb page entry */
	pte = (gpu_addr_t __iomem*)(drv_data->vram.bar0 + pt_start) + 1;
	bus_addr = drv_data->ba.ih_ring.bus_addr;
	ptes_n = (1 << IH_RING_LOG2_DWS) * 4 / GPU_PAGE_SZ;

	while (ptes_n) {
		writeq(bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE, pte);
		++pte;
		bus_addr += GPU_PAGE_SZ;
		ptes_n--;
	}
	return 0;
}

/*
 * Point the ih ring ptes back at the dummy page, then release the
 * ring memory.
 */
static void ih_ring_unmap(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);
	gpu_addr_t __iomem *pte;
	gpu_addr_t entry;
	unsigned i;
	unsigned n;

	/* first pte belongs to the wb page, start one entry after it */
	pte = (gpu_addr_t __iomem *)(dd->vram.bar0 + pt_start) + 1;
	n = (1 << IH_RING_LOG2_DWS) * 4 / GPU_PAGE_SZ;
	entry = dd->ba.dummy.bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE;

	for (i = 0; i < n; ++i)
		writeq(entry, pte + i);

	dma_free_coherent(&dev->dev, (1 << IH_RING_LOG2_DWS) * 4,
						dd->ba.ih_ring.cpu_addr,
						dd->ba.ih_ring.bus_addr);
}

static int cp_ring_map(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	gpu_addr_t __iomem *pte;
	gpu_addr_t bus_addr;
	unsigned ptes_n;
	unsigned ptes_ih_n;

	/* check size alignment on gpu page */
	if (((1 << CP_RING_LOG2_QWS) * 8) % GPU_PAGE_SZ) {
		dev_err(&dev->dev,
				"cp ring size not aligned on gpu page size\n");
		return -EINVAL;
	}

	drv_data = pci_get_drvdata(dev);

	drv_data->ba.cp_ring.cpu_addr = dma_alloc_coherent(&dev->dev,
						(1 << CP_RING_LOG2_QWS) * 8,
						&drv_data->ba.cp_ring.bus_addr,
						GFP_ATOMIC);
	if (!drv_data->ba.cp_ring.cpu_addr) {
		dev_err(&dev->dev, "unable to allocate cp ring\n");
		return -ENOMEM;
	}
	dev_info(&dev->dev, "cp ring page bus addr '0x%Lx'\n",
						drv_data->ba.cp_ring.bus_addr);
	if (!IS_GPU_PAGE_ALIGNED(drv_data->ba.cp_ring.bus_addr)) {
		dev_err(&dev->dev,
			"cp ring start bus addr not aligned on gpu page\n");
		dma_free_coherent(&dev->dev, (1 << CP_RING_LOG2_QWS) * 8,
						drv_data->ba.cp_ring.cpu_addr,
						drv_data->ba.cp_ring.bus_addr);
		return -ENOMEM;
	}

	/* skip wb page entry and those for the ih ring */
	ptes_ih_n = (1 << IH_RING_LOG2_DWS) * 4 / GPU_PAGE_SZ;
	pte = (gpu_addr_t __iomem*)(drv_data->vram.bar0 + pt_start)
								+ 1 + ptes_ih_n;
	ptes_n = (1 << CP_RING_LOG2_QWS) * 8 / GPU_PAGE_SZ;
	bus_addr = drv_data->ba.cp_ring.bus_addr;

	while (ptes_n) {
		writeq(bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE, pte);
		++pte;
		bus_addr += GPU_PAGE_SZ;
		ptes_n--;
	}
	return 0;
}

/*
 * Point the cp ring ptes back at the dummy page, then release the
 * ring memory.
 */
static void cp_ring_unmap(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);
	gpu_addr_t __iomem *pte;
	gpu_addr_t entry;
	unsigned i;
	unsigned n;

	/* skip the wb page entry and those of the ih ring */
	pte = (gpu_addr_t __iomem *)(dd->vram.bar0 + pt_start) + 1
			+ (1 << IH_RING_LOG2_DWS) * 4 / GPU_PAGE_SZ;
	n = (1 << CP_RING_LOG2_QWS) * 8 / GPU_PAGE_SZ;
	entry = dd->ba.dummy.bus_addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED
					| PTE_READABLE | PTE_WRITEABLE;

	for (i = 0; i < n; ++i)
		writeq(entry, pte + i);

	dma_free_coherent(&dev->dev, (1 << CP_RING_LOG2_QWS) * 8,
						dd->ba.cp_ring.cpu_addr,
						dd->ba.cp_ring.bus_addr);
}

/*
 * Map, in order, the write back page, the ih ring and the cp ring into
 * vm ctx0 (each map fills the ptes right after the previous one's).
 *
 * On failure, only the resources that were successfully mapped are
 * unwound: a failed cp_ring_map() must unmap the ih ring and the wb
 * page, never the cp ring itself (it was never mapped).
 *
 * Returns 0 on success or the negative errno of the failing step.
 */
int ba_map(struct pci_dev *dev)
{
	int ret;

	/* order matters */
	ret = wb_map(dev);
	if (ret)
		return ret;

	ret = ih_ring_map(dev);
	if (ret)
		goto err_unmap_wb;

	ret = cp_ring_map(dev);
	if (ret)
		goto err_unmap_ih_ring;

	return 0;

err_unmap_ih_ring:
	ih_ring_unmap(dev);

err_unmap_wb:
	wb_unmap(dev);

	return ret;
}

/* Tear down the cp ring, ih ring and wb page — reverse of ba_map(). */
void ba_unmap(struct pci_dev *dev)
{
	cp_ring_unmap(dev);
	ih_ring_unmap(dev);
	wb_unmap(dev);
}
