/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/

#include <linux/pci.h>
#include <asm/byteorder.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>

#include <alga/rng_mng.h>
#include <alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/amd/atombios/atb.h>
#include <alga/amd/dce4/dce4.h>

#include "regs.h"

#include "ih.h"
#include "ba.h"
#include "ucode.h"
#include "cp.h"
#include "drv.h"

#include "pcie_ports.h"

#include "fops.h"

static struct class *class;

static struct atb_dev adev;
static struct dce4_dev ddev;

/* those functions exist to deal with regs above regs range */
/*
 * Read a 32 bit register at byte offset 'of'. Offsets beyond the mapped
 * mmio range are reached indirectly: latch the offset at mmio+0 and
 * read the value back from mmio+4.
 *
 * BUGFIX: readl() already converts the device's little-endian value to
 * cpu order; the original additionally applied le32_to_cpu(), which is
 * a no-op on little-endian hosts but a second (wrong) byte swap on
 * big-endian ones.
 */
u32 rr32(struct pci_dev *dev, unsigned of)
{
	struct dev_drv_data *dd;
	u32 val;

	dd = pci_get_drvdata(dev);

	if (of <= (dd->regs_sz - sizeof(u32))) {
		val = readl(dd->regs + of);
	} else {
		/* indirect access for registers above the mmio range */
		writel(of, dd->regs + 0);
		val = readl(dd->regs + 4);
	}
	return val;
}

/*
 * Write a 32 bit register at byte offset 'of'. Offsets beyond the
 * mapped mmio range are reached indirectly through the mmio+0/mmio+4
 * index/data pair.
 *
 * BUGFIX: writel() already converts from cpu order to the device's
 * little-endian order; the original additionally applied cpu_to_le32(),
 * which would double-swap on big-endian hosts. Note the original's
 * indirect path already wrote the index with a plain writel(), so this
 * also makes both paths consistent.
 */
void wr32(struct pci_dev *dev, u32 val, unsigned of)
{
	struct dev_drv_data *dd;
	dd = pci_get_drvdata(dev);

	if (of <= (dd->regs_sz - sizeof(u32))) {
		writel(val, dd->regs + of);
	} else {
		/* indirect access for registers above the mmio range */
		writel(of, dd->regs + 0);
		writel(val, dd->regs + 4);
	}
}

/*
 * struct device based register read handed to the atombios/dce helper
 * libraries (adev.rr32 / ddev.rr32). Thin wrapper around rr32().
 * (Removed an unused local and its dead pci_get_drvdata() call.)
 */
static u32 extern_rr32(struct device *dev, unsigned of)
{
	struct pci_dev *pdev;

	pdev = container_of(dev, struct pci_dev, dev);
	return rr32(pdev, of);
}

/*
 * struct device based register write handed to the atombios/dce helper
 * libraries (adev.wr32 / ddev.wr32). Thin wrapper around wr32().
 * (Removed an unused local and its dead pci_get_drvdata() call.)
 */
static void extern_wr32(struct device *dev, u32 val, unsigned of)
{
	struct pci_dev *pdev;

	pdev = container_of(dev, struct pci_dev, dev);
	wr32(pdev, val, of);
}

/*
 * Read a pcie port register through the PCIE_PORT_INDEX/PCIE_PORT_DATA
 * indirect pair. The read back of PCIE_PORT_INDEX posts the index
 * write before the data register is sampled.
 */
static u32 pcie_port_rreg(struct pci_dev *dev, u32 reg)
{
	u32 r;

	wr32(dev, reg & 0xff, PCIE_PORT_INDEX);
	rr32(dev, PCIE_PORT_INDEX);	/* posting read */
	r = rr32(dev, PCIE_PORT_DATA);
	return r;
}

/*
 * Write a pcie port register through the PCIE_PORT_INDEX/PCIE_PORT_DATA
 * indirect pair. Each write is followed by a posting read.
 */
static void pcie_port_wreg(struct pci_dev *dev, u32 reg, u32 v)
{
	wr32(dev, reg & 0xff, PCIE_PORT_INDEX);
	rr32(dev, PCIE_PORT_INDEX);	/* posting read */
	wr32(dev, v, PCIE_PORT_DATA);
	rr32(dev, PCIE_PORT_DATA);	/* posting read */
}

/*
 * Return a kernel-memory copy of the video rom. Propagates
 * pci_map_rom()'s NULL/ERR_PTR result unchanged, and returns
 * ERR_PTR(-ENOMEM) when the copy cannot be allocated. The rom mapping
 * is always released before returning; the caller owns (and must
 * kfree) the returned copy.
 */
static void * __devinit rom_copy_get(struct pci_dev *dev)
{
	size_t sz;
	void __iomem *rom_map;
	void *copy;

	rom_map = pci_map_rom(dev, &sz);
	if (IS_ERR_OR_NULL(rom_map))
		return rom_map;

	copy = kzalloc(sz, GFP_KERNEL);
	if (copy)
		memcpy_fromio(copy, rom_map, sz);

	pci_unmap_rom(dev, rom_map);
	return copy ? copy : ERR_PTR(-ENOMEM);
}

/*
 * Put all interrupt sources into a known (mostly disabled) state:
 * CP ring interrupts, GRBM sources, and the display engine's.
 */
static void intrs_reset(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	dd = pci_get_drvdata(dev);

	/* works even if ucode is not loaded */
	wr32(dev, CNTX_BUSY_INT_ENA | CNTX_EMPTY_INT_ENA, CP_INT_CTL);

	/* mask every GRBM interrupt source */
	wr32(dev, 0, GRBM_INT_CTL);

	dce4_intrs_reset(dd->dce);
}

/* enable PCIE 2.0 */
/*
 * Enable PCIE 2.0 (gen2) signalling on the link, but only when the
 * other end of the link has advertised (or ever sent) gen2 support.
 * The LC_SPEED_CTL register is modified with separate read-modify-write
 * steps; the sequence order matters, do not coalesce the writes.
 */
static void pcie_gen2_ena(struct pci_dev *dev)
{
	u32 link_width_ctl;
	u32 speed_ctl;

	/* bail out unless the link partner supports gen2 */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	if (!(speed_ctl & LC_OTHER_SIDE_EVER_SENT_GEN2)
				&& !(speed_ctl & LC_OTHER_SIDE_SUPPORTS_GEN2))
		return;

	/* allow dynamic link width reconfiguration */
	link_width_ctl = pcie_port_rreg(dev, PCIE_LC_LINK_WIDTH_CTL);
	link_width_ctl &= ~LC_UPCONFIGURE_DIS;
	pcie_port_wreg(dev, PCIE_LC_LINK_WIDTH_CTL, link_width_ctl);

	/* let the hardware negotiate the speed itself */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);

	/* pulse the failed-speed-change counter clear bit (set...) */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);

	/* (...then clear) */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);

	/* finally strap gen2 on */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl |= LC_GEN2_EN_STRAP;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);
}

static int mc_wait_for_idle(struct pci_dev *dev)
{
	unsigned i;

	/* 100000 us = 100 ms */	
	for (i = 0; i < 100000; ++i) {
		if (!(rr32(dev, SRBM_STATUS) & MC_STATUS_MASK))
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * Program the memory controller: system aperture over the whole vram
 * range, default (scratch) page for stray accesses, vram location in
 * the gpu address space, the HDP (host data path) aperture backing pci
 * bar 0, and a disabled agp aperture. Memory clients must be quiet
 * while this runs (see caller: soft_reset/dce4_mem_req beforehand).
 * Returns 0 on success or a negative value when the scratch page
 * cannot be allocated.
 */
static int mc_program(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	int i;
	int j;
	u32 tmp;
	u64 vram_start;
	u64 vram_end;
	int r;

	dd = pci_get_drvdata(dev);

	vram_start = dd->vram.mng.s;
	vram_end = vram_start + dd->vram.mng.sz - 1;

	/* initialize hdp */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		wr32(dev, 0x00000000, 0x2c14 + j);
		wr32(dev, 0x00000000, 0x2c18 + j);
		wr32(dev, 0x00000000, 0x2c1c + j);
		wr32(dev, 0x00000000, 0x2c20 + j);
		wr32(dev, 0x00000000, 0x2c24 + j);
	}
	wr32(dev, 0, HDP_REG_COHERENCY_FLUSH_CTL);

	/* the mc must be idle before reprogramming; warn but carry on */
	if (mc_wait_for_idle(dev))
		dev_warn(&dev->dev, "mc_program:stop:"
					" wait for mc idle timed out\n");

	/* system aperture covers all of vram (gpu page indices) */
	wr32(dev, GPU_PAGE_IDX(vram_start), MC_VM_SYS_APER_LOW_ADDR);
	wr32(dev, GPU_PAGE_IDX(vram_end), MC_VM_SYS_APER_HIGH_ADDR);

	/* scratch gpu page */
	r = rng_alloc_align(&dd->vram.scratch_page, &dd->vram.mng, GPU_PAGE_SZ,
								GPU_PAGE_SZ);
	if (r != 0) {
		dev_err(&dev->dev, "unable to alloc GPU scratch page\n");
		return r;
	}
	/* out-of-aperture accesses are redirected to the scratch page */
	wr32(dev, GPU_PAGE_IDX(dd->vram.scratch_page),
						MC_VM_SYS_APER_DEFAULT_ADDR);

	/*
	 * MC_VRAM_LOCATION:
	 *   [15:8] last gpu address 16MB block index 
	 *   [7:0]  first gpu address 16MB block index
	 */
	tmp = ((vram_end >> 24) & 0xffff) << 16;
	tmp |= ((vram_start >> 24) & 0xffff);
	wr32(dev, tmp, MC_VRAM_LOCATION);

	/*
	 * maps pci bar 0: use the 256 kB block index of the gpu start
	 * address for vram. 1 GB of gpu address space.
	 */
	wr32(dev, vram_start >> 8, HDP_NONSURFACE_BASE);
	wr32(dev, (2 << 7) | (1 << 30), HDP_NONSURFACE_INFO);
	wr32(dev, 0x3fffffff, HDP_NONSURFACE_SZ);

	/* no agp aperture in physical addresses */
	wr32(dev, 0, MC_AGP_BASE);
	wr32(dev, 0x0fffffff, MC_AGP_TOP);
	wr32(dev, 0x0fffffff, MC_AGP_BOT);

	if (mc_wait_for_idle(dev))
		dev_warn(&dev->dev, "mc_program:start:"
					" wait for mc idle timed out\n");
	return 0;
}

/*
 * Map the two pci bars: bar 2 holds the register block (uncached
 * mapping), bar 0 is the vram aperture (write-combined mapping).
 * Returns 0 on success, -ENOMEM when either mapping fails; on failure
 * nothing is left mapped.
 */
static int __devinit bars_map(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	dd = pci_get_drvdata(dev);

	dd->regs = ioremap_nocache(pci_resource_start(dev, 2),
						pci_resource_len(dev, 2));
	if (!dd->regs)
		return -ENOMEM;
	/* remembered so rr32/wr32 can route out-of-range offsets */
	dd->regs_sz = pci_resource_len(dev, 2);

	dev_info(&dev->dev, "regs mmio base: 0x%p\n", dd->regs);
	dev_info(&dev->dev, "regs mmio size: %zu\n",
					(size_t)pci_resource_len(dev, 2));

	/* write-combined: vram is framebuffer-like, streaming writes */
	dd->vram.bar0 = ioremap_wc(pci_resource_start(dev, 0),
						pci_resource_len(dev, 0));
	if (!dd->vram.bar0) {
		iounmap(dd->regs);
		return -ENOMEM;
	}
	dev_info(&dev->dev, "vram mmio base: 0x%p\n", dd->vram.bar0);
	dev_info(&dev->dev, "vram mmio size: %zu\n",
					(size_t)pci_resource_len(dev, 0));
	return 0;
}

/* undo bars_map(): release the vram and register mmio mappings */
static void bars_unmap(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);

	iounmap(dd->regs);
	iounmap(dd->vram.bar0);
}

/*
 * Fill dd->cfg with per-asic-family constants: tiling/address
 * configuration, crtc count, shader engine and render backend counts,
 * sx export buffer sizes and sc fifo sizes.
 *
 * ROBUSTNESS: added a default case — the original switch silently left
 * dd->cfg zeroed for an unknown family, which would misprogram the gpu
 * later with no diagnostic.
 */
static void cfg_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;

	dd = pci_get_drvdata(dev);

	switch (dd->family) {
	case CYPRESS:
	case HEMLOCK:
		dd->cfg.addr_best = 0x02011003;
		dd->cfg.dce_crtcs_n = 6;

		dd->cfg.gpu_ses_n = 2;
		dd->cfg.gpu_hw_ctxs_n_max = 8;
		dd->cfg.gpu_rbs_n_max = 4 * dd->cfg.gpu_ses_n;

		dd->cfg.gpu_sx_sets_n = 4;
		dd->cfg.gpu_sx_export_sz_max = 256;
		dd->cfg.gpu_sx_export_pos_sz_max = 64;
		dd->cfg.gpu_sx_export_smx_sz_max = 192;

		dd->cfg.gpu_sc_prim_fifo_sz = 0x100;
		dd->cfg.gpu_sc_hiz_tile_fifo_sz = 0x30;
		dd->cfg.gpu_sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case JUNIPER:
		dd->cfg.addr_best = 0x02010002;
		dd->cfg.dce_crtcs_n = 6;

		dd->cfg.gpu_ses_n = 1;
		dd->cfg.gpu_hw_ctxs_n_max = 8;
		dd->cfg.gpu_rbs_n_max = 4 * dd->cfg.gpu_ses_n;
		dd->cfg.gpu_sx_sets_n = 4;

		dd->cfg.gpu_sx_export_sz_max = 256;
		dd->cfg.gpu_sx_export_pos_sz_max = 64;
		dd->cfg.gpu_sx_export_smx_sz_max = 192;

		dd->cfg.gpu_sc_prim_fifo_sz = 0x100;
		dd->cfg.gpu_sc_hiz_tile_fifo_sz = 0x30;
		dd->cfg.gpu_sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case REDWOOD:
		dd->cfg.addr_best = 0x02010002;
		dd->cfg.dce_crtcs_n = 6;

		dd->cfg.gpu_hw_ctxs_n_max = 8;
		dd->cfg.gpu_ses_n = 1;
		dd->cfg.gpu_rbs_n_max = 2 * dd->cfg.gpu_ses_n;

		dd->cfg.gpu_sx_sets_n = 4;
		dd->cfg.gpu_sx_export_sz_max = 256;
		dd->cfg.gpu_sx_export_pos_sz_max = 64;
		dd->cfg.gpu_sx_export_smx_sz_max = 192;

		dd->cfg.gpu_sc_prim_fifo_sz = 0x100;
		dd->cfg.gpu_sc_hiz_tile_fifo_sz = 0x30;
		dd->cfg.gpu_sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case CEDAR:
		dd->cfg.addr_best = 0x02010001;
		dd->cfg.dce_crtcs_n = 4;

		dd->cfg.gpu_hw_ctxs_n_max = 4;
		dd->cfg.gpu_ses_n = 2;
		dd->cfg.gpu_rbs_n_max = 1 * dd->cfg.gpu_ses_n;

		dd->cfg.gpu_sx_sets_n = 4;
		dd->cfg.gpu_sx_export_sz_max = 128;
		dd->cfg.gpu_sx_export_pos_sz_max = 32;
		dd->cfg.gpu_sx_export_smx_sz_max = 96;

		dd->cfg.gpu_sc_prim_fifo_sz = 0x40;
		dd->cfg.gpu_sc_hiz_tile_fifo_sz = 0x30;
		dd->cfg.gpu_sc_earlyz_tile_fifo_sz = 0x130;
		break;
	default:
		dev_warn(&dev->dev, "cfg_init: unknown asic family %lu,"
					" configuration left zeroed\n",
					(unsigned long)dd->family);
		break;
	}
}

/*
 * (Re)initialize the host data path after mc programming: clear the
 * hdp tile/surface registers, program the address configuration and
 * force cache invalidation on flush. The HDP_HOST_PATH_CTL
 * read-then-write-back acts as a flush of that register path.
 * NOTE(review): the clear loop duplicates the one in mc_program() —
 * presumably intentional (runs after the mc was reprogrammed).
 */
static void hdp_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	int i;
	int j;
	u32 hdp_misc_ctl;
	u32 hdp_host_path_ctl;

	dd = pci_get_drvdata(dev);

	/* clear the 32 hdp tile/surface register groups (0x18 apart) */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		wr32(dev, 0x00000000, 0x2c14 + j);
		wr32(dev, 0x00000000, 0x2c18 + j);
		wr32(dev, 0x00000000, 0x2c1c + j);
		wr32(dev, 0x00000000, 0x2c20 + j);
		wr32(dev, 0x00000000, 0x2c24 + j);
	}

	wr32(dev, dd->cfg.addr_best, HDP_ADDR_CFG);

	hdp_misc_ctl = rr32(dev, HDP_MISC_CTL);
	hdp_misc_ctl |= HDP_FLUSH_INVALIDATE_CACHE;
	wr32(dev, hdp_misc_ctl, HDP_MISC_CTL);

	/* write back unchanged: posts/settles the host path config */
	hdp_host_path_ctl = rr32(dev, HDP_HOST_PATH_CTL);
	wr32(dev, hdp_host_path_ctl, HDP_HOST_PATH_CTL);

	udelay(50);
}

/*
 * Soft-reset every gfx block through GRBM. The command processor is
 * stopped first; the GRBM_SOFT_RESET reads after each write are
 * posting reads, and the 50 us delays let the reset assert/deassert
 * settle.
 */
static void soft_reset(struct pci_dev *dev)
{
	u32 grbm_reset;

	cp_stop(dev);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	/* assert reset... */
	wr32(dev, grbm_reset, GRBM_SOFT_RESET);
	rr32(dev, GRBM_SOFT_RESET);
	udelay(50);
	/* ...then release it */
	wr32(dev, 0, GRBM_SOFT_RESET);
	rr32(dev, GRBM_SOFT_RESET);
	udelay(50);
}

/*
 * Return the number of bits set in v.
 * Uses Kernighan's method: v &= v - 1 clears the lowest set bit, so
 * the loop runs once per set bit instead of a fixed 32 iterations.
 * (Equivalent to the kernel's hweight32(); kept open-coded to avoid
 * widening this file's include surface.)
 */
static unsigned bits_set_count(u32 v)
{
	unsigned n;

	for (n = 0; v; ++n)
		v &= v - 1;
	return n;
}

/*
 * Each quartet of map dword targets a rendering *pipe*.
 * The content of that quartet is the rendering *backend* index.
 * Rendering *pipe* quartets are "left justified" in the map dword.
 */
#define GPU_GENERATION_PIPES_N_MAX 8
/*
 * Build the GB_BACKEND_MAP value distributing the rendering pipes over
 * the enabled rendering backends (rbs).
 *
 * @map: out parameter, receives the packed map (one quartet per pipe,
 *       content is the backend index, left justified — see comment
 *       above).
 * @tiling_pipes_n: log2 of the rendering pipe count (from addr_best).
 * @rbs_n_max: number of backends this asic family can have.
 * @rbs_dis: bitmask of disabled backends (from the efuse straps).
 *
 * Returns 0 on success, or a negative value when there are fewer
 * pipes than backends.
 * NOTE(review): -ERR is presumably a project-local errno macro from
 * drv.h — confirm it expands to a sensible negative value.
 */
static int rbs_remap(struct pci_dev *dev, u32 *map, u32 tiling_pipes_n,
						u32 rbs_n_max, u32 rbs_dis)
{
	u32 rendering_pipes_n;
	u32 rb_idx_width;
	u32 rb_idx;
	u32 req_rbs_n;
	u32 rendering_pipes_rbs_q;
	u32 rendering_pipes_rbs_r;
	u32 mask;
	unsigned i;
	unsigned j;

	/* mask out the rbs that don't exist on that asic to count proper */
	rbs_dis |= (0xff << rbs_n_max) & 0xff;
	req_rbs_n = GPU_GENERATION_PIPES_N_MAX - bits_set_count(rbs_dis);

	/* tiling_pipes_n is a log2 value */
	rendering_pipes_n = 1 << tiling_pipes_n;

	if (rendering_pipes_n < req_rbs_n) {
		dev_warn(&dev->dev, "gpu: less rendering pipes (%u) than"
				" gpu generation rendering backends (%u)\n",
						rendering_pipes_n, req_rbs_n);
		return -ERR;
	}

	/* pipes per backend: quotient, plus one extra for the first
	   'remainder' backends */
	rendering_pipes_rbs_q = rendering_pipes_n / req_rbs_n;
	rendering_pipes_rbs_r = rendering_pipes_n % req_rbs_n;

	*map = 0;
	mask = 1 << (rbs_n_max - 1);
	rb_idx_width = 4;	/* one quartet per pipe */

	/* walk backends from highest index down, skipping disabled ones */
	for (i = 0; i < rbs_n_max; ++i) {
		if (!(mask & rbs_dis)) {
			for (j = 0; j < rendering_pipes_rbs_q; ++j) {
				*map <<= rb_idx_width;
				rb_idx = rbs_n_max - i -1;
				*map |= rb_idx;
			}
			if (rendering_pipes_rbs_r) {
				/* spread the remainder pipes too */
				*map <<= rb_idx_width;
				rb_idx = rbs_n_max - i - 1;
				*map |= rb_idx;
				rendering_pipes_rbs_r--;
			}
		}
		mask >>= 1;
	}
	return 0;
}

/* gpu registers which need to be inited only once */
static int gpu_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 efuse_straps_4;
	u32 efuse_straps_3;
	u32 rbs_dis;
	u32 tiling_pipes_n;
	u32 map;
	u32 sx_debug_1;
	u32 smx_dc_ctl_0;
	u32 vgt_cache_invalidation;
	int i;
	int r;

	dd = pci_get_drvdata(dev);

	wr32(dev, SET(GRBM_RD_TIMEOUT, 0xff), GRBM_CTL);

	/* select all "instances"(???), all shader engines */
	wr32(dev, INST_BROADCAST_WRS | SE_BROADCAST_WRS, GRBM_GFX_IDX);
	wr32(dev, INST_BROADCAST_WRS | SE_BROADCAST_WRS, RLC_GFX_IDX);

	wr32(dev, dd->cfg.addr_best, GB_ADDR_CFG);

	/* workaround on evergreen chips to get the actual render backend map */
	wr32(dev, 0x204, RCU_IDX);
	efuse_straps_4 = rr32(dev, RCU_DATA);

	wr32(dev, 0x203, RCU_IDX);
	efuse_straps_3 = rr32(dev, RCU_DATA);
	rbs_dis = ((efuse_straps_4 & 0xf) << 4)
					| ((efuse_straps_3 & 0xf0000000) >> 28);

	tiling_pipes_n = dd->cfg.addr_best & PIPES_N_MASK;
	r = rbs_remap(dev, &map, tiling_pipes_n, dd->cfg.gpu_rbs_n_max,
								rbs_dis);
	if (r != 0)
		return r;
	wr32(dev, map, GB_BACKEND_MAP);

	/* cgts (related to timestamping) primary */
	wr32(dev, 0, CGTS_SYS_TCC_DIS);
	wr32(dev, 0, CGTS_TCC_DIS);
	/* cgts (related to timestamping) user */
	wr32(dev, 0, CGTS_USER_SYS_TCC_DIS);
	wr32(dev, 0, CGTS_USER_TCC_DIS);

	wr32(dev, DIS_CUBE_ANISO | SYNC_GRADIENT | SYNC_WALKER
						| SYNC_ALIGNER, TA_CTL_AUX);

	sx_debug_1 = rr32(dev, SX_DEBUG_1);
	sx_debug_1 |= ENA_NEW_SMX_ADDR;
	wr32(dev, SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl_0 = rr32(dev, SMX_DC_CTL_0);
	smx_dc_ctl_0 &= ~SETS_N_MASK;
	smx_dc_ctl_0 |= SET(SETS_N, dd->cfg.gpu_sx_sets_n);
	wr32(dev, smx_dc_ctl_0, SMX_DC_CTL_0);

	wr32(dev, 0x00010000, SMX_SAR_CTL_0);

	wr32(dev, SET(COLOR_BUF_SZ, (dd->cfg.gpu_sx_export_sz_max / 4) - 1)
		| SET(POS_BUF_SZ, (dd->cfg.gpu_sx_export_pos_sz_max / 4) - 1)
		| SET(SMX_BUF_SZ, (dd->cfg.gpu_sx_export_smx_sz_max / 4) - 1),
							SX_EXPORT_BUF_SZS);

	wr32(dev, SET(SC_PRIM_FIFO_SZ, dd->cfg.gpu_sc_prim_fifo_sz)
		| SET(SC_HIZ_TILE_FIFO_SZ, dd->cfg.gpu_sc_hiz_tile_fifo_sz)
		| SET(SC_EARLYZ_TILE_FIFO_SZ,
			dd->cfg.gpu_sc_earlyz_tile_fifo_sz), PA_SC_FIFO_SZ);


	wr32(dev, 0, SPI_CFG_CTL_0);
	wr32(dev, SET(VTX_DONE_DELAY, 4), SPI_CFG_CTL_1);

	wr32(dev, 1, VGT_INSTS_N);

	switch (dd->family) {
	case CEDAR:
		vgt_cache_invalidation = SET(CACHE_INVALIDATION, TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = SET(CACHE_INVALIDATION, VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= SET(AUTO_INVALIDATION_ENA, ES_AND_GS_AUTO);
	wr32(dev, vgt_cache_invalidation, VGT_CACHE_INVALIDATION);

	wr32(dev, 16, VGT_GS_VTX_REUSE);
	wr32(dev, 0xe, VGT_VTX_REUSE_BLK_CTL);
	wr32(dev, 0x10, VGT_OUT_DEALLOC_CTL);

	wr32(dev, 0, PA_SU_LINE_STIPPLE_VALUE);
	wr32(dev, 0, PA_SC_LINE_STIPPLE_STATE);

	wr32(dev, 0, CB_PERF_CTR_0_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_0_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_1_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_1_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_2_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_2_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_3_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_3_SEL_1);

	/* XXX: should not be here since it's programmed by the accel code */
	wr32(dev, 0, CB_COLOR_0_BASE);
	wr32(dev, 0, CB_COLOR_1_BASE);
	wr32(dev, 0, CB_COLOR_2_BASE);
	wr32(dev, 0, CB_COLOR_3_BASE);
	wr32(dev, 0, CB_COLOR_4_BASE);
	wr32(dev, 0, CB_COLOR_5_BASE);
	wr32(dev, 0, CB_COLOR_6_BASE);
	wr32(dev, 0, CB_COLOR_7_BASE);
	wr32(dev, 0, CB_COLOR_8_BASE);
	wr32(dev, 0, CB_COLOR_9_BASE);
	wr32(dev, 0, CB_COLOR_A_BASE);
	wr32(dev, 0, CB_COLOR_B_BASE);

	/*
	 * set the shader const cache sizes to 0 fol pixel shader
	 * nad hull shader
	 */
	for (i = SQ_ALU_CONST_BUF_SZ_PS_0; i < 0x28200; i += 4)
		wr32(dev, 0, i);
	for (i = SQ_ALU_CONST_BUF_SZ_HS_0; i < 0x29000; i += 4)
		wr32(dev, 0, i);

	wr32(dev, CLIP_VTX_REORDER_ENA | SET(CLIP_SEQ_N, 3), PA_CL_ENHANCE);
	return 0;
}

/* threaded half of the interrupt handler: runs the dce work */
static irqreturn_t irq_thd(int irq, void *dev_id)
{
	struct pci_dev *pdev = dev_id;
	struct dev_drv_data *dd = pci_get_drvdata(pdev);

	dce4_irqs_thd(dd->dce);
	return IRQ_HANDLED;
}

/* hard irq handler: parse the ih ring, wake the thread when needed */
static irqreturn_t irq(int irq, void *dev_id)
{
	struct pci_dev *pdev = dev_id;

	/* TODO: should return IRQ_HANDLED in some cases ? */
	return ih_parse(pdev) ? IRQ_WAKE_THREAD : IRQ_NONE;
}

/*
 * PCI probe: bring the asic from cold to fully running and create the
 * userland char device. The error path is a single goto-cleanup chain
 * that unwinds everything acquired so far, in reverse order.
 */
static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err;
	struct dev_drv_data *dd;

	err = pci_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "cannot enable the device\n");
		return err;
	}

	err = pci_enable_msi(dev);
	if (err) {
		dev_err(&dev->dev, "cannot enable MSI\n");
		goto err_dis_pdev;
	}

	/* the asic can address 40 bits of dma */
	err = dma_set_mask(&dev->dev, DMA_BIT_MASK(40));
	if (err) {
		dev_err(&dev->dev, "cannot set DMA address width to 40 bits\n");
		goto err_dis_msi;
	}
	/* NOTE(review): return value ignored — cannot fail once the
	   streaming mask above succeeded, but worth confirming */
	dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(40));

	err = pci_request_regions(dev, pci_name(dev));
	if (err != 0) {
		goto err_dis_msi;
	}

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		dev_err(&dev->dev, "cannot allocate driver private data for"
								" device\n");
		err = -ENOMEM;
		goto err_release_pci_regions;
	}

	pci_set_drvdata(dev, dd);

	dd->dev = dev;	/* only used for char device node */
	dd->family = id->driver_data;

	/* per-family constants, needed before any register programming */
	cfg_init(dev);

	adev.rom = rom_copy_get(dev);
	if (IS_ERR_OR_NULL(adev.rom)) {
		if (IS_ERR(adev.rom))
			err = PTR_ERR(adev.rom);
		else
			err = -EIO;
		dev_err(&dev->dev, "cannot copy the rom\n");
		goto err_free_drv_data;
	}

	err = bars_map(dev);
	if (err)
		goto err_free_rom_copy;

	/* hand the register accessors to the atombios interpreter */
	adev.dev = &dev->dev;
	adev.rr32 = extern_rr32;
	adev.wr32 = extern_wr32;

	dd->atb = atb_alloc(&adev);
	if (!dd->atb) {
		dev_err(&dev->dev, "cannot allocate atombios\n");
		err = -ENOMEM;
		goto err_bars_unmap;
	}

	err = atb_init(dd->atb);
	if (err) {
		dev_err(&dev->dev, "cannot init the atombios\n");
		goto err_free_atb;
	}

	/* hand the register accessors to the display engine code */
	ddev.dev = &dev->dev;
	ddev.atb = dd->atb;
	ddev.crtcs_n = dd->cfg.dce_crtcs_n;
	ddev.rr32 = extern_rr32;
	ddev.wr32 = extern_wr32;

	dd->dce = dce4_alloc(&ddev);

	if (!dd->dce) {
		dev_err(&dev->dev, "cannot allocate dce\n");
		err = -ENOMEM;
		goto err_cleanup_atb;
	}

	/* CFG_MEM_SZ reads 0 until the asic has been posted */
	if (!rr32(dev, CFG_MEM_SZ)) {
		dev_info(&dev->dev, "posting now...\n");
		err = atb_asic_init(dd->atb);
		if (err) {
			dev_err(&dev->dev, "atombios failed to init the "
								"asic\n");
			goto err_dce_free;
		}
	} else {
		dev_info(&dev->dev, "already posted\n");
	}
	/* claim back the 256k vga memory at vram beginning */
	dce4_vga_off(dd->dce);

	/* CFG_MEM_SZ is now valid */
	dev_info(&dev->dev, "vram size is %uMB\n", rr32(dev, CFG_MEM_SZ));

	rng_mng_init(&dd->vram.mng, 0, rr32(dev, CFG_MEM_SZ) * 1024 * 1024);

	/* quiesce the rings and interrupts before hooking the irq */
	cp_stop(dev);
	ih_stop(dev);
	intrs_reset(dev);

	err = request_threaded_irq(dev->irq, irq, irq_thd, 0, pci_name(dev),
								(void*)dev);
	if (err) {
		dev_err(&dev->dev, "unable to request threaded irq\n");
		goto err_vram_mng_destroy;
	}

	pcie_gen2_ena(dev);

	err = ucode_load(dev);
	if (err)
		goto err_free_irq;

	/* quiet the memory requests before mc programming: GPU and DCE */
	if (rr32(dev, GRBM_STATUS) & GUI_ACTIVE)
		soft_reset(dev);

	err = dce4_mem_req(dd->dce, false);
	if (err)
		goto err_release_firmware;

	err = mc_program(dev);
	if (err)
		goto err_release_firmware;

	hdp_init(dev);

	err = ba_init(dev);
	if (err)
		goto err_release_firmware;

	err = ba_map(dev); /* map wb page, ih ring, cp ring */
	if (err)
		goto err_ba_shutdown;

	err = dce4_init(dd->dce, dd->cfg.addr_best);
	if (err)
		goto err_ba_unmap;

	err = gpu_init(dev);
	if (err)
		goto err_ba_unmap;

	/* load the microcode and start the rings */
	ucode_rlc_program(dev);
	ih_init(dev);

	pci_set_master(dev);

	ih_start(dev);

	ucode_cp_program(dev);
	cp_init(dev);
	cp_me_init(dev);

	dce4_hpds_intr_ena(dd->dce);

	/* userland interface setup */
	/* NOTE(review): 'devt' is declared elsewhere (presumably a
	   header) — confirm */
	cdev_init(&dd->evergreen_cdev, &fops);
	err = cdev_add(&dd->evergreen_cdev, devt, 1);
	if (err) {
		dev_err(&dev->dev, "cannot add register char device\n");
		goto err_clr_master;
	}

	dd->evergreen_dev = device_create(class, &dev->dev,
				dd->evergreen_cdev.dev, NULL, "evergreen0");
	if (IS_ERR(dd->evergreen_dev)) {
		dev_err(&dev->dev, "cannot create userspace char device\n");
		goto err_cdev_del;
	}

	dev_info(&dev->dev, "ready\n");
	return 0;

	/* error unwinding: later labels fall through to earlier ones,
	   releasing resources in reverse acquisition order */
err_cdev_del:
	cdev_del(&dd->evergreen_cdev);

err_clr_master:
	pci_clear_master(dev);
	cp_stop(dev);
	ih_stop(dev);
	dce4_shutdown(dd->dce);

err_ba_unmap:
	ba_unmap(dev);

err_ba_shutdown:
	ba_shutdown(dev);

err_release_firmware:
	ucode_release(dev);

err_free_irq:
	free_irq(dev->irq, (void*)dev);

err_vram_mng_destroy:
	rng_mng_destroy(&dd->vram.mng);

err_dce_free:
	kfree(dd->dce);

err_cleanup_atb:
	atb_cleanup(dd->atb);

err_free_atb:
	kfree(dd->atb);

err_bars_unmap:
	bars_unmap(dev);

err_free_rom_copy:
	kfree(adev.rom);

err_free_drv_data:
	pci_set_drvdata(dev, NULL);
	kfree(dd);

err_release_pci_regions:
	pci_release_regions(dev);

err_dis_msi:
	pci_disable_msi(dev);

err_dis_pdev:
	pci_disable_device(dev);
	return err;
}

/*
 * PCI remove: tear everything down in the reverse order of probe()
 * (mirrors probe's full error unwinding chain).
 */
static void __devexit remove(struct pci_dev *dev)
{
	struct dev_drv_data *dd;

	dd = pci_get_drvdata(dev);

	/* remove userland interface */
	device_destroy(class, dd->evergreen_cdev.dev);
	cdev_del(&dd->evergreen_cdev);

	/* quiesce the hardware before freeing anything it may touch */
	pci_clear_master(dev);
	cp_stop(dev);
	ih_stop(dev);
	dce4_shutdown(dd->dce);
	kfree(dd->dce);
	free_irq(dev->irq, (void*)dev);

	ba_unmap(dev);

	ba_shutdown(dev);

	ucode_release(dev);

	rng_mng_destroy(&dd->vram.mng);
	atb_cleanup(dd->atb);
	kfree(dd->atb);
	kfree(adev.rom);

	bars_unmap(dev);

	pci_release_regions(dev);
	pci_disable_msi(dev);
	pci_disable_device(dev);

	pci_set_drvdata(dev, NULL);
	kfree(dd);
}

/* NOTE(review): empty pm ops, currently unused (driver.pm = NULL) */
static const struct dev_pm_ops pm_ops;

/* FIXME: hardcoded for now */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) =
{
	/* driver_data carries the asic family (see probe) */
	{PCI_VENDOR_ID_ATI, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, JUNIPER},
	{}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);

static struct pci_driver pci_driver =
{
	.name = "AMD evergreen PCI driver",
	.id_table = pci_tbl,
	.probe = probe,
	.remove = __devexit_p(remove),
	.driver.pm = NULL
};

/*
 * Module init: create the device class, reserve one char device
 * major/minor, then register the pci driver. Unwinds in reverse order
 * on failure.
 * NOTE(review): 'devt' is declared elsewhere (presumably a header) —
 * confirm.
 */
static int __init init(void)
{
	int r;

	class = class_create(THIS_MODULE, "evergreen");
	if (IS_ERR(class))
		return PTR_ERR(class);

	r = alloc_chrdev_region(&devt, 0, 1, pci_driver.name);
	if (r < 0) {
		printk(KERN_ERR "%s:cannot allocate major/minor range\n",
							pci_driver.name);
		goto class_destroy;
	}

	r = pci_register_driver(&pci_driver);
	if (r != 0) {
		printk(KERN_ERR "%s:cannot register PCI driver\n",
							pci_driver.name);
		goto chrdev_region_unregister;
	}
	return 0;

chrdev_region_unregister:
	unregister_chrdev_region(devt, 1);

class_destroy:
	class_destroy(class);
	return r;
}

/* Module exit: reverse of init() — driver, chrdev region, class. */
static void __exit cleanup(void)
{
	pci_unregister_driver(&pci_driver);
	unregister_chrdev_region(devt, 1);
	class_destroy(class);
}

module_init(init);
module_exit(cleanup);

MODULE_AUTHOR("Sylvain Bertrand <digital.ragnarok@gmail.com>");
MODULE_DESCRIPTION("AMD evergreen driver");
MODULE_LICENSE("GPL");
