/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/cdev.h>

#include "types.h"
#include "ba.h"
#include "ucode.h"
#include "cp.h"
#include "gpu.h"
#include "irq.h"
#include "drv.h"

#include "regs.h"

void gpu_cfg_init(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;

	drv_data = pci_get_drvdata(dev);

	switch (drv_data->family) {
	case CYPRESS:
	case HEMLOCK:
		drv_data->gpu.cfg.num_ses = 2;
		drv_data->gpu.cfg.max_shader_pipes = 4;
		drv_data->gpu.cfg.max_tile_pipes = 8;
		drv_data->gpu.cfg.max_se_simds = 10;
		drv_data->gpu.cfg.max_backends = 4 * drv_data->gpu.cfg.num_ses;
		drv_data->gpu.cfg.max_gprs = 256;
		drv_data->gpu.cfg.max_thds = 248;
		drv_data->gpu.cfg.max_gs_thds = 32;
		drv_data->gpu.cfg.max_stack_entries = 512;
		drv_data->gpu.cfg.sx_num_of_sets = 4;
		drv_data->gpu.cfg.sx_max_export_sz = 256;
		drv_data->gpu.cfg.sx_max_export_pos_sz = 64;
		drv_data->gpu.cfg.sx_max_export_smx_sz = 192;
		drv_data->gpu.cfg.max_hw_ctxs = 8;
		drv_data->gpu.cfg.sq_num_cf_insts = 2;

		drv_data->gpu.cfg.sc_prim_fifo_sz = 0x100;
		drv_data->gpu.cfg.sc_hiz_tile_fifo_sz = 0x30;
		drv_data->gpu.cfg.sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case JUNIPER:
		drv_data->gpu.cfg.num_ses = 1;
		drv_data->gpu.cfg.max_shader_pipes = 4;
		drv_data->gpu.cfg.max_tile_pipes = 4;
		drv_data->gpu.cfg.max_se_simds = 10;
		drv_data->gpu.cfg.max_backends = 4 * drv_data->gpu.cfg.num_ses;
		drv_data->gpu.cfg.max_gprs = 256;
		drv_data->gpu.cfg.max_thds = 248;
		drv_data->gpu.cfg.max_gs_thds = 32;
		drv_data->gpu.cfg.max_stack_entries = 512;
		drv_data->gpu.cfg.sx_num_of_sets = 4;
		drv_data->gpu.cfg.sx_max_export_sz = 256;
		drv_data->gpu.cfg.sx_max_export_pos_sz = 64;
		drv_data->gpu.cfg.sx_max_export_smx_sz = 192;
		drv_data->gpu.cfg.max_hw_ctxs = 8;
		drv_data->gpu.cfg.sq_num_cf_insts = 2;

		drv_data->gpu.cfg.sc_prim_fifo_sz = 0x100;
		drv_data->gpu.cfg.sc_hiz_tile_fifo_sz = 0x30;
		drv_data->gpu.cfg.sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case REDWOOD:
		drv_data->gpu.cfg.num_ses = 1;
		drv_data->gpu.cfg.max_shader_pipes = 4;
		drv_data->gpu.cfg.max_tile_pipes = 4;
		drv_data->gpu.cfg.max_se_simds = 5;
		drv_data->gpu.cfg.max_backends = 2 * drv_data->gpu.cfg.num_ses;
		drv_data->gpu.cfg.max_gprs = 256;
		drv_data->gpu.cfg.max_thds = 248;
		drv_data->gpu.cfg.max_gs_thds = 32;
		drv_data->gpu.cfg.max_stack_entries = 256;
		drv_data->gpu.cfg.sx_num_of_sets = 4;
		drv_data->gpu.cfg.sx_max_export_sz = 256;
		drv_data->gpu.cfg.sx_max_export_pos_sz = 64;
		drv_data->gpu.cfg.sx_max_export_smx_sz = 192;
		drv_data->gpu.cfg.max_hw_ctxs = 8;
		drv_data->gpu.cfg.sq_num_cf_insts = 2;

		drv_data->gpu.cfg.sc_prim_fifo_sz = 0x100;
		drv_data->gpu.cfg.sc_hiz_tile_fifo_sz = 0x30;
		drv_data->gpu.cfg.sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case CEDAR:
		drv_data->gpu.cfg.num_ses = 1;
		drv_data->gpu.cfg.max_shader_pipes = 2;
		drv_data->gpu.cfg.max_tile_pipes = 2;
		drv_data->gpu.cfg.max_se_simds = 2;
		drv_data->gpu.cfg.max_backends = 1 * drv_data->gpu.cfg.num_ses;
		drv_data->gpu.cfg.max_gprs = 256;
		drv_data->gpu.cfg.max_thds = 192;
		drv_data->gpu.cfg.max_gs_thds = 16;
		drv_data->gpu.cfg.max_stack_entries = 256;
		drv_data->gpu.cfg.sx_num_of_sets = 4;
		drv_data->gpu.cfg.sx_max_export_sz = 128;
		drv_data->gpu.cfg.sx_max_export_pos_sz = 32;
		drv_data->gpu.cfg.sx_max_export_smx_sz = 96;
		drv_data->gpu.cfg.max_hw_ctxs = 4;
		drv_data->gpu.cfg.sq_num_cf_insts = 1;

		drv_data->gpu.cfg.sc_prim_fifo_sz = 0x40;
		drv_data->gpu.cfg.sc_hiz_tile_fifo_sz = 0x30;
		drv_data->gpu.cfg.sc_earlyz_tile_fifo_sz = 0x130;
		break;
	case PALM:
		drv_data->gpu.cfg.num_ses = 1;
		drv_data->gpu.cfg.max_shader_pipes = 2;
		drv_data->gpu.cfg.max_tile_pipes = 2;
		drv_data->gpu.cfg.max_se_simds = 2;
		drv_data->gpu.cfg.max_backends = 1 * drv_data->gpu.cfg.num_ses;
		drv_data->gpu.cfg.max_gprs = 256;
		drv_data->gpu.cfg.max_thds = 192;
		drv_data->gpu.cfg.max_gs_thds = 16;
		drv_data->gpu.cfg.max_stack_entries = 256;
		drv_data->gpu.cfg.sx_num_of_sets = 4;
		drv_data->gpu.cfg.sx_max_export_sz = 128;
		drv_data->gpu.cfg.sx_max_export_pos_sz = 32;
		drv_data->gpu.cfg.sx_max_export_smx_sz = 96;
		drv_data->gpu.cfg.max_hw_ctxs = 4;
		drv_data->gpu.cfg.sq_num_cf_insts = 1;

		drv_data->gpu.cfg.sc_prim_fifo_sz = 0x40;
		drv_data->gpu.cfg.sc_hiz_tile_fifo_sz = 0x30;
		drv_data->gpu.cfg.sc_earlyz_tile_fifo_sz = 0x130;
		break;
	}
}

/*
 * Compute a default GB_BACKEND_MAP value: each tile pipe owns a 4 bit
 * slot holding the index of the render backend it maps to. On the
 * bigger asics the slots are swizzled (even pipes first, then odd) to
 * spread work across backends; the small asics use the identity map.
 */
static u32 tile_pipe_to_backend_map(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	unsigned swizzle_tile_pipe[MAX_PIPES];
	bool force_no_swizzle;
	unsigned tile_pipe;
	unsigned backend;
	u32 backend_map;

	drv_data = pci_get_drvdata(dev);

	switch (drv_data->family) {
	case CEDAR:
	case REDWOOD:
	case PALM:
		force_no_swizzle = false;
		break;
	case CYPRESS:
	case HEMLOCK:
	case JUNIPER:
	default:
		force_no_swizzle = true;
		break;
	}

	memset(swizzle_tile_pipe, 0, sizeof(swizzle_tile_pipe));

	switch (drv_data->gpu.cfg.max_tile_pipes) {
	case 2:
		swizzle_tile_pipe[0] = 0;
		swizzle_tile_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_tile_pipe[0] = 0;
			swizzle_tile_pipe[1] = 1;
			swizzle_tile_pipe[2] = 2;
			swizzle_tile_pipe[3] = 3;
		} else {
			swizzle_tile_pipe[0] = 0;
			swizzle_tile_pipe[1] = 2;
			swizzle_tile_pipe[2] = 1;
			swizzle_tile_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_tile_pipe[0] = 0;
			swizzle_tile_pipe[1] = 1;
			swizzle_tile_pipe[2] = 2;
			swizzle_tile_pipe[3] = 3;
			swizzle_tile_pipe[4] = 4;
			swizzle_tile_pipe[5] = 5;
		} else {
			swizzle_tile_pipe[0] = 0;
			swizzle_tile_pipe[1] = 2;
			swizzle_tile_pipe[2] = 4;
			swizzle_tile_pipe[3] = 1;
			swizzle_tile_pipe[4] = 3;
			swizzle_tile_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_tile_pipe[0] = 0;
			swizzle_tile_pipe[1] = 1;
			swizzle_tile_pipe[2] = 2;
			swizzle_tile_pipe[3] = 3;
			swizzle_tile_pipe[4] = 4;
			swizzle_tile_pipe[5] = 5;
			swizzle_tile_pipe[6] = 6;
			swizzle_tile_pipe[7] = 7;
		} else {
			swizzle_tile_pipe[0] = 0;
			swizzle_tile_pipe[1] = 2;
			swizzle_tile_pipe[2] = 4;
			swizzle_tile_pipe[3] = 6;
			swizzle_tile_pipe[4] = 1;
			swizzle_tile_pipe[5] = 3;
			swizzle_tile_pipe[6] = 5;
			swizzle_tile_pipe[7] = 7;
		}
		break;
	default:
		/* catches the odd counts and anything above 8 */
		dev_err(&dev->dev, "odd number of tile pipes!\n");
		break;
	}

	backend = 0;
	backend_map = 0;
	for (tile_pipe = 0; tile_pipe < drv_data->gpu.cfg.max_tile_pipes;
								++tile_pipe) {
		backend_map |= ((backend & 0xf)
					<< (swizzle_tile_pipe[tile_pipe] * 4));
		++backend;
	}
	return backend_map;
}

/*
 * Program the sequencer: multi-stream fifo sizes, shader type
 * priorities, and the static split of gprs, threads and stack entries
 * between the ps/vs/gs/es/hs/ls shader types.
 */
static void sq_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 cfg;
	u32 lds_res;
	u32 gpr_1;
	u32 gpr_2;
	u32 gpr_3;
	u32 thd_1;
	u32 thd_2;
	u32 stack_1;
	u32 stack_2;
	u32 stack_3;
	u32 gprs_left;
	u32 thd_share;
	u32 stack_share;
	int ps_thd_cnt;

	dd = pci_get_drvdata(dev);

	/* cache fifo sizing scales with the cf instruction count */
	wr32(dev, CACHE_FIFO_SZ(16 * dd->gpu.cfg.sq_num_cf_insts)
			| FETCH_FIFO_HIWATER(0x4) | DONE_FIFO_HIWATER(0xe0)
			| ALU_UPDATE_FIFO_HIWATER(0x8), SQ_MS_FIFO_SZS);

	cfg = rr32(dev, SQ_CFG);
	cfg &= ~(PS_PRIO(3) | VS_PRIO(3) | GS_PRIO(3) | ES_PRIO(3));
	cfg |= EXPORT_SRC_C | PS_PRIO(0) | VS_PRIO(1) | GS_PRIO(2)
								| ES_PRIO(3);

	/* cedar/palm have no vertex cache */
	if (dd->family == CEDAR || dd->family == PALM)
		cfg &= ~VC_ENABLE;
	else
		cfg |= VC_ENABLE;

	lds_res = rr32(dev, SQ_LDS_RES_MGMT);

	/* 2 sets of 4 clause temp gprs are carved out of the pool */
	gprs_left = dd->gpu.cfg.max_gprs - (4 * 2);
	gpr_1 = NUM_PS_GPRS(gprs_left * 12 / 32)
			| NUM_VS_GPRS(gprs_left * 6 / 32)
			| NUM_CLAUSE_TEMP_GPRS(4);
	gpr_2 = NUM_GS_GPRS(gprs_left * 4 / 32)
			| NUM_ES_GPRS(gprs_left * 4 / 32);
	gpr_3 = NUM_HS_GPRS(gprs_left * 3 / 32)
			| NUM_LS_GPRS(gprs_left * 3 / 32);

	/* smaller ps allotment on the small asics */
	if (dd->family == CEDAR || dd->family == PALM)
		ps_thd_cnt = 96;
	else
		ps_thd_cnt = 128;

	/* non-ps thds split 6 ways, rounded down to a multiple of 8 */
	thd_share = (((dd->gpu.cfg.max_thds - ps_thd_cnt) / 6) / 8) * 8;
	thd_1 = NUM_PS_THDS(ps_thd_cnt) | NUM_VS_THDS(thd_share)
			| NUM_GS_THDS(thd_share) | NUM_ES_THDS(thd_share);
	thd_2 = NUM_HS_THDS(thd_share) | NUM_LS_THDS(thd_share);

	/* stack entries are split evenly in 6 */
	stack_share = (dd->gpu.cfg.max_stack_entries * 1) / 6;
	stack_1 = NUM_PS_STACK_ENTRIES(stack_share)
				| NUM_VS_STACK_ENTRIES(stack_share);
	stack_2 = NUM_GS_STACK_ENTRIES(stack_share)
				| NUM_ES_STACK_ENTRIES(stack_share);
	stack_3 = NUM_HS_STACK_ENTRIES(stack_share)
				| NUM_LS_STACK_ENTRIES(stack_share);

	wr32(dev, cfg, SQ_CFG);
	wr32(dev, gpr_1, SQ_GPR_RES_MGMT_1);
	wr32(dev, gpr_2, SQ_GPR_RES_MGMT_2);
	wr32(dev, gpr_3, SQ_GPR_RES_MGMT_3);
	wr32(dev, thd_1, SQ_THD_RES_MGMT);
	wr32(dev, thd_2, SQ_THD_RES_MGMT_2);
	wr32(dev, stack_1, SQ_STACK_RES_MGMT_1);
	wr32(dev, stack_2, SQ_STACK_RES_MGMT_2);
	wr32(dev, stack_3, SQ_STACK_RES_MGMT_3);
	wr32(dev, 0, SQ_DYN_GPR_CTL_PS_FLUSH_REQ);
	wr32(dev, lds_res, SQ_LDS_RES_MGMT);
}

/*
 * One-time init of the 3d engine: hdp tiles, pipe/simd/backend enable
 * masks, tiling address configuration, backend map selection (efuse
 * overrides on some boards), sequencer setup and assorted hw defaults.
 * Returns 0.
 */
int gpu_init(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;
	int i;
	int j;
	u32 cc_gc_shader_pipe_cfg;
	u32 cc_rb_backend_disable;
	u32 mc_shared_chmap;
	u32 mc_arb_ramcfg;
	u32 gb_addr_cfg;
	u32 gb_backend_map;
	u32 gfx_idx;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_ctl;
	u32 tmp;

	drv_data = pci_get_drvdata(dev);

	/* initialize hdp: zero 32 tile entries of 0x18 bytes each */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		wr32(dev, 0x00000000, 0x2c14 + j);
		wr32(dev, 0x00000000, 0x2c18 + j);
		wr32(dev, 0x00000000, 0x2c1c + j);
		wr32(dev, 0x00000000, 0x2c20 + j);
		wr32(dev, 0x00000000, 0x2c24 + j);
	}

	wr32(dev, GRBM_READ_TIMEOUT(0xff), GRBM_CTL);

	/*
	 * For the byte meant for pipes config in this register, init to 0 the
	 * bits corresponding to existing gpu pipes, and leave the others to 1.
	 * Do the same thing for simd units and rb backends.
	 * NOTE(review): the ~2 below clears only bit 1, not the whole
	 * field described above — confirm against the register layout.
	 */
	cc_gc_shader_pipe_cfg = rr32(dev, CC_GC_SHADER_PIPE_CFG) & ~2;

	cc_gc_shader_pipe_cfg |= INACTIVE_QD_PIPES((MAX_PIPES_MASK
		<< drv_data->gpu.cfg.max_shader_pipes) & MAX_PIPES_MASK);
	cc_gc_shader_pipe_cfg |= INACTIVE_SIMDS((MAX_SIMDS_MASK
			<< drv_data->gpu.cfg.max_se_simds) & MAX_SIMDS_MASK);

	cc_rb_backend_disable = BACKEND_DISABLE(
			(MAX_BACKENDS_MASK << drv_data->gpu.cfg.max_backends)
							& MAX_BACKENDS_MASK);

	mc_shared_chmap = rr32(dev, MC_SHARED_CHMAP);
	mc_arb_ramcfg = rr32(dev, MC_ARB_RAMCFG);

	/* NUM_PIPES field is log2 of the tile pipe count */
	gb_addr_cfg = 0;
	switch (drv_data->gpu.cfg.max_tile_pipes) {
	case 1:
		gb_addr_cfg |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_cfg |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_cfg |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_cfg |= NUM_PIPES(3);
		break;
	}

	gb_addr_cfg |= PIPE_INTERLEAVE_SZ((mc_arb_ramcfg & BURSTLENGTH_MASK)
							>> BURSTLENGTH_SHIFT);
	gb_addr_cfg |= BANK_INTERLEAVE_SZ(0);
	gb_addr_cfg |= NUM_SES(drv_data->gpu.cfg.num_ses - 1);
	gb_addr_cfg |= SE_TILE_SZ(1);
	gb_addr_cfg |= NUM_GPUS(0);
	gb_addr_cfg |= MULTI_GPU_TILE_SZ(2);

	/* cap row size to 2 */
	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_cfg |= ROW_SZ(2);
	else
		gb_addr_cfg |= ROW_SZ((mc_arb_ramcfg & NOOFCOLS_MASK)
							>> NOOFCOLS_SHIFT);

	/* some chips needs special care for their tile pipe to backend map */
	if (dev->device == 0x689e) {/*some CYPRESS*/
		u32 efuse_straps_3;
		u32 efuse_straps_4;
		u8 efuse_box_bit_131_124;

		/* read fuse bits 131:124 through the rcu index/data pair */
		wr32(dev, 0x204, RCU_IDX);
		efuse_straps_4 = rr32(dev, RCU_DATA);

		wr32(dev, 0x203, RCU_IDX);
		efuse_straps_3 = rr32(dev, RCU_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4)
				| ((efuse_straps_3 & 0xf0000000) >> 28));

		switch (efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x56:
			gb_backend_map = 0x77553300;
			break;
		case 0x59:
			gb_backend_map = 0x77552211;
			break;
		case 0x66:
			gb_backend_map = 0x77443300;
			break;
		case 0x99:
			gb_backend_map = 0x66552211;
			break;
		case 0x5a:
			gb_backend_map = 0x77552200;
			break;
		case 0xaa:
			gb_backend_map = 0x66442200;
			break;
		case 0x95:
			gb_backend_map = 0x66553311;
			break;
		default:
			dev_err(&dev->dev, "bad backend map, using default\n");
			gb_backend_map = tile_pipe_to_backend_map(dev);
			break;
		}
	} else if (dev->device == 0x68b9) {/*some JUNIPER*/
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		wr32(dev, 0x203, RCU_IDX);
		efuse_straps_3 = rr32(dev, RCU_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xf0000000)
									>> 28);

		switch (efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			dev_err(&dev->dev, "bad backend map, using default\n");
			gb_backend_map = tile_pipe_to_backend_map(dev);
			break;
		}
	} else {
		switch (drv_data->family) {
		case CYPRESS:
		case HEMLOCK:
			gb_backend_map = 0x66442200;
			break;
		case JUNIPER:
			gb_backend_map = 0x00002200;
			break;
		default:
			gb_backend_map = tile_pipe_to_backend_map(dev);
			break;
		}
	}

	wr32(dev, gb_backend_map, GB_BACKEND_MAP);

	/* hdp and dmif must agree with the gb tiling layout */
	wr32(dev, gb_addr_cfg, GB_ADDR_CFG);
	wr32(dev, gb_addr_cfg, DMIF_ADDR_CFG);
	wr32(dev, gb_addr_cfg, HDP_ADDR_CFG);

	/*
	 * For all instance but specific to shader engine.
	 * there is a hardware instance table in the atombios, it may be related.
	 */
	gfx_idx = INSTANCE_BROADCAST_WRITES;

	/*
	 * actually we have a max of 4 backends per se, force
	 * disabling the top 4 backend bits
	 */
	for (i = 0; i < drv_data->gpu.cfg.num_ses; ++i) {
		u32 rb;
		u32 gfx;

		rb = cc_rb_backend_disable | BACKEND_DISABLE(0xf0);
		gfx = gfx_idx | SE_IDX(i);

		/* select se */
		wr32(dev, gfx, GRBM_GFX_IDX);
		wr32(dev, gfx, RLC_GFX_IDX);

		/* apply settings for selected se */

		/* primary */
		wr32(dev, rb, CC_RB_BACKEND_DISABLE);
		wr32(dev, rb, CC_SYS_RB_BACKEND_DISABLE);
		wr32(dev, cc_gc_shader_pipe_cfg, CC_GC_SHADER_PIPE_CFG);

		/* user */
		wr32(dev, rb, GC_USER_RB_BACKEND_DISABLE);
	}

	/* all instances, all shader engines */
	gfx_idx |= SE_BROADCAST_WRITES;
	wr32(dev, gfx_idx, GRBM_GFX_IDX);
	wr32(dev, gfx_idx, RLC_GFX_IDX);

	/* enable all texture caches: primary */
	wr32(dev, 0, CGTS_SYS_TCC_DISABLE);
	wr32(dev, 0, CGTS_TCC_DISABLE);
	/* user */
	wr32(dev, 0, CGTS_USER_SYS_TCC_DISABLE);
	wr32(dev, 0, CGTS_USER_TCC_DISABLE);

	/* set hw defaults for 3d engine */
	wr32(dev, ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b),
							CP_QUEUE_THRESHOLDS);
	wr32(dev, STQ_SPLIT(0x30), CP_MEQ_THRESHOLDS);

	wr32(dev, DISABLE_CUBE_ANISO | SYNC_GRADIENT | SYNC_WALKER
						| SYNC_ALIGNER, TA_CTL_AUX);

	/*
	 * BUGFIX: value and register were swapped here; wr32 takes
	 * (dev, value, reg) everywhere else in this file.
	 */
	sx_debug_1 = rr32(dev, SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDR;
	wr32(dev, sx_debug_1, SX_DEBUG_1);

	smx_dc_ctl0 = rr32(dev, SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(drv_data->gpu.cfg.sx_num_of_sets);
	wr32(dev, smx_dc_ctl0, SMX_DC_CTL0);

	/* export buffer sizes are programmed in units of 4, minus one */
	wr32(dev, COLOR_BUF_SZ((drv_data->gpu.cfg.sx_max_export_sz / 4) - 1)
		| POSITION_BUF_SZ((drv_data->gpu.cfg.sx_max_export_pos_sz / 4)
									- 1)
		| SMX_BUF_SZ((drv_data->gpu.cfg.sx_max_export_smx_sz / 4) - 1),
							SX_EXPORT_BUF_SZS);

	wr32(dev, SC_PRIM_FIFO_SZ(drv_data->gpu.cfg.sc_prim_fifo_sz)
		| SC_HIZ_TILE_FIFO_SZ(drv_data->gpu.cfg.sc_hiz_tile_fifo_sz)
		| SC_EARLYZ_TILE_FIFO_SZ(
				drv_data->gpu.cfg.sc_earlyz_tile_fifo_sz),
								PA_SC_FIFO_SZ);

	wr32(dev, 1, VGT_NUM_INSTS);

	wr32(dev, 0, SPI_CFG_CTL);
	wr32(dev, VTX_DONE_DELAY(4), SPI_CFG_CTL_1);

	wr32(dev, 0, CP_PERFMON_CTL);

	sq_init(dev);

	wr32(dev, FORCE_EOV_MAX_CLK_CNT(4095) | FORCE_EOV_MAX_REZ_CNT(255),
						PA_SC_FORCE_EOV_MAX_CNTS);

	/* no vertex cache on cedar/palm, invalidate the texture cache only */
	switch (drv_data->family) {
	case CEDAR:
	case PALM:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	wr32(dev, vgt_cache_invalidation, VGT_CACHE_INVALIDATION);

	wr32(dev, 16, VGT_GS_VERTEX_REUSE);
	wr32(dev, 0, PA_SU_LINE_STIPPLE_VALUE);
	wr32(dev, 0, PA_SC_LINE_STIPPLE_STATE);

	wr32(dev, 14, VGT_VERTEX_REUSE_BLOCK_CTL);
	wr32(dev, 16, VGT_OUT_DEALLOC_CTL);

	/* reset the color buffer performance counter selects */
	wr32(dev, 0, CB_PERF_CTR0_SEL_0);
	wr32(dev, 0, CB_PERF_CTR0_SEL_1);
	wr32(dev, 0, CB_PERF_CTR1_SEL_0);
	wr32(dev, 0, CB_PERF_CTR1_SEL_1);
	wr32(dev, 0, CB_PERF_CTR2_SEL_0);
	wr32(dev, 0, CB_PERF_CTR2_SEL_1);
	wr32(dev, 0, CB_PERF_CTR3_SEL_0);
	wr32(dev, 0, CB_PERF_CTR3_SEL_1);

	/* clear render buffer base addresses */
	wr32(dev, 0, CB_COLOR0_BASE);
	wr32(dev, 0, CB_COLOR1_BASE);
	wr32(dev, 0, CB_COLOR2_BASE);
	wr32(dev, 0, CB_COLOR3_BASE);
	wr32(dev, 0, CB_COLOR4_BASE);
	wr32(dev, 0, CB_COLOR5_BASE);
	wr32(dev, 0, CB_COLOR6_BASE);
	wr32(dev, 0, CB_COLOR7_BASE);
	wr32(dev, 0, CB_COLOR8_BASE);
	wr32(dev, 0, CB_COLOR9_BASE);
	wr32(dev, 0, CB_COLOR10_BASE);
	wr32(dev, 0, CB_COLOR11_BASE);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUF_SZ_PS_0; i < 0x28200; i += 4)
		wr32(dev, 0, i);
	for (i = SQ_ALU_CONST_BUF_SZ_HS_0; i < 0x29000; i += 4)
		wr32(dev, 0, i);

	tmp = rr32(dev, HDP_MISC_CTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	wr32(dev, tmp, HDP_MISC_CTL);

	/* read-modify-write with no change: latches the current value */
	hdp_host_path_ctl = rr32(dev, HDP_HOST_PATH_CTL);
	wr32(dev, hdp_host_path_ctl, HDP_HOST_PATH_CTL);

	wr32(dev, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3), PA_CL_ENHANCE);

	/* let the hw settle */
	udelay(50);
	return 0;
}
