/*
  author Sylvain Bertrand <sylvain.bertrand@gmail.com>
  Protected by linux GNU GPLv2
  Copyright 2012-2014
*/
#include <linux/pci.h>
#include <linux/cdev.h>
#include <asm/unaligned.h>

#include <alga/rng_mng.h>
#include <uapi/alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/amd/atombios/atb.h>
#include <uapi/alga/amd/dce6/dce6.h>
#include <alga/amd/atombios/vm.h>
#include <alga/amd/atombios/cm.h>
#include <alga/amd/atombios/pp.h>
#include <alga/amd/atombios/vram_info.h>

#include "../mc.h"
#include "../rlc.h"
#include "../ih.h"
#include "../fence.h"
#include "../ring.h"
#include "../dmas.h"
#include "../ba.h"
#include "../cps.h"
#include "../gpu.h"
#include "../drv.h"

#include "../regs.h"

#include "../smc_tbls.h"
#include "../smc.h"

#include "ctx.h"
#include "private.h"
#include "initial.h"
#include "emergency.h"
#include "ulv.h"
#include "driver.h"

#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
#define L(fmt,...) printk(KERN_INFO fmt "\n", ##__VA_ARGS__)

/* pretty-print the whole smc mc register table through the dyn_pm log */
void smc_mc_reg_tbl_dump(struct smc_mc_reg_tbl *tbl)
{
	u8 set;
	u8 reg;

	L("SMC_MC_REG_TBL START");

	L("addrs_n=0x%02x", tbl->addrs_n);

	for (reg = 0; reg < 3; ++reg)
		L("rsvd[%u]=0x%02x", reg, tbl->rsvd[reg]);

	/* address pairs are stored big-endian and may be unaligned */
	for (reg = 0; reg < tbl->addrs_n; ++reg) {
		L("addrs[%u].dw_idx_lp=0x%04x", reg,
			get_unaligned_be16(&tbl->addrs[reg].dw_idx_lp));
		L("addrs[%u].dw_idx=0x%04x", reg,
			get_unaligned_be16(&tbl->addrs[reg].dw_idx));
	}

	/* all value sets, only the first addrs_n entries are meaningful */
	for (set = 0; set < SMC_MC_REG_SETS_N_MAX; ++set)
		for (reg = 0; reg < tbl->addrs_n; ++reg)
			L("sets[%u].vals[%u]=0x%08x", set, reg,
				get_unaligned_be32(&tbl->sets[set].vals[reg]));

	L("SMC_MC_REG_TBL END");
}
#endif

/*
 * Count the memory controller registers flagged valid, from both the
 * atombios table and the driver "special" set.
 */
static u8 all_valid_regs_n_cnt(struct ctx *ctx)
{
	u8 valid_n = 0;
	u8 bit;

	/* registers coming from the atombios table */
	for (bit = 0; bit < ATB_MC_REGS_N_MAX; ++bit)
		valid_n += (ctx->atb_mc_regs_valid & BIT(bit)) ? 1 : 0;

	/* driver-managed "special" registers */
	for (bit = 0; bit < SPECIAL_MC_REGS_N_MAX; ++bit)
		valid_n += (ctx->special_mc_regs_valid & BIT(bit)) ? 1 : 0;

	return valid_n;
}

/*
 * Map an mc register address to its low-power "_LP" shadow address.
 * Registers without a shadow map to themselves.
 */
static u32 addr_lp_get(u32 addr)
{
	static const struct {
		u32 mmio;
		u32 lp;
	} map[] = {
		{MC_SEQ_RAS_TIMING,	MC_SEQ_RAS_TIMING_LP},
		{MC_SEQ_CAS_TIMING,	MC_SEQ_CAS_TIMING_LP},
		{MC_SEQ_MISC_TIMING_0,	MC_SEQ_MISC_TIMING_0_LP},
		{MC_SEQ_MISC_TIMING_1,	MC_SEQ_MISC_TIMING_1_LP},
		{MC_SEQ_RD_CTL_D0,	MC_SEQ_RD_CTL_D0_LP},
		{MC_SEQ_RD_CTL_D1,	MC_SEQ_RD_CTL_D1_LP},
		{MC_SEQ_WR_CTL_D0,	MC_SEQ_WR_CTL_D0_LP},
		{MC_SEQ_WR_CTL_D1,	MC_SEQ_WR_CTL_D1_LP},
		{MC_SEQ_PMG_CMD_EMRS,	MC_SEQ_PMG_CMD_EMRS_LP},
		{MC_SEQ_PMG_CMD_MRS_0,	MC_SEQ_PMG_CMD_MRS_0_LP},
		{MC_SEQ_PMG_CMD_MRS_1,	MC_SEQ_PMG_CMD_MRS_1_LP},
		{MC_SEQ_PMG_CMD_MRS_2,	MC_SEQ_PMG_CMD_MRS_2_LP},
		{MC_SEQ_PMG_TIMING,	MC_SEQ_PMG_TIMING_LP},
		{MC_SEQ_WR_CTL_2,	MC_SEQ_WR_CTL_2_LP},
	};
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); ++i)
		if (map[i].mmio == addr)
			return map[i].lp;
	return addr;
}

/*
 * Store an mc register address (and its "_LP" shadow counterpart) into
 * an smc address entry, as big-endian dword indices.
 */
static void addr_put(u32 addr, struct smc_mc_reg_addr *smc_addr)
{
	u32 lp;

	lp = addr_lp_get(addr);

	/* the smc expects dword indices, not byte addresses */
	put_unaligned_be16((u16)(addr >> 2), &smc_addr->dw_idx);
	put_unaligned_be16((u16)(lp >> 2), &smc_addr->dw_idx_lp);
}

/*
 * Copy the addresses of all valid mc registers into the smc table, in
 * smc format: atombios registers first, then the special registers.
 */
static void regs_addr_cpy(struct ctx *ctx, struct smc_mc_reg_tbl *tbl)
{
	u8 src;
	u8 dst = 0;

	/* registers from atombios */
	for (src = 0; src < ATB_MC_REGS_N_MAX; ++src) {
		if (!(ctx->atb_mc_regs_valid & BIT(src)))
			continue;
		addr_put(ctx->atb_mc_reg_tbl.addrs[src], &tbl->addrs[dst]);
		++dst;
	}

	/* special registers */
	for (src = 0; src < SPECIAL_MC_REGS_N_MAX; ++src) {
		if (!(ctx->special_mc_regs_valid & BIT(src)))
			continue;
		addr_put(ctx->special_mc_regs_addrs[src], &tbl->addrs[dst]);
		++dst;
	}
}

/*
 * Fill one smc mc register value set from the atombios set at
 * atb_mc_reg_set_idx, then append the matching special register values.
 * Values are written big-endian (smc byte order), possibly unaligned.
 *
 * NOTE(review): the atombios loop bound is addrs_n here while
 * regs_addr_cpy scans up to ATB_MC_REGS_N_MAX — presumably no valid bit
 * is ever set at or above addrs_n; confirm against the atombios parser.
 */
void smc_mc_reg_set_load(struct ctx *ctx, u8 atb_mc_reg_set_idx,
					struct smc_mc_reg_set *smc_mc_reg_set)
{
	struct atb_mc_reg_set *src_set;
	u8 src;
	u8 dst = 0;

	src_set = &ctx->atb_mc_reg_tbl.sets[atb_mc_reg_set_idx];

	/* load atb registers, skipping the invalid ones */
	for (src = 0; src < ctx->atb_mc_reg_tbl.addrs_n; ++src) {
		if (!(ctx->atb_mc_regs_valid & BIT(src)))
			continue;
		put_unaligned_be32(src_set->vals[src],
					&smc_mc_reg_set->vals[dst]);
		++dst;
	}

	/* load special registers, same set index as the atb table */
	for (src = 0; src < ctx->special_mc_regs_n; ++src) {
		if (!(ctx->special_mc_regs_valid & BIT(src)))
			continue;
		put_unaligned_be32(
			ctx->special_mc_regs_sets[atb_mc_reg_set_idx][src],
			&smc_mc_reg_set->vals[dst]);
		++dst;
	}
}

/*
 * we are not using a atb_pp_state here, since the initial state is basically
 * built from scratch (the atombios "boot" state pwr lvl is ignored)
 */
/*
 * Load the initial/emergency smc mc register value set.
 *
 * We are not using an atb_pp_state here, since the initial state is
 * basically built from scratch (the atombios "boot" state pwr lvl is
 * ignored).
 */
static void initial_emergency_init(struct ctx *ctx,
					struct smc_state_tbl *smc_state_tbl,
					struct smc_mc_reg_tbl *smc_mc_reg_tbl)
{
	u32 mem_clk;
	u8 idx;

	/*
	 * read the mem clk back from the already inited initial state smc
	 * table, since the atombios pwr lvl is mostly or completely ignored
	 * when loading the smc initial state
	 */
	mem_clk = get_unaligned_be32(&smc_state_tbl->initial_lvl.mem_clk.clk);

	/* pick the first set of mc regs able to sustain that mem clk */
	for (idx = 0; idx < ctx->atb_mc_reg_tbl.sets_n; ++idx)
		if (mem_clk <= ctx->atb_mc_reg_tbl.sets[idx].mem_clk_max)
			break;

	/*
	 * none found: fall back to the last set as a work around, which
	 * should accommodate the highest mem clk — the tbl seems sorted
	 * from lowest to highest mem clk
	 */
	if (idx == ctx->atb_mc_reg_tbl.sets_n)
		--idx; /* initial lvl is always here */

	smc_mc_reg_set_load(ctx, idx,
		&smc_mc_reg_tbl->sets[MC_REG_SET_IDX_INITIAL_EMERGENCY]);
}

/*
 * Init the smc memory controller register table: the addresses of all
 * valid mc registers, then the initial/emergency and ulv value sets.
 *
 * Removed: a long-dead "#if 0" overflow check that returned -SI_ERR
 * from this void function (it could never compile if re-enabled).
 *
 * NOTE(review): all_valid_regs_n is written to the smc table unchecked;
 * if atombios ever reports more valid registers than SMC_MC_REGS_N_MAX,
 * regs_addr_cpy would overflow the smc table — confirm the capacity
 * always holds for supported boards, or add a clamp with a dev_err.
 */
void smc_mc_reg_tbl_init(struct ctx *ctx, struct smc_state_tbl *smc_state_tbl,
					struct smc_mc_reg_tbl *smc_mc_reg_tbl)
{
	u8 all_valid_regs_n;

	LOG("smc memory controller register table init");

	all_valid_regs_n = all_valid_regs_n_cnt(ctx);

	smc_mc_reg_tbl->addrs_n = all_valid_regs_n;
	regs_addr_cpy(ctx, smc_mc_reg_tbl);

	initial_emergency_init(ctx, smc_state_tbl, smc_mc_reg_tbl);
	smc_mc_reg_tbl_ulv_init(ctx, smc_mc_reg_tbl);
	/* XXX:in initing phase, the driver state is the initial state */
}

/* Write 1 to the smc software-visible SMC_SW_SEQ_IDX register. */
void smc_mc_reg_tbl_sw_regs_init(struct ctx *ctx)
{
	LOG("smc memory controller software registers init");
	/*
	 * NOTE(review): presumably selects the active mc register sequence
	 * set in the smc — confirm against the smc register documentation.
	 */
	smc_sw_wr32(ctx->dev, 1, SMC_SW_SEQ_IDX);
}
