/*
  author Sylvain Bertrand <sylvain.bertrand@gmail.com>
  Protected by linux GNU GPLv2
  Copyright 2012-2014
*/
#include <linux/pci.h>
#include <linux/cdev.h>
#include <asm/unaligned.h>

#include <alga/rng_mng.h>
#include <uapi/alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/amd/atombios/atb.h>
#include <uapi/alga/amd/dce6/dce6.h>
#include <alga/amd/atombios/vm.h>
#include <alga/amd/atombios/cm.h>
#include <alga/amd/atombios/pp.h>
#include <alga/amd/atombios/vram_info.h>

#include "../mc.h"
#include "../rlc.h"
#include "../ih.h"
#include "../fence.h"
#include "../ring.h"
#include "../dmas.h"
#include "../ba.h"
#include "../cps.h"
#include "../gpu.h"
#include "../drv.h"

#include "../bif.h"

#include "../cm.h"
#include "../smc_tbls.h"

#include "../regs.h"

#include "ctx.h"
#include "private.h"

#ifdef CONFIG_ALGA_AMD_SI_DYN_PM_LOG
#include "smc_volt.h"
/* Log every power level of a powerplay state; clocks are in 10kHz units. */
static void atb_pp_state_dump(struct atb_pp_state *s, char *name)
{
	u8 i;

	for (i = 0; i < s->lvls_n; ++i) {
		struct atb_pp_lvl *l = &s->lvls[i];

		/* vddc may be a real millivolt value or a leakage index */
		if (IS_VDDC_LKGE_IDX(l->vddc_id))
			LOG("atb_pp_%s_lvl[%u]:vddc=0x%04x(leakage index) engine clock=%ukHz memory clock=%ukHz pcie generation=%u",
				name, i, l->vddc_id, l->eng_clk * 10,
				l->mem_clk * 10, l->pcie_gen + 1);
		else
			LOG("atb_pp_%s_lvl[%u]:vddc=%umV engine clock=%ukHz memory clock=%ukHz pcie generation=%u",
				name, i, l->vddc_id, l->eng_clk * 10,
				l->mem_clk * 10, l->pcie_gen + 1);
	}
}

/* Log the vddc-on-sclk dependency table; clocks are in 10kHz units. */
static void atb_volt_on_clk_dep_tbl_dump(struct atb_volt_on_clk_dep_tbl *tbl)
{
	u8 i;

	for (i = 0; i < tbl->entries_n; ++i) {
		struct atb_volt_on_clk_dep *e = &tbl->entries[i];

		/* the voltage field may carry a leakage index instead of mV */
		if (IS_VDDC_LKGE_IDX(e->volt_id))
			LOG("vddc_dep_on_sclk[%u]:clk=%ukHz voltage=0x%04x(leakage index)",
						i, e->clk * 10, e->volt_id);
		else
			LOG("vddc_dep_on_sclk[%u]:clk=%ukHz voltage=%umV",
						i, e->clk * 10, e->volt_id);
	}
}

/* Log every entry of the cac (capacitance) leakage table. */
static void atb_cac_lkge_tbl_dump(struct atb_cac_lkge_tbl *tbl)
{
	u8 i;

	for (i = 0; i < tbl->entries_n; ++i)
		LOG("cac_leakage[%u]:vddc=%umV leakage=%u", i,
			tbl->entries[i].vddc_mv, tbl->entries[i].lkge);
}

/* Log the vddc phase shedding limits table; clocks are in 10kHz units. */
static void atb_vddc_phase_shed_limits_tbl_dump(
				struct atb_vddc_phase_shed_limits_tbl *tbl)
{
	u8 i;

	for (i = 0; i < tbl->entries_n; ++i) {
		struct atb_vddc_phase_shed_limits *e = &tbl->entries[i];

		LOG("atb_vddc_phase_shed_limits[%u]:vddc=%umV engine clock=%ukHz memory clock=%ukHz",
			i, e->vddc_mv, e->sclk * 10, e->mclk * 10);
	}
}

/* Log a voltage table header (phase delay, smio mask) then its entries. */
static void atb_volt_tbl_dump(struct atb_volt_tbl *tbl, char *name)
{
	u8 i;

	LOG("atb_volt_%s:phase_delay=%u mask_low=0x%08x", name,
					tbl->phase_delay, tbl->mask_low);

	for (i = 0; i < tbl->entries_n; ++i)
		LOG("atb_volt_%s[%u]:smio_low=0x%08x val_mv=%u",
			name, i, tbl->entries[i].smio_low,
			tbl->entries[i].val_mv);
}
#else
/*
 * CONFIG_ALGA_AMD_SI_DYN_PM_LOG is unset: the dump helpers compile down to
 * empty stubs so the callers in ctx_init need no conditional compilation.
 */
static void atb_pp_state_dump(struct atb_pp_state *s, char *name){}
static void atb_volt_on_clk_dep_tbl_dump(
				struct atb_volt_on_clk_dep_tbl *tbl){}
static void atb_cac_lkge_tbl_dump(struct atb_cac_lkge_tbl *tbl){}
static void atb_vddc_phase_shed_limits_tbl_dump(
				struct atb_vddc_phase_shed_limits_tbl *tbl){}
static void atb_volt_tbl_dump(struct atb_volt_tbl *tbl, char *name){}
#endif

/*
 * Probe atombios for the voltage controls the board supports and record
 * them in ctx->volt_caps. vddc control acts as the master switch: when it
 * is absent, the remaining controls are not even probed.
 * Returns 0 on success, -SI_ERR when an atombios query fails.
 */
static long volts_ctl_caps_get(struct ctx *ctx)
{
	struct dev_drv_data *dd = pci_get_drvdata(ctx->dev);
	long r;

	r = atb_have_vddc_ctl(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&ctx->dev->dev, "dyn_pm:unable to look for vddc control\n");
		return -SI_ERR;
	}
	if (r == ATB_HAVE_VDDC_CTL) {
		LOG("vddc control supported");
		ctx->volt_caps |= VOLT_CAPS_VDDC_CTL_ENA;
	} else if (r == ATB_DONT_HAVE_VDDC_CTL) {
		LOG("vddc control unsupported");
		return 0; /* XXX:master switch */
	}

	r = atb_have_mvddc_ctl(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&ctx->dev->dev, "dyn_pm:unable to look for mvddc control\n");
		return -SI_ERR;
	}
	if (r == ATB_HAVE_MVDDC_CTL) {
		LOG("mvdd control supported");
		ctx->volt_caps |= VOLT_CAPS_MVDD_CTL_ENA;
	} else if (r == ATB_DONT_HAVE_MVDDC_CTL) {
		LOG("mvdd control unsupported");
	}

	r = atb_have_vddci_ctl(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&ctx->dev->dev, "dyn_pm:unable to look for vddci control\n");
		return -SI_ERR;
	}
	if (r == ATB_HAVE_VDDCI_CTL) {
		LOG("vddci control supported");
		ctx->volt_caps |= VOLT_CAPS_VDDCI_CTL_ENA;
	} else if (r == ATB_DONT_HAVE_VDDCI_CTL) {
		LOG("vddci control unsupported");
	}

	r = atb_have_vddc_phase_shed_ctl(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&ctx->dev->dev, "dyn_pm:unable to look for vddc phase shedding control\n");
		return -SI_ERR;
	}
	if (r == ATB_HAVE_VDDC_PHASE_SHED_CTL) {
		LOG("vddc phase shedding supported");
		ctx->volt_caps |= VOLT_CAPS_VDDC_PHASE_SHED_CTL_ENA;
	} else if (r == ATB_DONT_HAVE_VDDC_PHASE_SHED_CTL) {
		LOG("vddc phase shedding unsupported");
	}
	return 0;
}

/*
 * Split a scaled interval into the (p, u) register encoding.
 * i appears to be an interval in 1/100ths of a reference clock count —
 * TODO confirm against the smc programming docs. The scaled value is
 * clamped into a 16 bit budget: u counts, two at a time, the bits that
 * overflow 16 bits, and p is the value right-shifted by 2*u.
 */
static void u_and_p_compute(u32 i, u32 ref_clk, u32 *p, u32 *u)
{
	u32 scaled;
	u32 excess_bits;
	u32 rest;

	scaled = (i * ref_clk) / 100;

	/* count how many significant bits lie above bit 15 */
	excess_bits = 0;
	for (rest = scaled >> 16; rest; rest >>= 1)
		excess_bits++;

	*u = (excess_bits + 1) / 2;
	*p = scaled / (1 << (2 * (*u)));
}

#define ASI_DEFAULT	1000	/* value from rv770 family */
#define P_ASI_DEFAULT	400000	/* value from cypress family */
/*
 * Compute and store the state-switch period registers (d_sp/p_sp) from the
 * gpu auxiliary clock, for both the default and the "p" ASI constants.
 */
static void ctx_b_sp_init(struct ctx *ctx)
{
	u32 d_p;
	u32 d_u;
	u32 p_p;
	u32 p_u;

	u_and_p_compute(ASI_DEFAULT, ctx->gpu_aux_clk, &d_p, &d_u);
	u_and_p_compute(P_ASI_DEFAULT, ctx->gpu_aux_clk, &p_p, &p_u);

	/* gpu_aux_clk is stored in 10kHz units, hence the *10 for kHz */
	LOG("d_sp parameters:gpu_aux_clk=%u kHz, bs_p=0x%08x, bs_u=0x%08x",
					ctx->gpu_aux_clk * 10, d_p, d_u);
	LOG("p_sp parameters:gpu_aux_clk=%u kHz, p_bs_p=0x%08x, p_bs_u=0x%08x",
					ctx->gpu_aux_clk * 10, p_p, p_u);

	ctx->bs_p = d_p;
	ctx->p_bs_p = p_p;

	ctx->d_sp = set(CB_BS_P, d_p) | set(CB_BS_U, d_u);
	ctx->p_sp = set(CB_BS_P, p_p) | set(CB_BS_U, p_u);
}

/*
 * Maximum number of compute units for the device, keyed by pci device id.
 * Only the verde family needs a per-device cap; 0 means "no cap known".
 */
static u32 cus_n_max_compute(struct ctx *ctx)
{
	struct dev_drv_data *dd = pci_get_drvdata(ctx->dev);

	if (dd->family != VERDE)
		return 0;

	switch (ctx->dev->device) {
	case 0x6820:
	case 0x6821:
	case 0x6823:
	case 0x6825:
	case 0x6827:
	case 0x6828:
	case 0x6830:
	case 0x6831:
	case 0x6838:
	case 0x6839:
	case 0x683d:
		return 10;
	case 0x6824:
	case 0x6826:
	case 0x6829:
	case 0x682d:
	case 0x682f:
	case 0x683b:
	case 0x683f:
		return 8;
	default:
		return 0;
	}
}

/*
 * Append MC_SEQ_PMG_CMD_EMRS to the special mc registers: for each atb
 * register set, keep the current hw high halfword and take the high
 * halfword of that set's MC_SEQ_MISC_1 value as the low halfword.
 */
static void mc_seq_pmg_cmd_emrs_add(struct ctx *ctx, u8 mc_seq_misc_1_idx)
{
	u8 reg_idx;
	u32 hw_emrs;
	u8 i;

	reg_idx = ctx->special_mc_regs_n;
	ctx->special_mc_regs_addrs[reg_idx] = MC_SEQ_PMG_CMD_EMRS;
	hw_emrs = rr32(ctx->dev, MC_SEQ_PMG_CMD_EMRS);

	for (i = 0; i < ctx->atb_mc_reg_tbl.sets_n; ++i) {
		u32 misc_1;

		misc_1 = ctx->atb_mc_reg_tbl.sets[i].vals[mc_seq_misc_1_idx];
		ctx->special_mc_regs_sets[i][reg_idx] =
					(hw_emrs & 0xffff0000)
					| ((misc_1 & 0xffff0000) >> 16);
	}
	++ctx->special_mc_regs_n;
}

/*
 * Append MC_SEQ_PMG_CMD_MRS_0 to the special mc registers: keep the current
 * hw high halfword, take the low halfword of each set's MC_SEQ_MISC_1, and
 * force bit 8 on non-gddr5 vram.
 */
static void mc_seq_pmg_cmd_mrs_0_add(struct ctx *ctx, u8 mc_seq_misc_1_idx)
{
	u8 reg_idx;
	u32 hw_mrs_0;
	u8 i;

	reg_idx = ctx->special_mc_regs_n;
	ctx->special_mc_regs_addrs[reg_idx] = MC_SEQ_PMG_CMD_MRS_0;
	hw_mrs_0 = rr32(ctx->dev, MC_SEQ_PMG_CMD_MRS_0);

	for (i = 0; i < ctx->atb_mc_reg_tbl.sets_n; ++i) {
		u32 misc_1;
		u32 val;

		misc_1 = ctx->atb_mc_reg_tbl.sets[i].vals[mc_seq_misc_1_idx];
		val = (hw_mrs_0 & 0xffff0000) | (misc_1 & 0x0000ffff);

		if (!(ctx->misc_caps & MISC_CAPS_VRAM_IS_GDDR5))
			val |= 0x100;

		ctx->special_mc_regs_sets[i][reg_idx] = val;
	}
	++ctx->special_mc_regs_n;
}

/*
 * Append MC_SEQ_PMG_AUTO_CMD to the special mc registers: its value is the
 * high halfword of each set's MC_SEQ_MISC_1, shifted down. No hw readback
 * is mixed in for this register.
 */
static void mc_seq_pmg_auto_cmd_add(struct ctx *ctx, u8 mc_seq_misc_1_idx)
{
	u8 reg_idx;
	u8 i;

	reg_idx = ctx->special_mc_regs_n;
	ctx->special_mc_regs_addrs[reg_idx] = MC_SEQ_PMG_AUTO_CMD;

	for (i = 0; i < ctx->atb_mc_reg_tbl.sets_n; ++i) {
		u32 misc_1;

		misc_1 = ctx->atb_mc_reg_tbl.sets[i].vals[mc_seq_misc_1_idx];
		ctx->special_mc_regs_sets[i][reg_idx] =
					(misc_1 & 0xffff0000) >> 16;
	}
	++ctx->special_mc_regs_n;
}

/* MC_SEQ_MISC_1 drives up to three derived special registers. */
static void mc_seq_misc_1_process(struct ctx *ctx, u8 mc_seq_misc_1_idx)
{
	mc_seq_pmg_cmd_emrs_add(ctx, mc_seq_misc_1_idx);
	mc_seq_pmg_cmd_mrs_0_add(ctx, mc_seq_misc_1_idx);
	/* the auto cmd register is only derived for non-gddr5 vram */
	if ((ctx->misc_caps & MISC_CAPS_VRAM_IS_GDDR5) == 0)
		mc_seq_pmg_auto_cmd_add(ctx, mc_seq_misc_1_idx);
}

/*
 * Append MC_SEQ_PMG_CMD_MRS_1 to the special mc registers: keep the current
 * hw high halfword and take the low halfword of each set's MC_SEQ_RESERVE_M
 * value.
 */
static void mc_seq_pmg_cmd_mrs_1_add(struct ctx *ctx, u8 mc_seq_reserve_m_idx)
{
	u8 reg_idx;
	u32 hw_mrs_1;
	u8 i;

	reg_idx = ctx->special_mc_regs_n;
	ctx->special_mc_regs_addrs[reg_idx] = MC_SEQ_PMG_CMD_MRS_1;
	hw_mrs_1 = rr32(ctx->dev, MC_SEQ_PMG_CMD_MRS_1);

	for (i = 0; i < ctx->atb_mc_reg_tbl.sets_n; ++i) {
		u32 reserve_m;

		reserve_m =
			ctx->atb_mc_reg_tbl.sets[i].vals[mc_seq_reserve_m_idx];
		ctx->special_mc_regs_sets[i][reg_idx] =
					(hw_mrs_1 & 0xffff0000)
					| (reserve_m & 0x0000ffff);
	}
	++ctx->special_mc_regs_n;
}

/* MC_SEQ_RESERVE_M drives a single derived special register. */
static void mc_seq_reserve_m_process(struct ctx *ctx, u8 mc_seq_reserve_m_idx)
{
	mc_seq_pmg_cmd_mrs_1_add(ctx, mc_seq_reserve_m_idx);
}

/* if some specific regs are used, then some special regs need to be added */
static void special_mc_regs_init(struct ctx *ctx)
{
	u8 i;

	for (i = 0; i < ctx->atb_mc_reg_tbl.addrs_n; ++i) {
		switch (ctx->atb_mc_reg_tbl.addrs[i]) {
		case MC_SEQ_MISC_1:
			mc_seq_misc_1_process(ctx, i);
			break;
		case MC_SEQ_RESERVE_M:
			mc_seq_reserve_m_process(ctx, i);
			break;
		default:
			break;
		}
	}
}

/*
 * Flag a special mc register as valid when its value differs between two
 * consecutive register sets. With a single set nothing is flagged: the mc
 * regs won't be touched, kind of expected.
 */
static void special_mc_regs_valid_set(struct ctx *ctx)
{
	u8 reg;
	u8 set;

	for (reg = 0; reg < ctx->special_mc_regs_n; ++reg)
		for (set = 1; set < ctx->atb_mc_reg_tbl.sets_n; ++set) {
			u32 prev;
			u32 cur;

			prev = ctx->special_mc_regs_sets[set - 1][reg];
			cur = ctx->special_mc_regs_sets[set][reg];

			if (cur != prev)
				ctx->special_mc_regs_valid |= BIT(reg);
		}
}

/*
 * Flag an atb mc register as valid when its value differs between two
 * consecutive register sets. With a single set nothing is flagged: the mc
 * regs won't be touched, kind of expected.
 */
static void atb_mc_regs_valid_set(struct ctx *ctx)
{
	u8 reg;
	u8 set;

	for (reg = 0; reg < ctx->atb_mc_reg_tbl.addrs_n; ++reg)
		for (set = 1; set < ctx->atb_mc_reg_tbl.sets_n; ++set) {
			u32 prev;
			u32 cur;

			prev = ctx->atb_mc_reg_tbl.sets[set - 1].vals[reg];
			cur = ctx->atb_mc_reg_tbl.sets[set].vals[reg];

			if (cur != prev)
				ctx->atb_mc_regs_valid |= BIT(reg);
		}
}

/*
 * we need to collect a bunch of information to fill properly the smc tables
 * and init the hw
 */
long ctx_init(struct pci_dev *dev, struct ctx *ctx)
{
	struct dev_drv_data *dd;
	long r;

	ctx->dev = dev;

	ctx->cus_n_max = cus_n_max_compute(ctx);

	ctx->default_pcie_lanes_n = bif_pcie_lanes_n_get(ctx->dev);

	r = bif_pcie_root_speeds_get(ctx->dev, &ctx->pcie_root_speeds_mask);
	if (r == -SI_ERR)
		goto err;
	LOG("dyn_pm:pcie root complex link speed mask=0x%02x",
						ctx->pcie_root_speeds_mask);
	
	r = gpu_aux_clk_get(ctx->dev, &ctx->gpu_aux_clk);
	if (r == -SI_ERR) {
		dev_err(&ctx->dev->dev, "dyn_pm:failed to get the gpu auxiliary clock\n");
		goto err;
	}

	ctx_b_sp_init(ctx);

	dd = pci_get_drvdata(dev);

	r = atb_core_ref_clk_get(dd->atb, &ctx->core_ref_clk);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to get the core reference clock\n");
		goto err;
	}

	r = atb_mem_ref_clk_get(dd->atb, &ctx->mem_ref_clk);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to get the core reference clock\n");
		goto err;
	}

	r = atb_pp_platform_caps_get(dd->atb, &ctx->platform_caps);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to get the powerplay platform caps\n");
		goto err;
	}
	LOG("powerplay platform capabilities is 0x%08x", ctx->platform_caps);

	r = volts_ctl_caps_get(ctx);
	if (r == -SI_ERR)
		goto err;

	r = atb_have_eng_clk_ss(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to look for engine spread spectrum support\n");
		goto err;
	} else if (r == ATB_HAVE_ENG_CLK_SS) {
		ctx->misc_caps |= MISC_CAPS_ENG_CLK_SS_ENA;
		LOG("atombios:have engine clock spread spectrum");
	} 

	r = atb_have_mem_clk_ss(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to look for memory spread spectrum support\n");
		goto err;
	} else if (r == ATB_HAVE_MEM_CLK_SS) {
		ctx->misc_caps |= MISC_CAPS_MEM_CLK_SS_ENA;
		LOG("atombios:have memory clock spread spectrum");
	} 

	r = atb_have_thermal_protection(dd->atb);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to look for thermal protection/controller\n");
		goto err;
	} else if (r == ATB_HAVE_THERMAL_PROTECTION) {
		ctx->misc_caps |= MISC_CAPS_THERMAL_PROTECTION_ENA;
		LOG("atombios:have thermal protection");
	}

	if (mc_vram_is_gddr5(dev)) {
		LOG("vram is gddr5");
		ctx->misc_caps |= MISC_CAPS_VRAM_IS_GDDR5;
	}

	if (ctx->volt_caps & VOLT_CAPS_VDDC_CTL_ENA) {
		r = atb_vddc_tbl_get(dd->atb, &ctx->atb_vddc_tbl);
		if (r == -ATB_ERR) {
			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc voltage table\n");
			goto err;
		}
		atb_volt_tbl_dump(&ctx->atb_vddc_tbl, "vddc");
	}

	if (ctx->volt_caps & VOLT_CAPS_MVDD_CTL_ENA) {
		r = atb_mvddc_tbl_get(dd->atb, &ctx->atb_mvddc_tbl);
		if (r == -ATB_ERR) {
			dev_err(&dev->dev, "dyn_pm:unable to fetch the mvddc voltage table\n");
			goto err;
		}
		atb_volt_tbl_dump(&ctx->atb_mvddc_tbl, "mvddc");
	}

	if (ctx->volt_caps & VOLT_CAPS_VDDCI_CTL_ENA) {
		r = atb_vddci_tbl_get(dd->atb, &ctx->atb_vddci_tbl);
		if (r == -ATB_ERR) {
			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddci voltage table\n");
			goto err;
		}
		atb_volt_tbl_dump(&ctx->atb_vddci_tbl, "vddci");
	}

	if (ctx->volt_caps & VOLT_CAPS_VDDC_PHASE_SHED_CTL_ENA) {
		r = atb_vddc_phase_shed_tbl_get(dd->atb, &ctx->atb_vddc_phase_shed_tbl);
		if (r == -ATB_ERR) {
			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding table\n");
			goto err;
		}
		atb_volt_tbl_dump(&ctx->atb_vddc_phase_shed_tbl,
							"vddc_phase_shed");

		r = atb_vddc_phase_shed_limits_tbl_get(dd->atb,
						&ctx->atb_vddc_phase_shed_limits_tbl);
		if (r == -ATB_ERR) {
			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc phase shedding \n");
			goto err;
		}
		atb_vddc_phase_shed_limits_tbl_dump(
						&ctx->atb_vddc_phase_shed_limits_tbl);

	}

	if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLT) {
		r = atb_vddc_dep_on_sclk_tbl_get(dd->atb,
						&ctx->atb_vddc_dep_on_sclk_tbl);
		if (r == -ATB_ERR) {
			dev_err(&dev->dev, "dyn_pm:unable to fetch the vddc on sclk dependency table\n");
			goto err_free_vddc_phase_shed_limits_tbl_entries;
		}
		atb_volt_on_clk_dep_tbl_dump(&ctx->atb_vddc_dep_on_sclk_tbl);
	}

	r = atb_cac_lkge_tbl_get(dd->atb, &ctx->atb_cac_lkge_tbl);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the cac leakage table\n");
		goto err_free_vddc_dep_on_sclk_tbl_entries;
	}
	atb_cac_lkge_tbl_dump(&ctx->atb_cac_lkge_tbl);

	r = atb_pp_emergency_state_get(dd->atb, &ctx->atb_emergency);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the emergency state\n");
		goto err_free_cac_lkge_tbl_entries;
	}
	atb_pp_state_dump(&ctx->atb_emergency, "emergency");

	r = atb_pp_ulv_state_get(dd->atb, &ctx->atb_ulv);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the ulv state\n");
		goto err_free_cac_lkge_tbl_entries;
	}
	atb_pp_state_dump(&ctx->atb_ulv, "ulv");

	r = atb_pp_performance_state_get(dd->atb, &ctx->atb_performance);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the performance state\n");
		goto err_free_cac_lkge_tbl_entries;
	}
	atb_pp_state_dump(&ctx->atb_performance, "performance");

	r = atb_vram_info(dd->atb, &ctx->atb_mc_reg_tbl);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the memory controller register values\n");
		goto err_free_cac_lkge_tbl_entries;
	}
	atb_mc_regs_valid_set(ctx);

	special_mc_regs_init(ctx);
	special_mc_regs_valid_set(ctx);

	r = atb_pp_tdp_limits_get(dd->atb, &ctx->atb_tdp_limit,
						&ctx->atb_near_tdp_limit);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the thermal design power (tdp) limits\n");
		goto err_free_cac_lkge_tbl_entries;
	}

	r = atb_cac_lkge_get(dd->atb, &ctx->atb_cac_lkge);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the cac leakage\n");
		goto err_free_cac_lkge_tbl_entries;
	}

	r = atb_load_line_slope_get(dd->atb, &ctx->atb_load_line_slope);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the load line slope\n");
		goto err_free_cac_lkge_tbl_entries;
	}

	r = atb_pp_back_bias_time_get(dd->atb, &ctx->atb_back_bias_time);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the back bias time\n");
		goto err_free_cac_lkge_tbl_entries;
	}

	r = atb_pp_volt_time_get(dd->atb, &ctx->atb_volt_time);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the voltage time\n");
		goto err_free_cac_lkge_tbl_entries;
	}

	r = atb_pp_sq_ramping_threshold_get(dd->atb,
					&ctx->atb_sq_ramping_threshold);
	if (r == -ATB_ERR) {
		dev_err(&dev->dev, "dyn_pm:unable to fetch the sequencer block ramping threshold\n");
		goto err_free_cac_lkge_tbl_entries;
	}
	return 0;

err_free_cac_lkge_tbl_entries:
	if (ctx->atb_cac_lkge_tbl.entries_n)
		kfree(ctx->atb_cac_lkge_tbl.entries);
err_free_vddc_dep_on_sclk_tbl_entries:
	if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLT)
		if (ctx->atb_cac_lkge_tbl.entries_n)
			kfree(ctx->atb_cac_lkge_tbl.entries);
err_free_vddc_phase_shed_limits_tbl_entries:
	if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
		kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
err:
	return -SI_ERR;
}

/*
 * Release the heap-allocated atombios table entries fetched by ctx_init,
 * then the ctx object itself.
 * NOTE(review): assumes ctx was heap-allocated by the caller and that the
 * entries pointers are NULL/unset whenever entries_n is 0 — confirm the
 * ctx is zero-initialized before ctx_init.
 */
void ctx_free(struct ctx *ctx)
{
	if (ctx->atb_cac_lkge_tbl.entries_n)
		kfree(ctx->atb_cac_lkge_tbl.entries);
	/* this table is only fetched with the new cac voltage platform cap */
	if (ctx->platform_caps & ATB_PP_PLATFORM_CAPS_NEW_CAC_VOLT)
		if (ctx->atb_vddc_dep_on_sclk_tbl.entries_n)
			kfree(ctx->atb_vddc_dep_on_sclk_tbl.entries);
	if (ctx->atb_vddc_phase_shed_limits_tbl.entries_n)
		kfree(ctx->atb_vddc_phase_shed_limits_tbl.entries);
	kfree(ctx);
}
