/*
  author Sylvain Bertrand <sylvain.bertrand@gmail.com>
  Licensed under the GNU GPLv2 (the Linux kernel license)
  Copyright 2012-2014
*/
#include <linux/pci.h>
#include <linux/cdev.h>
#include <asm/unaligned.h>

#include <alga/rng_mng.h>
#include <uapi/alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/amd/atombios/atb.h>
#include <uapi/alga/amd/dce6/dce6.h>
#include <alga/amd/atombios/vm.h>
#include <alga/amd/atombios/cm.h>
#include <alga/amd/atombios/pp.h>
#include <alga/amd/atombios/vram_info.h>

#include "../mc.h"
#include "../rlc.h"
#include "../ih.h"
#include "../fence.h"
#include "../ring.h"
#include "../dmas.h"
#include "../ba.h"
#include "../cps.h"
#include "../gpu.h"
#include "../drv.h"

#include "../bif.h"

#include "../smc_tbls.h"

#include "../regs.h"

#include "ctx.h"
#include "private.h"
#include "smc_volt.h"
#include "smc_mc_reg_tbl.h"

/*
 * Patch the emergency level of @tbl with fallback values when the
 * atombios provides no emergency power-play table.
 *
 * Vddc is forced to the lowest entry of the atombios voltage table, the
 * phase shedding setting is matched to that voltage, and the pcie speed
 * is capped to gen 1. Vddci is deliberately left as copied from the
 * initial state.
 */
static void patch(struct ctx *ctx, struct smc_state_tbl *tbl)
{
	struct smc_lvl *smc_lvl;

	smc_lvl = &tbl->emergency_lvl;

	if (ctx->volt_caps & VOLT_CAPS_VDDC_CTL_ENA) {
		/*
		 * We cheat here: We are supposed to use a kind of lowest
		 * vddc in atb pp state tbls. Here we use the lowest value
		 * in the atb volt tbl, namely with idx 0.
		 */
		smc_lvl->vddc.step_idx = 0;
		put_unaligned_be16(ctx->atb_vddc_tbl.entries[0].val_mv,
							&smc_lvl->vddc.val);

		smc_volt_std_vddc_compute(ctx, &smc_lvl->std_vddc,
								&smc_lvl->vddc);
	}

	if (ctx->volt_caps & VOLT_CAPS_VDDC_PHASE_SHED_CTL_ENA) {
		u8 i;

		/*
		 * The limit tbl ranges from low to high volts: locate the
		 * first limit with enough pwr for the targetted vddc.
		 */
		for (i = 0; i < ctx->atb_vddc_phase_shed_limits_tbl.entries_n;
									++i) {
			struct atb_vddc_phase_shed_limits *limit;

			limit = &ctx->atb_vddc_phase_shed_limits_tbl.entries[i];
			/* don't use the atb_lvl to avoid a lkge index */
			if (limit->vddc_mv
				>= get_unaligned_be16(&smc_lvl->vddc.val))
				break;
		}
		smc_lvl->vddc.phase_settings = i;
	}

	/* do not patch vddci from the initial state */

	smc_lvl->pcie_gen = pcie_speed_cap(ctx, BIF_PCIE_GEN_1);
}

/*
 * Patch the emergency level of @tbl from the first level of the atombios
 * emergency power-play table (ctx->atb_emergency). Each voltage patch is
 * gated both on the matching capability bit in ctx->volt_caps and on the
 * atombios level actually carrying the value.
 *
 * Returns 0 on success, -SI_ERR if the vddc could not be resolved from
 * its atombios id.
 */
static long patch_from_atb(struct ctx *ctx, struct smc_state_tbl *tbl)
{
	struct atb_pp_lvl *atb_lvl;
	struct smc_lvl *smc_lvl;

	atb_lvl = &ctx->atb_emergency.lvls[0];
	smc_lvl = &tbl->emergency_lvl;

	if ((ctx->volt_caps & VOLT_CAPS_VDDC_CTL_ENA) && atb_lvl->vddc_id) {
		long r;

		r = smc_volt_vddc_set_from_atb_id(ctx, &smc_lvl->vddc,
							atb_lvl->vddc_id);
		if (r == -SI_ERR) {
			dev_err(&ctx->dev->dev, "dyn_pm:unable to patch emergency state vddc\n");
			return -SI_ERR;
		}
		smc_volt_std_vddc_compute(ctx, &smc_lvl->std_vddc,
								&smc_lvl->vddc);
	}

	if ((ctx->volt_caps & VOLT_CAPS_VDDC_PHASE_SHED_CTL_ENA)
							&& atb_lvl->vddc_id) {
		u8 i;

		/*
		 * The limit tbl ranges from low to high volts: locate the
		 * first limit with enough pwr for the targetted vddc.
		 */
		for (i = 0; i < ctx->atb_vddc_phase_shed_limits_tbl.entries_n;
									++i) {
			struct atb_vddc_phase_shed_limits *limit;

			limit = &ctx->atb_vddc_phase_shed_limits_tbl.entries[i];
			/*
			 * Compare against the vddc just stored in the smc lvl
			 * (big endian), not the atb_lvl, to avoid a lkge index.
			 */
			if (limit->vddc_mv
				>= get_unaligned_be16(&smc_lvl->vddc.val))
				break;
		}
		/* entries_n if no limit is big enough -- TODO confirm intended */
		smc_lvl->vddc.phase_settings = i;
	}

	/* vddci is patched only when the atombios provides a mv value */
	if ((ctx->volt_caps & VOLT_CAPS_VDDCI_CTL_ENA)
				&& atb_lvl->vddc_id && atb_lvl->vddci_mv)
		 smc_volt_vddci_set_from_atb_mv(ctx, &smc_lvl->vddci,
							atb_lvl->vddci_mv);

	/* cap the atombios-requested pcie speed to what the bus supports */
	smc_lvl->pcie_gen = pcie_speed_cap(ctx, atb_lvl->pcie_gen);
	return 0;
}

/* the emergency state was defined for acpi hardware architecture */
/*
 * Initialize the smc emergency state of @tbl.
 *
 * The emergency state starts as a copy of the initial state, then is
 * patched: voltages and pcie gen come from the atombios emergency tbl
 * when one is present (patch_from_atb), otherwise from conservative
 * defaults (patch). The memory and engine clock register images are
 * then rebuilt from the clock registers saved in the driver data, with
 * both target clocks set to 0.
 *
 * Returns 0 on success, -SI_ERR if the atombios-based patching failed.
 */
long smc_state_tbl_emergency_init(struct ctx *ctx, struct smc_state_tbl *tbl)
{
	long r;
	struct smc_lvl *lvl;
	struct dev_drv_data *dd;
	u32 mem_clk_pm_ctl;
	u32 dll_ctl;
	u32 cg_eng_pll_func_ctl_1;

	LOG("emergency smc table init");

	/*
	 * The emergency state is a patched version of the initial state. If
	 * the atombios has an emergency tbl, do patch further the state
	 * with the data provided.
	 */
	tbl->emergency = tbl->initial;
	tbl->emergency_lvl = tbl->initial_lvl;
	/* NOTE(review): dc flg cleared -- presumably "not on battery"; confirm */
	tbl->emergency.flgs &= ~SMC_SW_STATE_FLGS_DC;
	lvl = &tbl->emergency_lvl;

	if (ctx->atb_emergency.lvls_n) {
		r = patch_from_atb(ctx, tbl);
		if (r == -SI_ERR)
			return -SI_ERR;
	} else {
		patch(ctx, tbl);
	}

	dd = pci_get_drvdata(ctx->dev);

	/* patch some clock registers */
	/*
	 * NOTE(review): mrdck reset bits asserted, pdnb bits cleared --
	 * presumably holds both mem read clocks in reset/powered down for
	 * the emergency state; confirm against register docs.
	 */
	mem_clk_pm_ctl = dd->pp.clks_regs.mem_clk_pm_ctl;
	mem_clk_pm_ctl |= MCPC_MRDCK0_RESET | MCPC_MRDCK1_RESET;
	mem_clk_pm_ctl &= ~(MCPC_MRDCK0_PDNB | MCPC_MRDCK1_PDNB);

	/* disable the mrdck dll bypasses */
	dll_ctl = dd->pp.clks_regs.dll_ctl;
	dll_ctl &= ~(DC_MRDCK0_BYPASS | DC_MRDCK1_BYPASS);

	/* force the eng pll mux sel fld to 4 -- meaning not visible here */
	cg_eng_pll_func_ctl_1 = dd->pp.clks_regs.cg_eng_pll_func_ctl_1;
	cg_eng_pll_func_ctl_1 &= ~CEPFC1_MUX_SEL;
	cg_eng_pll_func_ctl_1 |= set(CEPFC1_MUX_SEL, 4);

	/*--------------------------------------------------------------------*/
	/* memory clock registers and value (smc tbl flds are big endian) */
	put_unaligned_be32(dll_ctl, &lvl->mem_clk.dll_ctl);
	put_unaligned_be32(mem_clk_pm_ctl, &lvl->mem_clk.mem_clk_pm_ctl);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_ad_func_ctl,
					&lvl->mem_clk.mem_pll_ad_func_ctl);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_dq_func_ctl,
					&lvl->mem_clk.mem_pll_dq_func_ctl);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_func_ctl_0,
					&lvl->mem_clk.mem_pll_func_ctl_0);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_func_ctl_1,
					&lvl->mem_clk.mem_pll_func_ctl_1);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_func_ctl_2,
					&lvl->mem_clk.mem_pll_func_ctl_2);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_ss_0,
						&lvl->mem_clk.mem_pll_ss_0);
	put_unaligned_be32(dd->pp.clks_regs.mem_pll_ss_1,
						&lvl->mem_clk.mem_pll_ss_1);

	/* no target mem clk in the emergency state */
	put_unaligned_be32(0, &lvl->mem_clk.clk);
	/*--------------------------------------------------------------------*/

	/*--------------------------------------------------------------------*/
	/* engine clock registers and value */
	put_unaligned_be32(dd->pp.clks_regs.cg_eng_pll_func_ctl_0,
					&lvl->eng_clk.cg_eng_pll_func_ctl_0);
	put_unaligned_be32(cg_eng_pll_func_ctl_1,
					&lvl->eng_clk.cg_eng_pll_func_ctl_1);
	put_unaligned_be32(dd->pp.clks_regs.cg_eng_pll_func_ctl_2,
					&lvl->eng_clk.cg_eng_pll_func_ctl_2);
	put_unaligned_be32(dd->pp.clks_regs.cg_eng_pll_func_ctl_3,
					&lvl->eng_clk.cg_eng_pll_func_ctl_3);
	put_unaligned_be32(dd->pp.clks_regs.cg_eng_pll_ss_0,
						&lvl->eng_clk.cg_eng_pll_ss_0);
	put_unaligned_be32(dd->pp.clks_regs.cg_eng_pll_ss_1,
						&lvl->eng_clk.cg_eng_pll_ss_1);

	/* no target eng clk in the emergency state */
	put_unaligned_be32(0, &lvl->eng_clk.clk);
	/*--------------------------------------------------------------------*/

	/* mvdd for a 0 mem clk */
	if (ctx->volt_caps & VOLT_CAPS_MVDD_CTL_ENA)
		smc_volt_mvdd_set_from_atb_mem_clk(ctx, &lvl->mvdd, 0);

	/* no dynamic pm tuning for the emergency lvl */
	lvl->dpm_to_perf_lvl.max_pulse_skip = 0;
	lvl->dpm_to_perf_lvl.near_tdp_dec = 0;
	lvl->dpm_to_perf_lvl.above_safe_inc = 0;
	lvl->dpm_to_perf_lvl.below_safe_inc = 0;
	lvl->dpm_to_perf_lvl.pwr_efficiency_ratio = 0;

	/* set everything to the max value, namely all bits to 1 */
	put_unaligned_be32(SPT0_PWR_MIN | SPT0_PWR_MAX,
						&lvl->sq_pwr_throttle_0);

	put_unaligned_be32(SPT1_PWR_DELTA_MAX | SPT1_STI_SZ | SPT1_LTI_RATIO,
						&lvl->sq_pwr_throttle_1);
	return 0;
}
