/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <asm/unaligned.h>
#include <linux/mutex.h>
#include <linux/delay.h>

#include <alga/timing.h>
#include <alga/dp.h>
#include <alga/amd/atombios/atb.h>
#include <alga/amd/atombios/dce.h>

#include "tables/atb.h"
#include "tables/cmd.h"
#include "tables/data.h"
#include "tables/firmware_info.h"
#include "tables/obj_hdr.h"
#include "tables/trans_ctl.h"
#include "tables/dp_aux.h"
#include "tables/pixel_clock_set.h"
#include "tables/enc_ctl.h"
#include "tables/crtc_blank.h"
#include "tables/crtc_mem_req.h"
#include "tables/crtc.h"
#include "tables/enc_crtc_src.h"
#include "tables/crtc_db_regs.h"
#include "tables/crtc_timing.h"
#include "tables/crtc_overscan.h"
#include "tables/crtc_scaler.h"

#include "atb.h"
#include "regs.h"
#include "scratch_pads.h"
#include "interpreter.h"

static const size_t send_data_payload_sz_max = 16;/* dp standard */
static const unsigned retries_n = 4;

static int dp_aux(struct atombios *atb, u8 aux_i2c_id, u8 hpd, u8 *send_buf,
		size_t send_buf_sz, u8 *recv_buf, size_t recv_buf_sz, u8 *ack)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *dp_aux;
	struct dp_aux_params *ps;
	int r;
	size_t min_recv_sz;
	const size_t send_data_payload_sz_max = 16;/* dp standard */
	unsigned scratch_reply_data_of;

	/* shift of 4 bytes/1 dword */
	if ((sizeof(u32) + send_buf_sz) > atb->scratch_sz) {
		dev_err(atb->adev.dev, "atombios: dp send buffer too big "
			"(%zu bytes for %zu bytes)", send_buf_sz,
							atb->scratch_sz);
		return -ENOMEM;
	}

	/* shift of 4 bytes/1 dword */
	memcpy(atb->scratch + 1, send_buf, send_buf_sz);

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.dp_aux);

	dp_aux = atb->adev.rom + of;

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate "
			"parameter space (stack) for dp_aux method call\n");
		return -ENOMEM;
	}

	/* shift of 4 bytes/1 dword */
	scratch_reply_data_of = sizeof(u32) + send_data_payload_sz_max;

	ps = (struct dp_aux_params *)atb->g_ctx.ps_top;	
	/* req=0 is specific to asic_init */
	put_unaligned_le16(0 + sizeof(u32), &ps->req); 
	put_unaligned_le16(scratch_reply_data_of, &ps->reply_data_of);
	ps->reply_data_sz = 0;
	ps->i2c_id = aux_i2c_id;
	ps->delay = 0;
	ps->hpd = hpd; /* support no HPD with invalid value 0xff */

	/* reset some global runtime workspace data */
	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;
	
	r = interpret(atb, of, 0, 0);
	if (r != 0)
		goto ps_free;

	*ack = ps->reply_status;

	if (ps->reply_status == 1) {/* timeout */
		r = -ETIMEDOUT;
		dev_err(atb->adev.dev, "atombios: dp_aux timeout\n");
		goto ps_free;
	}
	if (ps->reply_status == 2) {/* flags not zero */
		r = -EBUSY;
		dev_err(atb->adev.dev, "atombios: dp_aux flags not zero\n");
		goto ps_free;
	}
	if (ps->reply_status == 3) {/* error */
		r = -EIO;
		dev_err(atb->adev.dev, "atombios: dp_aux error\n");
		goto ps_free;
	}

	/* copy as much of the recv data in the recv buf */
	if (recv_buf_sz < ps->reply_data_sz)
		min_recv_sz = recv_buf_sz;
	else
		min_recv_sz = ps->reply_data_sz;

	if ((recv_buf != NULL) && (min_recv_sz != 0))
		memcpy(recv_buf, (void *)atb->scratch
				+ scratch_reply_data_of, min_recv_sz);
	r = min_recv_sz;
ps_free:
	kfree(atb->g_ctx.ps_top);
	return r;
}

int atb_dp_aux_native_write(struct atombios *atb, u8 aux_i2c_id, u8 hpd,
				u32 addr, u8 *send_buf, size_t send_buf_sz)
{
	__le32 *msg_hdr; /* only "related" to the dp standard */
	u8 msg[sizeof(*msg_hdr) + send_data_payload_sz_max]; /* 20 bytes */
	size_t msg_sz;
	u8 ack;
	unsigned retry;
	int r;

	if (send_buf_sz > send_data_payload_sz_max) 
		return -EINVAL;

	msg_sz = sizeof(*msg_hdr) + send_buf_sz;

	msg_hdr = (__le32 *)msg;
	*msg_hdr = cpu_to_le32(addr); /* dpcd address is 20 bits */
	*msg_hdr |= (DP_AUX_NATIVE_WRITE << 20);
	*msg_hdr |= (send_buf_sz - 1) << 24;
	*msg_hdr |= msg_sz << 28;

	memcpy(msg + sizeof(*msg_hdr), send_buf, send_buf_sz);

	for (retry = 0; retry < retries_n; ++retry) {
		ack = 0;
		r = dp_aux(atb, aux_i2c_id, hpd, msg, msg_sz, NULL, 0,  &ack);

		if (r == -EBUSY || r == -ETIMEDOUT) {
			continue;
		} else if (r < 0) {
			return r;
		}

		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
			return send_buf_sz;
		} else if ((ack & DP_AUX_NATIVE_REPLY_MASK)
						== DP_AUX_NATIVE_REPLY_DEFER) {
			udelay(400); /* dp standard, 400 microsecs timeout */
		} else /* NACK */ {
			return -EIO;
		}
	}
	return -EIO;
}
EXPORT_SYMBOL_GPL(atb_dp_aux_native_write);

int atb_dp_aux_native_read(struct atombios *atb, u8 aux_i2c_id, u8 hpd,
				u32 addr, u8 *recv_buf, size_t recv_buf_sz)
{
	__le32 msg_hdr; /* only "related" to the dp standard */
	size_t msg_sz;
	u8 ack;
	int r;
	unsigned retry;

	msg_sz = sizeof(msg_hdr);

	msg_hdr = cpu_to_le32(addr); /* dpcd address is 20 bits */
	msg_hdr |= (DP_AUX_NATIVE_READ << 20);
	msg_hdr |= (recv_buf_sz - 1) << 24;
	msg_hdr |= msg_sz << 28;

	for (retry = 0; retry < retries_n; ++retry) {
		ack = 0;
		r = dp_aux(atb, aux_i2c_id, hpd, (u8 *)&msg_hdr, msg_sz,
						recv_buf, recv_buf_sz, &ack);
		if (r == -EBUSY || r == -ETIMEDOUT)
			continue;
		else if (r < 0)
			return r;

		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
			return r;
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK)
						== DP_AUX_NATIVE_REPLY_DEFER)
			udelay(400); /* dp standard, 400 microsecs timeout */
		else if (r == 0) /* NACK */
			return -EINVAL; /* dp standard says wrong address */
		else
			return -EIO;
	}
	return -EIO;
}
EXPORT_SYMBOL_GPL(atb_dp_aux_native_read);

int atb_dp_aux_i2c(struct atombios *atb, u8 aux_i2c_id, u8 hpd, u16 addr,
				unsigned mode, u8 byte_send, u8 *byte_recv)
{
	__le32 *msg_hdr; /* only "related" to the dp standard */
	u8 msg[sizeof(*msg_hdr) + 1]; /* 5 bytes */
	unsigned msg_sz;
	int r;
	u8 ack;

	memset(msg, 0, sizeof(msg));
	msg_hdr = (__le32 *)msg;
	*msg_hdr = cpu_to_le32(addr);	/* i2c address is up to 10 bits */
	if (mode & ATB_MODE_I2C_READ)
		*msg_hdr |= (DP_AUX_I2C_READ << 20);
	else
		*msg_hdr |= (DP_AUX_I2C_WRITE << 20);

	if (!(mode & ATB_MODE_I2C_STOP))
		*msg_hdr |= (DP_AUX_I2C_MOT << 20);

	switch (mode) {
	case ATB_MODE_I2C_WRITE:
		msg_sz = sizeof(*msg_hdr) + 1;
		*msg_hdr |= (1-1) << 24;
		*msg_hdr |= msg_sz << 28;
		msg[sizeof(*msg_hdr)] = byte_send;
		break;
	case ATB_MODE_I2C_READ:
		msg_sz = sizeof(*msg_hdr);
		*msg_hdr |= msg_sz << 28;
		break;
	default:/* ATB_MODE_I2C_START/STOP */
		msg_sz = sizeof(*msg_hdr);
		/*
		 * this is a "too small" size to notify an i2c address only
		 * transaction?
		 */
		*msg_hdr |= (sizeof(*msg_hdr) - 1) << 28;
		break;
	}

	r = dp_aux(atb, aux_i2c_id, hpd, msg, msg_sz, byte_recv, 1, &ack);
	if (r < 0) {
		if (r == -EBUSY || r == -ETIMEDOUT) {
			udelay(400);	/* dp standard transaction timeout */
			return -EAGAIN;	/* let the i2c core retry */
		} else {
			return r;
		}
	}

	switch (ack & DP_AUX_NATIVE_REPLY_MASK) {
	case DP_AUX_NATIVE_REPLY_ACK:
		 /* i2c Reply field is only valid with a native aux ack */
		break;
	case DP_AUX_NATIVE_REPLY_NACK:
		dev_err(atb->adev.dev, "atombios: dp sink unable to fetch "
							"all i2c data\n");
		return -EREMOTEIO;
	case DP_AUX_NATIVE_REPLY_DEFER:
		udelay(400);	/* dp standard transaction timeout */
		return -EAGAIN;	/* let the i2c core retry */
	default:
		dev_err(atb->adev.dev, "atombios: dp sink invalid native ack"
								" value\n");
		return -EREMOTEIO;
	}

	switch (ack & DP_AUX_I2C_REPLY_MASK) {
	case DP_AUX_I2C_REPLY_ACK:
		return r;
	case DP_AUX_I2C_REPLY_NACK:
		dev_err(atb->adev.dev, "atombios: dp sink i2c nack\n");
		return -EREMOTEIO;
	case DP_AUX_I2C_REPLY_DEFER:
		udelay(400);	/* dp standard transaction timeout */
		return -EAGAIN;	/* let the i2c core retry */
	default:
		dev_err(atb->adev.dev, "atombios: dp sink invalid i2c ack "	
								"value\n");
		return -EREMOTEIO;
	}
}
EXPORT_SYMBOL_GPL(atb_dp_aux_i2c);

/* XXX: need to fine grainely set the atombios states, not s0/3/6 in one shot */
void atb_dp_state(struct atombios *atb, unsigned dfp, bool connected)
{
	u32 s0, s3, s6;

	mutex_lock(&atb->mutex);

	s0 = atb->adev.rr32(atb->adev.dev, S0);
	s3 = atb->adev.rr32(atb->adev.dev, S3);
	s6 = atb->adev.rr32(atb->adev.dev, S6);

	if (connected) {
		s0 |= vals_s0_dfp[dfp];
		s3 |= vals_s3_dfp[dfp];
		s6 |= vals_s6_dfp[dfp];
	} else {
		s0 &= ~vals_s0_dfp[dfp];
		s3 &= ~vals_s3_dfp[dfp];
		s6 &= ~vals_s6_dfp[dfp];
	}

	atb->adev.wr32(atb->adev.dev, s0, S0);
	atb->adev.wr32(atb->adev.dev, s3, S3);
	atb->adev.wr32(atb->adev.dev, s6, S6);

	mutex_unlock(&atb->mutex);
}
EXPORT_SYMBOL_GPL(atb_dp_state);

static int default_disp_clk_freq(struct atombios *atb, u32 *clk)
{
	u16 of;
	struct master_data_tbl *data_tbl;
	struct firmware_info_v2_1 *info;

	of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
	data_tbl = atb->adev.rom + of;

	of = get_unaligned_le16(&data_tbl->list.firmware_info);
	info = atb->adev.rom + of;

	if (info->subset.hdr.tbl_fmt_rev != 2
				&& info->subset.hdr.tbl_content_rev != 1) {
		dev_err(atb->adev.dev, "atombios: firmware_info (0x%04x) "
				"revision %u.%u not supported\n", of,
						info->subset.hdr.tbl_fmt_rev,
					info->subset.hdr.tbl_content_rev);
		return -ATB_ERR;
	}
	*clk = get_unaligned_le32(&info->default_disp_clk_freq);
	if (*clk == 0)
		*clk = 60000; /* 600 Mhz for DCE4 */
	return 0;
}

static int dp_mode_ext_clk_freq(struct atombios *atb, u16 *clk)
{
	u16 of;
	struct master_data_tbl *data_tbl;
	struct firmware_info_v2_1 *info;

	of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
	data_tbl = atb->adev.rom + of;

	of = get_unaligned_le16(&data_tbl->list.firmware_info);
	info = atb->adev.rom + of;

	if (info->subset.hdr.tbl_fmt_rev != 2
				&& info->subset.hdr.tbl_content_rev != 1) {
		dev_err(atb->adev.dev, "atombios: firmware_info (0x%04x) "
				"revision %u.%u not supported\n", of,
						info->subset.hdr.tbl_fmt_rev,
					info->subset.hdr.tbl_content_rev);
		return -ATB_ERR;
	}
	*clk = get_unaligned_le16(&info->uniphy_dp_mode_ext_clk_freq);
	return 0;
}

int atb_crtc_blank(struct atombios *atb, unsigned i, bool on)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc_blank;
	struct crtc_blank_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc_blank);

	crtc_blank = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc_blank (0x%04x) revision "
				"%u.%u\n", of, crtc_blank->hdr.tbl_fmt_rev,
					crtc_blank->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_blank_params *)atb->g_ctx.ps_top;
	ps->crtc = i;

	if (on)
		ps->state = CRTC_BLANK_ON;
	else
		ps->state = CRTC_BLANK_OFF;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_blank);

int atb_crtc_mem_req(struct atombios *atb, unsigned i, bool on)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc_mem_req;
	struct crtc_mem_req_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc_mem_req);

	crtc_mem_req = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc_mem_req (0x%04x) revision "	
			"%u.%u\n", of, crtc_mem_req->hdr.tbl_fmt_rev,
					crtc_mem_req->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_mem_req_params *)atb->g_ctx.ps_top;
	ps->crtc = i;

	if (on)
		ps->state = CRTC_MEM_REQ_ON;
	else
		ps->state = CRTC_MEM_REQ_OFF;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_mem_req);

int atb_crtc(struct atombios *atb, unsigned i, bool on)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc;
	struct crtc_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc);

	crtc = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc (0x%04x) revision %u.%u\n",
				of, crtc->hdr.tbl_fmt_rev,
					crtc->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							" space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_params *)atb->g_ctx.ps_top;
	ps->crtc = i;

	if (on)
		ps->state = CRTC_ON;
	else
		ps->state = CRTC_OFF;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc);

int atb_crtc_dcpll(struct atombios *atb)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *pixel_clk_set;
	struct pixel_clk_set_params_v1_5 *ps;
	int r;
	u32 clk_freq;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.pixel_clk_set);

	pixel_clk_set = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: pixel_clk_set (0x%04x) revision "
			"%u.%u\n", of, pixel_clk_set->hdr.tbl_fmt_rev,
					pixel_clk_set->hdr.tbl_content_rev);
	if (pixel_clk_set->hdr.tbl_fmt_rev != 1
		|| pixel_clk_set->hdr.tbl_content_rev != 5) {
		dev_err(atb->adev.dev, "atombios: pixel_clk_set revision not "
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct pixel_clk_set_params_v1_5 *)atb->g_ctx.ps_top;

	r = default_disp_clk_freq(atb, &clk_freq);
	if (r != 0) {
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	/*
	 * if the default dcpll clock is specified, pixel_clk_set provides the
	 * dividers
	 */
	ps->crtc = CRTC_INVALID;
	put_unaligned_le16((u16)clk_freq, &ps->pixel_clk);
	ps->ppll = DCPLL;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_dcpll);

int atb_crtc_virtual_pixel_clk(struct atombios *atb, unsigned i, unsigned clk)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *pixel_clk_set;
	struct pixel_clk_set_params_v1_5 *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.pixel_clk_set);

	pixel_clk_set = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: pixel_clk_set (0x%04x) revision "
			"%u.%u\n", of, pixel_clk_set->hdr.tbl_fmt_rev,
					pixel_clk_set->hdr.tbl_content_rev);
	if (pixel_clk_set->hdr.tbl_fmt_rev != 1
		|| pixel_clk_set->hdr.tbl_content_rev != 5) {
		dev_err(atb->adev.dev, "atombios: pixel_clk_set revision not "	
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct pixel_clk_set_params_v1_5 *)atb->g_ctx.ps_top;
	ps->crtc = i;
	ps->ppll = 0xff; /* invalid pll id for dp virtual pixel clk */
	put_unaligned_le16((u16)(clk / 10), &ps->pixel_clk);

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_virtual_pixel_clk);

static int trans_link(struct atombios *atb, struct trans_ctl_params *ps)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *trans_ctl;
	int r;

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.trans_ctl);

	trans_ctl = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: trans_ctl (0x%04x) revision "
				"%u.%u\n", of, trans_ctl->hdr.tbl_fmt_rev,
					trans_ctl->hdr.tbl_content_rev);
	if (trans_ctl->hdr.tbl_fmt_rev != 1
		|| trans_ctl->hdr.tbl_content_rev != 3) {
		dev_err(atb->adev.dev, "atombios: trans_ctl revision not "
								"supported");
		return -ATB_ERR;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		return -ATB_ERR;
	}
	memcpy(atb->g_ctx.ps_top, ps, sizeof(*ps));

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);
	return r;
}

int atb_enc_video(struct atombios *atb, unsigned i, bool on)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *enc_ctl;
	struct enc_ctl_params *ps;
	int r;

	mutex_lock(&atb->mutex);

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.enc_ctl);

	enc_ctl = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: enc_ctl (0x%04x) revision %u.%u\n",
				of, enc_ctl->hdr.tbl_fmt_rev,
					enc_ctl->hdr.tbl_content_rev);

	if (enc_ctl->hdr.tbl_fmt_rev != 1
				|| (enc_ctl->hdr.tbl_content_rev != 2
				&& enc_ctl->hdr.tbl_content_rev != 3)) {
		dev_err(atb->adev.dev, "atombios: enc_ctl revision not "
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		return -ATB_ERR;
	}
	ps = (struct enc_ctl_params *)atb->g_ctx.ps_top;
	if (on)
		ps->action = ENC_ACTION_DP_VIDEO_ON;
	else
		ps->action = ENC_ACTION_DP_VIDEO_OFF;
	ps->cfg = (i << ENC_CFG_SEL_SHIFT);

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_enc_video);

int atb_enc_dp_training_start(struct atombios *atb, unsigned i)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *enc_ctl;
	struct enc_ctl_params *ps;
	int r;

	mutex_lock(&atb->mutex);

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.enc_ctl);

	enc_ctl = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: enc_ctl (0x%04x) revision %u.%u\n",
				of, enc_ctl->hdr.tbl_fmt_rev,
					enc_ctl->hdr.tbl_content_rev);

	if (enc_ctl->hdr.tbl_fmt_rev != 1
				|| (enc_ctl->hdr.tbl_content_rev != 2
				&& enc_ctl->hdr.tbl_content_rev != 3)) {
		dev_err(atb->adev.dev, "atombios: enc_ctl revision not "
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		return -ATB_ERR;
	}
	ps = (struct enc_ctl_params *)atb->g_ctx.ps_top;
	ps->action = ENC_ACTION_DP_LINK_TRAINING_START;
	ps->cfg = (i << ENC_CFG_SEL_SHIFT);

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_enc_dp_training_start);

int atb_enc_dp_training_complete(struct atombios *atb, unsigned i)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *enc_ctl;
	struct enc_ctl_params *ps;
	int r;

	mutex_lock(&atb->mutex);

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.enc_ctl);

	enc_ctl = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: enc_ctl (0x%04x) revision %u.%u\n",
		of, enc_ctl->hdr.tbl_fmt_rev, enc_ctl->hdr.tbl_content_rev);

	if (enc_ctl->hdr.tbl_fmt_rev != 1
				|| (enc_ctl->hdr.tbl_content_rev != 2
				&& enc_ctl->hdr.tbl_content_rev != 3)) {
		dev_err(atb->adev.dev, "atombios: enc_ctl revision not "
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		return -ATB_ERR;
	}
	ps = (struct enc_ctl_params *)atb->g_ctx.ps_top;
	ps->action = ENC_ACTION_DP_LINK_TRAINING_COMPLETE;
	ps->cfg = (i << ENC_CFG_SEL_SHIFT);

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_enc_dp_training_complete);

int atb_enc_dp_tp(struct atombios *atb, unsigned i, unsigned link_rate,
						unsigned lanes_n, unsigned tp)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *enc_ctl;
	struct enc_ctl_params *ps;
	int r;

	mutex_lock(&atb->mutex);

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.enc_ctl);

	enc_ctl = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: enc_ctl (0x%04x) revision %u.%u\n",
		of, enc_ctl->hdr.tbl_fmt_rev, enc_ctl->hdr.tbl_content_rev);

	if (enc_ctl->hdr.tbl_fmt_rev != 1
				|| (enc_ctl->hdr.tbl_content_rev != 2
				&& enc_ctl->hdr.tbl_content_rev != 3)) {
		dev_err(atb->adev.dev, "atombios: enc_ctl revision not "
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) { 
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		return -ATB_ERR;
	}
	ps = (struct enc_ctl_params *)atb->g_ctx.ps_top;

	switch (tp) {
	case 1:
		ps->action = ENC_ACTION_DP_LINK_TRAINING_PATTERN1;
		break;
	case 2:
		ps->action = ENC_ACTION_DP_LINK_TRAINING_PATTERN2;
		break;
	case 3:
		ps->action = ENC_ACTION_DP_LINK_TRAINING_PATTERN3;
		break;
	default:
		dev_err(atb->adev.dev, "atombios: unknown dp training pattern "
								"%u\n", tp);
		r = -ATB_ERR;
		goto free_ps;
	}
	ps->cfg = (i << ENC_CFG_SEL_SHIFT);
	ps->lanes_n = lanes_n;
	switch (link_rate) {/* multiple of 270MHz */
	case 6:
		ps->cfg |= ENC_CFG_LINK_RATE_1_62_GHZ;
		break;
	case 10:
		ps->cfg |= ENC_CFG_LINK_RATE_2_7_GHZ;
		break;
	case 20:
		ps->cfg |= ENC_CFG_LINK_CATE_5_4_GHZ;
		break;
	}

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);

free_ps:
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_enc_dp_tp);

int atb_enc_setup(struct atombios *atb, unsigned i, unsigned dp_lanes_n,
		unsigned dp_link_rate, unsigned bpc, unsigned pixel_clk)
				
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *enc_ctl;
	struct enc_ctl_params *ps;
	int r;

	mutex_lock(&atb->mutex);

	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.enc_ctl);

	enc_ctl = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: enc_video (0x%04x) revision %u.%u\n",
		of, enc_ctl->hdr.tbl_fmt_rev, enc_ctl->hdr.tbl_content_rev);

	if (enc_ctl->hdr.tbl_fmt_rev != 1
				|| (enc_ctl->hdr.tbl_content_rev != 2
				&& enc_ctl->hdr.tbl_content_rev != 3)) {
		dev_err(atb->adev.dev, "atombios: enc_video revision not "
								"supported");
		r = -ATB_ERR;
		goto unlock_mutex;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "	
							" space (stack)\n");
		return -ATB_ERR;
	}
	ps = (struct enc_ctl_params *)atb->g_ctx.ps_top;

	ps->cfg = (i << ENC_CFG_SEL_SHIFT);
	ps->action = ENC_ACTION_SETUP;
	put_unaligned_le16((u16)(pixel_clk / 10), &ps->pixel_clk);
	switch (dp_link_rate) {/* multiple of 270MHz */
	case 6:
		ps->cfg |= ENC_CFG_LINK_RATE_1_62_GHZ;
		break;
	case 10:
		ps->cfg |= ENC_CFG_LINK_RATE_2_7_GHZ;
		break;
	case 20:
		ps->cfg |= ENC_CFG_LINK_CATE_5_4_GHZ;
		break;
	}
	ps->lanes_n = dp_lanes_n;
	switch (bpc) {
	case 0:
		ps->bpc = ENC_BPC_UNDEFINE;
		break;
	case 6:
		ps->bpc = ENC_6BITS_PER_COLOR;
		break;
	case 8:
		ps->bpc = ENC_8BITS_PER_COLOR;
		break;
	case 10:
		ps->bpc = ENC_10BITS_PER_COLOR;
		break;
	case 12:
		ps->bpc = ENC_12BITS_PER_COLOR;
		break;
	case 16:
		ps->bpc = ENC_16BITS_PER_COLOR;
		break;
	};

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_enc_setup);

int atb_trans_link_output_off(struct atombios *atb, unsigned i)
{	
	struct trans_ctl_params ps;
	int r;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	ps.action = TRANS_ACTION_DISABLE_OUTPUT;
	ps.cfg |= (to_link(i) << TRANS_CFG_LINK_SEL_BIT);
	ps.cfg |= (to_trans(i) << TRANS_CFG_SEL_SHIFT);

	r = trans_link(atb, &ps);

	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_output_off);

int atb_trans_link_pwr(struct atombios *atb, unsigned i, bool on)
{
	struct trans_ctl_params ps;
	int r;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	if (on)
		ps.action = TRANS_ACTION_PWR_ON;
	else
		ps.action = TRANS_ACTION_PWR_OFF;
	ps.cfg |= (to_link(i) << TRANS_CFG_LINK_SEL_BIT);
	ps.cfg |= (to_trans(i) << TRANS_CFG_SEL_SHIFT);

	r = trans_link(atb, &ps);

	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_pwr);

int atb_trans_link_off(struct atombios *atb, unsigned i)
{
	struct trans_ctl_params ps;
	int r;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	ps.action = TRANS_ACTION_DISABLE;
	ps.cfg |= (to_link(i) << TRANS_CFG_LINK_SEL_BIT);
	ps.cfg |= (to_trans(i) << TRANS_CFG_SEL_SHIFT);

	r = trans_link(atb, &ps);

	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_off);

static int trans_link_clk_ref(struct atombios *atb, struct trans_ctl_params *ps)
{
	int r;
	u16 dp_ext_clk;

	r = dp_mode_ext_clk_freq(atb, &dp_ext_clk);
	if (r != 0)
		return r;

	/*
 	 * if no external reference dp clock
 	 * we will use dcpll on dce5 cause 540 MHz units for dp 5.4 GHz
 	 * dp link rate
 	 */ 
	if (dp_ext_clk > 0) { 
		ps->cfg |= (2 << TRANS_CFG_CLK_SRC_SHIFT);
	} else {
		dev_err(atb->adev.dev, "atombios: at this time, this driver "
				"supports only external reference dp clock\n");
		return r;
	}
	return 0;
}

static void trans_link_path(u8 *cfg, unsigned i)
{
	*cfg |= (to_link(i) << TRANS_CFG_LINK_SEL_BIT);
	*cfg |= (to_trans(i) << TRANS_CFG_SEL_SHIFT);
	*cfg |= ((i & 1) << TRANS_CFG_ENC_SEL_BIT);
}

/*
 * DCE5 will make the difference between ACTION_SETUP and ACTION_ENA:
 * this is probably to avoid providing all parameters each time we turn on and
 * off the transmitter.
 */
int atb_trans_link_on(struct atombios *atb, unsigned i, unsigned dp_lanes_n,
							unsigned pixel_clk)
{
	struct trans_ctl_params ps;
	int r;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	ps.action = TRANS_ACTION_ENA;
	put_unaligned_le16((u16)(pixel_clk / 10), &ps.pixel_clk);
	ps.cfg = BIT(TRANS_CFG_COHERENT_MODE_BIT);
	trans_link_path(&ps.cfg, i);
	ps.lanes_n = dp_lanes_n;

	r = trans_link_clk_ref(atb, &ps);
	if (r != 0)
		goto unlock_mutex;

	r = trans_link(atb, &ps);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_on);

int atb_trans_link_output_on(struct atombios *atb, unsigned i,
					unsigned dp_lanes_n, unsigned pixel_clk)
{
	struct trans_ctl_params ps;
	int r;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	ps.action = TRANS_ACTION_ENA_OUTPUT;
	trans_link_path(&ps.cfg, i);

	r = trans_link(atb, &ps);

	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_output_on);
	
int atb_trans_link_vs_pre_emph(struct atombios *atb, unsigned i, u8 vs_pre_emph)
{
	struct trans_ctl_params ps;
	int r;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	ps.action = TRANS_ACTION_SETUP_VSEMPH;
	/*
	 * probably a mask, but does not matter: the hardware only support
	 * one set of voltage swing and pre-emphasis for all lanes
	 */
	ps.mode.lane_sel = 0; 
	ps.mode.lane_set = vs_pre_emph;
	ps.cfg |= (to_link(i) << TRANS_CFG_LINK_SEL_BIT);
	ps.cfg |= (to_trans(i) << TRANS_CFG_SEL_SHIFT);

	r = trans_link(atb, &ps);

	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_vs_pre_emph);

int atb_trans_link_init(struct atombios *atb, unsigned i, bool edp)
{
	struct trans_ctl_params ps;
	int r;
	u16 conn_sub_id;

	mutex_lock(&atb->mutex);

	memset(&ps, 0, sizeof(ps));
	ps.action = TRANS_ACTION_INIT;
	if (edp)
		conn_sub_id = CONN_SUB_ID_EDP;
	else
		conn_sub_id = CONN_SUB_ID_DP;
	put_unaligned_le16(conn_sub_id, &ps.init_info);
	ps.cfg = BIT(TRANS_CFG_COHERENT_MODE_BIT);
	trans_link_path(&ps.cfg, i);

	r = trans_link(atb, &ps);

	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_trans_link_init);

void atb_lock(struct atombios *atb, bool lock)
{
	u32 s6;

	mutex_lock(&atb->mutex);

	s6 = atb->adev.rr32(atb->adev.dev, S6);

	if (lock) {
		s6 |= S6_CRITICAL_STATE;
		s6 &= ~S6_ACC_MODE;
	} else {
		s6 &= ~S6_CRITICAL_STATE;
		s6 |= S6_ACC_MODE;
	}

	atb->adev.wr32(atb->adev.dev, s6, S6);

	mutex_unlock(&atb->mutex);
}
EXPORT_SYMBOL_GPL(atb_lock);

int atb_enc_crtc_src(struct atombios *atb, unsigned i)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *enc_crtc_src;
	struct enc_crtc_src_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.enc_crtc_src);

	enc_crtc_src = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: enc_crtc_src (0x%04x) revision "
			"%u.%u\n", of, enc_crtc_src->hdr.tbl_fmt_rev,
					enc_crtc_src->hdr.tbl_content_rev);

	if (enc_crtc_src->hdr.tbl_fmt_rev != 1
		|| enc_crtc_src->hdr.tbl_content_rev != 2) {
		dev_err(atb->adev.dev, "atombios: enc_crtc_src revision not "
								"supported");
		return -ATB_ERR;
	}

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct enc_crtc_src_params *)atb->g_ctx.ps_top;
	ps->crtc = i;

	switch (i) {
	case 0:
		ps->enc_id = ENC_0_ID;
		break;
	case 1:
		ps->enc_id = ENC_1_ID;
		break;
	case 2:
		ps->enc_id = ENC_2_ID;
		break;
	case 3:
		ps->enc_id = ENC_3_ID;
		break;
	case 4:
		ps->enc_id = ENC_4_ID;
		break;
	case 5:
		ps->enc_id = ENC_5_ID;
		break;
	}
	ps->mode = ENC_MODE_DP;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_enc_crtc_src);

int atb_crtc_lock(struct atombios *atb, unsigned i, bool lock)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc_db_regs;
	struct crtc_db_regs_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc_db_regs);

	crtc_db_regs = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc_lock (0x%04x) revision "	
			"%u.%u\n", of, crtc_db_regs->hdr.tbl_fmt_rev,
					crtc_db_regs->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_db_regs_params *)atb->g_ctx.ps_top;
	ps->crtc = i;

	if (lock)
		ps->state = CRTC_LOCK;
	else
		ps->state = CRTC_UNLOCK;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_lock);

int atb_crtc_timing(struct atombios *atb, unsigned i, struct alga_timing *t)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc_timing;
	struct crtc_timing_params *ps;
	int r;
	u16 info;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc_timing);

	crtc_timing = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc_timing (0x%04x) revision "	
			"%u.%u\n", of, crtc_timing->hdr.tbl_fmt_rev,
					crtc_timing->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_timing_params *)atb->g_ctx.ps_top;
	put_unaligned_le16(t->h, &ps->h);
	put_unaligned_le16(t->h_bl, &ps->h_bl);
	put_unaligned_le16(t->v, &ps->v);
	put_unaligned_le16(t->v_bl, &ps->v_bl);
	put_unaligned_le16(t->h_so - t->h, &ps->h_so);/* not EDID definition */
	put_unaligned_le16(t->h_spw, &ps->h_spw);
	put_unaligned_le16(t->v_so - t->v, &ps->v_so);/* not EDID definition */
	put_unaligned_le16(t->v_spw, &ps->v_spw);
	ps->crtc = i;
	
	info = 0;
	if (t->h_sp == 1)
		info |= INFO_HSYNC_POLARITY;
	if (t->v_sp == 1)
		info |= INFO_VSYNC_POLARITY;

	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_timing);

int atb_crtc_overscan(struct atombios *atb, unsigned i)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc_overscan;
	struct crtc_overscan_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc_overscan);

	crtc_overscan = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc_overscan (0x%04x) revision "	
			"%u.%u\n", of, crtc_overscan->hdr.tbl_fmt_rev,
					crtc_overscan->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_overscan_params *)atb->g_ctx.ps_top;
	ps->crtc = i;
	
	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_overscan);

int atb_crtc_scaler(struct atombios *atb, unsigned i)
{
	u16 of;
	struct master_cmd_tbl *cmd_tbl;
	struct common_cmd_tbl_hdr *crtc_scaler;
	struct crtc_scaler_params *ps;
	int r;

	mutex_lock(&atb->mutex);
	
	of = get_unaligned_le16(&atb->hdr->master_cmd_tbl_of);
	cmd_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&cmd_tbl->list.crtc_scaler);

	crtc_scaler = atb->adev.rom + of;
	dev_info(atb->adev.dev, "atombios: crtc_scaler (0x%04x) revision "	
			"%u.%u\n", of, crtc_scaler->hdr.tbl_fmt_rev,
					crtc_scaler->hdr.tbl_content_rev);

	atb->g_ctx.ps_dws = 0x80;/* max for ps index */
	atb->g_ctx.ps_top = kzalloc(atb->g_ctx.ps_dws * 4, GFP_KERNEL);
	if (!atb->g_ctx.ps_top) {
		dev_err(atb->adev.dev, "atombios: unable to allocate parameter "
							"space (stack)\n");
		r = -ATB_ERR;
		goto unlock_mutex;
	}
	ps = (struct crtc_scaler_params *)atb->g_ctx.ps_top;
	ps->crtc = i;
	ps->mode = SCALER_DISABLE;
	
	atb->g_ctx.fb_wnd = 0;
	atb->g_ctx.regs_blk = 0;
	atb->g_ctx.io_mode = IO_MM;

	r = interpret(atb, of, 0, 0);
	kfree(atb->g_ctx.ps_top);

unlock_mutex:
	mutex_unlock(&atb->mutex);
	return r;
}
EXPORT_SYMBOL_GPL(atb_crtc_scaler);
