/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <alga/alga.h>
#include <alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/edid.h>
#include <alga/amd/atombios/atb.h>
#include <alga/amd/atombios/dce.h>
#include <alga/amd/dce4/dce4.h>
#include <alga/amd/dce4/dce4_dev.h>

#include "dce4.h"
#include "i2c.h"
#include "dpcd.h"
#include "regs.h"

/* Register read/write shorthands going through the alga device vtable. */
#define RR32(of) dce->ddev.rr32(dce->ddev.dev, (of))
#define WR32(v, of) dce->ddev.wr32(dce->ddev.dev, (v), (of))
/* Disable the hpd logic of pad 'hpd' by clearing its whole ctl register. */
static void off(struct dce4 *dce, u8 hpd)
{
	WR32(0, regs_hpd_ctl[hpd]);
}

/*
 * Enable the hpd logic of pad 'hpd' with fixed connection and rx irq
 * timers (values hardcoded for displayport, see comment below).
 */
static void on(struct dce4 *dce, u8 hpd)
{
	u32 tmp;

	/*
	 * This is hardcoded for displayport with the 2 ms threshold in mind.
	 * In theory, the irq is not supposed to be raised until the hpd logic
	 * knows if it is a disconnection or a dp sink irq.
	 */
	tmp = SET(HPDx_CTL_CONN_TIMER, 2500) | SET(HPDx_CTL_RX_INT_TIMER, 250)
								| HPDx_CTL_ENA;
	WR32(tmp, regs_hpd_ctl[hpd]);
}

/* Report whether the sense bit of hpd pad 'hpd' is currently set. */
bool hpd_sense(struct dce4 *dce, u8 hpd)
{
	u32 status;

	status = RR32(regs_hpd_int_status[hpd]);
	return (status & HPDx_INT_STATUS_SENSE) != 0;
}


/*
 * Re-arm the irq polarity of hpd pad 'hpd' so the next interrupt fires
 * on the transition opposite to the current 'connected' state.
 */
void hpd_polarity_rearm(struct dce4 *dce, u8 hpd, bool connected)
{
	u32 ctl;

	ctl = RR32(regs_hpd_int_ctl[hpd]);
	if (connected)
		ctl &= ~HPDx_INT_CTL_INT_POLARITY;
	else
		ctl |= HPDx_INT_CTL_INT_POLARITY;
	WR32(ctl, regs_hpd_int_ctl[hpd]);
}

/*
 * Rewrite the int ctl register of every used hpd pad, keeping only its
 * polarity bit (all other int ctl bits end up cleared).
 */
void hpds_polarity_refresh(struct dce4 *dce)
{
	unsigned hpd;

	/* keep polarity */
	for (hpd = 0; hpd < HPDS_N; ++hpd) {
		u32 polarity;

		if ((dce->hpds_used & BIT(hpd)) == 0)
			continue;
		polarity = RR32(regs_hpd_int_ctl[hpd])
					& HPDx_INT_CTL_INT_POLARITY;
		WR32(polarity, regs_hpd_int_ctl[hpd]);
	}
}

/* Disable the hpd logic on every pad, used or not. */
void hpds_off(struct dce4 *dce)
{
	unsigned hpd;

	for (hpd = 0; hpd < HPDS_N; ++hpd)
		off(dce, hpd);
}

/*
 * Program the initial hpd state: enable the pads flagged in hpds_used,
 * disable the others, and log what was done for each.
 */
void hpds_init(struct dce4 *dce)
{
	unsigned hpd;

	for (hpd = 0; hpd < HPDS_N; ++hpd) {
		if (dce->hpds_used & BIT(hpd)) {
			on(dce, hpd);
			dev_info(dce->ddev.dev, "dce4: hpd%u on\n", hpd);
		} else {
			off(dce, hpd);
			dev_info(dce->ddev.dev, "dce4: hpd%u off\n", hpd);
		}
	}
}

/*
 * Bring up the software state for dp 'i' which was just detected as
 * connected: create its i2c adapter, query the dpcd info, then try to
 * fetch the edid. A missing edid is logged but not fatal.
 * Returns 0 on success, or the error of the failing step; on failure
 * the i2c adapter is torn down again so the caller can roll back.
 */
static int dp_toggle_connected(struct dce4 *dce, unsigned i)
{
	int r;

	r = dp_i2c_adapter_init(dce, i);
	if (r != 0) {
		dev_warn(dce->ddev.dev, "dce4: unable to create i2c adapter "
					"for newly connected dp%u\n", i);
		return r;
	}

	/* dp specs: must query dpcd info before 100 ms */
	r = dpcd_info(dce, i);
	if (r != 0) {
		/* "dcpd" typo fixed to "dpcd" in the warning below */
		dev_warn(dce->ddev.dev, "dce4: unable to query dpcd info for "
						"newly connected dp%u\n", i);
		i2c_del_adapter(&dce->dps[i].i2c_adapter);
		return r;
	}

	/* get the edid, failing to get it is not fatal */
	r = alga_i2c_edid_fetch(dce->ddev.dev, &dce->dps[i].i2c_adapter,
							&dce->dps[i].edid);
	if (r != 0)
		dev_warn(dce->ddev.dev, "dce4: unable to fetch edid for dp%u\n",
									i);
	return 0;
}

/*
 * Flip the cached connection state of dp 'i' and (de)initialize its
 * software state accordingly; a failed bring-up rolls the state back
 * to disconnected. The resulting state is propagated to atombios.
 * Returns 0, or the error from dp_toggle_connected().
 * Must be called with the lock held.
 */
static int dp_toggle(struct dce4 *dce, unsigned i)
{
	int r;

	dce->dps[i].connected = !dce->dps[i].connected;

	r = 0;
	if (dce->dps[i].connected) {
		dev_info(dce->ddev.dev, "dce4: dp%u was connected\n", i);
		r = dp_toggle_connected(dce, i);
		if (r != 0) /* restore disconnected state */
			dce->dps[i].connected = false;
	} else {
		dev_info(dce->ddev.dev, "dce4: dp%u was disconnected\n", i);
		i2c_del_adapter(&dce->dps[i].i2c_adapter);
		kfree(dce->dps[i].edid); /* kfree(NULL) is a no-op */
		dce->dps[i].edid = NULL;
	}
	atb_dp_state(dce->ddev.atb, dce->dps[i].atb_dfp, dce->dps[i].connected);
	return r;
}

/*
 * Handle an hpd pulse classified as a dp sink irq (not a connection
 * change). Placeholder: currently only logs the event.
 */
static void dp_sink_irq(void)
{
	//DEBUG
	printk(KERN_INFO "DP SINK IRQ RECEIVED\n");
}

/*
 * Process an irq raised by hpd pad 'hpd': re-arm the irq polarity from
 * the current sense, then either toggle the connection state of the
 * matching dp, or treat the event as a dp sink irq.
 * Returns 0, or the error from dp_toggle(). An irq on an unused pad is
 * only logged.
 */
int hpd_irq(struct dce4 *dce, u8 hpd)
{
	int r;
	unsigned i;

	if ((dce->hpds_used & BIT(hpd)) == 0) {
		/* the 'hpd' argument was mistakenly inside the format string */
		dev_warn(dce->ddev.dev, "dce4: irq on unused hpd%u\n", hpd);
		return 0;
	}

	r = 0;
	for (i = 0; i < dce->ddev.crtcs_n; ++i) {
		if (dce->dps[i].hpd == hpd) {
			bool sense;

			sense = hpd_sense(dce, hpd);
			hpd_polarity_rearm(dce, hpd, sense);

			lock(dce);
			if (dce->dps[i].connected != sense) {
				r = dp_toggle(dce, i);
			} else {
				dp_sink_irq();
			}
			unlock(dce);
			break;
		}
	}
	return r;
}

/*
 * Enable the hpd interrupt of every used pad; unused pads get their
 * int ctl register cleared. All registers are read and computed first,
 * then written back in a second pass.
 */
void dce4_hpds_intr_ena(struct dce4 *dce)
{
	u32 ctls[HPDS_N];
	unsigned hpd;

	for (hpd = 0; hpd < HPDS_N; ++hpd) {
		if ((dce->hpds_used & BIT(hpd)) == 0) {
			ctls[hpd] = 0;
			continue;
		}
		ctls[hpd] = RR32(regs_hpd_int_ctl[hpd])
						| HPDx_INT_CTL_INT_ENA;
	}
	for (hpd = 0; hpd < HPDS_N; ++hpd)
		WR32(ctls[hpd], regs_hpd_int_ctl[hpd]);
}
EXPORT_SYMBOL_GPL(dce4_hpds_intr_ena);

/*
 * Record that hpd pad 'hpd' raised an interrupt by setting its bit in
 * the irq-pending mask, under the irq spinlock (presumably consumed
 * later by hpd_irq() — confirm against the irq bottom half).
 */
void dce4_hpd_irq(struct dce4 *dce, unsigned hpd)
{
	unsigned long irq_flgs;

	spin_lock_irqsave(&dce->irq.lock, irq_flgs);
	dce->irq.hpds |= BIT(hpd);
	spin_unlock_irqrestore(&dce->irq.lock, irq_flgs);
}
EXPORT_SYMBOL_GPL(dce4_hpd_irq);
