/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/wait.h>

#include <alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/amd/dce4/dce4.h>
#include <alga/amd/dce4/dce4_dev.h>
#include <alga/amd/atombios/atb.h>
#include <alga/amd/atombios/dce.h>

#include "dce4.h"
#include "regs.h"

#define WR32(val, of) dce->ddev.wr32(dce->ddev.dev, (val), (of))
#define RR32(of) dce->ddev.rr32(dce->ddev.dev, (of))
int crtc_fb(struct dce4 *dce, unsigned i, struct db_fb *db_fb)
{
	u32 fb_fmt;
	u32 fb_swap;
	u32 viewport_h;
	u32 viewport_v;
	u32 tmp;

	fb_swap = SET(GRPH_ENDIAN_SWAP, GRPH_ENDIAN_NONE);
	switch (db_fb->pixel_fmt) {
	case ALGA_ARGB6666:
	case ALGA_ARGB8888:
		fb_fmt = (SET(GRPH_DEPTH, GRPH_DEPTH_32BPP)
					| SET(GRPH_FMT, GRPH_FMT_ARGB8888));
#ifdef __BIG_ENDIAN
		fb_swap = SET(GRPH_ENDIAN_SWAP, GRPH_ENDIAN_8IN32);
#endif
		break;
	case ALGA_ARGB2101010:
		fb_fmt = (SET(GRPH_DEPTH, GRPH_DEPTH_32BPP)
					| SET(GRPH_FMT, GRPH_FMT_ARGB8888));
#ifdef __BIG_ENDIAN
		fb_swap = SET(GRPH_ENDIAN_SWAP, GRPH_ENDIAN_8IN32);
#endif
		break;
	default:
		dev_err(dce->ddev.dev, "unsupported pixel format %d\n",
							db_fb->pixel_fmt);
		return -DCE4_ERR;
	}

	WR32(upper_32_bits(db_fb->primary),
					regs_grph_primary_surf_addr_high[i]);
	WR32(upper_32_bits(db_fb->secondary),
				regs_grph_secondary_surf_addr_high[i]);
	WR32((u32)(db_fb->primary) & GRPH_SURF_ADDR_MASK,
					regs_grph_primary_surf_addr[i]);
	WR32((u32)(db_fb->secondary) & GRPH_SURF_ADDR_MASK,
					regs_grph_secondary_surf_addr[i]);

	/*
	 * store the surface gpu addresses for page flipping without reading
	 * the content of the regs
	 */
	dce->dps[i].pf.crtc_surfs.primary = db_fb->primary;
	dce->dps[i].pf.crtc_surfs.secondary = db_fb->secondary;

	WR32(fb_fmt, regs_grph_ctl[i]);
	WR32(fb_swap, regs_grph_swap_ctl[i]);
	WR32(0, regs_grph_surf_of_x[i]);
	WR32(0, regs_grph_surf_of_y[i]);
	/* pixels */
	WR32(0, regs_grph_x_start[i]);
	WR32(0, regs_grph_y_start[i]);
	WR32(db_fb->timing.h, regs_grph_x_end[i]);
	WR32(db_fb->timing.v, regs_grph_y_end[i]);
	WR32(db_fb->pitch, regs_grph_pitch[i]);
	WR32(1, regs_grph_ena[i]);
	WR32(db_fb->timing.v, regs_desktop_height[i]);
	WR32((0 << 16) | 0, regs_viewport_start[i]);
	viewport_h = db_fb->timing.h;
	viewport_v = (db_fb->timing.v + 1) & ~1;
	WR32((viewport_h << 16) | viewport_v, regs_viewport_sz[i]);

	/* XXX: obsolete pageflip setup */
	/* make sure flip is at vb rather than hb */
	tmp = dce->ddev.rr32(dce->ddev.dev, regs_grph_flip_ctl[i]);
	tmp &= ~GRPH_SURF_UPDATE_H_RETRACE_ENA;
	WR32(tmp, regs_grph_flip_ctl[i]);

	/* set pageflip to happen anywhere in vblank interval */
	WR32(0, regs_master_update_mode[i]);
	return 0;
}

void crtc_lut(struct dce4 *dce, unsigned i)
{
	unsigned j;
	u16 v;

	WR32(0, regs_lut_ctl[i]);

	WR32(0, regs_lut_black_of_blue[i]);
	WR32(0, regs_lut_black_of_green[i]);
	WR32(0, regs_lut_black_of_red[i]);

	WR32(0xffff, regs_lut_white_of_blue[i]);
	WR32(0xffff, regs_lut_white_of_green[i]);
	WR32(0xffff, regs_lut_white_of_red[i]);

	WR32(0, regs_lut_rw_mode[i]);
	WR32(0x00000007, regs_lut_write_ena_mask[i]);

	WR32(0, regs_lut_rw_idx[i]);

	/*
	 * XXX: gamma hardcoded, 256 linear values of 10 bits, range 0x400
	 * (1024)
	 */
	for (v = 0, j = 0; j < 256; ++j, v+=4)
		WR32((v << 20) | (v << 10) | (v << 0), regs_lut_30_color[i]);
}

/* mask all crtc related interrupt sources on every crtc */
void crtcs_intr_reset(struct dce4 *dce)
{
	unsigned c;

	/* first pass: clear the per crtc interrupt masks */
	for (c = 0; c < dce->ddev.crtcs_n; ++c)
		WR32(0, regs_crtc_int_mask[c]);

	/* second pass: clear the per crtc graphics interrupt controls */
	for (c = 0; c < dce->ddev.crtcs_n; ++c)
		WR32(0, regs_crtc_grph_int_ctl[c]);

	/* XXX: should know if we have a DAC before */
	WR32(0, DACA_AUTODETECT_INT_CTL);
	WR32(0, DACB_AUTODETECT_INT_CTL);
}

/*
 * Enable or disable, via atombios, the memory requests of every crtc.
 * Returns 0 on success, -DCE4_ERR on the first atombios failure.
 */
int dce4_mem_req(struct dce4 *dce, bool ena)
{
	unsigned c;

	for (c = 0; c < dce->ddev.crtcs_n; ++c)
		if (atb_crtc_mem_req(dce->ddev.atb, c, ena) != 0)
			return -DCE4_ERR;
	return 0;
}
EXPORT_SYMBOL_GPL(dce4_mem_req);

/*
 * Swap the cached primary/secondary surface addresses of crtc i and program
 * them with the double buffered regs locked, so the hw latches the new
 * addresses consistently. Runs in hard irq context (see dce4_pf_irq), hence
 * the busy wait with udelay.
 */
static void page_flip(struct dce4 *dce, unsigned i)
{
	u32 tmp;
	u64 swap;
	struct crtc_surfs *css;
	unsigned j;

	css = &dce->dps[i].pf.crtc_surfs;

	/* swap the cached copies instead of reading back the regs */
	swap = css->secondary;
	css->secondary = css->primary;
	css->primary = swap;

	/* lock the double buffered grph regs while updating them */
	tmp = RR32(regs_grph_update[i]);
	tmp |= GRPH_UPDATE_LOCK;
	WR32(tmp, regs_grph_update[i]);

	WR32(upper_32_bits(css->primary), regs_grph_primary_surf_addr_high[i]);
	WR32(upper_32_bits(css->secondary),
					regs_grph_secondary_surf_addr_high[i]);
	WR32((u32)(css->primary) & GRPH_SURF_ADDR_MASK,
						regs_grph_primary_surf_addr[i]);
	WR32((u32)(css->secondary) & GRPH_SURF_ADDR_MASK,
					regs_grph_secondary_surf_addr[i]);

#define TIMEOUT 100000
	/* wait up to ~100 ms for the hw to flag the surface update */
	for (j = 0; j < TIMEOUT; ++j) {
		if (RR32(regs_grph_update[i]) & GRPH_SURF_UPDATE_PENDING)
			break;
		udelay(1);
	}
	if (j == TIMEOUT)
		dev_warn(dce->ddev.dev, "dce4: page flip %u timed out\n", i);
#undef TIMEOUT	/* was leaking into the rest of the translation unit */

	/* release the lock: the pending update completes on its own */
	tmp &= ~GRPH_UPDATE_LOCK;
	WR32(tmp, regs_grph_update[i]);
}

/*
 * this is executed in hard irq context because we want the page flip to happen
 * as soon as possible
 */
void dce4_pf_irq(struct dce4 *dce, unsigned i)
{
	u32 mask;

	/* disabling page flip is disabling the vblank interrupt */
	mask = RR32(regs_crtc_int_mask[i]);
	mask &= ~VBLANK_INT_MASK;
	WR32(mask, regs_crtc_int_mask[i]);

	page_flip(dce, i);

	/* publish the flip and wake any waiter in dce4_pf */
	atomic_set(&dce->dps[i].pf.occurred, 1);
	wake_up(&dce->dps[i].pf.event);
}
EXPORT_SYMBOL_GPL(dce4_pf_irq);

/*
 * Request a page flip on crtc i and sleep until the irq handler performed
 * it. On return *vblanks_n holds the crtc frame counter. Returns 0.
 */
int dce4_pf(struct dce4 *dce, unsigned i, unsigned *vblanks_n)
{
	u32 mask;

	/* enabling page flip is enabling vblank interrupt */
	mask = RR32(regs_crtc_int_mask[i]) | VBLANK_INT_MASK;
	WR32(mask, regs_crtc_int_mask[i]);

	/* sleep until dce4_pf_irq signals the flip happened */
	wait_event(dce->dps[i].pf.event,
				atomic_read(&dce->dps[i].pf.occurred) != 0);
	atomic_set(&dce->dps[i].pf.occurred, 0);

	*vblanks_n = RR32(regs_crtc_status_frame_cnt[i]);
	return 0;
}
EXPORT_SYMBOL_GPL(dce4_pf);
