/*
  author Sylvain Bertrand <digital.ragnarok@gmail.com>
  Protected by GNU Affero GPL v3 with some exceptions.
  See README at root of alga tree.
*/

#include <linux/pci.h>
#include <asm/byteorder.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>

#include <alga/pixel_fmts.h>
#include <alga/timing.h>
#include <alga/amd/atombios/atb.h>
#include <alga/amd/dce4/dce4.h>

#include "types.h"
#include "ba.h"
#include "ucode.h"
#include "gpu.h"
#include "cp.h"
#include "mc.h"
#include "irq.h"
#include "gpu.h"
#include "drv.h"
#include "ih.h"
#include "dce.h"

#include "regs.h"
#include "pcie_ports.h"

#include "fops.h"

/* sysfs class under which the "evergreen0" char device is created */
static struct class *class;

/* atombios and dce4 device descriptors handed to the alga helpers.
   NOTE(review): single static instances -- only one gpu can be driven at a
   time; a second probe() would clobber these. Confirm this is intended. */
static struct atb_dev adev;
static struct dce4_dev ddev;

/* those functions exist to deal with regs above regs range */
/*
 * Read a 32 bit register. Offsets beyond the mmio aperture are reached
 * through the index/data register pair at offsets 0 and 4.
 * NOTE(review): the indirect path is not serialized against concurrent
 * accessors (e.g. the irq thread) -- confirm callers lock around it.
 */
u32 rr32(struct pci_dev *dev, unsigned offset)
{
	struct dev_drv_data *drv_data;

	drv_data = pci_get_drvdata(dev);

	/* readl()/writel() already perform the little endian <-> cpu
	   conversion; an extra le32_to_cpu() would swap twice on big
	   endian hosts */
	if (offset <= (drv_data->regs_sz - sizeof(u32)))
		return readl(drv_data->regs + offset);

	/* indirect access: latch the offset in the index reg, then read
	   the data reg */
	writel(offset, drv_data->regs + 0);
	return readl(drv_data->regs + 4);
}

/*
 * Write a 32 bit register. Offsets beyond the mmio aperture go through
 * the index/data register pair at offsets 0 and 4.
 * NOTE(review): the indirect path is not serialized against concurrent
 * accessors -- confirm callers lock around it.
 */
void wr32(struct pci_dev *dev, u32 val, unsigned offset)
{
	struct dev_drv_data *drv_data;

	drv_data = pci_get_drvdata(dev);

	/* writel() already converts from cpu order to little endian; an
	   explicit cpu_to_le32() would swap twice on big endian hosts */
	if (offset <= (drv_data->regs_sz - sizeof(u32))) {
		writel(val, drv_data->regs + offset);
	} else {
		writel(offset, drv_data->regs + 0);
		writel(val, drv_data->regs + 4);
	}
}

/*
 * Atombios register-read hook: map the generic struct device back to its
 * enclosing pci device and forward to rr32(). The previous code also
 * fetched the driver data here, but never used it.
 */
static u32 atb_rr32(struct device *dev, unsigned offset)
{
	struct pci_dev *pdev;

	pdev = container_of(dev, struct pci_dev, dev);
	return rr32(pdev, offset);
}

/*
 * Atombios register-write hook: map the generic struct device back to its
 * enclosing pci device and forward to wr32(). The previous code also
 * fetched the driver data here, but never used it.
 */
static void atb_wr32(struct device *dev, u32 val, unsigned offset)
{
	struct pci_dev *pdev;

	pdev = container_of(dev, struct pci_dev, dev);
	wr32(pdev, val, offset);
}

/*
 * Read a pcie port register through the index/data pair. The index write
 * is read back before touching the data register so it is posted first.
 */
static u32 pcie_port_rreg(struct pci_dev *dev, u32 reg)
{
	wr32(dev, reg & 0xff, PCIE_PORT_INDEX);
	rr32(dev, PCIE_PORT_INDEX);	/* posting flush */
	return rr32(dev, PCIE_PORT_DATA);
}

/*
 * Write a pcie port register through the index/data pair. Each write is
 * followed by a read back of the same register to post it before the
 * next access.
 */
static void pcie_port_wreg(struct pci_dev *dev, u32 reg, u32 v)
{
	wr32(dev, reg & 0xff, PCIE_PORT_INDEX);
	rr32(dev, PCIE_PORT_INDEX);	/* posting flush */
	wr32(dev, v, PCIE_PORT_DATA);
	rr32(dev, PCIE_PORT_DATA);	/* posting flush */
}

/*
 * Return a kernel-memory copy of the pci expansion rom. Returns an
 * ERR_PTR on allocation failure, or pci_map_rom()'s own NULL/ERR_PTR when
 * the rom cannot be mapped. The caller owns the copy and must kfree() it.
 */
static void * __devinit rom_copy_get(struct pci_dev *dev)
{
	void __iomem *rom;
	void *rom_copy;
	size_t rom_sz;

	rom = pci_map_rom(dev, &rom_sz);
	if (IS_ERR_OR_NULL(rom))
		return rom;

	/* kmalloc is enough: the buffer is fully overwritten just below,
	   so kzalloc's zeroing was redundant */
	rom_copy = kmalloc(rom_sz, GFP_KERNEL);
	if (!rom_copy) {
		pci_unmap_rom(dev, rom);
		return ERR_PTR(-ENOMEM);
	}

	memcpy_fromio(rom_copy, rom, rom_sz);
	pci_unmap_rom(dev, rom);
	return rom_copy;
}

/*
 * Soft-reset the gfx (GRBM) blocks. The memory controller and display
 * engine are stopped across the reset and restored afterwards. The
 * write/read-back pairs on GRBM_SOFT_RESET post each strobe edge before
 * the settle delay.
 */
static void reset(struct pci_dev *dev)
{
	struct mc_dce_save mc_dce_save;
	u32 grbm_reset;

	mc_dce_stop(dev, &mc_dce_save);
	if (mc_wait_for_idle(dev))
		dev_warn(&dev->dev, "reset:wait for mc idle timed out\n");

	/* disable cp parsing/prefetching */
	wr32(dev, CP_ME_HALT | CP_PFP_HALT, CP_ME_CTL);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	wr32(dev, grbm_reset, GRBM_SOFT_RESET);
	rr32(dev, GRBM_SOFT_RESET);	/* read back to post the write */
	udelay(50);
	wr32(dev, 0, GRBM_SOFT_RESET);	/* release the reset */
	rr32(dev, GRBM_SOFT_RESET);	/* read back to post the write */
	udelay(50);

	mc_dce_resume(dev, &mc_dce_save);
}

/* enable PCIE 2.0 */
/*
 * Switch the link to gen2 signalling. Bails out early when the link
 * partner has neither ever sent gen2 nor advertises gen2 support (i.e.
 * proceeds if either flag is set). Each step below is a read-modify-write
 * of a pcie port link-control register; the order matters.
 */
static void pcie_gen2_enable(struct pci_dev *dev)
{
	u32 link_width_ctl;
	u32 speed_ctl;

	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	if (!(speed_ctl & LC_OTHER_SIDE_EVER_SENT_GEN2)
				&& !(speed_ctl & LC_OTHER_SIDE_SUPPORTS_GEN2))
		return;

	/* allow the link to renegotiate its width */
	link_width_ctl = pcie_port_rreg(dev, PCIE_LC_LINK_WIDTH_CTL);
	link_width_ctl &= ~LC_UPCONFIGURE_DIS;
	pcie_port_wreg(dev, PCIE_LC_LINK_WIDTH_CTL, link_width_ctl);

	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);

	/* pulse the failed-speed-change counter clear bit: set... */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);

	/* ...then clear */
	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);

	speed_ctl = pcie_port_rreg(dev, PCIE_LC_SPEED_CTL);
	speed_ctl |= LC_GEN2_EN_STRAP;
	pcie_port_wreg(dev, PCIE_LC_SPEED_CTL, speed_ctl);
}

/*
 * Map the two mmio apertures: bar 2 holds the registers (uncached), bar 0
 * exposes the vram (write combined). On failure nothing stays mapped.
 */
static int __devinit bars_map(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);
	resource_size_t regs_len = pci_resource_len(dev, 2);
	resource_size_t vram_len = pci_resource_len(dev, 0);

	dd->regs = ioremap_nocache(pci_resource_start(dev, 2), regs_len);
	if (!dd->regs)
		return -ENOMEM;
	dd->regs_sz = regs_len;

	dev_info(&dev->dev, "regs mmio base: 0x%p\n", dd->regs);
	dev_info(&dev->dev, "regs mmio size: %zu\n", (size_t)regs_len);

	dd->vram.bar0 = ioremap_wc(pci_resource_start(dev, 0), vram_len);
	if (!dd->vram.bar0) {
		iounmap(dd->regs);
		return -ENOMEM;
	}
	dev_info(&dev->dev, "vram mmio base: 0x%p\n", dd->vram.bar0);
	dev_info(&dev->dev, "vram mmio size: %zu\n", (size_t)vram_len);
	return 0;
}

/* Undo bars_map(): unmap vram (bar 0) then the register aperture (bar 2). */
static void bars_unmap(struct pci_dev *dev)
{
	struct dev_drv_data *dd = pci_get_drvdata(dev);

	iounmap(dd->vram.bar0);
	iounmap(dd->regs);
}

/*
 * Number of display controllers (crtcs) for the detected asic family.
 * The dead `break`s after `return` were removed and the unsupported-family
 * case moved into an explicit default arm.
 */
static unsigned crtcs_n(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;

	drv_data = pci_get_drvdata(dev);

	switch (drv_data->family) {
	case CYPRESS:
	case HEMLOCK:
	case JUNIPER:
	case REDWOOD:
		return 6;
	case CEDAR:
		return 4;
	case PALM:
		return 2;
	default:
		/* probe() only binds the families listed above */
		unreachable();
	}
}

/*
 * Bring up one evergreen gpu: pci enable/msi/dma mask, bar mapping,
 * atombios, gfx reset/post, memory controller, buffer apertures, display
 * engine (dce), interrupt handler (ih), command processor (cp), and
 * finally the userland char device. The error labels cascade, undoing
 * everything in exact reverse order of initialization.
 *
 * Fixes vs the previous revision:
 *  - device_create() failure now propagates PTR_ERR() instead of leaving
 *    err at 0, which made probe() report success after tearing down.
 *  - request_threaded_irq() failure now unwinds through atb_cleanup()
 *    (atb_init() succeeded by then); it used to jump past it.
 */
static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err;
	struct dev_drv_data *drv_data;

	err = pci_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "cannot enable the device\n");
		return err;
	}

	err = pci_enable_msi(dev);
	if (err) {
		dev_err(&dev->dev, "cannot enable MSI\n");
		goto err_dis_pdev;
	}

	pci_set_master(dev);

	err = dma_set_mask(&dev->dev, DMA_BIT_MASK(40));
	if (err) {
		dev_err(&dev->dev, "cannot set DMA address width to 40 bits\n");
		goto err_dis_msi;
	}
	dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(40));

	err = pci_request_regions(dev, pci_name(dev));
	if (err != 0) {
		goto err_dis_msi;
	}

	drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
	if (!drv_data) {
		dev_err(&dev->dev, "cannot allocate driver private data for"
								" device\n");
		err = -ENOMEM;
		goto err_release_pci_regions;
	}

	pci_set_drvdata(dev, drv_data);/* used to check the device is valid */

	drv_data->family = id->driver_data;

	adev.rom = rom_copy_get(dev);
	if (IS_ERR_OR_NULL(adev.rom)) {
		if (IS_ERR(adev.rom))
			err = PTR_ERR(adev.rom);
		else
			err = -EIO;
		dev_err(&dev->dev, "cannot copy the rom\n");
		goto err_free_drv_data;
	}

	err = bars_map(dev);
	if (err)
		goto err_free_rom_copy;

	drv_data->atb = atb_alloc();
	if (!drv_data->atb) {
		dev_err(&dev->dev, "cannot allocate atombios\n");
		err = -ENOMEM;
		goto err_bars_unmap;
	}

	adev.dev = &dev->dev;
	adev.rr32 = atb_rr32;
	adev.wr32 = atb_wr32;
	err = atb_init(drv_data->atb, &adev);
	if (err) {
		dev_err(&dev->dev, "cannot init the atombios\n");
		goto err_free_atb;
	}

	/* a busy gfx block is in an unknown state: reset it first */
	if (rr32(dev, GRBM_STATUS) & GUI_ACTIVE)
		reset(dev);

	/* CFG_MEM_SZ reads zero until the asic has been posted */
	if (!rr32(dev, CFG_MEM_SZ)) {
		dev_info(&dev->dev, "posting now...\n");
		err = atb_asic_init(drv_data->atb);
		if (err) {
			dev_err(&dev->dev, "atombios failed to init the "
								"asic\n");
			goto err_cleanup_atb;
		}
	} else {
		dev_info(&dev->dev, "already posted\n");
	}

	/* CFG_MEM_SZ is now valid */
	dev_info(&dev->dev, "vram size is %uMB\n", rr32(dev, CFG_MEM_SZ));
	drv_data->vram.range.start = 0;
	drv_data->vram.range.end = drv_data->vram.range.start
				+ rr32(dev, CFG_MEM_SZ) * 1024 * 1024 - 1;

	drv_data->crtcs_n = crtcs_n(dev);
	gpu_cfg_init(dev);

	ih_disable(dev);
	intrs_reset(dev);
	err = request_threaded_irq(dev->irq, irq, irq_thd, 0, pci_name(dev),
								(void*)dev);
	if (err) {
		dev_err(&dev->dev, "unable to request threaded irq\n");
		/* atb_init() succeeded above: unwind through atb_cleanup() */
		goto err_cleanup_atb;
	}

	pcie_gen2_enable(dev);

	err = ucode_load(dev);
	if (err)
		goto err_free_irq;

	err = mc_program(dev);
	if (err)
		goto err_release_firmware;

	err = ba_init(dev);
	if (err)
		goto err_release_firmware;

	err = ba_map(dev); /* map wb page, ih ring, cp ring */
	if (err)
		goto err_ba_cleanup;

	err = gpu_init(dev);
	if (err)
		goto err_ba_unmap;

	drv_data->dce = dce4_alloc();
	if (!drv_data->dce) {
		dev_err(&dev->dev, "cannot allocate dce\n");
		err = -ENOMEM;
		goto err_ba_unmap;
	}

	/* hook the display engine callbacks into the shared dce4 helper */
	ddev.dev = &dev->dev;
	ddev.atb = drv_data->atb;
	ddev.crtcs_n = drv_data->crtcs_n;
	ddev.hpd_on = dce_hpd_on;
	ddev.hpd_off = dce_hpd_off;
	ddev.hpd_sense = dce_hpd_sense;
	ddev.hpd_polarity_rearm = dce_hpd_polarity_rearm;
	ddev.crtc_fb = dce_crtc_fb;
	ddev.lut = dce_lut;
	err = dce4_init(drv_data->dce, &ddev);
	if (err)
		goto err_dce_cleanup;

	ih_disable(dev); /* FIXME: again? */
	ih_init(dev);
	intrs_reset(dev);
	ih_enable(dev);
	err = intrs_enable(dev);
	if (err)
		goto err_dce_cleanup;

	cp_stop(dev);
	ucode_cp_program(dev);
	cp_init(dev);
	cp_start(dev);

	/* userland interface setup */
	cdev_init(&drv_data->evergreen_cdev, &fops);
	err = cdev_add(&drv_data->evergreen_cdev, devt, 1);
	if (err) {
		dev_err(&dev->dev, "cannot add filesystem char device\n");
		goto err_dce_cleanup;
	}

	drv_data->evergreen_dev = device_create(class, &dev->dev,
			drv_data->evergreen_cdev.dev, NULL, "evergreen0");
	if (IS_ERR(drv_data->evergreen_dev)) {
		/* was missing: err stayed 0 and probe reported success */
		err = PTR_ERR(drv_data->evergreen_dev);
		dev_err(&dev->dev, "cannot create userspace device\n");
		goto err_cdev_del;
	}

	dev_info(&dev->dev, "ready\n");
	return 0;

	/* cascading unwind: each label falls through to the next */
err_cdev_del:
	cdev_del(&drv_data->evergreen_cdev);

err_dce_cleanup:
	dce4_cleanup(drv_data->dce);
	kfree(drv_data->dce);

err_ba_unmap:
	ba_unmap(dev);

err_ba_cleanup:
	ba_cleanup(dev);

err_release_firmware:
	ucode_release(dev);

err_free_irq:
	free_irq(dev->irq, (void*)dev);

err_cleanup_atb:
	atb_cleanup(drv_data->atb);

err_free_atb:
	kfree(drv_data->atb);

err_bars_unmap:
	bars_unmap(dev);

err_free_rom_copy:
	kfree(adev.rom);

err_free_drv_data:
	pci_set_drvdata(dev, NULL);/* invalidate the device */
	kfree(drv_data);

err_release_pci_regions:
	pci_release_regions(dev);

err_dis_msi:
	pci_disable_msi(dev);

err_dis_pdev:
	pci_disable_device(dev);
	return err;
}

/*
 * Tear the device down in exact reverse order of probe(): userland
 * interface first, then irq, display engine, buffer apertures, firmware,
 * atombios, mmio mappings, and finally the pci plumbing.
 */
static void __devexit remove(struct pci_dev *dev)
{
	struct dev_drv_data *drv_data;

	drv_data = pci_get_drvdata(dev);

	/* remove userland interface */
	device_destroy(class, drv_data->evergreen_cdev.dev);
	cdev_del(&drv_data->evergreen_cdev);

	/* quiesce interrupts before releasing the handler */
	ih_disable(dev);
	free_irq(dev->irq, (void*)dev);

	dce4_cleanup(drv_data->dce);
	kfree(drv_data->dce);
	ba_unmap(dev);

	ba_cleanup(dev);

	ucode_release(dev);

	atb_cleanup(drv_data->atb);
	kfree(drv_data->atb);

	bars_unmap(dev);

	pci_release_regions(dev);
	pci_disable_msi(dev);
	pci_disable_device(dev);

	kfree(drv_data);
	pci_set_drvdata(dev, NULL);/* invalidate the device */
}

/* NOTE(review): declared but never wired into pci_driver (.driver.pm is
   set to NULL below) -- dead object; confirm whether pm support is
   planned or this should be removed */
static const struct dev_pm_ops pm_ops;

/* FIXME: hardcoded for now */
/* pci ids bound by this driver; driver_data carries the asic family.
   NOTE(review): only one JUNIPER device id is listed, while crtcs_n()
   handles CYPRESS/HEMLOCK/REDWOOD/CEDAR/PALM too -- those families can
   never be probed with this table. */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) =
{
	{PCI_VENDOR_ID_ATI, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, JUNIPER},
	{}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);

static struct pci_driver pci_driver =
{
	/* NOTE(review): this name appears in sysfs and in init()'s printk;
	   spaces in a pci driver name are unusual -- confirm intended */
	.name = "AMD evergreen PCI driver",
	.id_table = pci_tbl,
	.probe = probe,
	.remove = __devexit_p(remove),
	.driver.pm = NULL	/* no power management ops wired up */
};

/*
 * Module entry point: create the sysfs class, reserve one char device
 * number, then register the pci driver. Unwinds in reverse on failure.
 */
static int __init init(void)
{
	int err;

	class = class_create(THIS_MODULE, "evergreen");
	if (IS_ERR(class))
		return PTR_ERR(class);

	err = alloc_chrdev_region(&devt, 0, 1, pci_driver.name);
	if (err < 0) {
		printk(KERN_ERR "%s:cannot allocate major/minor range\n",
							pci_driver.name);
		goto err_destroy_class;
	}

	err = pci_register_driver(&pci_driver);
	if (err != 0) {
		printk(KERN_ERR "%s:cannot register PCI driver\n",
							pci_driver.name);
		goto err_unreg_chrdev;
	}
	return 0;

err_unreg_chrdev:
	unregister_chrdev_region(devt, 1);

err_destroy_class:
	class_destroy(class);
	return err;
}

/*
 * Module exit point: tear down in exact reverse order of init() --
 * unregister the pci driver (which calls remove() for bound devices)
 * before releasing the char device numbers and the sysfs class.
 */
static void __exit cleanup(void)
{
	pci_unregister_driver(&pci_driver);
	unregister_chrdev_region(devt, 1);
	class_destroy(class);
}

module_init(init);
module_exit(cleanup);

MODULE_AUTHOR("Sylvain Bertrand <digital.ragnarok@gmail.com>");
MODULE_DESCRIPTION("AMD evergreen driver");
/* NOTE(review): the file header claims AGPLv3 "with exceptions" while
   MODULE_LICENSE says "GPL" -- confirm which license governs */
MODULE_LICENSE("GPL");
