#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <asm/neon.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#else
#include <rtthread.h>
#include "irq_numbers.h"
#include "clk.h"
#endif
#include "ei_os.h"

/* Per-device state for the AXVU hardware block. */
typedef struct eiAXVU_DEV_S {
    void __iomem *base;     /* ioremapped register base of the AXVU core */
    int irq;                /* interrupt line assigned to the AXVU */
    unsigned int freq;      /* value returned by AXVU_CoreInit() */
} AXVU_DEV_S;
AXVU_DEV_S *pstAXVU;        /* singleton device instance, set in probe/init */

int s32AxvuIrqRegistered = 0;   /* 1 once the AXVU IRQ handler is installed */
int givx_power_on_flg = 0;      /* power-on reference count (incremented per probe) */
int g_vu_clk_on = 0;            /* clock-enable counter (non-PM-domains builds) */
os_wait_t stAxvuWait;           /* wait queues woken from the ISR */
os_wait_t stAxvuWait2;
#ifdef __KERNEL__
/* Clock/reset handles and PM device, Linux build. */
static struct clk *pstAxvuAhbGate;
static struct clk *pstAxvuAhbReset;
static struct clk *pstAxvuAxiGate;
static struct device * pstDev;  /* device used for runtime-PM calls */
#else
/* Clock/reset handles, RT-Thread build. */
static clk_handle_t  pstAxvuAhbGate;
static clk_handle_t  pstAxvuAhbReset;
static clk_handle_t  pstAxvuAxiGate;
#endif

/* Core-layer entry points implemented elsewhere in the AXVU module. */
extern int AXVU_ModInit(void);
extern int AXVU_ModExit(void);
extern int AXVU_DisableIrq(void);
extern int AXVU_CoreInit(unsigned int base_reg);
extern void AXVU_PM_Close(int line_num);

/* Low-level ADPLL and internal-clock-gating register controls. */
extern void AXVU_SetAdpllEnable(unsigned int enable,unsigned int base_addr);
extern void AXVU_SetInternalGating(unsigned int ivx_core_mask, unsigned int enable,unsigned int base_addr);

/*
 * Enable the AXVU bus clocks (AHB + AXI gates).
 *
 * With generic PM domains the gates were handed to pm_clk at probe time,
 * so a single pm_runtime_get_sync() powers the domain and the clocks;
 * otherwise the two gates are enabled directly.  The RTOS build uses the
 * plain clk_enable() API.
 */
void ei_axvu_common_gate_enable(void)
{
#ifdef __KERNEL__
#ifdef CONFIG_PM_GENERIC_DOMAINS
    pm_runtime_get_sync(pstDev);
#else
    clk_prepare_enable(pstAxvuAhbGate);
    clk_prepare_enable(pstAxvuAxiGate);
#endif
#else
    clk_enable(pstAxvuAhbGate);
    clk_enable(pstAxvuAxiGate);
#endif
}

/*
 * Disable the AXVU bus clocks; exact mirror of ei_axvu_common_gate_enable().
 * Must be called once per enable to keep the runtime-PM / clock reference
 * counts balanced.
 */
void ei_axvu_common_gate_disable(void)
{
#ifdef __KERNEL__
#ifdef CONFIG_PM_GENERIC_DOMAINS
    pm_runtime_put_sync(pstDev);
#else
    clk_disable_unprepare(pstAxvuAhbGate);
    clk_disable_unprepare(pstAxvuAxiGate);
#endif
#else
    clk_disable(pstAxvuAhbGate);
    clk_disable(pstAxvuAxiGate);
#endif
}

/*
 * Return non-zero when timestamp @a is strictly after @b.
 *
 * The kernel build defers to time_after(), which is wrap-around safe for
 * jiffies-style counters (values are truncated to unsigned long, the
 * jiffies type).  The RTOS build previously used ">=", which also reported
 * "after" when the timestamps were equal — disagreeing with the kernel
 * semantics; use a strict comparison so both builds behave the same.
 * NOTE: the RTOS path does not handle counter wrap-around.
 */
long VuTimeAfter(unsigned long long a, unsigned long long b)
{
#ifdef __KERNEL__
    return time_after((unsigned long)a, (unsigned long)b);
#else
    return (a > b) ? 1 : 0;
#endif
}

/*
 * Allow NEON/FP instruction use in kernel context (required around
 * AXVU_CoreInit()).  No-op on the RTOS build, where FP use needs no
 * special bracketing.
 */
void ei_axvu_fp_enable(void)
{
#ifdef __KERNEL__
    kernel_neon_begin();
#endif
}

/* End a kernel-mode NEON/FP section opened by ei_axvu_fp_enable(). */
void ei_axvu_fp_disable(void)
{
#ifdef __KERNEL__
    kernel_neon_end();
#endif
}

/*
 * Unconditionally enable all three AXVU clocks (AHB gate, AHB reset,
 * AXI gate), bypassing the runtime-PM path.  Counterpart of
 * ei_axvu_clk_disable_unprepare().
 */
void ei_axvu_clk_prepare_enable(void)
{
#ifdef __KERNEL__
    clk_prepare_enable(pstAxvuAhbGate);
    clk_prepare_enable(pstAxvuAhbReset);
    clk_prepare_enable(pstAxvuAxiGate);
#else
    clk_enable(pstAxvuAhbGate);
    clk_enable(pstAxvuAhbReset);
    clk_enable(pstAxvuAxiGate);
#endif
}

/*
 * Disable all three AXVU clocks; mirror of ei_axvu_clk_prepare_enable().
 * Callers must keep enable/disable calls balanced.
 */
void ei_axvu_clk_disable_unprepare(void)
{
#ifdef __KERNEL__
    clk_disable_unprepare(pstAxvuAhbGate);
    clk_disable_unprepare(pstAxvuAhbReset);
    clk_disable_unprepare(pstAxvuAxiGate);
#else
    clk_disable(pstAxvuAhbGate);
    clk_disable(pstAxvuAhbReset);
    clk_disable(pstAxvuAxiGate);
#endif
}

#ifdef CONFIG_PM_GENERIC_DOMAINS
/* Power up the ADPLL first, then ungate the internal core clocks.
 * NOTE(review): pstAXVU->base is truncated to 32 bits by the cast; fine on
 * a 32-bit target, would lose address bits on a 64-bit kernel — confirm. */
static void AXVU_Adpll_Enable(void)
{
    AXVU_SetAdpllEnable(1, (unsigned int)pstAXVU->base);
    AXVU_SetInternalGating(1, 1, (unsigned int)pstAXVU->base);
}

/* Reverse order of AXVU_Adpll_Enable(): gate the internal clocks before
 * shutting the ADPLL down. */
static void AXVU_Adpll_Disable(void)
{
    AXVU_SetInternalGating(0, 0, (unsigned int)pstAXVU->base);
    AXVU_SetAdpllEnable(0, (unsigned int)pstAXVU->base);
}
#endif

/*
 * Full power-up path: bus gates first, then (when runtime PM does not do
 * it for us via the PM-domain callbacks) the ADPLL and internal gating.
 */
void axvu_pm_clk_enable(void)
{
    ei_axvu_common_gate_enable();
#ifndef CONFIG_PM_GENERIC_DOMAINS
    AXVU_SetAdpllEnable(1, (unsigned int)pstAXVU->base);
    AXVU_SetInternalGating(1, 1, (unsigned int)pstAXVU->base);
#endif
}

/*
 * Full power-down path; strict mirror of axvu_pm_clk_enable():
 * internal gating / ADPLL off first, bus gates last.
 */
void axvu_pm_clk_disable(void)
{
#ifndef CONFIG_PM_GENERIC_DOMAINS
    AXVU_SetInternalGating(0, 0, (unsigned int)pstAXVU->base);
    AXVU_SetAdpllEnable(0, (unsigned int)pstAXVU->base);
#endif
    ei_axvu_common_gate_disable();
}

#ifdef __KERNEL__
static irqreturn_t ei_axvu_ISR(int irq, void *dev_id)
#else
static int ei_axvu_ISR(int irq, void *dev_id)
#endif
{
    /* AXVU_DisableIrq() acknowledges/masks the interrupt and reports
     * whether it was a valid AXVU event; if so, wake both wait queues so
     * blocked callers can re-check the hardware state. */
    int valid_flg = AXVU_DisableIrq();
    if ((valid_flg)) {
        os_wakeup(&stAxvuWait);
        os_wakeup(&stAxvuWait2);
    }
#ifdef __KERNEL__
    return IRQ_HANDLED;
#else
    return OSAL_IRQ_HANDLED;
#endif
}

#ifdef __KERNEL__
/*
 * Platform-driver probe: map the AXVU registers, install the IRQ handler,
 * and — on the first probe only — acquire/enable the clocks and run the
 * one-time core initialisation, then register the module-level interfaces.
 * Error paths unwind in reverse acquisition order via the goto chain.
 */
static int ei_axvu_probe(struct platform_device *pdev)
{
    AXVU_DEV_S *vu;
    struct resource *iores;
    int ret = 0;

    pstDev = &pdev->dev;

    vu = devm_kzalloc(&pdev->dev, sizeof(AXVU_DEV_S), GFP_KERNEL);
    if (!vu) {
        ret = -ENOMEM;
        goto devm_kzalloc_err;
    }

    iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores) {
		ret = -EINVAL;
		goto platform_get_resource_err;
	}

	vu->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(vu->base)) {
		ret = PTR_ERR(vu->base);
		goto devm_ioremap_resource_err;
	}

    vu->irq = platform_get_irq(pdev, 0);
    if (vu->irq < 0) {
        ret = vu->irq;
        goto get_irq_err;
    }

    /* dev_id is deliberately 0 here; the matching free_irq() calls below
     * and in remove() pass 0 as well. */
    ret = request_irq(vu->irq, (void *)ei_axvu_ISR, 0, "ei_axvu", 0);
    if (ret) {
        s32AxvuIrqRegistered = 0;
        ret = -1;
        goto request_irq_err;
    } else {
        s32AxvuIrqRegistered = 1;
    }

    pstAXVU = vu;
    /* First probe: acquire clocks and bring the core up once; subsequent
     * probes only bump the power-on reference count. */
    if (givx_power_on_flg == 0) {
        pstAxvuAhbGate = devm_clk_get(&pdev->dev, "ax_ahb_gate");
        if (IS_ERR(pstAxvuAhbGate)) {
            ret = PTR_ERR(pstAxvuAhbGate);
            goto devm_clk_get_vu_ahb_gate_err;
        }
        pstAxvuAhbReset = devm_clk_get(&pdev->dev, "ax_ahb_reset");
        if (IS_ERR(pstAxvuAhbReset)) {
            ret = PTR_ERR(pstAxvuAhbReset);
            goto devm_clk_get_vu_ahb_reset_err;
        }
        pstAxvuAxiGate = devm_clk_get(&pdev->dev, "ax_axi_gate");
        if (IS_ERR(pstAxvuAxiGate)) {
            ret = PTR_ERR(pstAxvuAxiGate);
            goto devm_clk_get_vu_axi_gate_err;
        }
        /* The reset line stays asserted/enabled for the driver lifetime;
         * only the bus gates are handed to runtime PM below. */
        clk_prepare_enable(pstAxvuAhbReset);
    #ifdef CONFIG_PM_GENERIC_DOMAINS
        /* Hand the gates to pm_clk so runtime PM manages them.
         * NOTE(review): the first pm_clk_add_clk() result is overwritten
         * by the second, and ret is not checked — confirm intentional. */
        ret = pm_clk_add_clk(&pdev->dev, pstAxvuAhbGate);
        ret = pm_clk_add_clk(&pdev->dev, pstAxvuAxiGate);
	    ret |= pm_clk_resume(&pdev->dev);
	    pm_runtime_set_active(&pdev->dev);
	    pm_runtime_enable(&pdev->dev);
    #else
        clk_prepare_enable(pstAxvuAhbGate);
        clk_prepare_enable(pstAxvuAxiGate);
    #endif

    #ifndef CONFIG_PM_GENERIC_DOMAINS
        g_vu_clk_on ++;
    #endif
        /* Core init uses NEON, so bracket it with the FP enable/disable.
         * NOTE(review): the base pointer is truncated to 32 bits here;
         * fine on a 32-bit kernel only — confirm target. */
        ei_axvu_fp_enable();
        pstAXVU->freq = AXVU_CoreInit((unsigned int)pstAXVU->base);
        ei_axvu_fp_disable();
    #ifndef CONFIG_PM_GENERIC_DOMAINS
        AXVU_PM_Close(__LINE__);
    #endif
    }
    givx_power_on_flg++;
    os_wait_init(&stAxvuWait);
    os_wait_init(&stAxvuWait2);

    AXVU_ModInit();

    return 0;

/* Unwind in reverse order of acquisition. */
devm_clk_get_vu_axi_gate_err:
    devm_clk_put(&pdev->dev, pstAxvuAhbReset);
devm_clk_get_vu_ahb_reset_err:
    devm_clk_put(&pdev->dev, pstAxvuAhbGate);
devm_clk_get_vu_ahb_gate_err:
    free_irq(vu->irq, 0);
request_irq_err:
get_irq_err:
platform_get_resource_err:
devm_ioremap_resource_err:
    devm_kfree(&pdev->dev, vu);
devm_kzalloc_err:
    return ret;
}

/*
 * Platform-driver remove: drop one power-on reference; when the last user
 * goes away, release the clocks (and disable runtime PM in the PM-domains
 * build), then free the IRQ, the device struct and the wait queues.
 *
 * NOTE(review): the wait queues and AXVU_ModExit() run on every remove,
 * not only on the last reference — confirm this is intended when
 * givx_power_on_flg is still > 0.
 */
static int ei_axvu_remove(struct platform_device *pdev)
{
    givx_power_on_flg--;
    if (givx_power_on_flg == 0) {
    #ifdef CONFIG_PM_GENERIC_DOMAINS
        /* NOTE(review): put_sync without a visible matching get here —
         * presumably balances the probe-time pm_clk_resume/active state;
         * verify the runtime-PM refcount stays balanced. */
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        pm_clk_remove_clk(&pdev->dev, pstAxvuAxiGate);
        pm_clk_remove_clk(&pdev->dev, pstAxvuAhbGate);
    #else
        devm_clk_put(&pdev->dev, pstAxvuAxiGate);
        devm_clk_put(&pdev->dev, pstAxvuAhbGate);
    #endif
        devm_clk_put(&pdev->dev, pstAxvuAhbReset);
    }

    /* dev_id 0 matches the request_irq() call in probe. */
    if (s32AxvuIrqRegistered == 1) {
        free_irq(pstAXVU->irq, 0);
    }

    if (pstAXVU != NULL) {
        devm_kfree(&pdev->dev, pstAXVU);
        pstAXVU = NULL;
    }

    pstDev = NULL;

    os_wait_destroy(&stAxvuWait);
    os_wait_destroy(&stAxvuWait2);

    AXVU_ModExit();

    return 0;
}

/* Device-tree match table.
 * NOTE(review): the compatible string contains a space after the comma
 * ("ei, ei-axvu"); DT compatibles are normally "vendor,device" with no
 * space — it must match the device tree exactly, so confirm before
 * changing either side. */
static const struct of_device_id ei_axvu_match[] = {
    { .compatible = "ei, ei-axvu" },
    {},
};
MODULE_DEVICE_TABLE(of, ei_axvu_match);

#ifdef CONFIG_PM_GENERIC_DOMAINS
/* Runtime-PM suspend: gate internal clocks and stop the ADPLL; pm_clk
 * handles the bus gates separately. */
static int lombo_axvu_runtime_suspend(struct device *dev)
{
    AXVU_Adpll_Disable();
	return 0;
}

/* Runtime-PM resume: restart the ADPLL and ungate the internal clocks. */
static int lombo_axvu_runtime_resume(struct device *dev)
{
    AXVU_Adpll_Enable();
	return 0;
}

/* System-sleep (noirq phase) suspend: only touch the hardware if runtime
 * PM has not already suspended it. */
static int lombo_axvu_suspend_noirq(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev)) {
        AXVU_Adpll_Disable();
    }
	return 0;
}

/* System-sleep (noirq phase) resume: re-enable only if the device was
 * runtime-active before suspend, mirroring suspend_noirq. */
static int lombo_axvu_resume_noirq(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev)) {
        AXVU_Adpll_Enable();
    }
	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: nothing to do here; the noirq-phase callback
 * performs the actual hardware shutdown. */
static int lombo_axvu_suspend(struct device *dev)
{
	return 0;
}

/*
 * System-sleep resume: re-cycle the AHB reset line, then re-run the core
 * init.  If the device is runtime-suspended, its clocks are temporarily
 * brought up around AXVU_CoreInit() and put back down afterwards so the
 * runtime-PM state is left unchanged.
 */
static int lombo_axvu_resume(struct device *dev)
{
	clk_disable_unprepare(pstAxvuAhbReset);
    clk_prepare_enable(pstAxvuAhbReset);

	if (pm_runtime_status_suspended(dev)) {
		pm_clk_resume(dev);
        AXVU_Adpll_Enable();
	}

    /* NOTE(review): base pointer truncated to 32 bits — 32-bit kernels only. */
    AXVU_CoreInit((unsigned int)pstAXVU->base);

	if (pm_runtime_status_suspended(dev)) {
		pm_clk_suspend(dev);
        AXVU_Adpll_Disable();
	}

	return 0;
}
#endif

#ifdef CONFIG_PM
/* PM callbacks: runtime PM + noirq hooks only when generic PM domains are
 * in use; plain system-sleep hooks whenever CONFIG_PM_SLEEP is set. */
static const struct dev_pm_ops lombo_axvu_pm_ops = {
#ifdef CONFIG_PM_GENERIC_DOMAINS
	SET_RUNTIME_PM_OPS(lombo_axvu_runtime_suspend, lombo_axvu_runtime_resume, NULL)
	.suspend_noirq = lombo_axvu_suspend_noirq,
	.resume_noirq = lombo_axvu_resume_noirq,
#endif

#ifdef CONFIG_PM_SLEEP
	SET_SYSTEM_SLEEP_PM_OPS(lombo_axvu_suspend, lombo_axvu_resume)
#endif
};
#endif

/* Platform-driver registration; bound to DT nodes via ei_axvu_match. */
static struct platform_driver ei_axvu_driver = {
    .probe          = ei_axvu_probe,
    .remove         = ei_axvu_remove,
    .driver         =
    {
        .name           = "ei_axvu",
        .of_match_table = ei_axvu_match,
#ifdef CONFIG_PM
        .pm = &lombo_axvu_pm_ops,
#endif
    },
};
os_module_platform_driver(ei_axvu_driver);

MODULE_LICENSE("GPL");

#else

int axvu_driver_init()
{
    AXVU_DEV_S *vu;
    int ret = 0;
    unsigned long res_phy_addr;
    unsigned long res_vir_addr;
    unsigned int res_size;
    unsigned int value;

    vu = os_kzalloc(sizeof(AXVU_DEV_S), os_gfp_kernel);
    if (!vu) {
        ret = -ENOMEM;
        os_printk("malloc axnu dev buffer error\n");
        goto dev_malloc_err;
    }

    res_phy_addr = 0x01780000;
    res_size = 100000;
    res_vir_addr = (unsigned long)os_ioremap_nocache(res_phy_addr, res_size);
    vu->base = (void *)res_vir_addr;

    vu->irq  = INT_AX_IVX;
    ret = os_request_irq(vu->irq, ei_axvu_ISR, 0, "AXVU_CODEC_IRQ", (void *)vu);
    if (ret) {
        s32AxvuIrqRegistered = 0;
        os_printk("init ivx irq error\n");
    } else {
        s32AxvuIrqRegistered = 1;
    }

    pstAXVU = vu;
    if (givx_power_on_flg == 0) {
        pstAxvuAhbGate = clk_get("ahb_ivx_gate");
        if (!pstAxvuAhbGate) {
            os_printk("failed to get ahb ivx gate clk\n");
            goto ahb_ivx_gate_get_err;
        }
        pstAxvuAhbReset = clk_get("ahb_ivx_reset");
        if (!pstAxvuAhbReset) {
            os_printk("failed to get ahb ivx reset clk\n");
            goto ahb_ivx_reset_get_err;
        }
        pstAxvuAxiGate = clk_get("maxi_ivx_gate");
        if (!pstAxvuAxiGate) {
            os_printk("failed to get maxi ivx gate\n");
            goto maxi_ivx_gate_get_err;
        }

        clk_enable(pstAxvuAhbGate);
        clk_enable(pstAxvuAhbReset);
        clk_enable(pstAxvuAxiGate);
    }

    g_vu_clk_on ++;
    ei_axvu_fp_enable();
    pstAXVU->freq = AXVU_CoreInit((unsigned int)pstAXVU->base);
    ei_axvu_fp_disable();
    AXVU_PM_Close(__LINE__);
    givx_power_on_flg++;
    os_wait_init(&stAxvuWait);
    os_wait_init(&stAxvuWait2);

    AXVU_ModInit();

    return 0;

maxi_ivx_gate_get_err:
    clk_put(pstAxvuAhbReset);
ahb_ivx_reset_get_err:
    clk_put(pstAxvuAhbGate);
ahb_ivx_gate_get_err:
    os_kfree(vu);
dev_malloc_err:
    return ret;

}

#endif
