/*
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cpuidle.h>
#include <mach/csp.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/suspend.h>
#include <linux/cpu_pm.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
#include "cpuidle.h"
#include "dt_idle_states.h"
#include <linux/reboot.h>
#include <linux/syscore_ops.h>
/*
 * Cpuidle and hotplug share the same reset exit address, because the
 * hardware has only one hot exit address register.  The common reset
 * exit address is the entry of lombo_cpu_wakeup_entry(), which jumps
 * to the corresponding wakeup handler according to wakeup_flag, a
 * per-cpu variable.  The cpuidle and hotplug wakeup handler addresses
 * are stored in the _cpu_wakeup_addr_save area during SMP init.
 */
#define LOMBO_MAX_STATES	2

#ifdef CONFIG_SMP
extern int wakeup_flag[CONFIG_NR_CPUS];
extern void cpu_on(u32 phys_cpu);
static volatile int c1_cpu_num;
static arch_spinlock_t cpuidle_wfi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#endif

extern void rtc_set_hot_rst_flag(u32 value);
extern void rtc_set_hot_rst_exit(u32 addr);
extern void v7_flush_dcache_all(void);
static volatile reg_cpu_ctrl_t cpu_ctrl_reg_set;


/*
 * Disable the L1 data cache on the current CPU by clearing the C bit
 * (bit 2) of SCTLR, done just before the final flush in the cluster
 * power-down path.
 * NOTE(review): the asm scratches r0 without a clobber list; this relies
 * on the compiler not holding a live value in r0 across the statement --
 * confirm it stays safe under the build's optimization level.
 */
static void v7_dcache_disable(void)
{
	__asm volatile(
		"mrc	p15, #0, r0, c1, c0, #0;"
		"bic	r0,  r0, #0x00000004;"
		"mcr	p15, #0, r0, c1, c0, #0");
}

/*
 * Take this core out of SMP coherency by clearing the SMP bit (bit 6)
 * of the auxiliary control register (ACTLR), required before the
 * cluster is powered off.
 * NOTE(review): scratches r0 with no clobber list -- same caveat as
 * the d-cache disable sequence above; verify against the compiler used.
 */
static void v7_leave_smp(void)
{
		__asm volatile(
			"mrc     p15, 0, r0, c1, c0, 1;"
			"bic     r0, r0, #0x040;"
			"mcr     p15, 0, r0, c1, c0, 1");
}

/*
 * Stop the GIC distributor forwarding interrupts by clearing GICD_CTLR,
 * so no interrupt is delivered while the cluster idle sequence runs.
 */
static void gic_dist_disable(void)
{
	void *gicd_ctlr = (void *)VA_GICD_CTLR;

	writel(0x0, gicd_ctlr);
}

/*
 * Re-enable interrupt forwarding at the GIC distributor (GICD_CTLR.EN)
 * once the idle/wakeup sequence has completed.
 */
static void gic_dist_enable(void)
{
	void *gicd_ctlr = (void *)VA_GICD_CTLR;

	writel(0x1, gicd_ctlr);
}

/*
 * Save the CPU clock-control context (clock select and ADPLL factor /
 * tuning registers) into cpu_ctrl_reg_set, then move the CPU clock onto
 * a PRCM-sourced clock so the ADPLL can be safely lost across the
 * cluster power down.  cpu_ctrl_reg_restore() undoes this after wakeup.
 * NOTE(review): field meanings (cpu_src_sel, cpu_src_div, PLL enable
 * bits) are inferred from their names -- confirm against the PRCM manual.
 */
static void cpu_ctrl_reg_save(void)
{
	reg_prcm_cpu_axi_clk_ctrl_t cpu_axi_clk_ctrl;
#ifdef CONFIG_ARCH_LOMBO_N7V3
	reg_prcm_cpu_pll_fac_t cpu_pll_fac;
	reg_prcm_cpu_pll_en_t cpu_pll_en;
	u32 lock;
#endif
	/* snapshot the registers that will be restored after wakeup */
	cpu_ctrl_reg_set.cpu_clk_sel =
		(reg_cpu_ctrl_cpu_clk_sel_t)readl_relaxed((void *)VA_CPU_CTRL_CPU_CLK_SEL);
	cpu_ctrl_reg_set.adpll_fac =
		(reg_cpu_ctrl_adpll_fac_t)readl_relaxed((void *)VA_CPU_CTRL_ADPLL_FAC);
	cpu_ctrl_reg_set.adpll_tune0 =
		(reg_cpu_ctrl_adpll_tune0_t)readl_relaxed((void *)VA_CPU_CTRL_ADPLL_TUNE0);
	cpu_ctrl_reg_set.adpll_tune1 =
		(reg_cpu_ctrl_adpll_tune1_t)readl_relaxed((void *)VA_CPU_CTRL_ADPLL_TUNE1);
	/* change the cpu clk to prcm:600M */
	cpu_axi_clk_ctrl.val = readl((void *)VA_PRCM_CPU_AXI_CLK_CTRL);
	cpu_axi_clk_ctrl.bits.cpu_src_sel = 2;

#if defined(CONFIG_ARCH_LOMBO_N7V5)
	cpu_axi_clk_ctrl.bits.cpu_src_div = 1;
#elif defined(CONFIG_ARCH_LOMBO_N7V3)
	/* change the cpu clk to prcm:816M */
	cpu_pll_fac.val = 0;
	cpu_pll_fac.bits.n = 816000000 / 24000000;	/* multiplier from 24MHz ref */
	writel(cpu_pll_fac.val, (void *)VA_PRCM_CPU_PLL_FAC);

	/* enable pll: enm -> enp -> oen, with settle delays between steps */
	cpu_pll_en.val = readl((void *)VA_PRCM_CPU_PLL_EN);
	if ((cpu_pll_en.val & 0x7) != 0x7) {
		cpu_pll_en.bits.enm = 1;
		writel(cpu_pll_en.val, (void *)VA_PRCM_CPU_PLL_EN);
		udelay(100);

		cpu_pll_en.bits.enp = 1;
		writel(cpu_pll_en.val, (void *)VA_PRCM_CPU_PLL_EN);
		udelay(10);

		cpu_pll_en.bits.oen = 1;
		writel(cpu_pll_en.val, (void *)VA_PRCM_CPU_PLL_EN);
	}

	/* NOTE(review): busy-waits forever if the PLL never reports lock */
	do {
		lock = readl((void *)VA_PRCM_CPU_PLL_STAT);
	} while (lock != 7);

	cpu_axi_clk_ctrl.bits.cpu_src_div = 0;
#endif

	writel(cpu_axi_clk_ctrl.val, (void *)VA_PRCM_CPU_AXI_CLK_CTRL);
}

/*
 * Restore the CPU clock-control context saved by cpu_ctrl_reg_save().
 * CPU_CLK_SEL is first written with bit 31 set (presumably forcing a
 * safe clock source while the ADPLL is reprogrammed -- TODO confirm),
 * the ADPLL registers are reloaded, and then the saved clock select is
 * written back.  On N7V3 the temporary CPU PLL is switched off again.
 */
static void cpu_ctrl_reg_restore(void)
{
#ifdef CONFIG_ARCH_LOMBO_N7V3
	reg_prcm_cpu_pll_en_t cpu_pll_en;
#endif
	writel(0x80000000, (void *)VA_CPU_CTRL_CPU_CLK_SEL);
	writel(cpu_ctrl_reg_set.adpll_fac.val, (void *)VA_CPU_CTRL_ADPLL_FAC);
	writel(cpu_ctrl_reg_set.adpll_tune0.val, (void *)VA_CPU_CTRL_ADPLL_TUNE0);
	writel(cpu_ctrl_reg_set.adpll_tune1.val, (void *)VA_CPU_CTRL_ADPLL_TUNE1);
	writel(cpu_ctrl_reg_set.cpu_clk_sel.val, (void *)VA_CPU_CTRL_CPU_CLK_SEL);
#ifdef CONFIG_ARCH_LOMBO_N7V3
	/* disable cpu pll */
	cpu_pll_en.val = 0;
	writel(cpu_pll_en.val, (void *)VA_PRCM_CPU_PLL_EN);
#endif
}

static void c0_domain_pd_sleep_trigger_ibp(void)
{
	writel(0x00010100, (void *)VA_PCU_TPOFF);
}

/*
 * Program the delay count of one PCU delay generator.  The DGCNTx
 * registers sit at consecutive 32-bit offsets from VA_PCU_DGCNT0.
 */
static void ibp_dg_delay_set(u32 dg_id, u32 dly)
{
	void *reg = (void *)(VA_PCU_DGCNT0 + 4 * dg_id);

	writel_relaxed(dly, reg);
}

/*
 * Enable one PCU delay generator: read-modify-write its enable bit in
 * the shared PCU_DGC register.
 */
static void dg_enable(u32 dg_id)
{
	reg_pcu_dgc_t pcu_dgc;

	pcu_dgc.val = readl_relaxed((void *)VA_PCU_DGC);
	pcu_dgc.val |= 1u << dg_id;
	writel_relaxed(pcu_dgc.val, (void *)VA_PCU_DGC);
}

/*
 * Globally enable the PCU by writing the enable pattern to EN_CTRL.
 * The meaning of the individual bits in 0x80000100 is not documented
 * here -- presumably a master enable plus one sub-block; TODO confirm.
 */
static void pcu_enable_all(void)
{
	reg_pcu_en_ctrl_t ctrl;

	ctrl.val = 0x80000100;
	writel(ctrl.val, (void *)VA_PCU_EN_CTRL);
}

/*
 * Initialize the PCU: program and enable all 16 delay generators, then
 * enable the PCU itself.  Called at driver init and again on syscore
 * resume, since the PCU loses its state across suspend.
 */
static void pcu_init(void)
{
	u32 i;

#define DG_ID0				(0)
#define DG_NUM				(16)

	/*instruction configuration*/
	for (i = 0; i < DG_NUM; i++) {
		/* IPB configuration */
		/* 2ms + i*8 for non-sim */
		/*
		 * BUGFIX: the guard used to test "ARCH_LOMBO_N7V5", which
		 * is never defined (the rest of this file consistently
		 * tests CONFIG_ARCH_LOMBO_N7V5), so the N7V5 branch was
		 * unreachable.  Delays are in 24MHz ticks (24*1000 per ms).
		 */
#ifndef CONFIG_ARCH_LOMBO_N7V5
		ibp_dg_delay_set(DG_ID0 + i, 24*1000*(2 + i * 8));
#else
		ibp_dg_delay_set(DG_ID0 + i, 24 * 1000 * (0 + i * 2));
#endif
		dg_enable(i);
	}

	pcu_enable_all();
}

#ifndef CONFIG_SMP
/*
 * Stub for the UP build: the cpuidle core invokes tick_broadcast() for
 * states flagged CPUIDLE_FLAG_TIMER_STOP, but with a single CPU there
 * is no other core to notify, so it is a no-op.
 */
void tick_broadcast(const struct cpumask *mask)
{
	/*Do nothing for N7V5*/
}
#endif
/*
 * Cluster power down
 *
 * Final power-off sequence for the whole cluster.  Records the cpuidle
 * wakeup reason (wakeup_flag = 0), arms the RTC hot-reset flag, triggers
 * the PCU power-off, then disables and flushes the D-cache, leaves SMP
 * coherency and spins in WFI until power is actually removed.  It never
 * returns; resume re-enters through the hot-reset exit address
 * (lombo_cpu_wakeup_entry).
 */
static void cluster_idle_flow(void)
{
#ifdef CPUIDLE_LOMBO_DEBUG
	pr_info("c%d I", smp_processor_id());
#endif
#ifdef CONFIG_SMP
	/*
	 * write cpuidle wakeup flag
	 */
	wakeup_flag[smp_processor_id()] = 0;
	sync_cache_w(wakeup_flag);
#endif
	rtc_set_hot_rst_flag(0x455AEE01);
	/*step1: trigger cluster power down*/
	c0_domain_pd_sleep_trigger_ibp();

	/*step2: close d-cache*/
	v7_dcache_disable();

	/*step3: clean and invalid all d-caches*/
	v7_flush_dcache_all();

#if defined(CPUIDLE_LOMBO_DEBUG) && defined(CONFIG_SMP)
	extern struct sleep_save_sp sleep_save_sp;
	pr_info("sleep_save_sp:%x\n", sleep_save_sp);
	pr_info("sleep_save_sp virt:%x %x %x\n",
					sleep_save_sp.save_ptr_stash,
					sleep_save_sp.save_ptr_stash[0],
					sleep_save_sp.save_ptr_stash[1]);
	pr_info("sleep_save_sp phy:%x\n",
					sleep_save_sp.save_ptr_stash_phys);

	unsigned int *addr = NULL;

	/* BUGFIX: was "(unsigned inti *)" -- broke the build whenever
	 * CPUIDLE_LOMBO_DEBUG was enabled. */
	if (smp_processor_id() == 0)
		addr = (unsigned int *)(sleep_save_sp.save_ptr_stash[0] + 0x80000000);
	else
		addr = (unsigned int *)(sleep_save_sp.save_ptr_stash[1] + 0x80000000);
	pr_info("save-gpd:%x\n", addr[0]);
	pr_info("save-sp:%x\n", addr[1]);
	pr_info("save-cpu_do_reusme:%x\n", addr[2]);
#endif
	/*step4: clrex*/
	__asm("CLREX");

	/*step5: switch SMP to AMP*/
	v7_leave_smp();

	/*step6: ISB*/
	__asm("ISB");

	/*step7: DSB*/
	__asm("DSB");

	/*step8: WFI*/
	while (1)
		__asm("WFI");
}

/*
 * Callback handed to cpu_suspend().  A non-zero power_down selects the
 * full cluster power-down path (which does not return); otherwise the
 * D-cache is flushed and the CPU simply enters WFI.  Returns 0 so that
 * cpu_suspend() reports success when execution falls straight through.
 */
int lombo_cpu_suspend_finish(unsigned long power_down)
{
	if (!power_down) {
		v7_flush_dcache_all();
		cpu_do_idle();
		return 0;
	}

	cluster_idle_flow();
	return 0;
}

/*
 * Reboot notifier callback: on SYS_RESTART, stop entering idle states
 * and clear the RTC hot-reset flag and exit address so the next boot is
 * a cold boot rather than a hot-reset resume.
 */
static int lombo_cpuidle_shutdown_notify(struct notifier_block *nb,
				   unsigned long action,
				   void *data)
{
	if (action != SYS_RESTART)
		return NOTIFY_DONE;

	disable_cpuidle();
	/* clear hot reset flag and exit address before the restart */
	rtc_set_hot_rst_flag(0);
	rtc_set_hot_rst_exit(0);

	return NOTIFY_DONE;
}

/* Reboot notifier that disarms the RTC hot-reset state before restart. */
static struct notifier_block lombo_cpuidle_shutdown_notifier = {
	.notifier_call	= lombo_cpuidle_shutdown_notify,
};


#define CPUIDLE_CONTEXT_STORE	0
#define CPUIDLE_CONTEXT_RESTORE 1

/*
 * Save (STORE) or restore (RESTORE) the cpuidle hardware context around
 * a deep-idle attempt.  No-op when cpuidle is disabled.
 *
 * STORE: mark wakeup_flag = 0 (cpuidle wakeup, as opposed to 1 for
 * hotplug) and snapshot the CPU clock-control registers.
 *
 * RESTORE (SMP): only CPU0 restores, and only when RST_SCTRL bit 1 is
 * clear (presumably "CPU1 held in reset", i.e. a real cluster power
 * down happened -- TODO confirm) and the wakeup reason is cpuidle.  It
 * then clears the hot-reset flag and re-powers CPU1 via cpu_on(1)
 * unless CPU1 was hot-unplugged.
 */
static void lombo_cpuidle_context(u32 flag)
{
	if (!cpuidle_disabled()) {
		if (flag == CPUIDLE_CONTEXT_STORE) {
#ifdef CONFIG_SMP
			/*
			 * Set cpuidle wakeup flag to indicate cpu
			 * wakeup reason, cpuidle wakeup_flag equal
			 * 0,hotplug is 1.
			 */
			wakeup_flag[smp_processor_id()] = 0;
			sync_cache_w(wakeup_flag);
#endif
			/* Save cpu ctrl reg*/
			cpu_ctrl_reg_save();
		} else if (flag == CPUIDLE_CONTEXT_RESTORE) {
#ifdef CONFIG_SMP
			/*wakeup cpu1*/
			unsigned int cpu_rst_sctl = READREG32(VA_CPU_CTRL_RST_SCTRL);

			if (smp_processor_id() == 0 &&
				(cpu_rst_sctl & 0x2) == 0x0 &&
				wakeup_flag[smp_processor_id()] == 0) {
				/*restore cpu ctrl reg*/
				cpu_ctrl_reg_restore();
				/* Clear hot reset flag when wakeup from deepidle */
				rtc_set_hot_rst_flag(0);
				/* If cpu1 is hot unpluged,skip wakeup cpu1*/
				if (cpu_online(1)) {
				#ifdef CPUIDLE_LOMBO_DEBUG
					pr_info("O");
				#endif
					cpu_on(1);
				}
			}
#else
			cpu_ctrl_reg_restore();
			/* Clear hot reset flag when wakeup from deepidle */
			rtc_set_hot_rst_flag(0);
#ifdef CPUIDLE_LOMBO_DEBUG
			pr_info("O");
#endif
#endif
		}
	}
}

/* Actual code that puts the SoC in different idle states */
/*
 * cpuidle .enter callback.
 *
 * index 0 is plain WFI.  For index 1 (deep idle) on SMP the two CPUs
 * rendezvous through c1_cpu_num, guarded by cpuidle_wfi_lock:
 *  - the FIRST CPU to arrive (c1_cpu_num == 0) bumps the counter and
 *    parks itself via cpu_suspend(0, ...), i.e. cache flush + WFI;
 *  - the SECOND CPU (c1_cpu_num == 1) checks via the CPU status
 *    register that the first CPU really is in WFI, and only then drives
 *    the full cluster power down via cpu_suspend(1, ...).
 * Returns the entered state index, or -1 when deep idle failed and the
 * governor should treat the attempt as unsuccessful.
 */
static int lombo_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			       int index)
{
#ifdef CONFIG_SMP
	unsigned int cpu_rst_sctl = 0;
	unsigned int cpu_status = 0;
	unsigned int wfi_flag = 0;
	unsigned int ret = 0;

	arch_spin_lock(&cpuidle_wfi_lock);
	if (index == 0) {
		cpu_do_idle();
		arch_spin_unlock(&cpuidle_wfi_lock);
	} else {
		if (c1_cpu_num == 0) {
			/* first CPU into deep idle: just park in WFI */
			c1_cpu_num++;
			arch_spin_unlock(&cpuidle_wfi_lock);
			if (smp_processor_id() == 0)
				cpu_cluster_pm_enter();
			cpu_pm_enter();
			lombo_cpuidle_context(CPUIDLE_CONTEXT_STORE);
			/*
			 * If ret == 1, deep idle fail, exit wfi state
			 * else deep idle success
			 */
			ret = cpu_suspend(0, lombo_cpu_suspend_finish);
			if (ret == 1) {
				/*
				 * If ret eq 1,means cluster power down fail,
				 * if eq 0, cluster power down success
				 */
				index = -1;
			}
			arch_spin_lock(&cpuidle_wfi_lock);
			if (c1_cpu_num > 0)
				c1_cpu_num--;
			arch_spin_unlock(&cpuidle_wfi_lock);
			cpu_rst_sctl = READREG32(VA_CPU_CTRL_RST_SCTRL);
			lombo_cpuidle_context(CPUIDLE_CONTEXT_RESTORE);
			cpu_pm_exit();
			if (smp_processor_id() == 0 && (cpu_rst_sctl & 0x2) == 0x0)
				cpu_cluster_pm_exit();
		} else if (c1_cpu_num == 1) {
			/* second CPU: attempt the real cluster power down */
			arch_spin_unlock(&cpuidle_wfi_lock);
			gic_dist_disable();
			cpu_status = READREG32(VA_CPU_CTRL_STATUS_R);
			/* read the OTHER core's standby-WFI status bit */
			if (smp_processor_id() == 0)
				wfi_flag = (cpu_status >> 17) & 0x1;
			else
				wfi_flag = (cpu_status >> 16) & 0x1;
			if (wfi_flag == 1) {
				if (smp_processor_id() == 0)
					cpu_cluster_pm_enter();
				cpu_pm_enter();
				lombo_cpuidle_context(CPUIDLE_CONTEXT_STORE);
				/*
				 * Enter cluster idle flow
				 */
				cpu_suspend(1, lombo_cpu_suspend_finish);
				arch_spin_lock(&cpuidle_wfi_lock);
				c1_cpu_num = 0;
				arch_spin_unlock(&cpuidle_wfi_lock);
				lombo_cpuidle_context(CPUIDLE_CONTEXT_RESTORE);
				cpu_pm_exit();
				if (smp_processor_id() == 0)
					cpu_cluster_pm_exit();
			} else {
				/* Cluster power down fail */
				index = -1;
			}
			gic_dist_enable();
		} else {
			/* NOTE(review): deliberate hang on impossible counter
			 * value; consider BUG() so the state is captured. */
			pr_info("bug here!\n");
			while (1)
				;
		}
	}
#else
	/* UP build: no rendezvous needed, power the cluster down directly */
	if (index == 0) {
		cpu_do_idle();
	} else {
		gic_dist_disable();
		cpu_cluster_pm_enter();
		cpu_pm_enter();
		lombo_cpuidle_context(CPUIDLE_CONTEXT_STORE);
		cpu_suspend(1, lombo_cpu_suspend_finish);
		lombo_cpuidle_context(CPUIDLE_CONTEXT_RESTORE);
		cpu_pm_exit();
		cpu_cluster_pm_exit();
		gic_dist_enable();
	}
#endif

	return index;
}

/*
 * Two states: state 0 is the standard ARM WFI state, state 1 powers the
 * whole cluster down (local timers stop, hence CPUIDLE_FLAG_TIMER_STOP).
 * State 1 may be overridden from the device tree by dt_init_idle_driver().
 */
static struct cpuidle_driver lombo_idle_driver = {
	.name			= "lombo_idle",
	.owner			= THIS_MODULE,
	.states[0]		= ARM_CPUIDLE_WFI_STATE,
	.states[1]		= {
		.enter			= lombo_enter_idle,
		.exit_latency		= 300,
		.target_residency	= 20000,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "CPU DEEP IDLE",
		.desc			= "ARM Cluster Power Down",
	},
	.state_count = LOMBO_MAX_STATES,
	.safe_state_index = 0,
};

/*
 * Device-tree idle-state match table for dt_init_idle_driver(); .data is
 * the enter handler installed for matching "arm,lombo,idle-state" nodes.
 */
static const struct of_device_id lombo_idle_state_match[] = {
	{ .compatible = "arm,lombo,idle-state",
	  .data = (void *)lombo_enter_idle
	},
	{},
};


/*
 * Syscore resume hook: reprogram the PCU delay generators, since their
 * configuration is lost across system suspend.
 */
static void lombo_cpuidle_pm_resume(void)
{
	pcu_init();
}

/* Re-init the PCU after system-wide suspend/resume. */
static struct syscore_ops cpuidle_pm_syscore_ops = {
	.resume = lombo_cpuidle_pm_resume,
};

#ifndef CONFIG_SMP
extern int *_cpu_wakeup_addr_save;
extern void lombo_cpu_wakeup_entry(void);
#endif

/*
 * Driver init: program the PCU, set up the hot-reset exit path on UP
 * builds, register the reboot notifier and syscore ops, load idle
 * states from the device tree and finally register the cpuidle driver.
 * Returns 0 on success or a negative errno from cpuidle_register().
 */
static int __init lombo_cpuidle_init(void)
{
	int ret;

	pcu_init();
#ifndef CONFIG_SMP
	/*
	 * For single-core cpu,we directly set hot exit address,
	 * because there is no hotplug feature to share the address.
	 * so we don't use wakeup_flag to share the hot exit address.
	 */
	_cpu_wakeup_addr_save = (int *)(virt_to_phys(cpu_resume));
	rtc_set_hot_rst_exit(virt_to_phys(lombo_cpu_wakeup_entry));
#endif
	ret = register_reboot_notifier(&lombo_cpuidle_shutdown_notifier);
	if (ret != 0)
		pr_warn("%s can't register reboot notifier\n", __func__);

	/* BUGFIX: the dt_init_idle_driver() result used to be ignored */
	ret = dt_init_idle_driver(&lombo_idle_driver, lombo_idle_state_match, 1);
	if (ret < 0)
		pr_warn("%s dt idle states init failed (%d)\n", __func__, ret);

	register_syscore_ops(&cpuidle_pm_syscore_ops);

	ret = cpuidle_register(&lombo_idle_driver, NULL);
	if (ret) {
		/* undo registrations so no stale hooks are left behind */
		unregister_syscore_ops(&cpuidle_pm_syscore_ops);
		unregister_reboot_notifier(&lombo_cpuidle_shutdown_notifier);
	}

	return ret;
}

device_initcall(lombo_cpuidle_init);
