/*
 * Copyright (c) 2009 hisilicon.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/interrupt.h>

#include <asm/cacheflush.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/hardware/cache-l2x0.h>
#include <mach/platsmp.h>
#include <mach/kexport.h>
#include <linux/hal/cpu_param.h>

#include "mach/l2_cache.h"

#ifdef CONFIG_RTOS_HAL_CACHE_L2X0
#include <asm/mach/arch.h>
#endif

/* MMIO base of the L2 cache controller; set during l2cache_init() */
void __iomem *l2cache_base;
/* non-zero when the controller provides combined op+sync registers
 * (REG_L2_INVALID_SYNC / REG_L2_CLEAN_SYNC) -- selected via the DT
 * "l2_flush_type" string below */
static int clean_and_invalid_sync __read_mostly;
#define L2CACHE_CLEAN_AND_INVALID_SYNC	"clean_and_invalid_sync"
/* serializes per-line/range cache maintenance operations */
static DEFINE_SPINLOCK(l2cache_lock);
/* serializes controller (re)initialization */
static DEFINE_SPINLOCK(l2cache_init_lock);

/* cache geometry read from the device tree in arch_l2_param() */
static unsigned int l2_cache_size;
static unsigned int l2_line_size;
static unsigned int l2_way_num;
const char *l2_flush_type;	/* DT "l2_flush_type" property string */
#ifdef CONFIG_MACH_SD511X
/* which L2 implementation to drive, chosen by the "l2_cache=" bootarg */
l2_cache_type l2_cache_switch = L2_CACHE_TYPE_NONE;
#endif
/*
 * arch_l2_param - read L2 cache geometry and flush type from the DT node
 * @node: the "arm,pl310-cache" device tree node
 *
 * Fills the file-scope l2_cache_size / l2_line_size / l2_way_num /
 * l2_flush_type variables.  Missing numeric properties fall back to
 * built-in defaults but are still reported as an error.
 *
 * Returns 0 on success, -1 if any property was missing.
 */
static int arch_l2_param(const struct device_node *node)
{
	int err = 0;

	if (of_property_read_u32(node, "l2_cache_size", &l2_cache_size)) {
		pr_err("Find l2_cache_size fail in arch_l2_param!\n");
		l2_cache_size = 0x2000;	/* fallback default */
		err = -1;
	}

	if (of_property_read_u32(node, "l2_line_size", &l2_line_size)) {
		pr_err("Find l2_line_size fail in arch_l2_param!\n");
		l2_line_size = 0x20;	/* fallback: 32-byte lines */
		err = -1;
	}

	if (of_property_read_u32(node, "l2_way_num", &l2_way_num)) {
		pr_err("Find l2_way_num fail in arch_l2_param!\n");
		l2_way_num = 8;		/* fallback: 8 ways */
		err = -1;
	}

	/* no default here: absence means the combined op+sync registers
	 * must not be used (clean_and_invalid_sync stays 0) */
	if (of_property_read_string(node, "l2_flush_type", &l2_flush_type)) {
		pr_err("Find l2_flush_type fail, make sure the l2 cache not support clean&invalid!\n");
		err = -1;
	}

	return err;
}

/* Cache sync: NOTE(review) -- assumes a read of REG_L2_SYNC stalls until
 * outstanding maintenance operations have drained; confirm against the
 * controller's register specification. */
static inline void l2cache_sync(void)
{
	readl(l2cache_base + REG_L2_SYNC);
}

/******************************************************************************
 *
 * l2_invalid_auto - invalidate one way of the L2 cache in hardware
 *
 * Kicks off the controller's automatic invalidate for the given way,
 * busy-waits until the hardware clears the START bit, then syncs.
 *
 *****************************************************************************/

/* @way: index of the cache way to invalidate */
static void l2_invalid_auto(unsigned int way)
{
	unsigned int cmd = L2_MAINT_AUTO_START |
			   (way << BIT_L2_MAINT_AUTO_WAYADDRESS);

	writel(cmd, l2cache_base + REG_L2_MAINT_AUTO);

	/* poll until the hardware drops the START bit */
	do {
		barrier();
	} while (readl(l2cache_base + REG_L2_MAINT_AUTO) & L2_MAINT_AUTO_START);

	l2cache_sync();
}

/******************************************************************************
 *
 * l2_clean_auto - clean (write back) one way of the L2 cache in hardware
 *
 * Kicks off the controller's automatic clean for the given way,
 * busy-waits until the hardware clears the START bit, then syncs.
 *
 *****************************************************************************/

/* @way: index of the cache way to clean */
static void l2_clean_auto(unsigned int way)
{
	/* unsigned int for consistency with l2_invalid_auto(); the value is
	 * a 32-bit register image anyway */
	unsigned int val;

	val = (way << BIT_L2_MAINT_AUTO_WAYADDRESS) | L2_MAINT_AUTO_START
	    | L2_MAINT_AUTO_CLEAN;

	writel(val, l2cache_base + REG_L2_MAINT_AUTO);

	/* wait until the auto clean has finished */
	while (readl(l2cache_base + REG_L2_MAINT_AUTO) & L2_MAINT_AUTO_START)
		barrier();

	l2cache_sync();
}

/* Invalidate the entire L2 cache, one way at a time. */
static void l2cache_inv_all(void)
{
	unsigned int w = 0;

	while (w < L2_WAY_NUM)
		l2_invalid_auto(w++);

	l2cache_sync();
}

/* Clean (write back) the entire L2 cache, one way at a time. */
static void l2cache_clean_all(void)
{
	unsigned int way;

	/* clean cache all-way */
	for (way = 0; way < L2_WAY_NUM; way++)
		l2_clean_auto(way);
	l2cache_sync();
}

/* Flush = clean (write back) then invalidate the whole L2 cache. */
static void l2cache_flush_all(void)
{
	l2cache_clean_all();
	l2cache_inv_all();
}

/* Invalidate the single L2 line containing physical address @addr. */
static void l2cache_inv_line(phys_addr_t addr)
{
	/* operate on whole cache lines only */
	addr &= ~(CACHE_LINE_SIZE - 1);

	if (!clean_and_invalid_sync) {
		/* plain invalidate, then explicitly drain it */
		writel(addr | (1 << BIT_L2_INVALID_BYADDRESS),
		       l2cache_base + REG_L2_INVALID);
		dsb();
		l2cache_sync();
		return;
	}

	/* controller offers a combined invalidate+sync register */
	writel(addr | (1 << BIT_L2_INVALID_BYADDRESS),
	       l2cache_base + REG_L2_INVALID_SYNC);
}

/* Clean (write back) the single L2 line containing physical address @addr. */
static void l2cache_clean_line(phys_addr_t addr)
{
	/* operate on whole cache lines only */
	addr &= ~(CACHE_LINE_SIZE - 1);

	if (!clean_and_invalid_sync) {
		/* plain clean, then explicitly drain it */
		writel(addr | (1 << BIT_L2_CLEAN_BYADDRESS),
		       l2cache_base + REG_L2_CLEAN);
		dsb();
		l2cache_sync();
		return;
	}

	/* controller offers a combined clean+sync register */
	writel(addr | (1 << BIT_L2_CLEAN_BYADDRESS),
	       l2cache_base + REG_L2_CLEAN_SYNC);
}

/* Flush = clean then invalidate the single line containing @addr. */
static inline void l2cache_flush_line(phys_addr_t addr)
{
	l2cache_clean_line(addr);
	l2cache_inv_line(addr);
}

/*
 * l2cache_inv_range - invalidate the L2 lines covering [start, end)
 * @start: physical start address
 * @end:   physical end address (exclusive)
 *
 * Unaligned edge lines are flushed (clean+invalidate) rather than
 * invalidated, so data sharing those lines with the range is not lost.
 * Work proceeds in 4KB batches, dropping the spinlock between batches to
 * bound irq-off latency.
 */
static void l2cache_inv_range(phys_addr_t start, phys_addr_t end)
{
	unsigned long flags;
	unsigned long tmp_value;
	static unsigned int print;	/* emit the misalignment diagnostic only once */

	if ((unlikely(start & (CACHE_LINE_SIZE - 1))) || (unlikely(end & (CACHE_LINE_SIZE - 1)))) {
		if (print != 1) {
			pr_info("[%s][%d] inv L2 is not aligned.start:0x%pa, end phys:0x%pa.\n",
					current->comm, current->pid, &start, &end);
			print = 1;
			dump_stack();
		}
	}

	spin_lock_irqsave(&l2cache_lock, flags);

	/* nothing to do while the L2 is disabled */
	tmp_value = readl(l2cache_base + REG_L2_CTRL);
	if (!(tmp_value & L2_CTRL_CACHE_ENABLE)) {
		spin_unlock_irqrestore(&l2cache_lock, flags);
		return;
	}

	if (unlikely(start & (CACHE_LINE_SIZE - 1))) {
		/* flush the line if it is not cacheline aligned */
		start &= ~(CACHE_LINE_SIZE - 1);
		l2cache_flush_line(start);
		start += CACHE_LINE_SIZE;
	}

	if (unlikely(end & (CACHE_LINE_SIZE - 1))) {
		/* flush the line if it is not cacheline aligned */
		end &= ~(CACHE_LINE_SIZE - 1);
		l2cache_flush_line(end);
	}

	/* invalidate the aligned interior, 4KB per lock hold */
	while (start < end) {
		phys_addr_t blk_4k = 4096;
		phys_addr_t blk_end = start + min(end - start, blk_4k);

		while (start < blk_end) {
			l2cache_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		l2cache_sync();

		if (blk_end < end) {
			/* to prevent lock starvation */
			spin_unlock_irqrestore(&l2cache_lock, flags);
			spin_lock_irqsave(&l2cache_lock, flags);
		}
	}
	spin_unlock_irqrestore(&l2cache_lock, flags);

	return;
}

/*
 * l2cache_clean_range - clean (write back) the L2 lines covering [start, end)
 * @start: physical start address
 * @end:   physical end address (exclusive)
 *
 * Unlike invalidate, cleaning the extra bytes of an unaligned edge line is
 * harmless, so @start is simply rounded down and the loop runs to @end.
 * Work proceeds in 4KB batches, dropping the spinlock between batches to
 * bound irq-off latency.
 */
static void l2cache_clean_range(phys_addr_t start, phys_addr_t end)
{
	unsigned long flags;
	unsigned long tmp_value;

	spin_lock_irqsave(&l2cache_lock, flags);

	/* nothing to do while the L2 is disabled */
	tmp_value = readl(l2cache_base + REG_L2_CTRL);
	if (!(tmp_value & L2_CTRL_CACHE_ENABLE)) {
		spin_unlock_irqrestore(&l2cache_lock, flags);
		return;
	}

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		phys_addr_t blk_4k = 4096;
		phys_addr_t blk_end = start + min(end - start, blk_4k);

		while (start < blk_end) {
			l2cache_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		l2cache_sync();

		if (blk_end < end) {
			/* drop the lock briefly to prevent lock starvation */
			spin_unlock_irqrestore(&l2cache_lock, flags);
			spin_lock_irqsave(&l2cache_lock, flags);
		}
	}
	spin_unlock_irqrestore(&l2cache_lock, flags);

	return;
}

/*
 * l2cache_flush_range - clean+invalidate the L2 lines covering [start, end)
 * @start: physical start address
 * @end:   physical end address (exclusive)
 *
 * @start is rounded down to a line boundary; the loop naturally covers the
 * line containing an unaligned @end.  Work proceeds in 4KB batches with the
 * lock dropped between batches; note the single l2cache_sync() is issued
 * after the whole range, unlike the per-batch sync in inv/clean_range.
 */
static void l2cache_flush_range(phys_addr_t start, phys_addr_t end)
{
	unsigned long flags;
	unsigned long tmp_value;

	spin_lock_irqsave(&l2cache_lock, flags);

	/* nothing to do while the L2 is disabled */
	tmp_value = readl(l2cache_base + REG_L2_CTRL);
	if (!(tmp_value & L2_CTRL_CACHE_ENABLE)) {
		spin_unlock_irqrestore(&l2cache_lock, flags);
		return;
	}

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		phys_addr_t blk_4k = 4096;
		phys_addr_t blk_end = start + min(end - start, blk_4k);

		while (start < blk_end) {
			l2cache_flush_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* drop the lock briefly to prevent lock starvation */
			spin_unlock_irqrestore(&l2cache_lock, flags);
			spin_lock_irqsave(&l2cache_lock, flags);
		}
	}
	l2cache_sync();
	spin_unlock_irqrestore(&l2cache_lock, flags);

	return;
}

#ifdef	CONFIG_OF
/* DT match table: this driver attaches to the standard PL310 compatible. */
static const struct of_device_id l2x0_ids[] __initconst = {
	{.compatible = "arm,pl310-cache", .data = NULL},
	{}
};
#endif

/* outer_cache.disable hook: write back everything, then turn the
 * controller off under the maintenance lock. */
static void l2cache_disable(void)
{
	unsigned long flags;

	outer_flush_all();
	spin_lock_irqsave(&l2cache_lock, flags);
	writel(0, l2cache_base + REG_L2_CTRL);
	spin_unlock_irqrestore(&l2cache_lock, flags);
}

/*
 * Parse the "l2_cache=" kernel command line option.
 *
 * Recognized values (SD511X only): "l2hi", "l2x0", "ignore_info";
 * anything else (including a missing value) selects L2_CACHE_TYPE_NONE.
 * Always returns 1 so the option is consumed.
 */
static int __init l2_cache_setup(char *s)
{
#ifdef CONFIG_MACH_SD511X
	l2_cache_switch = L2_CACHE_TYPE_NONE;

	if (s) {
		if (!strncmp(s, "l2hi", 4))
			l2_cache_switch = L2_CACHE_TYPE_HISI;
		else if (!strncmp(s, "l2x0", 4))
			l2_cache_switch = L2_CACHE_TYPE_L2X0;
		else if (!strncmp(s, "ignore_info", 11))
			l2_cache_switch = L2_CACHE_TYPE_IGNORE;
	}
#endif
	return 1;
}

__setup("l2_cache=", l2_cache_setup);

#ifdef CONFIG_MACH_SD511X
#ifdef CONFIG_OF
/*
 * find_sd511x_auxval - look up the PL310 auxval/auxmask pair in the DT
 * @chip_id: chip revision used to form the per-chip property name
 * @auxval:  out buffer for two u32 values: { auxval, auxmask }
 *
 * Tries the generic "auxval-id" property first, then the chip-specific
 * "auxval-id-<chip_id>" one.  Returns 0 on success or -ENODEV.
 *
 * Return type is int (not u32) because -ENODEV is a negative errno.
 * NOTE(review): the compatible string "l2-cahe,extra-auxval" looks like a
 * typo for "l2-cache" but must match the device tree -- do not "fix" it
 * without updating the DT.
 */
static int find_sd511x_auxval(int chip_id, u32 *auxval)
{
	char auxval_id[16];
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "l2-cahe,extra-auxval");
	if (!node) {
		pr_info("No l2-cahe,extra-auxval node\n");
		return -ENODEV;
	}

	if (of_property_read_u32_array(node, "auxval-id", auxval, 2)) {
		/* fall back to the chip-specific property */
		snprintf(auxval_id, sizeof(auxval_id), "%s%d", "auxval-id-", chip_id);

		if (of_property_read_u32_array(node, auxval_id, auxval, 2)) {
			pr_err("dts err 11: No %s\n", auxval_id);
			return -ENODEV;
		}
	}

	return 0;
}
#else
/* !CONFIG_OF stub: no device tree, so no auxval can be found.
 * Return type is int (not u32) because -ENODEV is a negative errno. */
static int find_sd511x_auxval(int chip_id, u32 *auxval)
{
	printk(KERN_ERR"dts err: NO set l2 cache auxval\n");
	return -ENODEV;
}
#endif
#endif

/*
 * l2cache_init - probe, configure and enable the hisilicon L2 cache
 * @base:    MMIO base of the controller; used only in the !CONFIG_OF build
 *           (with CONFIG_OF the address is taken from the device tree)
 * @aux_val: bits OR-ed into the auxiliary control register (!CONFIG_OF only)
 *
 * Disables the controller, programs the auxiliary control register, masks
 * and clears interrupts, optionally programs an address filter region and
 * the "set_emaw" workaround bit, invalidates all ways, enables the cache
 * and finally installs the outer_cache callbacks.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): in the !CONFIG_OF build, 'node' is not declared and
 * 'l2_def_enable' / clean_and_invalid_sync are never assigned before use
 * below -- that configuration looks like it cannot compile as-is; verify.
 */
int __init l2cache_init(void __iomem *base, __u32 aux_val)
{
	volatile unsigned int aux;	/* NOTE(review): volatile looks unnecessary here */
	unsigned int l2_def_enable;
	unsigned long flags;
	u32 auxval;
	u32 filter_addr_min = 0;
	u32 filter_addr_max = 0;

#ifdef CONFIG_OF
	struct device_node *node;
	struct resource res;
#endif

#ifdef CONFIG_RTOS_HAL_CACHE_L2X0
	/* the machine descriptor already configures a generic l2c -- defer */
	if (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)
		return 0;
#endif

#ifdef CONFIG_MACH_SD511X
	u32 auxval_mask[2]; /* auxval , auxmask */

	if (l2_cache_switch == L2_CACHE_TYPE_IGNORE)
		return 0;

	/* bootarg selected the standard ARM l2x0 driver instead of this one */
	if (l2_cache_switch == L2_CACHE_TYPE_L2X0) {
		hi_chip_id = hi_kernel_get_chip_id();
		if (!find_sd511x_auxval(hi_chip_id, auxval_mask)) {
			if (l2x0_of_init(auxval_mask[0], auxval_mask[1]))
				printk(KERN_ERR "l2x0_of_init failed!\n");
		} else {
			pr_info("dts not find auxval!\n");
		}
		return 0;
	} else if (l2_cache_switch != L2_CACHE_TYPE_HISI) {
		printk(KERN_ERR"The bootargs of l2_cache= is not on, maybe the soc haven't l2 cache\n");
		return -ENODEV;
	}
#endif
#ifndef	CONFIG_OF
	if (base == NULL) {
		pr_err("Invalid L2cache base address\n");
		return -ENOMEM;
	}

	auxval = aux_val;
	l2cache_base = base;
#else
	/* line 32byte; lines per way: 1024; ways: 8 */
	node = of_find_matching_node(NULL, l2x0_ids);
	if (!node) {
		pr_err("The node of l2 cache is null, maybe the soc haven't l2 cache\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		pr_err("of_address_to_resource in l2 cache init failed!\n");
		return -ENODEV;
	}

	node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!node) {
		pr_err("dts err: No pl310-cache.\n");
		return -ENODEV;
	}

	/* geometry and flush type from DT (sets l2_flush_type used below) */
	if (arch_l2_param(node)) {
		pr_err("analyse arch_l2_param fail!\n");
		return -ENODEV;
	}

	if (of_property_read_u32(node, "aux_reg", &auxval)) {
		pr_err("dts err: No aux_reg!\n");
		return -ENODEV;
	}

	/* optional address filter window; both must be present and non-zero */
	of_property_read_u32(node, "filter_addr_min", &filter_addr_min);
	of_property_read_u32(node, "filter_addr_max", &filter_addr_max);

	/* enable the cache by default unless the DT says otherwise */
	if (of_property_read_u32(node, "l2_def_enable", &l2_def_enable))
		l2_def_enable = 1;

	l2cache_base = ioremap(res.start, resource_size(&res));
	if (WARN_ON(!l2cache_base)) {
		pr_err("ioremap in l2 cache init failed!\n");
		return -ENOMEM;
	}

	/* select combined op+sync maintenance registers if the DT asks */
	if (!strcmp(l2_flush_type, L2CACHE_CLEAN_AND_INVALID_SYNC))
		clean_and_invalid_sync = 1;
	else
		clean_and_invalid_sync = 0;
#endif
	spin_lock_irqsave(&l2cache_init_lock, flags);

	aux = readl(l2cache_base + REG_L2_CTRL);
	rmb();
	if (unlikely(aux != 0)) {
		pr_err("L2 cache is enable during kernel starting!!!\n");
		pr_err("We trying to force disable L2 cache!!!\n");
	}

	/* disable L2cache */
	writel(0, l2cache_base + REG_L2_CTRL);
	wmb();

	/* OR the configured bits into the auxiliary control register */
	aux = readl(l2cache_base + REG_L2_AUCTRL);
	rmb();
	aux |= auxval;
	writel(aux, l2cache_base + REG_L2_AUCTRL);
	wmb();

	/* mask all L2 cache interrupt */
	writel(0, l2cache_base + REG_L2_INTMASK);
	wmb();
	/* clear any interrupts already raised */
	aux = readl(l2cache_base + REG_L2_RINT);
	rmb();
	writel(aux, l2cache_base + REG_L2_INTCLR);
	wmb();

	/* clean L2_SPECIAL_CHECK0 and L2_SPECIAL_CHECK1 */
	writel(0x0, l2cache_base + REG_L2_SPECIAL_CHECK0);
	writel(0x0, l2cache_base + REG_L2_SPECIAL_CHECK1);
	pr_info("clean_l2_special_check ok\n");

	/*
	 * filter address 0x20000000~0xC0000000 for HI1210 to bypass a DDR
	 * contoller ECC bug.
	 */
	if ((filter_addr_min < filter_addr_max) &&
			filter_addr_min != 0 &&
			filter_addr_max != 0) {
		aux = readl(l2cache_base + REG_L2_REGION0);
		aux &= ~REG_L2_REGION_MASK;
		aux |= (1 << BIT_REG_L2_REGION_FILTER_EN);
		aux |= ((filter_addr_max >> 20) << 12);	/* window end, 1MB units */
		aux |= (filter_addr_min >> 20);		/* window start, 1MB units */
		writel(aux, l2cache_base + REG_L2_REGION0);
	}

	/* avoid l2-ecc-parity error for 1212 and 1380 */
	if (of_property_read_bool(node, "set_emaw")) {
		aux = readl(l2cache_base + REG_L2_SPECIAL_CTRL1);
		wmb();
		aux |= 0x8;
		writel(aux, l2cache_base + REG_L2_SPECIAL_CTRL1);
		wmb();
	}

	/* start from a known-empty cache before enabling */
	l2cache_inv_all();

	if (l2_def_enable != 0)
		writel(L2_CTRL_CACHE_ENABLE, l2cache_base + REG_L2_CTRL);
	wmb();

	/* publish the maintenance callbacks to the outer-cache framework */
	outer_cache.inv_range = l2cache_inv_range;
	outer_cache.clean_range = l2cache_clean_range;
	outer_cache.flush_range = l2cache_flush_range;
	outer_cache.flush_all = l2cache_flush_all;
	outer_cache.clean_all = l2cache_clean_all;
	outer_cache.inv_all = l2cache_inv_all;
	outer_cache.disable = l2cache_disable;
#ifdef CONFIG_OUTER_CACHE_SYNC
	outer_cache.sync = l2cache_sync;
#endif

	spin_unlock_irqrestore(&l2cache_init_lock, flags);

	if (readl(l2cache_base + REG_L2_CTRL))
		pr_info("L2cache cache controller enabled\n");

	return 0;
}

/*
 * l2cache_exit - flush and disable the L2 cache controller
 *
 * Safe to call when the cache was never initialized or is already
 * disabled; those cases only log and return.
 */
void l2cache_exit(void)
{
	unsigned int val;
	unsigned long flags;

	/* idiomatic NULL test instead of comparing the pointer to 0 */
	if (!l2cache_base) {
		pr_err("l2cache_base NULL\n");
		return;
	}

	val = readl(l2cache_base + REG_L2_CTRL);
	if ((val & L2_CTRL_CACHE_ENABLE) == 0) {
		pr_warn("L2 cache is disabled already\n");
		return;
	}

	spin_lock_irqsave(&l2cache_lock, flags);
	/* write everything back, then turn the controller off */
	l2cache_flush_all();
	/* disable L2cache */
	writel(0, l2cache_base + REG_L2_CTRL);
	spin_unlock_irqrestore(&l2cache_lock, flags);
}

#ifdef CONFIG_OF
/* Boot-time probe: with CONFIG_OF the base address and aux value come from
 * the device tree, so pass NULL/0 (NULL instead of integer 0 for the
 * pointer argument). */
static int __init early_l2cache_init(void)
{
	return l2cache_init(NULL, 0);
}
arch_initcall(early_l2cache_init);
#endif
