/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * clean and invalidate one level cache by set/way.
 *
 * x0: cache level (0-based; written to CSSELR_EL1[3:1] as level << 1)
 * x1~x9: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x1, x0, #1
	msr	csselr_el1, x1		/* select cache level, InD=0 (data/unified) */
	isb				/* isb to sych the new cssr & csidr */
	mrs	x6, ccsidr_el1		/* read the new ccsidr */
	and	x2, x6, #7		/* x2 <- log2(line size) - 4 */
	add	x2, x2, #4		/* x2 <- log2(line size) = set-field shift */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max way number (ways - 1) */
	clz	w5, w3			/* x5 <- bit position of way field in cisw */
	mov	x4, #0x7fff
	/*
	 * NumSets is CCSIDR_EL1[27:13]; extract it from the ccsidr copy in
	 * x6, not from x1 (x1 holds cache level << 1).
	 */
	and	x4, x4, x6, lsr #13	/* x4 <- max number of the set size */
	/* x1 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of way size */

loop_set:
	mov	x6, x3			/* create working copy of way size */
loop_way:
	lsl	x7, x6, x5
	/*
	 * DC CISW takes the cache level in bits [3:1], i.e. level << 1,
	 * which is x1 (not the raw level in x0).
	 */
	orr	x9, x1, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	dc	cisw, x9		/* clean & invalidate by set/way */
	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

/*
 * void __asm_flush_dcache_all(void)
 *
 * clean and invalidate all data cache by SET/WAY, walking cache levels
 * 0 .. LoC-1 as described by CLIDR_EL1.
 *
 * Runs without a stack: the return address is parked in x15 across the
 * bl below (the callee clobbers only x1~x9).
 */
ENTRY(__asm_flush_dcache_all)
	dsb	sy
	mov	x15, lr			/* save lr; bl below overwrites it */
	mrs	x10, clidr_el1		/* read clidr */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (clidr[26:24], level of coherence) */
	cbz	x11, finished		/* if loc is 0, no need to clean */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */

loop_level:
	lsl	x1, x0, #1
	add	x1, x1, x0		/* x1 <- 3x cache level (Ctype field offset) */
	lsr	x1, x10, x1
	and	x1, x1, #7		/* x1 <- cache type */
	cmp	x1, #2
	b.lt	skip			/* skip if no cache or icache-only at this level */
	bl	__asm_flush_dcache_level
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

finished:
	mov	x0, #0
	msr	csselr_el1, x0		/* switch back to cache level 0 */
	dsb	sy
	isb
	mov	lr, x15			/* restore return address */
	ret
ENDPROC(__asm_flush_dcache_all)

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache lines covering [start, end)
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mov	x2, #4			/* bytes per word */
	mrs	x3, ctr_el0		/* read cache type register */
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* DminLine: log2(words) of smallest d-line */
	lsl	x2, x2, x3		/* x2 <- minimum d-cache line size in bytes */

	sub	x3, x2, #1		/* line mask */
	bic	x0, x0, x3		/* align start down to a line boundary */
.Lflush_range_loop:
	dc	civac, x0		/* clean & invalidate D/unified line by VA */
	add	x0, x0, x2		/* advance one cache line */
	cmp	x0, x1
	b.lo	.Lflush_range_loop	/* loop until start >= end */
	dsb	sy			/* complete maintenance before returning */
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries, inner shareable (to PoU).
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis			/* icache invalidate all, inner shareable */
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
