/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Based on arch/arm64/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 */

#ifndef __LIBLINUX_ASM_TLBFLUSH_H
#define __LIBLINUX_ASM_TLBFLUSH_H

#ifdef CONFIG_ARM64
#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 *	TLB Invalidation
 *	================
 *
 *	This header file provides the low-level TLB invalidation interface
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'.  Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect walk-cache entries if
 *		'last_level' is false.  'tlb_level' is the level of the
 *		page-table entries being invalidated, or 0 when the level is
 *		not known.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
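/*
 * In this liblinux header the routines below are provided as empty stubs.
 * For reference only, a full arm64 implementation of flush_tlb_all() would
 * follow the template above and issue roughly:
 *
 *	dsb(ishst);		// DSB ISHST
 *	__tlbi(vmalle1is);	// TLBI VMALLE1IS: whole TLB, inner-shareable
 *	dsb(ish);		// DSB ISH
 *	isb();			// ISB
 *
 * The dsb()/isb()/__tlbi() helpers are kernel-internal and are not defined
 * in this file.
 */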
static inline void local_flush_tlb_all(void)
{
}

static inline void flush_tlb_all(void)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
}
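
/*
 * Note: upstream, flush_tlb_page() is flush_tlb_page_nosync() followed by a
 * dsb(ish) completion barrier; the _nosync variant lets callers batch
 * several invalidations and wait for them once.  Both are empty stubs here.
 */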

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE
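
/*
 * Sketch of how MAX_TLBI_OPS is used upstream (not implemented here): the
 * range routines compare the number of per-page invalidations a request
 * would generate against this limit and fall back to a full flush rather
 * than looping, roughly:
 *
 *	if ((end - start) >> PAGE_SHIFT > MAX_TLBI_OPS)
 *		flush_tlb_mm(vma->vm_mm);	// flush_tlb_all() for kernel ranges
 *	else
 *		// one TLBI per 'stride' within [start, end)
 */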

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Use a tlb_level of 0 because we cannot determine the level here.
	 */
}
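
/*
 * For comparison, the upstream arm64 flush_tlb_range() simply delegates to
 * __flush_tlb_range() with a PAGE_SIZE stride, last_level == false and
 * tlb_level == 0:
 *
 *	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
 */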

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
}
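
/*
 * Upstream, __flush_tlb_kernel_pgtable() uses a broadcast invalidation by VA
 * that matches all ASIDs and is not restricted to last-level entries
 * (TLBI VAAE1IS), so walk-cache entries for the intermediate levels are
 * dropped as well; sketched:
 *
 *	dsb(ishst);
 *	__tlbi(vaae1is, __TLBI_VADDR(kaddr, 0));
 *	dsb(ish);
 *	isb();
 */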
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_ARM64 */

#ifdef CONFIG_ARM
#ifndef __ASSEMBLY__

/* Forward declarations so the stubs below do not depend on <linux/mm_types.h>. */
struct mm_struct;
struct vm_area_struct;

static inline void local_flush_tlb_all(void)
{
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
}

static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
					unsigned long end)
{
}

static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
}

static inline void local_flush_bp_all(void)
{
}

static inline void flush_tlb_all(void)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
}

static inline void flush_tlb_kernel_page(unsigned long kaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
}

static inline void flush_bp_all(void)
{
}

#endif	/* __ASSEMBLY__ */
#endif /* CONFIG_ARM */

#endif /* __LIBLINUX_ASM_TLBFLUSH_H */
