/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Description: The linux kernel community does not support
 * the use of kasan on ppc64 architecture, this patch makes it available.
 * Author: chenjingwen <chenjingwen6@huawei.com>
 * Create: 2020-12-09
 */

#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#define RTOS_PPC_TLB_VALID_SHIT 31

#include <asm/barrier.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/paca.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/moduleloader.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <mm/mmu_decl.h>

/*
 * a) Note that tmp_pg_dir is used only in kasan_init temporarily.
 * It is used to avoid unmanageable kernel page fault when
 * pagetable is being modified.
 *
 * b) We have to make it aligned to PGD_TABLE_SIZE because it points
 * to a page directory table.
 *
 * c) With '__initdata' attribute, the memory of tmp_pg_dir will be
 * automatically freed. Because at the very end of kernel init,
 * __init[data] sections are freed in free_initmem().
 */
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_TABLE_SIZE);

/*
 * Allocate @size bytes of zeroed boot memory aligned to @align.
 * Never returns NULL: allocation failure is fatal at this stage,
 * so we panic instead of propagating an error.
 */
static inline __init void *kasan_alloc_zeroed_memblock(unsigned long size,
					 unsigned long align)
{
	void *mem;

	/* memblock_alloc() hands back zero-initialized memory. */
	mem = memblock_alloc(size, align);
	if (!mem)
		panic("Failed to alloc memory for kasan\n");

	return mem;
}

/*
 * Install a PTE for virtual address @ea pointing at physical address
 * @pa with protection @prot, allocating any missing intermediate
 * page-table levels from memblock.
 *
 * Intermediate levels that still point at the shared early-shadow
 * tables are replaced with freshly allocated zeroed tables, since the
 * shared tables must not be modified per-address.
 *
 * Always returns 0: allocation failure panics inside the helper.
 */
static int kasan_map_pagetable(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	/* Empty or early-shadow p4d entry: hang a private PUD table off it. */
	if (p4d_none(*p4dp) || (void *)p4d_pgtable(*p4dp) == kasan_early_shadow_pud) {
		if ((void *)p4d_pgtable(*p4dp) == kasan_early_shadow_pud)
			pr_notice("overwrite early_shadow_pud: ea: %lx, pa: %pa\n", ea, &pa);
		pudp = kasan_alloc_zeroed_memblock(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	/* Same treatment one level down: give the PUD a private PMD table. */
	if (pud_none(*pudp) || (void *)pud_pgtable(*pudp) == kasan_early_shadow_pmd) {
		pmdp = kasan_alloc_zeroed_memblock(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	/* And a private PTE page for the PMD entry covering @ea. */
	if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
		ptep = kasan_alloc_zeroed_memblock(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

	/* need a mb after set_pte_at */
	smp_wmb();
	return 0;
}

/*
 * Allocate and map shadow memory for one kernel memory region.
 *
 * start: kernel virtual address of the region
 * size: memory area size in bytes
 *
 * Returns 0 on success; allocation failure panics inside the helper.
 */
int __init kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur;
	void *block;

	/*
	 * Grab the whole shadow range as one contiguous chunk. The
	 * allocator panics on failure, so block is always valid here
	 * (the old "block ? ... : alloc-per-page" fallback was dead code).
	 */
	block = kasan_alloc_zeroed_memblock(k_end - k_start, PAGE_SIZE);

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		/*
		 * NOTE(review): if k_start is not page-aligned, the first
		 * iteration computes va slightly below block, as the
		 * original code did — confirm callers pass aligned regions.
		 */
		void *va = block + (k_cur - k_start);

		if (kasan_map_pagetable(k_cur, __pa(va), PAGE_KERNEL))
			panic("can not allocate memory for kasan\n");
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}

/*
 * Invalidate the early-shadow TLB entry on the calling CPU only.
 *
 * The entry was installed with IPROT in exceptions-64e.S (see the
 * comment at the call site in kasan_init()), so a broadcast tlbivax
 * would not remove it; we must overwrite the exact slot with tlbwe.
 */
static void kasan_local_invalidate_early_shadow_tlb(void)
{
	/* Select the TLB array and entry slot used by the early shadow. */
	mtspr(SPRN_MAS0, MAS0_TLBSEL(KASAN_EARLY_TLBSEL) |
		MAS0_ESEL(KASAN_EARLY_ESEL));
	/* MAS1 = 0: entry written back without the VALID bit set. */
	mtspr(SPRN_MAS1, 0);
	mtspr(SPRN_MAS2, MAS2_M_IF_NEEDED);
	asm volatile("tlbwe");
	/* Order the TLB write before subsequent accesses. */
	asm volatile("sync");
	isync();
}

/* Trampoline matching the smp_call_func_t signature on_each_cpu() expects. */
static void kasan_invalidate_early_shadow_tlb_fn(void *unused)
{
	kasan_local_invalidate_early_shadow_tlb();
}

/*
 * Invalidate the early-shadow TLB entry on every online CPU
 * (just the local CPU on non-SMP builds).
 *
 * Calling a function through a pointer cast to an incompatible type
 * is undefined behavior in C, so use a proper void(void *) trampoline
 * instead of casting kasan_local_invalidate_early_shadow_tlb().
 */
void kasan_invalidate_early_shadow_tlb(void)
{
#ifdef CONFIG_SMP
	on_each_cpu(kasan_invalidate_early_shadow_tlb_fn, NULL, 1);
#else
	kasan_local_invalidate_early_shadow_tlb();
#endif
}

/*
 * Point every slot of the given PTE table at the shared zero page
 * (kasan_early_shadow_page) with protection @prot.
 */
static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	unsigned long va = (unsigned long)kasan_early_shadow_page;
	pte_t entry = pfn_pte(PHYS_PFN(pa), prot);
	int idx;

	for (idx = 0; idx < PTRS_PER_PTE; idx++)
		__set_pte_at(&init_mm, va, ptep + idx, entry, 0);
}

/* Make every PMD slot refer to the shared early-shadow PTE table. */
static void __init kasan_populate_pmd(pmd_t *pmdp)
{
	pmd_t *entry;

	for (entry = pmdp; entry < pmdp + PTRS_PER_PMD; entry++)
		pmd_populate_kernel(&init_mm, entry, kasan_early_shadow_pte);
}

/* Make every PUD slot refer to the shared early-shadow PMD table. */
static void __init kasan_populate_pud(pud_t *pudp)
{
	pud_t *entry;

	for (entry = pudp; entry < pudp + PTRS_PER_PUD; entry++)
		pud_populate(&init_mm, entry, kasan_early_shadow_pmd);
}

/*
 * The early shadow maps everything to a single page of zeroes.
 * Note that there is no p4d in powerpc64, p4d is pgd indeed.
 */
void __init kasan_map_early_shadow_ro(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	pgd_t *pgdp = pgd_offset_k(addr);

	kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
	kasan_populate_pmd(kasan_early_shadow_pmd);
	kasan_populate_pud(kasan_early_shadow_pud);

	/*
	 * Walk the whole shadow range one PGD entry at a time. The p4d
	 * pointer must be re-derived from the advancing pgdp on every
	 * iteration: the previous code computed it once before the loop,
	 * so each pass re-populated the first entry while pgdp moved past
	 * entries that were never populated.
	 */
	do {
		p4d_t *p4dp = p4d_offset(pgdp, addr);

		p4d_populate(&init_mm, p4dp, kasan_early_shadow_pud);
	} while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);

	__memset(kasan_early_shadow_page, 0, PAGE_SIZE);
}

/*
 * Main KASAN setup: replace the early catch-all shadow mapping with
 * real per-page shadow memory covering every memblock memory range,
 * then drop the early-shadow TLB entry and enable KASAN reports.
 */
void __init kasan_init(void)
{
	int ret;
	u64 i;
	phys_addr_t pa_start, pa_end;
	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should re-populate page table
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
	 */
	__memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	smp_wmb(); // sync after modifying the pointer to page table directory
	/* Run on the copied tables while swapper_pg_dir is being rewritten. */
	get_paca()->kernel_pgd = tmp_pg_dir;

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = __va(pa_start);
		void *end = __va(pa_end);

		if (start >= end)
			break;

		/* Allocate and map shadow pages for this memory range. */
		ret = kasan_init_region(start, (end - start));
		if (ret)
			panic("kasan_init_region() failed");
	}

	/* Shadow is fully mapped: switch back to the real page tables. */
	get_paca()->kernel_pgd = swapper_pg_dir;

	/*
	 * explicitly clear tlb entry when set in exceptions-64e.S
	 * Otherwise this entry can't be used because it has been set IPROT
	 *
	 * NOTE(review): only the local (boot) CPU's entry is cleared here;
	 * presumably secondaries are handled via
	 * kasan_invalidate_early_shadow_tlb() — confirm.
	 */
	kasan_local_invalidate_early_shadow_tlb();

	/* Enable error messages */
	init_task.kasan_depth = 0;
	pr_notice("KernelAddressSanitizer initialized\n");
}

/*
 * Reserve memory for kasan in early initialization.
 * The mapping between physical memory and virtual address
 * is set by tlb in exceptions-64e.S
 */
void __init kasan_early_init(void)
{
	phys_addr_t base = (phys_addr_t)KASAN_EARLY_SHADOW_PHYSICAL_START;
	phys_addr_t size = (phys_addr_t)KASAN_EARLY_SHADOW_PHYSICAL_SIZE;

	/* Keep the early shadow's backing memory away from the allocator. */
	memblock_reserve(base, size);
}

#ifdef CONFIG_MODULES
/*
 * Allocate @size bytes for a kernel module's sections.
 *
 * NOTE(review): plain kmalloc() memory is not guaranteed to be
 * executable or to lie within the module address range on all
 * configurations — presumably intentional for this KASAN setup;
 * confirm against the platform's module loader expectations.
 */
void *module_alloc(unsigned long size)
{
	return kmalloc(size, GFP_KERNEL);
}

/* Release module memory; pairs with the kmalloc() in module_alloc(). */
void module_memfree(void *module_region)
{
	kfree(module_region);
}
#endif
