// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <asm/stacktrace.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <liblinux/sysfs.h>

/* mm/mmu.c */
/*
 * Kernel-image virtual/physical offset, exported for address translation
 * helpers (see arch/arm64 virt_to_phys machinery). Set up elsewhere in the
 * liblinux port; defined here only to satisfy the symbol.
 */
u64 kimage_voffset __read_mostly;
EXPORT_SYMBOL(kimage_voffset);

/* mm/init.c */
/*
 * DMA physical-address limit. Initialised to PHYS_MASK + 1 (one past the
 * highest addressable physical byte), i.e. no DMA zone restriction here.
 */
phys_addr_t arm64_dma_phys_limit __read_mostly = PHYS_MASK + 1;

/* mm/ioremap.c */
/* mm/ioremap.c */
/*
 * Map a physical region into the kernel virtual address space via the PAL.
 *
 * @phys_addr: physical base of the region (need not be page aligned)
 * @size:      length of the region in bytes
 * @prot:      arm64 pte protection bits. PTE_UXN/PTE_PXN drop PROT_EXEC,
 *             a missing PTE_WRITE drops PROT_WRITE, and the memory-attribute
 *             index selects the PAL remap type (device memory by default).
 *
 * Returns the mapped virtual address with the sub-page offset re-applied,
 * or NULL for a zero-sized, wrapping, or out-of-range request.
 */
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	const unsigned long page_off = phys_addr & ~PAGE_MASK;
	unsigned long prot_bits = PROT_READ | PROT_WRITE | PROT_EXEC;
	unsigned int remap_type = LIBLINUX_PAL_REMAP_DEVICE;
	unsigned long end;
	u64 attr_idx;
	void __iomem *vaddr;

	/* Round the request out to whole pages, folding in the offset. */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + page_off);

	/* Reject zero size, address wraparound, and anything past PHYS_MASK. */
	end = phys_addr + size - 1;
	if (!size || end < phys_addr || (end & ~PHYS_MASK))
		return NULL;

	/* Translate pte protection bits into POSIX-style protection flags. */
	if (pgprot_val(prot) & (PTE_UXN | PTE_PXN))
		prot_bits &= ~PROT_EXEC;
	if (!(pgprot_val(prot) & PTE_WRITE))
		prot_bits &= ~PROT_WRITE;

	/* Pick the PAL mapping type from the memory-attribute index. */
	attr_idx = pgprot_val(prot) & PTE_ATTRINDX_MASK;
	if (attr_idx == PTE_ATTRINDX(MT_NORMAL_NC))
		remap_type = LIBLINUX_PAL_REMAP_NORMAL_NC;
	else if (attr_idx == PTE_ATTRINDX(MT_NORMAL))
		remap_type = LIBLINUX_PAL_REMAP_CACHE;

	vaddr = liblinux_pal_ioremap_ex(phys_addr, size, prot_bits, remap_type,
					__builtin_return_address(0));
	return vaddr ? vaddr + page_off : NULL;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Map a physical region with normal cacheable attributes.
 *
 * @phys_addr: physical base of the region (need not be page aligned)
 * @size:      length of the region in bytes
 *
 * The mapping is read/write, non-executable. Returns the mapped virtual
 * address with the sub-page offset re-applied, or NULL for a zero-sized,
 * wrapping, or out-of-range request.
 */
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	const unsigned long page_off = phys_addr & ~PAGE_MASK;
	unsigned long end;
	void __iomem *vaddr;

	/* Round the request out to whole pages, folding in the offset. */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + page_off);

	/* Reject zero size, address wraparound, and anything past PHYS_MASK. */
	end = phys_addr + size - 1;
	if (!size || end < phys_addr || (end & ~PHYS_MASK))
		return NULL;

	vaddr = liblinux_pal_ioremap_ex(phys_addr, size,
					PROT_READ | PROT_WRITE,
					LIBLINUX_PAL_REMAP_CACHE,
					__builtin_return_address(0));
	return vaddr ? vaddr + page_off : NULL;
}
EXPORT_SYMBOL(ioremap_cache);

/*
 * Tear down a mapping created by __ioremap()/ioremap_cache().
 *
 * The sub-page offset added by the mapping routines is stripped so the
 * PAL receives the page-aligned base address it originally handed out.
 */
void iounmap(volatile void __iomem *io_addr)
{
	void *page_addr = (void *)((unsigned long)io_addr & PAGE_MASK);

	liblinux_pal_vm_unmap(page_addr);
}
EXPORT_SYMBOL(iounmap);

/* kernel/alternative.c */
/*
 * No-op stub: the liblinux port performs no alternative-instruction
 * patching on module text, so module load needs no rewriting here.
 */
void apply_alternatives_module(void *start, size_t length)
{
}

/* Effective virtual-address width; fixed to the compile-time VA_BITS here. */
u64 vabits_actual = VA_BITS;
EXPORT_SYMBOL(vabits_actual);

/* arch/arm64/mm/mmu.c */
/*
 * Unimplemented stub: fixmap slots are not supported in the liblinux port.
 * Log each request so callers relying on a real fixmap are easy to spot.
 */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	pr_info("liblinux: UN-IMPL: %s(idx=%d, phys=0x%lx, prot=0x%llx)\n", __func__,
		idx, (unsigned long)phys, pgprot_val(prot));
}

/*
 * Drop one reference on @tsk's kernel stack.
 *
 * refcount_dec_and_test() is __must_check in the kernel API; the original
 * code silently discarded its result. Handle it explicitly so the
 * "last reference dropped" case is visible and the warning is avoided.
 */
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount)) {
		/*
		 * NOTE(review): last reference gone, but the stack is not
		 * released here — confirm its lifetime is tied to the task
		 * and reclaimed elsewhere in the liblinux port.
		 */
	}
}
/* Exported for memory-statistics consumers that walk kernel stack frames. */
EXPORT_SYMBOL(walk_stackframe);

/*
 * SMP bring-up completion hook: kick off liblinux's deferred sysfs
 * initialisation once all CPUs are online. @max_cpus is unused.
 */
void smp_cpus_done(unsigned int max_cpus)
{
	liblinux_sysfs_async_init();
}
