// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Based on mm/vmalloc.c
 *
 * Copyright (C) 1993  Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/mman.h>		/* for PROT_{READ|WRITE|EXEC} */

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

/*
 * is_kmap_addr - test whether an address lies in the linear kmap window.
 *
 * The window starts at the kernel virtual address of PHYS_OFFSET and
 * spans 2^CONFIG_LIBLINUX_KMAP_BITS bytes.
 */
static bool is_kmap_addr(const void *x)
{
	unsigned long a = (unsigned long)x;
	unsigned long base = (unsigned long)phys_to_virt(PHYS_OFFSET);
	unsigned long size = 1UL << CONFIG_LIBLINUX_KMAP_BITS;

	return (a >= base) && (a < base + size);
}

bool is_vmalloc_addr(const void *x)
{
	return !is_kmap_addr(x);
}
EXPORT_SYMBOL(is_vmalloc_addr);

#ifdef CONFIG_LIBLINUX
void __weak arch_module_vmalloc_init(void) { }
void liblinux_init_zone(void);
#endif

/*
 * vmalloc_init - boot-time initialization of the vmalloc shim.
 *
 * Under CONFIG_LIBLINUX this gives the architecture a hook for module
 * vmalloc setup (weak no-op by default, see arch_module_vmalloc_init
 * above) and then initializes the liblinux zone allocator.  Without
 * CONFIG_LIBLINUX this function does nothing.
 */
void __init vmalloc_init(void)
{
#ifdef CONFIG_LIBLINUX
	arch_module_vmalloc_init();
	liblinux_init_zone();
#endif
}

/*
 * __vmalloc_node_range - allocate virtually contiguous memory via the PAL.
 *
 * Only PAGE_KERNEL and PAGE_KERNEL_EXEC protections are supported; any
 * other pgprot is rejected with a warning and a NULL return.  The
 * vm_flags and node arguments are accepted for interface compatibility
 * but are not forwarded to the PAL.  __GFP_ZERO is honoured by zeroing
 * the buffer after allocation, and the allocation is registered with
 * kmemleak.
 *
 * Returns the allocated address, or NULL on failure.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t pgprot, unsigned long vm_flags, int node,
		const void *caller)
{
	bool exec = pgprot_val(pgprot) == pgprot_val(PAGE_KERNEL_EXEC);
	unsigned long mmap_prot = PROT_READ | PROT_WRITE;
	void *area;

	if (!exec && pgprot_val(pgprot) != pgprot_val(PAGE_KERNEL)) {
		pr_warn("~~ UN-IMPL ~~: %s: pgprot=0x%llx\n", __func__,
		       (unsigned long long)pgprot_val(pgprot));
		return NULL;
	}

	if (exec)
		mmap_prot |= PROT_EXEC;

	area = liblinux_pal_vmalloc_range(size, align, start, end, mmap_prot,
					  caller);
	if (!area)
		return NULL;

	if (gfp_mask & __GFP_ZERO)
		memset(area, 0, size);

	kmemleak_alloc(area, size, 1, gfp_mask);
	return area;
}

/*
 * vmalloc_to_page - map a vmalloc'ed address to its struct page.
 *
 * NOTE(review): this simply uses virt_to_page(), which is only valid if
 * the PAL hands out vmalloc addresses inside the direct (linear)
 * mapping — presumably true for liblinux, but confirm against
 * liblinux_pal_vmalloc_range(); on a stock kernel this would be wrong
 * for genuine vmalloc addresses.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	return virt_to_page(vmalloc_addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

void vfree(const void *addr)
{
	if (!addr)
		return;

	kmemleak_free(addr);
	liblinux_pal_vfree(addr);
}
EXPORT_SYMBOL(vfree);

/*
 * vmalloc_exec - allocate virtually contiguous, executable memory.
 *
 * PAGE_KERNEL_EXEC is translated to PROT_EXEC by __vmalloc_node_range().
 */
void *vmalloc_exec(unsigned long size)
{
	const void *caller = __builtin_return_address(0);

	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 0,
			NUMA_NO_NODE, caller);
}

/*
 * __vmalloc_node - allocate from the full vmalloc range with PAGE_KERNEL
 * protection on behalf of a given caller.
 */
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		     int node, const void *caller)
{
	return __vmalloc_node_range(size, align,
				    VMALLOC_START, VMALLOC_END,
				    gfp_mask, PAGE_KERNEL, 0, node, caller);
}

/*
 * __vmalloc_node_flags - convenience wrapper that supplies a default
 * alignment of 1 and records the immediate caller for attribution.
 */
static inline void *__vmalloc_node_flags(unsigned long size,
					 int node, gfp_t flags)
{
	const void *caller = __builtin_return_address(0);

	return __vmalloc_node(size, 1, flags, node, caller);
}

/*
 * __vmalloc - allocate virtually contiguous memory with explicit gfp flags.
 */
void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
	const void *caller = __builtin_return_address(0);

	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, caller);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);

void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/*
 * vmalloc_node - vmalloc() with a preferred NUMA node.  The node hint is
 * forwarded but ultimately ignored by __vmalloc_node_range() in this shim.
 */
void *vmalloc_node(unsigned long size, int node)
{
	const void *caller = __builtin_return_address(0);

	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM,
			      node, caller);
}
EXPORT_SYMBOL(vmalloc_node);

void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

/*
 * vmap - map an array of pages into virtually contiguous space via the PAL.
 * @pages:  array of @count page pointers to map, in order
 * @count:  number of pages
 * @flags:  accepted for interface compatibility but currently ignored
 * @pgprot: requested protection; a write-combine pgprot selects an
 *          uncached (NORMAL_NC) mapping, anything else is cached.
 *          PAGE_KERNEL_EXEC additionally grants PROT_EXEC.
 *
 * A virtual region of count << PAGE_SHIFT bytes is reserved with
 * liblinux_pal_vm_prepare(), then each page is mapped one PAGE_SIZE
 * slot at a time.  On any mapping failure the whole region is torn
 * down with liblinux_pal_vm_unmap() — assumes the PAL unmap handles a
 * partially populated region; TODO confirm against the PAL contract.
 *
 * Returns the base virtual address, or NULL on failure.
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t pgprot)
{
	int ret;
	unsigned int i;
	void *vaddr = NULL;
	unsigned long size;
	unsigned long prot;
	unsigned int flag;

	prot = PROT_READ | PROT_WRITE;
	if (pgprot_val(pgprot) == pgprot_val(PAGE_KERNEL_EXEC))
		prot |= PROT_EXEC;

	/* Write-combine request maps to the PAL's non-cached attribute. */
	flag = LIBLINUX_PAL_REMAP_CACHE;
	if (pgprot_val(pgprot_writecombine(pgprot)) == pgprot_val(pgprot))
		flag = LIBLINUX_PAL_REMAP_NORMAL_NC;

	size = (unsigned long)count << PAGE_SHIFT;
	vaddr = liblinux_pal_vm_prepare(0, size, prot, flag);
	if (vaddr == NULL)
		return NULL;

	/* Populate the reserved region page by page. */
	for (i = 0; i < count; i++) {
		ret = liblinux_pal_vm_mmap(
				page_to_phys(pages[i]),
				((unsigned long)vaddr + i * PAGE_SIZE),
				PAGE_SIZE, prot, flag);
		if (ret < 0) {
			(void)liblinux_pal_vm_unmap(vaddr);
			return NULL;
		}
	}

	return vaddr;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	(void)liblinux_pal_vm_unmap(addr);
}
EXPORT_SYMBOL(vunmap);

/*
 * find_vm_area - stub: vm_struct bookkeeping is not maintained in this
 * shim, so lookups always fail.
 *
 * Always returns NULL; callers must tolerate a NULL result.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	return NULL;
}

/*
 * unmap_kernel_range - flush caches and TLB for a kernel virtual range.
 *
 * NOTE(review): unlike the stock kernel version, no page-table entries
 * are torn down here — only the cache flush and TLB invalidation are
 * performed (and a warning is logged); presumably the PAL owns the
 * actual unmapping — confirm against callers.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	pr_warn("liblinux: %s(addr=0x%lx, size=0x%lx)\n", __func__, addr, size);
	flush_cache_vunmap(addr, end);
	flush_tlb_kernel_range(addr, end);
}

/*
 * vmalloc_sync_all - weak no-op stub; an architecture may override it to
 * synchronize vmalloc mappings across page tables.
 */
void __weak vmalloc_sync_all(void)
{
}
