// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Based on mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/debug.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include <asm/kasan.h>
#include "internal.h"

#include <uapi/linux/mman.h>
#include <linux/genalloc.h>
#include <liblinux/sched.h>

#include <trace/hooks/liblinux.h>

#define LIBLINUX_KMAP_SIZE	(1UL << CONFIG_LIBLINUX_KMAP_BITS)

#if defined(CONFIG_LIBLINUX_CDC) && !defined(CONFIG_KASAN)
/* CDC builds serialize pool access with a spinlock instead of ctx switches */
static DEFINE_SPINLOCK(page_pool_lock);
#else
/* Mark the task uninterruptible because liblinux cannot
 * interrupt an hm futex, and update the workqueue context
 * because the worker may be blocked by hm. */
static void liblinux_ctx_leave(void)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	sched_submit_work(current);
}
/* The task must be running when it re-enters the liblinux context. */
static void liblinux_ctx_enter(void)
{
	sched_update_worker(current);
	__set_current_state(TASK_RUNNING);
}
#endif

/* pages reserved so dirty-page throttling has headroom (legacy knob) */
unsigned long dirty_balance_reserve;
/* GFP bits honoured during early boot; widened once the allocator is up */
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
DEFINE_STATIC_KEY_TRUE(init_on_alloc);
#else
DEFINE_STATIC_KEY_FALSE(init_on_alloc);
#endif
EXPORT_SYMBOL(init_on_alloc);

#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
DEFINE_STATIC_KEY_TRUE(init_on_free);
#else
DEFINE_STATIC_KEY_FALSE(init_on_free);
#endif
EXPORT_SYMBOL(init_on_free);

/* forward declaration: grow the PAL page pool and optionally allocate from it */
static void *__extend_page_pool_alloc(struct liblinux_page_allocator *page_pool, size_t extend_size,
					size_t alloc_size, gfp_t gfp_mask);
/* defined elsewhere; registered below as the sub-shrinker callback */
unsigned long drop_mem(void *priv, int max_nr);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;

/* start-of-RAM physical address, filled in from PAL ram_info at init */
s64 memstart_addr;
EXPORT_SYMBOL(memstart_addr);

/* "common" PAL allocator: default pool for kernel allocations */
static struct liblinux_page_allocator *page_pool_main;
/* "pagecache" PAL allocator: optional pool dedicated to page-cache pages */
static struct liblinux_page_allocator *page_pool_cache;

/* human-readable names indexed by migratetype; order must match the enum */
const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

/* PAL hook that returns memory to the host; defined outside this file */
extern unsigned long lnxbase_shrink_mem(int size);
static void __page_pool_reclaim_handler(struct work_struct *work);
void wake_up_reserved_page(struct page *page);

/* deferred reclaim work, scheduled when a pool reports memory pressure */
struct work_struct reclaim_work = __WORK_INITIALIZER(reclaim_work, __page_pool_reclaim_handler);

/* True when the pool provides a should_reclaim hook and it reports pressure. */
static bool page_pool_should_reclaim(const struct liblinux_page_allocator *page_pool)
{
	return (page_pool->should_reclaim != NULL) &&
	       (page_pool->should_reclaim() != 0);
}

/* Workqueue handler: drain RCU grace periods, then shrink all pool memory. */
static void __page_pool_reclaim_handler(struct work_struct *work)
{
	if (!work) {
		pr_err("page pool reclaim invalid work\n");
		return;
	}
	/* wait for in-flight RCU readers before releasing memory */
	synchronize_rcu();
	(void)lnxbase_shrink_mem(-1);
}

/* Schedule deferred reclaim when @page_pool reports memory pressure. */
static void __page_pool_reclaim_check(const struct liblinux_page_allocator *page_pool)
{
	if (!page_pool_should_reclaim(page_pool))
		return;

	schedule_work(&reclaim_work);
}

static int __mempool_shrink_notify(const struct liblinux_page_allocator *_allocator,
				   unsigned long vaddr, unsigned long long paddr,
				   unsigned long size)
{
	if (_allocator != page_pool_main && _allocator != page_pool_cache)
		return -EINVAL;

	return liblinux_pal_page_alloc_unpopulate(virt_to_page(vaddr),
				ALIGN(size, PAGE_SIZE) / PAGE_SIZE * sizeof(struct page));
}

#ifdef CONFIG_HIGHMEM
/* single template page used to back every highmem vmemmap entry */
static struct page highmem_page = {0};

/* vmemmap descriptor handed to the PAL for the highmem region */
static struct liblinux_vmmemmap_info highmem_info = {
	.page_addr = (uintptr_t)&highmem_page,
	.page_size = sizeof(struct page),
	.vmemmap_start = VMEMMAP_START,
	.vmemmap_size = VMEMMAP_SIZE,
	.flags = LIBLINUX_VMEMMAP_POPULATE_ALL | LIBLINUX_VMEMAP_HIGHMEM,
};
#endif

/*
 * Bootstrap the liblinux page allocator: reserve the vmemmap / PCI / kmap
 * virtual ranges, initialize the PAL pools, and register shrinker hooks.
 * Panics on any unrecoverable setup failure.
 */
void __init page_alloc_init(void)
{
	int ret;
#ifdef CONFIG_PCI
	void *pci_vaddr = NULL;
#endif
#ifdef CONFIG_HIGHMEM
	void *kmap_vaddr = NULL;
#endif
#ifdef CONFIG_LIBLINUX_SUB_SHRINKER
	struct liblinux_pal_sub_shrinker sub_shrinker;
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
	mem_map = (struct page *)VMEMMAP_START;
#endif
	struct liblinux_ram_info ram_info;
	struct liblinux_pool_base pool_base = {
		.kmap_virt_start = (unsigned long)PAGE_OFFSET,
		.kmap_virt_end = (unsigned long)PAGE_OFFSET + LIBLINUX_KMAP_SIZE,
		.phys_offset = 0,
		.page_sizeorder = PAGE_SHIFT,
	};

#ifdef PLAT_PHYS_OFFSET
	pool_base.phys_offset = PHYS_OFFSET;
#endif

#ifdef CONFIG_HIGHMEM
	/* prepare highmem page */
	set_page_zone(&highmem_page, ZONE_HIGHMEM);
#else
	/* reserve virtual address space for `page` with NONE */
	ret = liblinux_pal_reserve_range_name(
			(void *)VMEMMAP_START, VMEMMAP_SIZE, PROT_NONE,
			"[liblinux/VMEMMAP]");
	if (ret < 0) {
		panic("reserve range for VMEMMAP (size=0x%lx) failed: %d\n",
		      VMEMMAP_SIZE, ret);
	}
#endif

#ifdef CONFIG_PCI
	/* reserve virtual space for `PCI IO SPACE` with RW */
	pci_vaddr = liblinux_pal_vm_prepare((unsigned long)PCI_IOBASE, PAGE_ALIGN(IO_SPACE_LIMIT),
					    PROT_READ | PROT_WRITE, LIBLINUX_PAL_REMAP_DEVICE);
	if (pci_vaddr == NULL) {
		panic("reserve range for PCI_IO (0x%p,size=0x%x)\n",
		       PCI_IOBASE, IO_SPACE_LIMIT);
	}
#endif

	ret = liblinux_pal_page_alloc_init(&pool_base, &ram_info);
	if (ret < 0)
		panic("page_alloc init failed: %d\n", ret);
#ifdef CONFIG_HIGHMEM
	ret = liblinux_pal_vmemmap_init(&highmem_info);
	if (ret < 0)
		panic("vmemmap init failed: %d\n", ret);

	/* prepare highmem */
	highmem_kmap_base = FIXADDR_TOP - HIGHMEM_KMAP_SIZE;
	kmap_vaddr = liblinux_pal_vm_prepare(highmem_kmap_base, HIGHMEM_KMAP_SIZE,
					     PROT_NONE, 0);
	if ((unsigned long)kmap_vaddr != highmem_kmap_base)
		panic("reserve range for HIGHMEM at %lx failed\n", highmem_kmap_base);
#endif
	memstart_addr = ram_info.memstart;

	page_pool_main = liblinux_pal_page_allocator_get("common");
	if (page_pool_main == NULL)
		panic("page_pool init failed\n");
	/* non-NULL guaranteed by the panic above; hook the shrink callback */
	page_pool_main->shrink = __mempool_shrink_notify;

	/* the page-cache pool is optional; callers fall back to the main pool */
	page_pool_cache = liblinux_pal_page_allocator_get("pagecache");
	if (page_pool_cache != NULL)
		page_pool_cache->shrink = __mempool_shrink_notify;

	if (ram_info.total_ram > LIBLINUX_KMAP_SIZE) {
		panic("total_ram size(size=0x%lx) out of range reserve for KMAPS (size=0x%lx)\n",
		     (unsigned long)ram_info.total_ram, LIBLINUX_KMAP_SIZE);
	}

	totalram_pages_add(ram_info.total_ram >> PAGE_SHIFT);

	kasan_init();

	/* try to extend page pool for 1M */
	if (page_pool_main->extend != NULL)
		page_pool_main->extend(page_pool_main, get_order(SZ_1M),
						NULL, LIBLINUX_PAL_GFP_NORETRY);

#ifdef CONFIG_LIBLINUX_SUB_SHRINKER
	/* register pool shrink */
	sub_shrinker.priv = NULL;
	sub_shrinker.query = NULL;
	sub_shrinker.shrink = drop_mem;
	liblinux_pal_sub_shrinker_register(page_pool_main, sub_shrinker);
#endif
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
/*
 * liblinux does not track per-pfn validity; always report invalid.
 * NOTE(review): callers relying on pfn_valid() for linear-map pages will
 * always see false here -- confirm this is intended for this platform.
 */
int pfn_valid(unsigned long pfn)
{
	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

/* Map kernel GFP bits onto the PAL allocator's LIBLINUX_PAL_GFP_* flags. */
static unsigned int __gfp_mask_to_page_pool_flag(gfp_t gfp_mask)
{
	unsigned int pal_flags = 0;

	if (gfp_mask & __GFP_NORETRY)
		pal_flags |= LIBLINUX_PAL_GFP_NORETRY;
	if (gfp_mask & __GFP_NOFAIL)
		pal_flags |= LIBLINUX_PAL_GFP_NOFAIL;
	/* NOIO whenever either FS or IO access is forbidden */
	if ((gfp_mask & (__GFP_FS | __GFP_IO)) != (__GFP_FS | __GFP_IO))
		pal_flags |= LIBLINUX_PAL_GFP_NOIO;

	return pal_flags;
}

/*
 * Grow @page_pool by at least @extend_size bytes and, when @alloc_size is
 * non-zero, carve an allocation of that size out of the fresh extension.
 * Returns the kernel virtual address of the allocation, or NULL when the
 * extension failed or no allocation was requested.
 */
static void *__extend_page_pool_alloc(struct liblinux_page_allocator *page_pool,
					size_t extend_size,
					size_t alloc_size,
					gfp_t gfp_mask)
{
	unsigned long rvaddr = 0;
	int ret;
	size_t real_size = extend_size;	/* caller's size before 64K rounding */
	unsigned int flags = __gfp_mask_to_page_pool_flag(gfp_mask);
	unsigned int flags_modified;

	/* extend 64K at least */
	extend_size = ALIGN(extend_size, SZ_64K);

	/* add to page pool */
	if (page_pool->extend_alloc == NULL)
		return NULL;

	/* always no retry and without NOFAIL with 64K force align */
	flags_modified = flags | LIBLINUX_PAL_GFP_NORETRY | LIBLINUX_PAL_GFP_NOEXTRA;
	flags_modified &= ~LIBLINUX_PAL_GFP_NOFAIL;
	ret = page_pool->extend_alloc(page_pool, get_order(extend_size),
			get_order(alloc_size), &rvaddr, (int)flags_modified);
	if (ret < 0) {
#ifdef CONFIG_LIBLINUX_MM_DEBUG
		/* dump stack on first failure to determine which driver requires large memory */
		if ((real_size >= SZ_64K) && ((gfp_mask & __GFP_NORETRY) == 0)) {
			pr_err("extend failed, alloc=0x%zx, extend=0x%zx, gfp_mask=%x, ret=%d\n",
				alloc_size, extend_size, gfp_mask, ret);
			liblinux_pal_thread_dump();
		}
#endif
		/* fallback to 4K */
		extend_size = ALIGN(real_size, SZ_4K);
		ret = page_pool->extend_alloc(page_pool, get_order(extend_size),
				get_order(alloc_size), &rvaddr, flags | LIBLINUX_PAL_GFP_NOEXTRA);
		if (ret < 0)
			goto err_pool;
	}


	/* extend-only callers (alloc_size == 0) deliberately fall through to NULL */
	if (alloc_size != 0)
		return (void *)rvaddr;

err_pool:
	return NULL;
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	unsigned int nr_pages = 1U << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		set_page_count(p, 0);
		set_compound_head(p, page);
	}
}

/*
 * Reset the struct-page metadata for the @size bytes starting at kernel
 * virtual address @addr: zero the entries, set refcount to 1 and clear
 * the mapcount for every page in the range.
 */
static void __prepare_pages_info(void *addr, size_t size)
{
	struct page *first = virt_to_page(addr);
	size_t nr_pages = size >> PAGE_SHIFT;
	size_t i;

	memset(first, 0, nr_pages * sizeof(struct page));
	for (i = 0; i < nr_pages; i++) {
		struct page *p = first + i;

		set_page_refcounted(p);
		page_mapcount_reset(p);
		#ifdef WANT_PAGE_VIRTUAL
		set_page_address(p, (void *)((unsigned long)addr + PAGE_SIZE * i));
		#endif
	}
}

/*
 * Allocate 2^order pages from @page_pool, trying in order: direct alloc,
 * drop-slab-and-retry (when reclaim is allowed and the pool is under
 * pressure), then pool extension.  Returns a kernel VA or NULL.
 */
static void *__page_pool_alloc(struct liblinux_page_allocator *page_pool, unsigned int order,
			       gfp_t gfp_mask)
{
	size_t bytes = 1UL << (order + PAGE_SHIFT);
	void *kva;

	if (page_pool->alloc == NULL)
		return NULL;

	kva = page_pool->alloc(page_pool, order, NULL);
	if (kva != NULL)
		return kva;

	/* reclaimable slabs may hold free memory back: drop them and retry */
	if ((gfp_mask & __GFP_DIRECT_RECLAIM) && page_pool_should_reclaim(page_pool)) {
		drop_slab();
		kva = page_pool->alloc(page_pool, order, NULL);
		if (kva != NULL)
			return kva;
	}

	/*
	 * oops, there is no free page we can use.
	 * so, let's extend the pool and update watermark value.
	 */
	kva = __extend_page_pool_alloc(page_pool, bytes, bytes, gfp_mask);
	if (kva == NULL)
		pr_err_ratelimited("page pool extend failed, order=%u\n", order);

	return kva;
}

#if defined(CONFIG_LIBLINUX_CDC) && !defined(CONFIG_KASAN)
/* CDC build: serialize pool access with the IRQ-safe pool spinlock. */
static void *__page_pool_alloc_locked(struct liblinux_page_allocator *page_pool,
				      unsigned int order, gfp_t gfp_mask)
{
	unsigned long flags;
	void *kva = NULL;

	spin_lock_irqsave(&page_pool_lock, flags);
	kva = __page_pool_alloc(page_pool, order, gfp_mask);
	spin_unlock_irqrestore(&page_pool_lock, flags);

	return kva;
}
#else
/* Non-CDC build: the PAL call may block, so bracket it with ctx leave/enter. */
static void *__page_pool_alloc_locked(struct liblinux_page_allocator *page_pool,
				      unsigned int order, gfp_t gfp_mask)
{
	void *kva = NULL;

	liblinux_ctx_leave();
	kva = __page_pool_alloc(page_pool, order, gfp_mask);
	liblinux_ctx_enter();

	return kva;
}
#endif

/*
 * Allocate 2^order pages from @page_pool and fully prepare them:
 * struct-page metadata, KASAN tagging, optional zeroing and compound
 * setup.  Panics if a __GFP_NOFAIL request fails.
 */
static struct page *__alloc_pages_pool(struct liblinux_page_allocator *page_pool,
					   gfp_t gfp_mask,
					   unsigned int order)
{
	size_t bytes = 1UL << (order + PAGE_SHIFT);
	struct page *page;
	void *kva;

	BUG_ON(page_pool == NULL);

	kva = __page_pool_alloc_locked(page_pool, order, gfp_mask);
	if (kva == NULL) {
		if (gfp_mask & __GFP_NOFAIL)
			panic("alloc with GFP_NOFAIL should not fail\n");
		return NULL;
	}

	__prepare_pages_info(kva, bytes);
	page = virt_to_page(kva);
	kasan_alloc_pages(page, order);

	if (gfp_mask & __GFP_ZERO)
		memset(kva, 0, bytes);

	if (order && (gfp_mask & __GFP_COMP))
		prep_compound_page(page, order);

	trace_ldk_rvh_dfx_call_mem(LIBLINUX_DFX_PAGE_ALLOC, kva, bytes, NULL);

	return page;
}

/*
 * Main-pool page allocation entry point.  Rejects orders >= MAX_ORDER
 * (warning unless __GFP_NOWARN) and clears the page-cache tag on success.
 */
struct page *__alloc_pages_internal(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	if (unlikely(order >= MAX_ORDER)) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	page = __alloc_pages_pool(page_pool_main, gfp_mask, order);
	if (page == NULL)
		return NULL;

	/* main-pool pages are never page-cache pages */
	clear_bit(PG_cache, &page->flags);
	return page;
}
EXPORT_SYMBOL(__alloc_pages_internal);

/*
 * Page-cache allocation entry point: prefers the dedicated cache pool
 * (falling back to the main pool) and tags pages that came from it so
 * the free path can route them back.
 */
struct page *__alloc_pages_internal_cache(gfp_t gfp_mask, unsigned int order)
{
	struct liblinux_page_allocator *pool;
	struct page *page;

	if (unlikely(order >= MAX_ORDER)) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	pool = (page_pool_cache != NULL) ? page_pool_cache : page_pool_main;
	page = __alloc_pages_pool(pool, gfp_mask, order);
	if (page != NULL && page_pool_cache != NULL)
		set_bit(PG_cache, &page->flags);

	return page;
}
EXPORT_SYMBOL(__alloc_pages_internal_cache);

/* true when the page belongs to externally managed (reserved) memory */
#define PageExternalMem(__p) (page_zonenum(__p) == ZONE_EXTERNAL)

/*
 * Return a 2^order block of pages to its owning pool.  External-memory
 * pages are handed to the reservation wakeup path instead of a pool.
 */
static void __free_pages_ok(struct page *page, unsigned int order)
{
#if defined(CONFIG_LIBLINUX_CDC) && !defined(CONFIG_KASAN)
	unsigned long flags;
#endif
	struct liblinux_page_allocator *page_pool = page_pool_main;

	if (PageExternalMem(page)) {
		/* restore cleared PageWaiters flag in __page_cache_release */
		SetPageWaiters(page);
		wake_up_reserved_page(page);
		return;
	}

	trace_ldk_rvh_dfx_call_mem(LIBLINUX_DFX_PAGE_FREE, page_address(page),
				   1UL << (order + PAGE_SHIFT), NULL);

	/* route tagged page-cache pages back to the dedicated cache pool */
	if (test_bit(PG_cache, &page->flags) && page_pool_cache != NULL)
		page_pool = page_pool_cache;

	kasan_free_pages(page, order);

#if defined(CONFIG_LIBLINUX_CDC) && !defined(CONFIG_KASAN)
	spin_lock_irqsave(&page_pool_lock, flags);
	page_pool->free(page_pool, (unsigned long)page_address(page), order);
	spin_unlock_irqrestore(&page_pool_lock, flags);
#else
	liblinux_ctx_leave();
	page_pool->free(page_pool, (unsigned long)page_address(page), order);
	liblinux_ctx_enter();
#endif

	__page_pool_reclaim_check(page_pool);
}

/* Dispatch a free: order-0 via the unref path, everything else directly. */
static inline void free_the_page(struct page *page, unsigned int order)
{
	if (order != 0)
		__free_pages_ok(page, order);
	else
		free_unref_page(page);
}

/* Drop one reference; only the last reference actually frees the block. */
void __free_pages(struct page *page, unsigned int order)
{
	if (!put_page_testzero(page))
		return;

	free_the_page(page, order);
}
EXPORT_SYMBOL(__free_pages);

/* kmem pages carry no extra accounting here; plain __free_pages suffices */
void __free_kmem_pages(struct page *page, unsigned int order)
{
	__free_pages(page, order);
}

/* Free kmem pages by kernel virtual address; NULL (0) is a no-op. */
void free_kmem_pages(unsigned long addr, unsigned int order)
{
	if (addr == 0)
		return;

	__free_kmem_pages(virt_to_page((void *)addr), order);
}

/* NUMA-agnostic entry point: preferred_nid and nodemask are ignored here. */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
		       nodemask_t *nodemask)
{
	return __alloc_pages_internal(gfp_mask, order);
}
EXPORT_SYMBOL(__alloc_pages_nodemask);

/*
 * Translate GFP flags into internal ALLOC_* flags.  Retained verbatim from
 * mm/page_alloc.c so the watermark/reserve policy matches upstream.
 */
static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (gfp_mask & __GFP_ATOMIC) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	/* memalloc contexts may ignore watermarks entirely */
	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (gfp_mask & __GFP_MEMALLOC)
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (!in_interrupt() &&
				((current->flags & PF_MEMALLOC) ||
				 unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}
#ifdef CONFIG_CMA
	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}

/* True when this allocation context may ignore watermarks entirely. */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	unsigned int alloc_flags = (unsigned int)gfp_to_alloc_flags(gfp_mask);

	return (alloc_flags & ALLOC_NO_WATERMARKS) != 0;
}

/*
 * Common helper functions.
 */
/*
 * Allocate 2^order pages and return their kernel virtual address,
 * or 0 on failure.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a directly addressable virtual address,
	 * which cannot represent a highmem page.
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	return page ? (unsigned long)page_address(page) : 0;
}
EXPORT_SYMBOL(__get_free_pages);

/* Allocate a single zero-filled page; returns its kernel VA or 0. */
unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	gfp_t gfp = gfp_mask | __GFP_ZERO;

	return __get_free_pages(gfp, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

/* Free 2^order pages by kernel virtual address; 0 is a no-op. */
void free_pages(unsigned long addr, unsigned int order)
{
	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	__free_pages(virt_to_page((void *)addr), order);
}
EXPORT_SYMBOL(free_pages);

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
/* Split a non-compound 2^order block into independently freeable pages. */
void split_page(struct page *page, unsigned int order)
{
	unsigned int i;
	unsigned int nr = 1U << order;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

	/* the head already holds a reference; hand one to every tail */
	for (i = 1U; i < nr; i++)
		set_page_refcounted(page + i);
	split_page_owner(page, nr);
}
EXPORT_SYMBOL_GPL(split_page);

/*
 * Trim a 2^order allocation at @addr down to exactly PAGE_ALIGN(size)
 * bytes: split the block and free the unused tail pages.
 */
static void *make_alloc_exact(unsigned long addr, unsigned int order,
		size_t size)
{
	unsigned long cursor, alloc_end;

	if (addr == 0)
		return NULL;

	alloc_end = addr + (PAGE_SIZE << order);
	split_page(virt_to_page((void *)addr), order);
	/* release every page beyond the requested size */
	for (cursor = addr + PAGE_ALIGN(size); cursor < alloc_end; cursor += PAGE_SIZE)
		free_page(cursor);

	return (void *)addr;
}

/*
 * Exact-size counterpart of __extend_page_pool_alloc(): grow the pool by
 * @size bytes and allocate @alloc_size out of it.  Returns NULL when the
 * pool lacks the hook, the extension fails, or no allocation was requested.
 */
static void *__extend_page_pool_alloc_exact(struct liblinux_page_allocator *page_pool,
						size_t size, size_t alloc_size, gfp_t gfp_mask)
{
	unsigned int pal_flags = __gfp_mask_to_page_pool_flag(gfp_mask);
	unsigned long rvaddr = 0;
	int ret;

	if (page_pool->extend_alloc_exact == NULL)
		return NULL;

	/* NOEXTRA: never let the PAL over-extend beyond the requested size */
	ret = page_pool->extend_alloc_exact(page_pool, size, alloc_size, &rvaddr,
						pal_flags | LIBLINUX_PAL_GFP_NOEXTRA);
	if (ret < 0 || alloc_size == 0)
		return NULL;

	return (void *)rvaddr;
}

/*
 * Exact-size allocation from @page_pool: direct alloc, then
 * drop-slab-and-retry under pressure, then pool extension.
 * Returns a kernel VA or NULL.
 */
static void *__page_pool_alloc_exact(struct liblinux_page_allocator *page_pool, unsigned long size,
			       gfp_t gfp_mask)
{
	void *kva = NULL;

	if (page_pool->alloc_exact == NULL)
		return NULL;

	/* alloc first, because alloc_pages_exact may be used frequently for small size */
	kva = page_pool->alloc_exact(page_pool, size, NULL);
	if ((kva == NULL) && (gfp_mask & __GFP_DIRECT_RECLAIM)
			  && (page_pool_should_reclaim(page_pool))) {
		drop_slab();
		kva = page_pool->alloc_exact(page_pool, size, NULL);
	}
	if (kva == NULL) {
		kva = __extend_page_pool_alloc_exact(page_pool, size, size, gfp_mask);
		/* compare against NULL (was `kva == 0`), and rate-limit like
		 * __page_pool_alloc(): extend failures repeat under pressure */
		if (kva == NULL) {
			pr_err_ratelimited("page pool extend failed\n");
			return NULL;
		}
	}

	return kva;
}

#if defined(CONFIG_LIBLINUX_CDC) && !defined(CONFIG_KASAN)
/* CDC build: serialize exact-size pool access with the IRQ-safe spinlock. */
static void *__page_pool_alloc_exact_locked(struct liblinux_page_allocator *page_pool,
					    unsigned long size, gfp_t gfp_mask)
{
	unsigned long flags;
	void *kva = NULL;

	spin_lock_irqsave(&page_pool_lock, flags);
	kva = __page_pool_alloc_exact(page_pool, size, gfp_mask);
	spin_unlock_irqrestore(&page_pool_lock, flags);

	return kva;
}
#else
/* Non-CDC build: the PAL call may block, so bracket it with ctx leave/enter. */
static void *__page_pool_alloc_exact_locked(struct liblinux_page_allocator *page_pool,
					    unsigned long size, gfp_t gfp_mask)
{
	void *kva = NULL;

	liblinux_ctx_leave();
	kva = __page_pool_alloc_exact(page_pool, size, gfp_mask);
	liblinux_ctx_enter();

	return kva;
}
#endif

/*
 * Exact-size allocation with full page preparation: KASAN unpoison,
 * optional zeroing, and struct-page setup.  Panics when a __GFP_NOFAIL
 * request cannot be satisfied.
 */
static void *__alloc_pages_pool_exact(struct liblinux_page_allocator *page_pool,
					gfp_t gfp_mask, unsigned long size)
{
	void *kva;

	BUG_ON(page_pool == NULL);

	size = PAGE_ALIGN(size);
	kva = __page_pool_alloc_exact_locked(page_pool, size, gfp_mask);
	if (kva == NULL) {
		if (gfp_mask & __GFP_NOFAIL)
			panic("alloc with GFP_NOFAIL should not fail\n");
		return NULL;
	}

	kasan_unpoison_shadow(kva, size);
	if (gfp_mask & __GFP_ZERO)
		memset(kva, 0, size);
	__prepare_pages_info(kva, size);

	trace_ldk_rvh_dfx_call_mem(LIBLINUX_DFX_PAGE_ALLOC, kva, size, NULL);

	return kva;
}

/**
 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
/*
 * Allocate exactly PAGE_ALIGN(size) bytes of physically contiguous memory.
 * Uses the pool's optimized exact path when available; otherwise allocates
 * a power-of-two block and trims the tail.  Limited by MAX_ORDER.
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	if (unlikely(order >= MAX_ORDER)) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/* compound pages cannot be split/trimmed; warn and strip the flag */
	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
		gfp_mask &= ~__GFP_COMP;

	/* LDK: use optimized extend_exact when the pool supports it */
	if ((page_pool_main->extend_exact) && (page_pool_main->alloc_exact))
		return __alloc_pages_pool_exact(page_pool_main, gfp_mask, ALIGN(size, SZ_4K));

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
/* Release an alloc_pages_exact() region one page at a time. */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long cursor = (unsigned long)virt;
	unsigned long end = cursor + PAGE_ALIGN(size);

	for (; cursor < end; cursor += PAGE_SIZE)
		free_page(cursor);
}
EXPORT_SYMBOL(free_pages_exact);

void *liblinux_mm_prepare_map(unsigned long long pa, unsigned long size)
{
	void *addr;
	int ret;

	if ((pa + size < pa) || (!PAGE_ALIGNED(pa)) || (!PAGE_ALIGNED(size)))
		return NULL;

	if ((pa < PHYS_OFFSET) || (pa + size - PHYS_OFFSET > LIBLINUX_KMAP_SIZE))
		return NULL;

	addr = phys_to_virt(pa);
	ret = liblinux_pal_page_alloc_populate((void *)virt_to_page(addr),
			ALIGN(size, PAGE_SIZE) / PAGE_SIZE * sizeof(struct page));
	if (ret < 0) {
		pr_err("Failed to populate vmemmap page, ret = %d\n", ret);
		return NULL;
	}

	__prepare_pages_info(addr, size);
	return addr;
}

/*
 * Tear down the struct-page metadata created by liblinux_mm_prepare_map().
 * Silently ignores ranges that overflow, are unaligned, or fall outside
 * the linear kmap window.
 */
void liblinux_mm_prepare_unmap(void *va, unsigned long size)
{
	unsigned long pgcnt;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long i = 0;
#endif
	int ret;

	if ((((unsigned long)va + size) < (unsigned long)va) ||
	    (!PAGE_ALIGNED((unsigned long)va)) || (!PAGE_ALIGNED(size))) {
		return;
	}
	if (((unsigned long)va < PAGE_OFFSET) ||
	    ((unsigned long)va + size - PAGE_OFFSET > LIBLINUX_KMAP_SIZE))
		return;

	page = virt_to_page(va);
	pgcnt = (size >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
	/* highmem: re-tag the entries as ZONE_HIGHMEM instead of clearing */
	for (i = 0; i < pgcnt; i++)
		set_page_zone(page + i, ZONE_HIGHMEM);
#else
	memset(page, 0, pgcnt * sizeof(struct page));
#endif
	ret = liblinux_pal_page_alloc_unpopulate(page,
				ALIGN(size, PAGE_SIZE) / PAGE_SIZE * sizeof(struct page));
	if (ret)
		pr_err("Failed to unpopulate vmemmap page, ret = %d\n", ret);
}

/* hack the number for alloc_large_system_hash */
/* pretend at a tiny memory size so early hash tables stay small */
static unsigned long __meminitdata nr_kernel_pages = 8U;
static unsigned long __meminitdata nr_all_pages = 16U;

/* mm/page_alloc.c */
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
/*
 * Retained from upstream mm/page_alloc.c.  Sizes the table from
 * nr_kernel_pages/nr_all_pages (hacked small above), then allocates via
 * memblock (HASH_EARLY), vmalloc (large/hashdist), or alloc_pages_exact,
 * halving the entry count until an allocation succeeds.
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;
	gfp_t gfp_flags;
	bool virt;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	/* retry with half the entries until some allocator succeeds */
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
		} else if (get_order(size) >= MAX_ORDER || hashdist) {
			table = __vmalloc(size, gfp_flags);
			virt = true;
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
	       tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
	       virt ? "vmalloc" : "linear");

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1U << log2qty) - 1;

	return table;
}

/* Compound-page destructor: free head and tails at the compound order. */
void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

#ifdef CONFIG_HUGETLB_PAGE
/* hugetlb destructor slot; hugetlb pages are not supported on liblinux */
static void free_huge_page(struct page *page)
{
	panic("not support now\n");
}
#endif

/*
 * Destructor table indexed by the page's compound_page_dtor id; entry
 * order must match the COMPOUND_PAGE_DTOR enum values.
 */
compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
};

/* Order-0 free path; liblinux has no per-cpu lists, so free directly. */
void free_unref_page(struct page *page)
{
	__free_pages_ok(page, 0);
}

/*
 * Free a list of 0-order pages
 */
void free_unref_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		free_unref_page(page);
	}
}

/* for page zone */
/* boot-time per-cpu stat/pageset placeholders shared by every zone */
DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);

/* Empty every (order, migratetype) free list of @zone. */
static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, mt;

	for_each_migratetype_order(order, mt) {
		struct free_area *area = &zone->free_area[order];

		INIT_LIST_HEAD(&area->free_list[mt]);
		area->nr_free = 0;
	}
}

#define PG_ZONE_NID	0
/*
 * Minimal single-node zone setup, modelled on free_area_init_node():
 * initialize node 0's pgdat and give every zone empty free lists and
 * the shared boot pageset.
 */
void liblinux_init_zone(void)
{
	int i;
	struct zone *zone = NULL;
	pg_data_t *pgdat = NODE_DATA(PG_ZONE_NID);
	unsigned long start_pfn = 0;

	pgdat->node_id = PG_ZONE_NID;
	pgdat->node_start_pfn = start_pfn;
	pgdat->node_spanned_pages = 0;
	pgdat->node_present_pages = 0;

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);
	lruvec_init(&pgdat->__lruvec);
	/* single assignment; the intermediate NULL store was a dead write */
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (i = 0; i < MAX_NR_ZONES; ++i) {
		zone = &(NODE_DATA(PG_ZONE_NID)->node_zones[i]);

		atomic_long_set(&zone->managed_pages, 0);
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
		zone->present_pages = 0;
		zone_set_nid(zone, PG_ZONE_NID);
		spin_lock_init(&zone->lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = NODE_DATA(PG_ZONE_NID);
		zone->name = "Normal";
		zone->pageset = &boot_pageset;

		zone_init_free_lists(zone);
		zone->initialized = 1;
	}
}

/* mm/page_alloc.c */
/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page.  Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simple allocation framework for
 * page fragments.  This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
/*
 * Refill the fragment cache @nc with a fresh backing page: try a
 * higher-order compound page first (cheap to fail), then fall back to a
 * single page with the caller's original flags.
 */
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;	/* keep the caller's flags for the order-0 fallback */

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
		    __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->va = page ? page_address(page) : NULL;

	return page;
}

/*
 * Allocate @fragsz bytes from the fragment cache @nc, carving fragments
 * downward from the end of the backing page and refilling (or reusing)
 * the page when the current one is exhausted.  Retained from upstream
 * mm/page_alloc.c.
 */
void *page_frag_alloc(struct page_frag_cache *nc,
		      unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned int size = PAGE_SIZE;
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

		/* reset page count bias and offset to start of new frag */
		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		nc->offset = size;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		/* other fragment holders still reference the page: new page */
		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		/* don't reuse emergency-reserve pages; release and refill */
		if (unlikely(nc->pfmemalloc)) {
			free_the_page(page, compound_order(page));
			goto refill;
		}

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}
EXPORT_SYMBOL(page_frag_alloc);

/* Drop @count fragment references at once; free the page on the last one. */
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
/* Free one fragment; the backing page is released on the last reference. */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);

/*
 * Copied from mm/page_alloc.c and not changed
 */
#ifdef CONFIG_LOCKDEP

/* lockdep map modelling "inside fs reclaim" as a pseudo-lock */
static struct lockdep_map __fs_reclaim_map =
	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);

/*
 * Decide whether this allocation should take the fs_reclaim lockdep map:
 * only direct-reclaim-capable __GFP_FS allocations count, and PF_MEMALLOC
 * or __GFP_NOLOCKDEP callers are exempt.
 */
static bool __need_fs_reclaim(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (!(gfp_mask & __GFP_DIRECT_RECLAIM) ||	/* no reclaim without waiting on it */
	    (current->flags & PF_MEMALLOC) ||		/* this guy won't enter reclaim */
	    !(gfp_mask & __GFP_FS) ||			/* only __GFP_FS allocations matter */
	    (gfp_mask & __GFP_NOLOCKDEP))		/* caller opted out of lockdep */
		return false;

	return true;
}

/* Unconditional lockdep acquire/release of the fs_reclaim pseudo-lock. */
void __fs_reclaim_acquire(void)
{
	lock_map_acquire(&__fs_reclaim_map);
}

void __fs_reclaim_release(void)
{
	lock_map_release(&__fs_reclaim_map);
}

/* Conditionally enter the fs_reclaim lockdep context for @gfp_mask. */
void fs_reclaim_acquire(gfp_t gfp_mask)
{
	if (__need_fs_reclaim(gfp_mask))
		__fs_reclaim_acquire();
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);

/* Conditionally leave the fs_reclaim lockdep context for @gfp_mask. */
void fs_reclaim_release(gfp_t gfp_mask)
{
	if (__need_fs_reclaim(gfp_mask))
		__fs_reclaim_release();
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);

#endif
