#include <linux/page_types.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/nodemask.h>
#include <linux/list.h>
#include <linux/page_alloc.h>
#include <linux/pgalloc_tag.h>
#include <linux/page_ref.h>
#include <linux/spinlock.h>

#include "internal.h"

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;

#include "_page_/pcp.c"
#include "_page_/freelist.c"
#include "_page_/free_pages.c"
#include "_page_/get_page_from_freelist.c"

/*
 * Fill in the allocation context @ac for one allocation attempt.
 *
 * Only @gfp_mask and @preferred_nid are consumed here (to pick the
 * zonelist); @order, @alloc_gfp and @alloc_flags are currently untouched
 * outputs/inputs kept for interface parity with the full allocator.
 * NOTE(review): ac->highest_zoneidx is read below but never written in
 * this block — presumably it is the zero-initialized value from the
 * caller's `struct alloc_context ac = {}`; confirm that is intended.
 *
 * Always returns true in this reduced implementation; the caller still
 * checks the result so a failing path can be added later.
 */
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
									   int preferred_nid, nodemask_t *nodemask,
									   struct alloc_context *ac, gfp_t *alloc_gfp,
									   unsigned int *alloc_flags)
{
	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
	ac->nodemask = nodemask;

	/*
	 * The preferred zone is used for statistics but crucially it is
	 * also used as the starting point for the zonelist iterator. It
	 * may get reset for allocations that ignore memory policies.
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
												 ac->highest_zoneidx, ac->nodemask);

	return true;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 *
 * Builds an allocation context for (@preferred_nid, @nodemask), then
 * performs a single fast-path attempt against the free lists with the
 * low watermark.  Returns the allocated page, or NULL when context
 * preparation fails or no page could be taken from the free lists.
 */
struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
										 int preferred_nid, nodemask_t *nodemask)
{
	struct alloc_context ac = {};
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	/* The gfp_t that was actually used for allocation */
	gfp_t alloc_gfp = gfp;

	/* Bail out early when the allocation context cannot be set up. */
	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
							 &alloc_gfp, &alloc_flags))
		return NULL;

	/* First (and, here, only) allocation attempt. */
	return get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
}

/**
 * __free_pages - Free pages allocated with alloc_pages().
 * @page: The page pointer returned from alloc_pages().
 * @order: The order of the allocation.
 *
 * This function can free multi-page allocations that are not compound
 * pages.  It does not check that the @order passed in matches that of
 * the allocation, so it is easy to leak memory.  Freeing more memory
 * than was allocated will probably emit a warning.
 *
 * If the last reference to this page is speculative, it will be released
 * by put_page() which only frees the first page of a non-compound
 * allocation.  To prevent the remaining pages from being leaked, we free
 * the subsequent pages here.  If you want to use the page's reference
 * count to decide when to free the allocation, you should allocate a
 * compound page, and use put_page() instead of __free_pages().
 *
 * Context: May be called in interrupt context or while holding a normal
 * spinlock, but not in NMI context or while holding a raw spinlock.
 */
void __free_pages(struct page *page, unsigned int order)
{
	/* Delegate to the internal helper with no special free-path flags. */
	___free_pages(page, order, FPI_NONE);
}

/*
 * free_pages - free pages by their kernel virtual address.
 * @addr:  virtual address returned by __get_free_pages(); 0 is a no-op.
 * @order: order of the original allocation.
 */
void free_pages(unsigned long addr, unsigned int order)
{
	if (addr == 0)
		return;

	__free_pages(virt_to_page((void *)addr), order);
}

/*
 * put_page_testzero - drop one reference to @page.
 *
 * Returns nonzero when this was the last reference (refcount hit zero),
 * meaning the caller is now responsible for freeing the page.
 */
int put_page_testzero(struct page *page)
{
	int was_last;

	/* TODO: VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); */
	was_last = page_ref_dec_and_test(page);

	return was_last;
}

/*
 * __free_pages_core - release a freshly initialized high-order page to
 * the buddy allocator.
 * @page:    first page of the 2^@order block.
 * @order:   order of the block being freed.
 * @context: caller context tag; currently unused in this reduced
 *           implementation (kept for interface compatibility).
 */
void __free_pages_core(struct page *page, unsigned int order, int context)
{
	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}
