#pragma once

#include <linux/types.h>
#include <linux/nodemask_types.h>
#include <linux/compiler.h>
#include <linux/spinlock_types.h>
#include <linux/llist.h>
#include <linux/numa.h>
#include <linux/gfp.h>

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.  Orders above 3 (i.e. more than 8 contiguous base pages)
 * are treated as costly.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

/*
 * Per-zone watermark levels, ordered from most to least critical.
 * NR_WMARK is the number of real watermarks (it is not a watermark
 * itself) and sizes any per-watermark array.
 */
enum zone_watermarks
{
    WMARK_MIN,
    WMARK_LOW,
    WMARK_HIGH,
    WMARK_PROMO,
    NR_WMARK
};

/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via the memory hotplug interface.
 */
enum meminit_context
{
    MEMINIT_EARLY,   /* memory described statically by the platform at boot */
    MEMINIT_HOTPLUG, /* memory added later through the hotplug interface */
};

/*
 * MAX_PAGE_ORDER is the largest allocation order the page allocator
 * supports.  Architectures may override the default of 9 via
 * CONFIG_ARCH_FORCE_MAX_ORDER.
 */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_PAGE_ORDER 9
#else
#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif

/* Number of distinct allocation orders: 0 .. MAX_PAGE_ORDER inclusive. */
#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
/* Number of base pages in one maximal-order allocation. */
#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)

enum migratetype
{
    MIGRATE_UNMOVABLE,
    MIGRATE_MOVABLE,
    MIGRATE_RECLAIMABLE,
    MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
    /* HIGHATOMIC is not on the pcp lists; it reuses the next slot */
    MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,

    /*
     * MIGRATE_CMA migration type is designed to mimic the way
     * ZONE_MOVABLE works.  Only movable pages can be allocated
     * from MIGRATE_CMA pageblocks and the page allocator never
     * implicitly changes the migration type of a MIGRATE_CMA
     * pageblock.
     *
     * The way to use it is to change migratetype of a range of
     * pageblocks to MIGRATE_CMA which can be done by
     * __free_pageblock_cma() function.
     */
    MIGRATE_CMA,

    MIGRATE_TYPES
};

enum zone_type
{
    /*
     * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
     * performed on pages in ZONE_NORMAL if the DMA devices support
     * transfers to all addressable memory.
     */
    ZONE_NORMAL,
/*
 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
 * to DMA to all of the addressable memory (ZONE_NORMAL).
 * On architectures where this area covers the whole 32 bit address
 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
 * DMA addressing constraints. This distinction is important as a 32bit
 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
 * platforms may need both zones as they support peripherals with
 * different DMA addressing limitations.
 */
#ifdef CONFIG_ZONE_DMA
    ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
    ZONE_DMA32,
#endif

#ifdef CONFIG_HIGHMEM
    /*
     * A memory area that is only addressable by the kernel through
     * mapping portions into its own address space. This is for example
     * used by i386 to allow the kernel to address the memory beyond
     * 900MB. The kernel will set up special mappings (page
     * table entries on i386) for each page that the kernel needs to
     * access.
     */
    ZONE_HIGHMEM,
#endif
    /*
     * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
     * movable pages with few exceptional cases described below. Main use
     * cases for ZONE_MOVABLE are to make memory offlining/unplug more
     * likely to succeed, and to locally limit unmovable allocations - e.g.,
     * to increase the number of THP/huge pages. Notable special cases are:
     *
     * 1. Pinned pages: (long-term) pinning of movable pages might
     *    essentially turn such pages unmovable. Therefore, we do not allow
     *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
     *    faulted, they come from the right zone right away. However, it is
     *    still possible that address space already has pages in
     *    ZONE_MOVABLE at the time when pages are pinned (i.e. user has
     *    touched that memory before pinning). In such case we migrate them
     *    to a different zone. When migration fails - pinning fails.
     * 2. memblock allocations: kernelcore/movablecore setups might create
     *    situations where ZONE_MOVABLE contains unmovable allocations
     *    after boot. Memory offlining and allocations fail early.
     * 3. Memory holes: kernelcore/movablecore setups might create very rare
     *    situations where ZONE_MOVABLE contains memory holes after boot,
     *    for example, if we have sections that are only partially
     *    populated. Memory offlining and allocations fail early.
     * 4. PG_hwpoison pages: while poisoned pages can be skipped during
     *    memory offlining, such pages cannot be allocated.
     * 5. Unmovable PG_offline pages: in paravirtualized environments,
     *    hotplugged memory blocks might only partially be managed by the
     *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
     *    parts not managed by the buddy are unmovable PG_offline pages. In
     *    some cases (virtio-mem), such pages can be skipped during
     *    memory offlining, however, cannot be moved/allocated. These
     *    techniques might use alloc_contig_range() to hide previously
     *    exposed pages from the buddy again (e.g., to implement some sort
     *    of memory unplug in virtio-mem).
     * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create
     *    situations where ZERO_PAGE(0) which is allocated differently
     *    on different platforms may end up in a movable zone. ZERO_PAGE(0)
     *    cannot be migrated.
     * 7. Memory-hotplug: when using memmap_on_memory and onlining the
     *    memory to the MOVABLE zone, the vmemmap pages are also placed in
     *    such zone. Such pages cannot be really moved around as they are
     *    self-stored in the range, but they are treated as movable when
     *    the range they describe is about to be offlined.
     *
     * In general, no unmovable allocations that degrade memory offlining
     * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
     * have to expect that migrating pages in ZONE_MOVABLE can fail (even
     * if has_unmovable_pages() states that there are no unmovable pages,
     * there can be false negatives).
     */
    ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
    ZONE_DEVICE,
#endif
    /* Not a real zone: counts how many zone types are configured in. */
    __MAX_NR_ZONES

};

/* Number of zone types configured into this build (see enum zone_type). */
#define MAX_NR_ZONES __MAX_NR_ZONES
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

/* Kinds of zonelists each node keeps (indexes into node_zonelists[]). */
enum
{
    ZONELIST_FALLBACK, /* zonelist with fallback */
#ifdef CONFIG_NUMA
    /*
     * The NUMA zonelists are doubled because we need zonelists that
     * restrict the allocations to a single node for __GFP_THISNODE.
     */
    ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */
#endif
    MAX_ZONELISTS /* number of zonelists kept per node */
};

/*
 * Buddy free lists for a single allocation order: one list head per
 * migratetype plus a count of free blocks at this order.
 */
struct free_area
{
    struct list_head free_list[MIGRATE_TYPES]; /* per-migratetype free lists */
    unsigned long nr_free;                     /* free blocks across all lists */
};

/*
 * Per-zone state: the buddy free lists, the PFN range the zone covers,
 * and the lock protecting the free lists.
 */
struct zone
{
    /* free areas of different sizes, indexed by allocation order */
    struct free_area free_area[NR_PAGE_ORDERS];

    /* the node (pglist_data) this zone belongs to */
    struct pglist_data *zone_pgdat;

    /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
    /*
     * Declared unsigned long (not uintptr_t) for consistency with
     * spanned_pages, pg_data_t::node_start_pfn and zone_end_pfn().
     */
    unsigned long zone_start_pfn;
    /*
     * spanned_pages is the total pages spanned by the zone, including
     * holes, which is calculated as:
     * 	spanned_pages = zone_end_pfn - zone_start_pfn;
     */
    unsigned long spanned_pages;

    /* pages physically present in the zone (spanned_pages minus holes) */
    unsigned long present_pages;

    /* first struct page of this zone's portion of the memory map */
    struct page *zone_mem_map;

    /* Primarily protects free_area */
    spinlock_t lock;
    /* Pages to be freed when next trylock succeeds */
    struct llist_head trylock_free_pages;
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 * A zoneref with zone == NULL marks the end of a zonelist.
 */
struct zoneref
{
    struct zone *zone; /* Pointer to actual zone */
    int zone_idx;      /* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist
{
    /* +1 leaves room for the terminating entry whose zone is NULL */
    struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

/*
 * Per-node memory layout descriptor: the node's zones, its zonelists,
 * and the PFN range it covers.
 */
typedef struct pglist_data
{
    /*
     * node_zones contains just the zones for THIS node. Not all of the
     * zones may be populated, but it is the full list. It is referenced by
     * this node's node_zonelists as well as other node's node_zonelists.
     */
    struct zone node_zones[MAX_NR_ZONES];

    /*
     * node_zonelists contains references to all zones in all nodes.
     * Generally the first zones will be references to this node's
     * node_zones.
     */
    struct zonelist node_zonelists[MAX_ZONELISTS];

    /* first PFN of the range this node covers */
    unsigned long node_start_pfn;
    /* total number of physical pages */
    unsigned long node_present_pages;
    /* total size of physical page range, including holes */
    unsigned long node_spanned_pages;

    /* this node's id (always 0 on UMA builds; see zone_to_nid()) */
    int node_id;

    int nr_zones; /* number of populated zones in this node */

    /* opaque per-cpu node statistics area */
    void *per_cpu_nodestats;

    /* first struct page of this node's memory map */
    struct page *node_mem_map;
} pg_data_t;

/* Return one past the last PFN the zone spans (holes included). */
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
    unsigned long start = zone->zone_start_pfn;

    return start + zone->spanned_pages;
}

extern struct pglist_data *NODE_DATA(unsigned int nid);

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                      unsigned long size);

/*
 * Map a zone to the id of the NUMA node owning it.  This build keeps all
 * memory on node 0, so the argument is ignored.
 */
static inline int zone_to_nid(struct zone *zone)
{
    return 0;
}

/* Return the struct zone * for a zonelist entry (NULL at list end). */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
    return zoneref->zone;
}

/* Return the cached zone index of a zonelist entry. */
static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
    return zoneref->zone_idx;
}

/* Return the node id of the zone referenced by a zonelist entry. */
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
    return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
                                      enum zone_type highest_zoneidx,
                                      nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 *
 * Return: the next zone at or below highest_zoneidx within the allowed
 * nodemask using a cursor within a zonelist as a starting point
 */
static inline struct zoneref *next_zones_zonelist(struct zoneref *z,
                                                  enum zone_type highest_zoneidx,
                                                  nodemask_t *nodes)
{
    /* Fast path: no nodemask filter and the cursor already qualifies. */
    if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
        return z;
    /* Slow path: scan forward honoring the index limit and nodemask. */
    return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/*
 * Iterate over the remaining eligible zones in a zonelist, starting from
 * cursor @z, visiting only zones at or below @highidx and (if @nodemask is
 * non-NULL) within @nodemask.  Terminates when zonelist_zone() yields NULL.
 */
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
    for (zone = zonelist_zone(z);                                   \
         zone;                                                      \
         z = next_zones_zonelist(++z, highidx, nodemask),           \
        zone = zonelist_zone(z))

/* Return one past the last PFN spanned by the node (holes included). */
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
    unsigned long first = pgdat->node_start_pfn;

    return first + pgdat->node_spanned_pages;
}

extern int movable_zone;

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

/*
 * Select which of a node's zonelists an allocation should use:
 * __GFP_THISNODE picks the no-fallback list (NUMA builds only),
 * everything else uses the general fallback list.
 */
static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
    /*
     * The NUMA zonelists are doubled because we need zonelists that
     * restrict the allocations to a single node for __GFP_THISNODE.
     */
    return unlikely(flags & __GFP_THISNODE) ? ZONELIST_NOFALLBACK
                                            : ZONELIST_FALLBACK;
#else
    return ZONELIST_FALLBACK;
#endif
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
    pg_data_t *pgdat = NODE_DATA(nid);

    return &pgdat->node_zonelists[gfp_zonelist(flags)];
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 *
 * Return: Zoneref pointer for the first suitable zone found
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                                   enum zone_type highest_zoneidx,
                                                   nodemask_t *nodes)
{
    /* _zonerefs[0] is the most-preferred entry; scan forward from it. */
    return next_zones_zonelist(zonelist->_zonerefs,
                               highest_zoneidx, nodes);
}

/* Returns true if the zone has any present (non-hole) pages. */
static inline bool populated_zone(struct zone *zone)
{
    return zone->present_pages != 0;
}

/*
 * zone_idx() returns the index of a zone within its node's node_zones[]
 * array: 0 for the first zone type (ZONE_NORMAL in this layout), then
 * the config-dependent zones in enum zone_type order.
 */
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
