#pragma once

#include <linux/types.h>
#include <linux/numa.h>

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0

#define MEMBLOCK_ALLOC_NOLEAKTRACE 1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#define __init_memblock

extern unsigned long max_low_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
 * not initialized (only for reserved regions).
 * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
 * either explicitly with memblock_reserve_kern() or via memblock
 * allocation APIs. All memblock allocations set this flag.
 * @MEMBLOCK_KHO_SCRATCH: memory region that kexec can pass to the next
 * kernel in handover mode. During early boot, we do not know about all
 * memory reservations yet, so we get scratch memory from the previous
 * kernel that we know is good to use. It is the only memory that
 * allocations may happen from in this phase.
 */
/* Region attribute bits; see the kernel-doc block above for full details. */
enum memblock_flags {
    MEMBLOCK_NONE           = 0x0,  /* no special request */
    MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
    MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
    MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
    MEMBLOCK_DRIVER_MANAGED = 0x8,  /* always detected via a driver */
    MEMBLOCK_RSRV_NOINIT    = 0x10, /* don't initialize struct pages */
    MEMBLOCK_RSRV_KERN      = 0x20, /* memory reserved for kernel use */
    MEMBLOCK_KHO_SCRATCH    = 0x40, /* scratch memory for kexec handover */
};

/* One contiguous range of physical memory tracked by memblock. */
struct memblock_region
{
    phys_addr_t base; /* physical start address of the region */
    phys_addr_t size; /* size of the region in bytes */
    enum memblock_flags flags; /* attribute bits, see enum memblock_flags */
    int nid; /* NUMA node id of the region */
};

/* A collection of regions of one kind (e.g. "memory" or "reserved"). */
struct memblock_type
{
    unsigned long cnt; /* number of regions currently in use */
    unsigned long max; /* capacity of the regions array */
    phys_addr_t total_size; /* sum of the sizes of all regions */
    struct memblock_region *regions; /* array of cnt regions, capacity max */
    char *name; /* human-readable name of this type, for diagnostics */
};

/* Top-level memblock state: allocation policy plus the two region lists. */
struct memblock
{
    bool bottom_up; /* is bottom up direction? */
    phys_addr_t current_limit; /* default upper bound for allocations */
    struct memblock_type memory; /* usable system RAM */
    struct memblock_type reserved; /* ranges already allocated or reserved */
};

/* The single global memblock instance describing boot-time memory. */
extern struct memblock memblock;

/* Register [base, base + size) as available system memory. */
int memblock_add(phys_addr_t base, phys_addr_t size);
/* Mark [base, base + size) as reserved (unavailable for allocation). */
int memblock_reserve(phys_addr_t base, phys_addr_t size);
/* Set MEMBLOCK_HOTPLUG on the regions intersecting [base, base + size). */
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
/*
 * Allocate boot memory; @func names the caller and, per the function's name,
 * is presumably used in a panic message when the allocation fails.
 */
void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align, const char *func);

/* Convenience wrapper supplying the calling function's name automatically. */
#define memblock_alloc_or_panic(size, align) \
    __memblock_alloc_or_panic(size, align, __func__)

/* Allocate @size bytes of boot memory aligned to @align; NULL on failure
 * (NOTE(review): failure behavior inferred from the _or_panic variant above
 * -- confirm). */
void *memblock_alloc(phys_addr_t size, phys_addr_t align);
/* Allocate within [start, end) on node @nid; if @exact_nid is false the
 * node is presumably a preference rather than a requirement -- confirm. */
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                     phys_addr_t align, phys_addr_t start,
                                     phys_addr_t end, int nid, bool exact_nid);

/* Lowest physical address covered by memblock.memory. */
phys_addr_t memblock_start_of_DRAM(void);
/* One past the highest physical address covered by memblock.memory. */
phys_addr_t memblock_end_of_DRAM(void);

/*
 * Low-level iteration primitives. These are normally reached through the
 * for_each_* macros below rather than called directly.
 */
/*
 * Advance *idx to the next range that is in @type_a but not in @type_b
 * (pass NULL for @type_b to walk @type_a alone), filtered by @nid and
 * @flags. Writes the range bounds/node through the out pointers; each out
 * pointer can be NULL. Termination is signalled via *idx (see the macro
 * below, which stops when the index becomes (u64)-1).
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

/*
 * PFN-granular variant: advance *idx to the next memory range on @nid and
 * report it as [*out_start_pfn, *out_end_pfn). Sets *idx negative when the
 * ranges are exhausted (see for_each_mem_pfn_range()).
 */
void __next_mem_pfn_range(int *idx, int nid,
                          unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)          \
    for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
         i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

/*
 * NUMA-aware allocators constrained to [min_addr, max_addr).
 * NOTE(review): the "_raw" suffix presumably means the returned memory is
 * not zeroed and struct pages are not initialized -- confirm against the
 * implementation. exact_nid allocates only on @nid; try_nid presumably
 * falls back to other nodes (cf. the exact_nid flag of
 * memblock_alloc_range_nid()).
 */
void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                   phys_addr_t min_addr, phys_addr_t max_addr,
                                   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,      \
                             p_start, p_end, p_nid)              \
    /* i == (u64)-1 is the "exhausted" sentinel set by the iterator */ \
    for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
                                 p_start, p_end, p_nid);         \
         i != (u64)-1;                                           \
         __next_mem_range(&i, nid, flags, type_a, type_b,        \
                          p_start, p_end, p_nid))

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
/* Thin wrapper: iterate memblock.memory with memblock.reserved excluded. */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
    __for_each_mem_range(i, &memblock.memory, &memblock.reserved,     \
                         nid, flags, p_start, p_end, p_nid)
