#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/math.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <linux/pfn.h>

/* Default capacity of the statically allocated region arrays below. */
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4

/* Architectures may override either capacity before including this file. */
#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS INIT_MEMBLOCK_REGIONS
#endif

/* Highest directly-mapped pfn; zero-initialized here, set up elsewhere. */
unsigned long max_low_pfn;

/* Static backing storage for the initial memory/reserved region arrays. */
static struct memblock_region _memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS];
static struct memblock_region _reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS];

/*
 * The global memblock instance: two region types ("memory" = usable RAM,
 * "reserved" = ranges handed out or otherwise off-limits), both initially
 * backed by the static arrays above. Top-down allocation, no address limit.
 */
struct memblock memblock = {
    .memory.regions = _memory_init_regions,
    .memory.max = INIT_MEMBLOCK_MEMORY_REGIONS,
    .memory.name = "memory",

    .reserved.regions = _reserved_init_regions,
    .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
    .reserved.name = "reserved",

    .bottom_up = false,
    .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
};

/* No-op stub: this build does not track per-region NUMA node ids. */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

/* Stub: with NUMA tracking disabled every region reports node 0. */
static inline int memblock_get_region_node(const struct memblock_region *r)
{
    return 0;
}

#include "_mb_/find.c"
#include "_mb_/add_range.c"
#include "_mb_/remove_range.c"
#include "_mb_/alloc.c"
#include "_mb_/isolate_range.c"
#include "_mb_/setclr_flag.c"

/**
 * memblock_add - register a new region with the "memory" type
 * @base: base physical address of the region
 * @size: size of the region in bytes
 *
 * The region is not tied to any particular node (MAX_NUMNODES) and
 * carries no flags.
 *
 * Return: 0 on success, -errno on failure.
 */
int memblock_add(phys_addr_t base, phys_addr_t size)
{
    return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_phys_free - release a physical range back to the allocator
 * @base: base physical address of the range to free
 * @size: size of the range in bytes
 *
 * Drops the range from the "reserved" type, making it allocatable again.
 *
 * Return: 0 on success, -errno on failure.
 */
int memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
    return memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * memblock_free - free memblock memory by virtual address
 * @ptr: starting virtual address of the boot memory block (may be NULL)
 * @size: size of the block in bytes
 *
 * NULL is accepted and treated as a successful no-op.
 *
 * Return: 0 on success, -errno on failure.
 */
int memblock_free(void *ptr, size_t size)
{
    /*
     * The original fell off the end of a non-void function, which is
     * undefined behavior when the caller uses the result; propagate the
     * underlying status instead.
     */
    if (!ptr)
        return 0;

    return memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_alloc_try_nid - allocate and zero a boot memory block
 * @size: size of the block in bytes
 * @align: alignment of the region and block's size
 * @min_addr: lower bound for the allocation (phys address)
 * @max_addr: upper bound for the allocation (phys address)
 * @nid: preferred node, %NUMA_NO_NODE for any node
 *
 * Return: virtual address of the zeroed block, or NULL on failure.
 */
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid)
{
    void *virt = memblock_alloc_internal(size, align, min_addr, max_addr,
                                         nid, false);

    if (!virt)
        return NULL;

    memset(virt, 0, size);
    return virt;
}

/**
 * memblock_alloc - allocate a zeroed boot memory block from anywhere
 * @size: size of the block in bytes
 * @align: alignment of the region and block's size
 *
 * Return: virtual address of the zeroed block, or NULL on failure.
 */
void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
    phys_addr_t lo = MEMBLOCK_LOW_LIMIT;
    phys_addr_t hi = MEMBLOCK_ALLOC_ACCESSIBLE;

    return memblock_alloc_try_nid(size, align, lo, hi, NUMA_NO_NODE);
}

/**
 * __memblock_reserve - mark a physical range as reserved, with node/flags
 * @base: base physical address of the range
 * @size: size of the range in bytes
 * @nid: node the range belongs to
 * @flags: region attribute flags
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
                                       int nid, enum memblock_flags flags)
{
    return memblock_add_range(&memblock.reserved, base, size, nid, flags);
}

/**
 * memblock_alloc_range_nid - allocate a physical range within bounds
 * @size: size of the block in bytes
 * @align: alignment of the region; 0 falls back to SMP_CACHE_BYTES
 * @start: lower bound of the candidate range (phys address)
 * @end: upper bound of the candidate range (phys address)
 * @nid: preferred node, %NUMA_NO_NODE for any node
 * @exact_nid: currently unused here — NOTE(review): upstream uses this to
 *             suppress the any-node fallback; confirm whether that fallback
 *             is wanted in this trimmed implementation
 *
 * Return: physical address of the reserved block on success, 0 on failure.
 */
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                     phys_addr_t align, phys_addr_t start,
                                     phys_addr_t end, int nid,
                                     bool exact_nid)
{
    enum memblock_flags flags = MEMBLOCK_NONE;
    phys_addr_t found;

    if (!align)
        align = SMP_CACHE_BYTES;

    found = memblock_find_in_range_node(size, align, start, end, nid,
                                        flags);

    /*
     * Only hand the address out if it was actually reserved; the original
     * "goto done" landed on the very next statement, so a failed reserve
     * still returned @found, leaking an unreserved range to the caller.
     */
    if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
        return found;

    return 0;
}

/**
 * memblock_reserve - mark a physical range as reserved
 * @base: base physical address of the range
 * @size: size of the range in bytes
 *
 * Node-agnostic (MAX_NUMNODES), flagless variant of __memblock_reserve().
 *
 * Return: 0 on success, -errno on failure.
 */
int memblock_reserve(phys_addr_t base, phys_addr_t size)
{
    return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

phys_addr_t memblock_start_of_DRAM(void)
{
    return memblock.memory.regions[0].base;
}

phys_addr_t memblock_end_of_DRAM(void)
{
    int idx = memblock.memory.cnt - 1;

    return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
/**
 * __next_mem_pfn_range - advance the pfn-range iterator by one region
 * @idx: in/out iterator cursor; start at -1, set back to -1 when exhausted
 * @nid: node to filter on; an invalid node id matches every region
 * @out_start_pfn: if non-NULL, receives the region's first full pfn
 * @out_end_pfn: if non-NULL, receives one past the region's last full pfn
 * @out_nid: if non-NULL, receives the region's node id
 *
 * Walks memblock.memory starting at *idx + 1, skipping regions that do not
 * cover at least one whole page and (when @nid is valid) regions on other
 * nodes. With the NUMA stubs above, every region reports node 0.
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                          unsigned long *out_start_pfn,
                                          unsigned long *out_end_pfn, int *out_nid)
{
    struct memblock_type *type = &memblock.memory;
    struct memblock_region *r;
    int r_nid;

    while (++*idx < type->cnt)
    {
        r = &type->regions[*idx];
        r_nid = memblock_get_region_node(r);

        /* Skip regions too small to contain a single complete page. */
        if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
            continue;
        if (!numa_valid_node(nid) || nid == r_nid)
            break;
    }
    /* Ran off the end: reset the cursor so the caller's loop terminates. */
    if (*idx >= type->cnt)
    {
        *idx = -1;
        return;
    }

    /* Report page-aligned bounds: round base up, round end down. */
    if (out_start_pfn)
        *out_start_pfn = PFN_UP(r->base);
    if (out_end_pfn)
        *out_end_pfn = PFN_DOWN(r->base + r->size);
    if (out_nid)
        *out_nid = r_nid;
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                   phys_addr_t min_addr, phys_addr_t max_addr,
                                   int nid)
{
    /* exact_nid=true: never fall back to a different node. */
    const bool exact_nid = true;

    return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
                                   exact_nid);
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid)
{
    /* exact_nid=false: @nid is a preference, not a requirement. */
    const bool exact_nid = false;

    return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
                                   exact_nid);
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
    struct memblock_type *type = &memblock.memory;

    return memblock_setclr_flag(type, base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @func: caller func name
 *
 * This function attempts to allocate memory using memblock_alloc,
 * and in case of failure, it calls panic with the formatted message.
 * This function should not be used directly, please use the macro memblock_alloc_or_panic.
 */
void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
                                       const char *func)
{
    void *mem = memblock_alloc(size, align);

    if (!mem)
        panic("%s: Failed to allocate %pap bytes\n", func, &size);

    return mem;
}
