#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/page_types.h>
#include <linux/pgalloc_tag.h>
#include <linux/page_alloc.h>
#include <linux/mmzone.h>
#include <linux/minmax.h>

/*
 * Map an early (boot-time) pfn to its NUMA node id.
 *
 * This variant unconditionally reports node 0. NOTE(review): presumably
 * the !NUMA fallback — confirm a real implementation is selected when
 * NUMA node lookup is actually needed.
 */
static inline int early_pfn_to_nid(unsigned long pfn)
{
    return 0;
}

/*
 * Report whether the struct page for @pfn on node @nid has been
 * initialised at this point in boot.
 *
 * This variant always answers true, so memblock_free_pages() never
 * skips a range. NOTE(review): looks like the fallback for when page
 * init is not deferred — confirm against the deferred-init config.
 */
static inline bool early_page_initialised(unsigned long pfn, int nid)
{
    return true;
}

/*
 * memblock_free_pages - release one memblock-reserved range to the buddy
 * allocator.
 * @page:  first struct page of the range
 * @pfn:   page frame number of @page
 * @order: order of the range being freed
 *
 * When deferred struct-page init is configured, a range whose struct
 * pages are not yet initialised is skipped here.
 */
void __init memblock_free_pages(struct page *page, unsigned long pfn,
                                unsigned int order)
{
    /* Guard: with deferred init, only touch pages already initialised. */
    if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT) &&
        !early_page_initialised(pfn, early_pfn_to_nid(pfn)))
        return;

    /* pages were reserved and not allocated */
    clear_page_tag_ref(page);
    __free_pages_core(page, order, MEMINIT_EARLY);
}

/*
 * Release the pfn range [start, end) to the buddy allocator, carving it
 * into the largest order-aligned chunks that alignment and the range end
 * allow.
 */
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
    unsigned long pfn = start;

    while (pfn < end) {
        int order;

        /*
         * __ffs() behaviour is undefined for 0. pfn == 0 is aligned
         * to every order, so use MAX_PAGE_ORDER directly there;
         * otherwise the lowest set bit bounds the alignment.
         */
        order = pfn ? min_t(int, MAX_PAGE_ORDER, __ffs(pfn))
                    : MAX_PAGE_ORDER;

        /* Shrink the chunk until it no longer overshoots @end. */
        while (pfn + (1UL << order) > end)
            order--;

        memblock_free_pages(pfn_to_page(pfn), pfn, order);

        pfn += 1UL << order;
    }
}

/*
 * Free the fully page-aligned portion of the physical range
 * [start, end) to the buddy allocator.
 *
 * Returns the number of pages released (0 when the aligned range is
 * empty).
 */
static unsigned long __init __free_memory_core(phys_addr_t start,
                                               phys_addr_t end)
{
    unsigned long spfn = PFN_UP(start);
    unsigned long epfn = PFN_DOWN(end);

    /* Clamp to the low-memory limit when HIGHMEM is not configured. */
    if (!IS_ENABLED(CONFIG_HIGHMEM))
        epfn = min(epfn, max_low_pfn);

    if (spfn >= epfn)
        return 0;

    __free_pages_memory(spfn, epfn);

    return epfn - spfn;
}

/*
 * Walk every free memblock range and hand it to the buddy allocator.
 *
 * Returns the total number of pages released.
 */
static unsigned long __init free_low_memory_core_early(void)
{
    phys_addr_t start, end;
    unsigned long freed = 0;
    u64 i;

    /*
     * Iterate with NUMA_NO_NODE rather than NODE_DATA(0)->node_id:
     * node 0 may have no RAM installed, in which case low memory
     * lives on another node.
     */
    for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
                            NULL)
        freed += __free_memory_core(start, end);

    return freed;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Entry point that hands every free memblock range over to the page
 * allocator.
 */
void __init memblock_free_all(void)
{
    /* The page count returned by the walk is not needed here. */
    free_low_memory_core_early();
}
