#include <linux/init.h>
#include <linux/mm_init.h>
#include <linux/nodemask.h>
#include <linux/numa.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/minmax.h>
#include <linux/stddef.h>
#include <linux/cache.h>
#include <linux/math.h>
#include <linux/page_types.h>
#include <linux/bug.h>
#include <linux/align.h>
#include <linux/page_alloc.h>
#include <linux/pgalloc_tag.h>
#include <linux/minmax.h>
#include <linux/pfn.h>
#include <linux/string.h>
#include <linux/page_ref.h>

/* Number of page frames covered by mem_map (set in alloc_node_mem_map()) */
unsigned long max_mapnr;

/* Global flat array of struct page; aliased to node 0's node_mem_map */
struct page *mem_map;

/*
 * Checked in adjust_zone_range_for_zone_movable(): when set, zones that
 * straddle the ZONE_MOVABLE start PFN are NOT truncated at that boundary.
 * NOTE(review): presumably driven by a kernelcore=mirror-style option
 * elsewhere — nothing in this file ever sets it.
 */
bool mirrored_kernelcore = false;

/* Per-zone PFN limits recorded from the architecture in free_area_init() */
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
/* First PFN of ZONE_MOVABLE on each node; 0 means none on that node */
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

/*
 * Allocate raw (uninitialized) memory for a struct page array.
 *
 * Kmemleak explicitly scans mem_map by traversing all valid struct pages,
 * so the backing memblock range is excluded from the kmemleak scan list
 * via MEMBLOCK_ALLOC_NOLEAKTRACE.
 */
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
                          phys_addr_t min_addr, int nid, bool exact_nid)
{
    if (exact_nid)
        return memblock_alloc_exact_nid_raw(size, align, min_addr,
                                            MEMBLOCK_ALLOC_NOLEAKTRACE,
                                            nid);

    return memblock_alloc_try_nid_raw(size, align, min_addr,
                                      MEMBLOCK_ALLOC_NOLEAKTRACE, nid);
}

/*
 * Allocate pgdat->node_mem_map, a struct page per PFN spanned by the node,
 * padded so both endpoints are MAX_ORDER aligned. Also publishes the global
 * mem_map/max_mapnr for node 0.
 */
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
    unsigned long start, offset, size, end;
    struct page *map;

    /* Skip empty nodes */
    if (!pgdat->node_spanned_pages)
        return;

    /* Round down so the map begins on a MAX_ORDER boundary */
    start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
    offset = pgdat->node_start_pfn - start;
    /*
     * The zone's endpoints aren't required to be MAX_PAGE_ORDER
     * aligned but the node_mem_map endpoints must be in order
     * for the buddy allocator to function correctly.
     */
    end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
    size = (end - start) * sizeof(struct page);
    map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
                       pgdat->node_id, false);
    /*
     * memmap_alloc() can return NULL on exhaustion; previously the code
     * would compute map + offset from a null pointer (UB) and crash
     * obscurely later. Fail loudly at the point of the problem instead.
     */
    BUG_ON(!map);

    pgdat->node_mem_map = map + offset;

    /* the global mem_map is just set as node 0's */
    WARN_ON(pgdat != NODE_DATA(0));

    mem_map = pgdat->node_mem_map;
    if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
        mem_map -= offset;

    max_mapnr = end - start;
}

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, the start and end PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
                                  unsigned long *start_pfn, unsigned long *end_pfn)
{
    unsigned long spfn, epfn;
    int i;

    /* Start from an impossible range and widen it per memblock region */
    *start_pfn = -1UL;
    *end_pfn = 0;

    for_each_mem_pfn_range(i, nid, &spfn, &epfn, NULL) {
        if (spfn < *start_pfn)
            *start_pfn = spfn;
        if (epfn > *end_pfn)
            *end_pfn = epfn;
    }

    /* No memory on this node: report an empty [0, 0) range */
    if (*start_pfn == -1UL)
        *start_pfn = 0;
}

/*
 * Wire a zone's internal fields. In this simplified version only the
 * back-pointer to the owning node is set; idx and remaining_pages are
 * accepted for interface compatibility but currently unused.
 */
static void __init zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
                                       unsigned long remaining_pages)
{
    struct pglist_data *pgdat = NODE_DATA(nid);

    zone->zone_pgdat = pgdat;
}

/*
 * Set up the core zone structures of one node: link each zone to its node
 * and initialize the non-empty ones as currently-empty zones.
 */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
    int nid = pgdat->node_id;
    enum zone_type idx;

    for (idx = 0; idx < MAX_NR_ZONES; idx++)
    {
        struct zone *zone = &pgdat->node_zones[idx];
        unsigned long spanned = zone->spanned_pages;

        /*
         * managed_pages stays 0 for now; it is accounted later when
         * the memblock allocator frees pages into the buddy system.
         */
        zone_init_internals(zone, idx, nid, zone->present_pages);

        if (!spanned)
            continue;

        init_currently_empty_zone(zone, zone->zone_start_pfn, spanned);
    }
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of the architecture. Unlike the other
 * zones, the starting point for ZONE_MOVABLE is not fixed. It may differ
 * per node depending on each node's size and how evenly kernelcore is
 * distributed. This helper adjusts the zone ranges provided by the
 * architecture for a given node by using the end of the highest usable
 * zone for ZONE_MOVABLE. This preserves the assumption that zones within
 * a node are ordered by monotonically increasing memory addresses.
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
                                                      unsigned long zone_type,
                                                      unsigned long node_end_pfn,
                                                      unsigned long *zone_start_pfn,
                                                      unsigned long *zone_end_pfn)
{
    unsigned long movable_start = zone_movable_pfn[nid];

    /* Nothing to adjust unless ZONE_MOVABLE has pages on this node */
    if (!movable_start)
        return;

    if (zone_type == ZONE_MOVABLE)
    {
        /* Size ZONE_MOVABLE: from its start up to the top of the
         * highest usable zone, clipped to the node. */
        *zone_start_pfn = movable_start;
        *zone_end_pfn = min(node_end_pfn,
                            arch_zone_highest_possible_pfn[movable_zone]);
        return;
    }

    if (!mirrored_kernelcore &&
        *zone_start_pfn < movable_start &&
        *zone_end_pfn > movable_start)
    {
        /* ZONE_MOVABLE starts within this range: truncate the zone */
        *zone_end_pfn = movable_start;
        return;
    }

    /* The whole range lies within ZONE_MOVABLE: make the zone empty */
    if (*zone_start_pfn >= movable_start)
        *zone_start_pfn = *zone_end_pfn;
}

/*
 * Return the number of pages a zone spans in a node, including holes.
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
                                                       unsigned long zone_type,
                                                       unsigned long node_start_pfn,
                                                       unsigned long node_end_pfn,
                                                       unsigned long *zone_start_pfn,
                                                       unsigned long *zone_end_pfn)
{
    unsigned long lo = arch_zone_lowest_possible_pfn[zone_type];
    unsigned long hi = arch_zone_highest_possible_pfn[zone_type];

    /* Intersect the node span with the zone's architectural window */
    *zone_start_pfn = clamp(node_start_pfn, lo, hi);
    *zone_end_pfn = clamp(node_end_pfn, lo, hi);

    adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
                                       zone_start_pfn, zone_end_pfn);

    /* This node has no pages within the zone's required range */
    if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
        return 0;

    /* Pull the zone boundaries inside the node if necessary */
    *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
    *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);

    return *zone_end_pfn - *zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
static unsigned long __init __absent_pages_in_range(int nid,
                                                    unsigned long range_start_pfn,
                                                    unsigned long range_end_pfn)
{
    unsigned long absent = range_end_pfn - range_start_pfn;
    unsigned long spfn, epfn;
    int i;

    /* Subtract every memblock-present piece overlapping the range */
    for_each_mem_pfn_range(i, nid, &spfn, &epfn, NULL)
    {
        unsigned long s = clamp(spfn, range_start_pfn, range_end_pfn);
        unsigned long e = clamp(epfn, range_start_pfn, range_end_pfn);

        absent -= e - s;
    }

    return absent;
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
                                                      unsigned long zone_type,
                                                      unsigned long zone_start_pfn,
                                                      unsigned long zone_end_pfn)
{
    /* An empty zone has no absent pages */
    if (zone_start_pfn == zone_end_pfn)
        return 0;

    return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

/*
 * Compute spanned/present page counts for every zone of a node and
 * accumulate the node-wide totals into pgdat.
 */
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
                                             unsigned long node_start_pfn,
                                             unsigned long node_end_pfn)
{
    unsigned long present_total = 0, spanned_total = 0;
    enum zone_type zt;

    for (zt = 0; zt < MAX_NR_ZONES; zt++)
    {
        struct zone *zone = &pgdat->node_zones[zt];
        unsigned long zone_start_pfn, zone_end_pfn;
        unsigned long spanned, absent, present;

        /* Pages covered by the zone on this node, holes included */
        spanned = zone_spanned_pages_in_node(pgdat->node_id, zt,
                                             node_start_pfn, node_end_pfn,
                                             &zone_start_pfn,
                                             &zone_end_pfn);
        /* Pages within that span that memblock does not back */
        absent = zone_absent_pages_in_node(pgdat->node_id, zt,
                                           zone_start_pfn, zone_end_pfn);

        present = spanned - absent;

        zone->zone_start_pfn = spanned ? zone_start_pfn : 0;
        zone->spanned_pages = spanned;
        zone->present_pages = present;
#if defined(CONFIG_MEMORY_HOTPLUG)
        zone->present_early_pages = present;
#endif

        spanned_total += spanned;
        present_total += present;
    }

    pgdat->node_spanned_pages = spanned_total;
    pgdat->node_present_pages = present_total;
}

/*
 * Initialize one node: record its PFN span, size its zones, allocate its
 * struct page map, and set up the core zone structures.
 *
 * Fix: the original carried a dead, empty `else {}` branch after the
 * totalpages calculation; it is removed. A memoryless node simply keeps
 * its zero span/present counts.
 */
static void __init free_area_init_node(int nid, pg_data_t *pgdat)
{
    unsigned long start_pfn = 0;
    unsigned long end_pfn = 0;

    /* Derive this node's PFN range from memblock information */
    get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

    pgdat->node_id = nid;
    pgdat->node_start_pfn = start_pfn;
    pgdat->per_cpu_nodestats = NULL;

    if (start_pfn != end_pfn)
        calculate_node_totalpages(pgdat, start_pfn, end_pfn);

    alloc_node_mem_map(pgdat);
    free_area_init_core(pgdat);
}

/*
 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL;
 * for such cases max_zone_pfn may be sorted in descending order. This
 * implementation always answers "no".
 */
static bool arch_has_descending_max_zone_pfns(void)
{
    return false;
}

/*
 * Minimal struct page initializer: only resets the page reference count.
 * NOTE(review): pfn, zone and nid are currently unused — presumably a
 * simplified stub of the full initializer; do not rely on zone/nid being
 * recorded in the page yet.
 */
void __meminit __init_single_page(struct page *page, unsigned long pfn,
                                  unsigned long zone, int nid)
{
    init_page_count(page);
}

/*
 * If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init.
 * NOTE(review): stubbed to "never overlaps" — mirrored-memory handling is
 * not implemented here, and both parameters are currently unused.
 */
static bool __meminit overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
    return false;
}

/*
 * Initialize the struct pages for the PFN range [start_pfn, start_pfn+size).
 * NOTE(review): context, altmap, migratetype and isolate_pageblock are
 * accepted for interface compatibility but not yet honoured (see todo).
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
                                 unsigned long start_pfn, unsigned long zone_end_pfn,
                                 enum meminit_context context,
                                 struct vmem_altmap *altmap, int migratetype,
                                 bool isolate_pageblock)
{
    unsigned long end_pfn = start_pfn + size;
    unsigned long pfn;

    for (pfn = start_pfn; pfn < end_pfn; pfn++)
    {
        struct page *page = pfn_to_page(pfn);

        __init_single_page(page, pfn, zone, nid);
        /* todo: pageblock/migratetype handling */
    }
}

/*
 * Initialize struct pages for PFNs in [spfn, epfn) that have no backing
 * memory (holes). Currently a no-op — hole initialization is not
 * implemented in this simplified file; callers already track holes so
 * a real implementation can slot in here.
 */
static void __init init_unavailable_range(unsigned long spfn,
                                          unsigned long epfn,
                                          int zone, int node)
{
}

/*
 * Initialize the memmap for the part of [start_pfn, end_pfn) that falls
 * inside the given zone, and account the hole preceding it. *hole_pfn
 * tracks the end of the last initialized range across calls.
 */
static void __init memmap_init_zone_range(struct zone *zone,
                                          unsigned long start_pfn,
                                          unsigned long end_pfn,
                                          unsigned long *hole_pfn)
{
    unsigned long zspfn = zone->zone_start_pfn;
    unsigned long zepfn = zspfn + zone->spanned_pages;
    int nid = zone_to_nid(zone);
    int zone_id = zone_idx(zone);

    /* Restrict the requested range to this zone's span */
    start_pfn = clamp(start_pfn, zspfn, zepfn);
    end_pfn = clamp(end_pfn, zspfn, zepfn);
    if (start_pfn >= end_pfn)
        return;

    memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
                      zepfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE, false);

    /* Account the hole that precedes this range, if any */
    if (*hole_pfn < start_pfn)
        init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

    *hole_pfn = end_pfn;
}

/*
 * Walk every memblock region and initialize the memmap of each populated
 * zone it intersects, tracking holes between initialized ranges.
 *
 * Fix: zone_id was recorded in the loop but never consumed, so the hole
 * past the last initialized range was never passed to
 * init_unavailable_range(). Handle that trailing hole after the loop
 * (guarded so it is skipped when no memory region was seen at all).
 */
static void __init memmap_init(void)
{
    unsigned long start_pfn, end_pfn = 0;
    unsigned long hole_pfn = 0;
    int i, j, zone_id = 0, nid = NUMA_NO_NODE;

    for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
    {
        struct pglist_data *node = NODE_DATA(nid);

        for (j = 0; j < MAX_NR_ZONES; j++)
        {
            struct zone *zone = node->node_zones + j;

            if (!populated_zone(zone))
                continue;

            memmap_init_zone_range(zone, start_pfn, end_pfn,
                                   &hole_pfn);
            zone_id = j;
        }
    }

    /* Cover the hole after the last initialized range, if any */
    if (nid != NUMA_NO_NODE && hole_pfn < end_pfn)
        init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}

/*
 * Top-level early memory initializer: record the architectural zone PFN
 * boundaries from max_zone_pfn, initialize every node, then build the
 * memmap for all populated zones.
 */
void __init free_area_init(unsigned long *max_zone_pfn)
{
    unsigned long start_pfn, end_pfn;
    bool descending;
    int i, nid;

    /* Reset the recorded architectural zone boundaries */
    memset(arch_zone_lowest_possible_pfn, 0,
           sizeof(arch_zone_lowest_possible_pfn));
    memset(arch_zone_highest_possible_pfn, 0,
           sizeof(arch_zone_highest_possible_pfn));

    start_pfn = PHYS_PFN(memblock_start_of_DRAM());
    descending = arch_has_descending_max_zone_pfns();

    for (i = 0; i < MAX_NR_ZONES; i++)
    {
        int zone = descending ? MAX_NR_ZONES - i - 1 : i;

        /* ZONE_MOVABLE is sized separately, not by the architecture */
        if (zone == ZONE_MOVABLE)
            continue;

        end_pfn = max(max_zone_pfn[zone], start_pfn);
        arch_zone_lowest_possible_pfn[zone] = start_pfn;
        arch_zone_highest_possible_pfn[zone] = end_pfn;

        start_pfn = end_pfn;
    }

    for_each_node(nid)
        free_area_init_node(nid, NODE_DATA(nid));

    memmap_init();
}
