

/*
 * Bundle of parameters describing one allocation request, threaded
 * through the allocator entry points (see get_page_from_freelist()).
 */
struct alloc_context
{
    /* Ordered list of zones to try; presumably built by the caller —
     * not dereferenced in this file, confirm against the setup code. */
    struct zonelist *zonelist;
    /* Nodes eligible for this request; passed to the zone iterator. */
    nodemask_t *nodemask;
    /* Iterator position of the first-choice zone; its zone is reported
     * to rmqueue() as the preferred zone. */
    struct zoneref *preferred_zoneref;
    /* Migratetype the request should be served from (free-list index). */
    int migratetype;

    /*
     * highest_zoneidx represents highest usable zone index of
     * the allocation request. Due to the nature of the zone,
     * memory on lower zone than the highest_zoneidx will be
     * protected by lowmem_reserve[highest_zoneidx].
     *
     * highest_zoneidx is also used by reclaim/compaction to limit
     * the target zone since higher zone than this index cannot be
     * usable for this allocation request.
     */
    enum zone_type highest_zoneidx;
    /* Whether dirty-page limits should spread allocations across
     * zones — declared here but not yet consulted in this file. */
    bool spread_dirty_pages;
};

/*
 * Search state for __rmqueue(): records how far down the fallback
 * hierarchy the previous call had to go, so repeated calls under the
 * same zone->lock hold can skip the levels already known to be empty.
 * Listed in order of increasing fragmentation risk.
 */
enum rmqueue_mode
{
    RMQUEUE_NORMAL, /* serve from the requested migratetype's free lists */
    RMQUEUE_CMA,    /* fall back to MIGRATE_CMA (only with ALLOC_CMA) */
    RMQUEUE_CLAIM,  /* claim a whole foreign block for our migratetype */
    RMQUEUE_STEAL,  /* steal a single page, leaving the block foreign */
};

/*
 * Return the first page queued on @area's free list for @migratetype,
 * or NULL when that list is empty. The page is not removed.
 */
static inline struct page *get_page_from_free_area(struct free_area *area,
                                                   int migratetype)
{
    struct list_head *head = &area->free_list[migratetype];

    return list_first_entry_or_null(head, struct page, buddy_list);
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline unsigned int expand(struct zone *zone, struct page *page, int low,
                                  int high, int migratetype)
{
    /* Pages handed back to the free lists so far. */
    unsigned int nr_added = 0;

    /*
     * Split the order-@high block in half repeatedly, freeing the upper
     * half at each step, until only an order-@low chunk remains at the
     * start of @page.
     */
    for (int cur = high - 1; cur >= low; cur--)
    {
        unsigned int half = 1u << cur;
        struct page *upper = &page[half];

        /*
         * Mark as guard pages (or page), that will allow to
         * merge back to allocator when buddy will be freed.
         * Corresponding page table entries will not be touched,
         * pages will stay not present in virtual address space
         */
        if (set_page_guard(zone, upper, cur))
            continue;

        __add_to_free_list(upper, zone, cur, migratetype, false);
        set_buddy_order(upper, cur);
        nr_added += half;
    }

    return nr_added;
}

/*
 * Adjust the zone's free-page statistics by @nr_pages for @migratetype
 * (negative when pages leave the free lists, see page_del_and_expand()).
 *
 * Stub: accounting is not implemented yet; the delta is currently
 * dropped on the floor.
 */
static inline void account_freepages(struct zone *zone, int nr_pages,
                                     int migratetype)
{
    // todo
}

/*
 * Detach the order-@high block at @page from its free list, give the
 * unused tail back via expand(), and account only the pages actually
 * consumed by the order-@low allocation (plus any guard pages expand()
 * did not re-add).
 */
static inline void page_del_and_expand(struct zone *zone,
                                       struct page *page, int low,
                                       int high, int migratetype)
{
    int consumed = 1 << high;

    __del_page_from_free_list(page, zone, high, migratetype);

    /* Whatever expand() returned to the free lists was not consumed. */
    consumed -= expand(zone, page, low, high, migratetype);
    account_freepages(zone, -consumed, migratetype);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                              int migratetype)
{
    unsigned int current_order;

    /* Scan from the requested order upward to the largest order. */
    for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++)
    {
        struct free_area *area = &zone->free_area[current_order];
        struct page *candidate = get_page_from_free_area(area, migratetype);

        if (!candidate)
            continue;

        /* Found one: detach it and free the tail beyond @order. */
        page_del_and_expand(zone, candidate, order, current_order,
                            migratetype);
        return candidate;
    }

    return NULL;
}

/*
 * CMA fallback path: serve the request from the MIGRATE_CMA free lists.
 * Thin wrapper so the fallback chain in __rmqueue() reads uniformly.
 */
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
                                                  unsigned int order)
{
    return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}

/*
 * Try to allocate from some fallback migratetype by claiming the entire block,
 * i.e. converting it to the allocation's start migratetype.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 *
 * Stub: block claiming is not implemented yet; always reports failure.
 */
static inline struct page *__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
                                           unsigned int alloc_flags)
{
    // todo
    return NULL;
}

/*
 * Try to steal a single page from some fallback migratetype. Leave the rest of
 * the block as its current migratetype, potentially causing fragmentation.
 *
 * Stub: stealing is not implemented yet; always reports failure.
 */
static inline struct page *__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
{
    return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static inline struct page *__rmqueue(struct zone *zone, unsigned int order,
                                     int migratetype,
                                     unsigned int alloc_flags, enum rmqueue_mode *mode)
{
    struct page *page;

    /*
     * First try the freelists of the requested migratetype, then try
     * fallbacks modes with increasing levels of fragmentation risk.
     *
     * The fallback logic is expensive and rmqueue_bulk() calls in
     * a loop with the zone->lock held, meaning the freelists are
     * not subject to any outside changes. Remember in *mode where
     * we found pay dirt, to save us the search on the next call.
     */
    switch (*mode)
    {
    case RMQUEUE_NORMAL:
        page = __rmqueue_smallest(zone, order, migratetype);
        break;
    case RMQUEUE_CMA:
        if (alloc_flags & ALLOC_CMA)
        {
            page = __rmqueue_cma_fallback(zone, order);
            if (page)
            {
                *mode = RMQUEUE_CMA;
            }
        }
        break;
    case RMQUEUE_CLAIM:
        page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
        if (page)
        {
            /* Replenished preferred freelist, back to normal mode. */
            *mode = RMQUEUE_NORMAL;
        }
        break;
    case RMQUEUE_STEAL:
        if (!(alloc_flags & ALLOC_NOFRAGMENT))
        {
            page = __rmqueue_steal(zone, order, migratetype);
            if (page)
            {
                *mode = RMQUEUE_STEAL;
            }
        }
        break;
    }

    return page;
}

/*
 * Sanity-check a freshly removed page block; true means "bad page,
 * retry the allocation" (see the loop in rmqueue_buddy()).
 *
 * Stub: no checks are performed yet, every page is accepted.
 */
static inline bool check_new_pages(struct page *page, unsigned int order)
{
    return false;
}

/*
 * Allocate an order-@order page straight from the buddy free lists,
 * bypassing any per-cpu caches. Retries while check_new_pages()
 * flags the result as bad; a NULL result from __rmqueue() exits the
 * loop (check_new_pages(NULL, ...) returns false) and is returned.
 *
 * NOTE(review): zone->lock handling is still missing (the "todo"
 * below); `flags` is presumably reserved for a future
 * spin_lock_irqsave(&zone->lock, flags) — confirm when implementing.
 * `preferred_zone` is accepted but not yet used here.
 */
static inline struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
                                         unsigned int order, unsigned int alloc_flags,
                                         int migratetype)
{
    struct page *page;
    unsigned long flags;
    // todo
    do
    {
        page = NULL;

        /* Always true for now — presumably a placeholder for an
         * earlier fast-path attempt before the generic __rmqueue();
         * confirm against the intended design. */
        if (!page)
        {
            /* Fresh search state per attempt: start at NORMAL. */
            enum rmqueue_mode rmqm = RMQUEUE_NORMAL;

            page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
        }

    } while (check_new_pages(page, order));

    return page;
}

/* Lock and remove page from the per-cpu list */
/*
 * Stub: per-cpu page lists are not implemented yet; always returns
 * NULL, which makes rmqueue() fall through to the buddy path.
 */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
                                    struct zone *zone, unsigned int order,
                                    int migratetype, unsigned int alloc_flags)
{
    return NULL;
}

/*
 * Allocate a page from the given zone.
 * Use pcplists for THP or "cheap" high-order allocations.
 */

/*
 * Do not instrument rmqueue() with KMSAN. This function may call
 * __msan_poison_alloca() through a call to set_pfnblock_migratetype().
 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
 * may call rmqueue() again, which will result in a deadlock.
 */
static inline struct page *rmqueue(struct zone *preferred_zone,
                                   struct zone *zone, unsigned int order,
                                   gfp_t gfp_flags, unsigned int alloc_flags,
                                   int migratetype)
{
    /* Fast path: orders eligible for the per-cpu lists try those first. */
    if (likely(pcp_allowed_order(order)))
    {
        struct page *page = rmqueue_pcplist(preferred_zone, zone, order,
                                            migratetype, alloc_flags);
        if (likely(page))
            return page;
    }

    // todo
    /* Slow path: take the page straight from the buddy free lists. */
    return rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
                         migratetype);
}

/*
 * Prepare a freshly allocated page block for handing to the caller.
 *
 * Stub: no preparation (clearing, flag setup, etc.) is done yet.
 */
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                          unsigned int alloc_flags)
{
}

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 *
 * Iterates the zones permitted by ac (capped at ac->highest_zoneidx,
 * filtered by ac->nodemask) starting from ac->preferred_zoneref, and
 * returns the first page rmqueue() yields, after prep_new_page();
 * NULL if every zone fails.
 *
 * The previous version declared `last_pgdat`, `last_pgdat_dirty_ok`
 * and `no_fallback` (the last never even initialized) without using
 * them — removed; reintroduce them together with the dirty-spreading /
 * no-fragment logic when that is implemented.
 */
static struct page *get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                                           const struct alloc_context *ac)
{
    struct zoneref *z;
    struct zone *zone;
    struct page *page = NULL;

    z = ac->preferred_zoneref;
    for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, ac->nodemask)
    {
        page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
                       gfp_mask, alloc_flags, ac->migratetype);
        if (page)
        {
            prep_new_page(page, order, gfp_mask, alloc_flags);

            break;
        }
    }

    return page;
}
