
/*
 * Stub NUMA lookup: this simplified build models a single memory node,
 * so every page resolves to node 0.
 */
int page_to_nid(const struct page *page)
{
    (void)page; /* single-node stub: argument intentionally unused */
    return 0;
}

/*
 * Stub zone lookup: every page is treated as living in ZONE_NORMAL
 * in this simplified build.
 */
static inline enum zone_type page_zonenum(const struct page *page)
{
    (void)page; /* stub: argument intentionally unused */
    return ZONE_NORMAL;
}

static inline struct zone *page_zone(const struct page *page)
{
    return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

/* When tag is not NULL, assuming mem_alloc_profiling_enabled */
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
{
    /* No-op stub: allocation-tag accounting is compiled out in this build. */
}

/*
 * Stub page-teardown hook: always reports the page as ready to be
 * handed back to the allocator.
 */
static inline bool free_pages_prepare(struct page *page, unsigned int order)
{
    (void)page;  /* stub: arguments intentionally unused */
    (void)order;
    return true;
}

/**
 * get_pfnblock_migratetype - Return the migratetype of a pageblock
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 *
 * Return: The migratetype of the pageblock
 *
 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn
 * to save a call to page_to_pfn().
 */
static inline enum migratetype get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
{
    /* Stub: every pageblock reports migratetype 0 in this build. */
    (void)page;
    (void)pfn;
    return 0;
}

/*
 * Record @order in page->private and flag the page as a free buddy
 * block so the allocator can find and merge it later.
 */
static inline void set_buddy_order(struct page *page, unsigned int order)
{
    set_page_private(page, order);
    __SetPageBuddy(page);
}

/*
 * Free one block into the buddy allocator, coalescing with free buddies.
 *
 * Repeatedly merges @page with its buddy (when the buddy is free, or is
 * a CONFIG_DEBUG_PAGEALLOC guard page) and moves up one order, until no
 * mergeable buddy exists or MAX_PAGE_ORDER is reached.  The resulting
 * block is stamped with its final order and placed on the free list.
 *
 * NOTE(review): callers appear to hold zone->lock (see free_one_page());
 * confirm before adding new call sites.
 */
static inline void __free_one_page(struct page *page,
                                   unsigned long pfn,
                                   struct zone *zone, unsigned int order,
                                   int migratetype, fpi_t fpi_flags)
{
    unsigned long buddy_pfn = 0;
    unsigned long combined_pfn;
    struct page *buddy;
    /*
     * Fix: to_tail was read uninitialized whenever FPI_TO_TAIL was not
     * set, which is undefined behavior.  Default to head insertion.
     */
    bool to_tail = false;

    while (order < MAX_PAGE_ORDER)
    {
        int buddy_mt = migratetype;

        buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
        if (!buddy)
            break;

        /*
         * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
         * merge with it and move up one order.
         */
        if (page_is_guard(buddy))
            clear_page_guard(zone, buddy, order);
        else
            __del_page_from_free_list(buddy, zone, order, buddy_mt);

        /* The merged block starts at the lower of the two pfns. */
        combined_pfn = buddy_pfn & pfn;
        page = page + (combined_pfn - pfn);
        pfn = combined_pfn;
        order++;
    }

    set_buddy_order(page, order);

    if (fpi_flags & FPI_TO_TAIL)
        to_tail = true;

    __add_to_free_list(page, zone, order, migratetype, to_tail);
}

/* Split a multi-block free page into its individual pageblocks. */
static void split_large_buddy(struct zone *zone, struct page *page,
                              unsigned long pfn, int order, fpi_t fpi)
{
    /*
     * Fix: use an unsigned long shift so the block span is computed in
     * the pfn's type; "1 << order" is an int-typed shift and would
     * overflow / sign-extend for large orders before the addition.
     */
    unsigned long end = pfn + (1UL << order);

    /*
     * TODO(review): the step below is the full block size, so this loop
     * executes exactly once and frees the whole block at @order instead
     * of splitting it per pageblock as the header comment describes
     * (the original "todo" marker).  Per-pageblock splitting is still
     * unimplemented; behavior is preserved here.
     */
    do
    {
        /* Each chunk is freed with its own pageblock's migratetype. */
        int mt = get_pfnblock_migratetype(page, pfn);

        __free_one_page(page, pfn, zone, order, mt, fpi);
        pfn += 1UL << order;
        if (pfn == end)
            break;
        page = pfn_to_page(pfn);
    } while (1);
}

/*
 * Defer freeing @page: record its order and push it onto the zone's
 * lockless list, to be flushed later by a caller that does acquire
 * zone->lock (see free_one_page()).
 */
static void add_page_to_zone_llist(struct zone *zone, struct page *page,
                                   unsigned int order)
{
    /* Remember the order */
    page->order = order;
    /* Add the page to the free list */
    llist_add(&page->pcp_llist, &zone->trylock_free_pages);
}

/*
 * Free @page into @zone's buddy allocator under zone->lock.
 *
 * With FPI_TRYLOCK, the lock is only try-acquired; on contention the
 * page is parked on the zone's lockless list instead of blocking, and
 * is freed by a later non-trylock caller that drains that list.
 */
static void free_one_page(struct zone *zone, struct page *page,
                          unsigned long pfn, unsigned int order,
                          fpi_t fpi_flags)
{
    struct llist_head *llhead;
    unsigned long flags;

    if (unlikely(fpi_flags & FPI_TRYLOCK))
    {
        if (!spin_trylock_irqsave(&zone->lock, flags))
        {
            /* Contended: defer this page rather than spin. */
            add_page_to_zone_llist(zone, page, order);
            return;
        }
    }
    else
    {
        spin_lock_irqsave(&zone->lock, flags);
    }

    /* The lock succeeded. Process deferred pages. */
    llhead = &zone->trylock_free_pages;
    /* Only non-trylock callers drain the deferred list. */
    if (unlikely(!llist_empty(llhead) && !(fpi_flags & FPI_TRYLOCK)))
    {
        struct llist_node *llnode;
        struct page *p, *tmp;

        llnode = llist_del_all(llhead);
        llist_for_each_entry_safe(p, tmp, llnode, pcp_llist)
        {
            /* Order was stashed in p->order by add_page_to_zone_llist(). */
            unsigned int p_order = p->order;

            split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
        }
    }
    split_large_buddy(zone, page, pfn, order, fpi_flags);
    spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Slow-path free: tear the page down and, if that succeeds, return it
 * to its zone's buddy allocator.
 */
static void __free_pages_ok(struct page *page, unsigned int order,
                            fpi_t fpi_flags)
{
    struct zone *zone = page_zone(page);
    unsigned long pfn = page_to_pfn(page);

    /* Only hand the page back if teardown reports it as freeable. */
    if (!free_pages_prepare(page, order))
        return;

    free_one_page(zone, page, pfn, order, fpi_flags);
}

/*
 * Free a pcp page
 *
 * NOTE(review): the per-cpu fast path is not implemented in this build;
 * pcp-eligible orders are routed straight to free_one_page() as well.
 */
static void __free_frozen_pages(struct page *page, unsigned int order,
                                fpi_t fpi_flags)
{
    /*
     * Fix: dropped the unused locals UP_flags, pcp and migratetype
     * (dead declarations left over from the full pcp implementation;
     * they triggered -Wunused-variable).
     */
    struct zone *zone;
    unsigned long pfn = page_to_pfn(page);

    /* Orders too large for the pcp lists always take the slow path. */
    if (!pcp_allowed_order(order))
    {
        __free_pages_ok(page, order, fpi_flags);
        return;
    }

    if (!free_pages_prepare(page, order))
        return;

    /*
     * We only track unmovable, reclaimable and movable on pcp lists.
     * Place ISOLATE pages on the isolated list because they are being
     * offlined but treat HIGHATOMIC and CMA as movable pages so we can
     * get those areas back if necessary. Otherwise, we may have to free
     * excessively into the page allocator
     */
    zone = page_zone(page);

    free_one_page(zone, page, pfn, order, fpi_flags);
}

/*
 * Drop one reference on @page and free whatever this caller still owns.
 *
 * If ours was the last reference, the whole order-@order block is freed.
 * Otherwise, when @page is not a compound head, another holder pins only
 * the first page: free the trailing halves (orders order-1 down to 0)
 * individually, leaving just page 0 behind.
 */
static void ___free_pages(struct page *page, unsigned int order, fpi_t fpi_flags)
{
    /* get PageHead before we drop reference */
    int head = PageHead(page);
    /* get alloc tag in case the page is released by others */
    struct alloc_tag *tag = pgalloc_tag_get(page);

    if (put_page_testzero(page))
        __free_frozen_pages(page, order, fpi_flags);
    else if (!head)
    {
        /* Account for the (1 << order) - 1 pages we free below. */
        pgalloc_tag_sub_pages(tag, (1 << order) - 1);
        /* Free the upper half at each order; page + (1 << order) with the
         * decremented order walks the block's tail sub-blocks. */
        while (order-- > 0)
            __free_frozen_pages(page + (1 << order), order,
                                fpi_flags);
    }
}
