/*
 * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Tue Feb 18 11:45:36 2025
 */
#define pr_fmt(fmt) "ascend huge pool: " fmt

#include <linux/hugetlb.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/page-flags.h>

#include "internal.h"
#include "hugetlb_vmemmap.h"

/* Number of base pages per 2M (PMD) / 1G (PUD) huge page, and 2M pages per 1G page. */
#define PAGES_PER_PMD (PMD_SIZE >> PAGE_SHIFT)
#define PAGES_PER_PUD (PUD_SIZE >> PAGE_SHIFT)
#define PMDS_PER_PUD (PUD_SIZE >> PMD_SHIFT)

/* Index of the per-size pool arrays: one 1G pool and one 2M pool per node. */
enum ascend_huge_pool_type {
	ASCEND_HUGE_POOL_1G = 0,
	ASCEND_HUGE_POOL_2M,
	ASCEND_HUGE_POOL_NR,
};

/*
 * Tracks one 1G page that has been split into 2M pieces.
 * @head_pages: links this record into the 2M pool's split_list
 * @page:       head page of the split 1G range
 * @nr_free:    how many of the 2M pieces are currently free
 */
struct ascend_split_hugepage {
	struct list_head head_pages;
	struct page *page;
	unsigned int nr_free;
};

/*
 * Per-node, per-size free-page pool.
 * @nr_total/@nr_free count huge pages of this pool's size; @lock protects
 * every field, including the split records reachable from split_list.
 */
struct ascend_huge_pool {
	enum ascend_huge_pool_type type;
	unsigned int nr_total;
	unsigned int nr_free;
	union {
		unsigned int nr_split; /* for 1G pool */
		struct list_head split_list; /* for 2M pool */
	};
	struct list_head free_list;
	spinlock_t lock;
};

/* Serializes pool resizing (proc write) against page-failure dissolving. */
static DEFINE_MUTEX(ascend_update_huge_pool_mutex);
struct ascend_huge_pool ascend_huge_pools[ASCEND_HUGE_POOL_NR][MAX_NUMNODES];

/* Per-node pool accessors (also usable as base of the per-node array). */
#define gigantic_pool(nid) (struct ascend_huge_pool *)&ascend_huge_pools[ASCEND_HUGE_POOL_1G][nid]
#define huge_pool(nid) (struct ascend_huge_pool *)&ascend_huge_pools[ASCEND_HUGE_POOL_2M][nid]

/* Head page of the 1G-aligned range containing @page. */
#define ASCEND_GIGANTIC_HEAD_PAGE(page) \
	((struct page *)pfn_to_page(ALIGN_DOWN(page_to_pfn(page), PAGES_PER_PUD)))

/*
 * ->private of the first tail page of a 1G range holds a pointer to the
 * range's ascend_split_hugepage record, or 0 when the range is not split.
 */
#define ASCEND_GIGANTIC_FIRST_TAIL_PRI(page) (ASCEND_GIGANTIC_HEAD_PAGE(page)[1].private)

/*
 * To prevent frequent splitting of 1G pages in edge cases,
 * the merge threshold is set to PMDS_PER_PUD plus 50% of PMDS_PER_PUD.
 */
#define ASCEND_MERGE_THRESHOLD (PMDS_PER_PUD + (PMDS_PER_PUD >> 1))

/* like prep_new_huge_page but only set the first page */
/*
 * Like prep_new_huge_page() but only initializes this single 2M page:
 * mark it as a compound head, point the tail struct pages at it, and
 * set the hugetlb destructor/order/mapcount state.
 */
static void make_new_huge_page(struct page *page)
{
	int idx, nr_tails;

	__SetPageHead(page);
	/* with vmemmap optimization only one struct-page's worth of tails exists */
	nr_tails = HPageVmemmapOptimized(page) ? PAGE_SIZE / sizeof(*page) : PAGES_PER_PMD;
	for (idx = 0; idx < nr_tails; idx++)
		set_compound_head(page + idx, page);

	SetPagePool(page);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	set_compound_order(page, get_order(PMD_SIZE));
	atomic_set(compound_mapcount_ptr(page), -1);
	if (hpage_pincount_available(page))
		atomic_set(compound_pincount_ptr(page), 0);
}

/*
 * Turn a freshly allocated 1G-contiguous range into a pool giant page.
 * Returns false when prep_compound_gigantic_page() rejects the range
 * (e.g. an unexpectedly elevated refcount on a tail page).
 */
static bool make_new_giant_page(struct page *page)
{
	struct hstate *h = size_to_hstate(PMD_SIZE);
	int offset;

	if (!prep_compound_gigantic_page(page, get_order(PUD_SIZE)))
		return false;

	/*
	 * Free struct pages at the PMD granularity because we would
	 * split the 1G giant pages to hugepage of PMD_SIZE.
	 */
	for (offset = 0; offset < PAGES_PER_PUD; offset += PAGES_PER_PMD)
		hugetlb_vmemmap_free(h, page + offset);

	SetPagePool(page);
	set_page_count(page, 0);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	return true;
}

/*
 * Rebuild a 1G compound page from its constituent 2M pages: clear the
 * per-2M pool state, re-point every tail page at the new 1G head, then
 * install the 1G order, head flag and hugetlb destructor.
 */
static void prep_merge_giant_page(struct page *page)
{
	int pmd_off, tail, nr_tails;

	/* with vmemmap optimization only one struct-page's worth of tails exists */
	nr_tails = HPageVmemmapOptimized(page) ? PAGE_SIZE / sizeof(*page) : PAGES_PER_PMD;
	for (pmd_off = 0; pmd_off < PAGES_PER_PUD; pmd_off += PAGES_PER_PMD) {
		struct page *huge = page + pmd_off;

		ClearPagePool(huge);
		for (tail = 0; tail < nr_tails; tail++) {
			set_page_count(huge + tail, 0);
			set_compound_head(huge + tail, page);
		}
	}
	set_compound_order(page, get_order(PUD_SIZE));
	__SetPageHead(page);
	SetPagePool(page);
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
}

/* Pop one free page off @pool's free list, or return NULL if none is left. */
static struct page *dequeue_page_from_pool(struct ascend_huge_pool *pool)
{
	struct ascend_split_hugepage *split;
	struct page *page;

	spin_lock_irq(&pool->lock);
	if (pool->nr_free == 0) {
		spin_unlock_irq(&pool->lock);
		return NULL;
	}

	page = list_first_entry(&pool->free_list, struct page, lru);
	list_del(&page->lru);
	pool->nr_free--;

	/* a 2M page may belong to a split 1G range; keep its free count in sync */
	if (pool->type == ASCEND_HUGE_POOL_2M) {
		split = (struct ascend_split_hugepage *)ASCEND_GIGANTIC_FIRST_TAIL_PRI(page);
		if (split)
			split->nr_free--;
	}
	spin_unlock_irq(&pool->lock);

	return page;
}

/*
 * Take one free 1G page from @nid's gigantic pool and split it into
 * PMDS_PER_PUD 2M huge pages, which are published into the node's 2M pool
 * together with a tracking record for a later merge.
 * Returns 0 on success, -ENOMEM if no record or no free 1G page exists.
 */
static int gigantic_page_split(int nid)
{
	struct ascend_split_hugepage *split;
	struct ascend_huge_pool *pool;
	struct page *giant;
	int offset;
	LIST_HEAD(page_list);

	split = kzalloc(sizeof(*split), GFP_KERNEL);
	if (!split)
		return -ENOMEM;

	pool = gigantic_pool(nid);
	giant = dequeue_page_from_pool(pool);
	if (!giant) {
		kfree(split);
		return -ENOMEM;
	}

	split->page = giant;
	split->nr_free = PMDS_PER_PUD;

	spin_lock_irq(&pool->lock);
	pool->nr_split++;
	spin_unlock_irq(&pool->lock);

	/* prepare each 2M piece off-lock, then publish them all in one go */
	for (offset = 0; offset < PAGES_PER_PUD; offset += PAGES_PER_PMD) {
		struct page *huge = giant + offset;

		make_new_huge_page(huge);
		list_add_tail(&huge->lru, &page_list);
	}

	pool = huge_pool(nid);
	spin_lock_irq(&pool->lock);
	list_splice_tail(&page_list, &pool->free_list);
	list_add(&split->head_pages, &pool->split_list);
	ASCEND_GIGANTIC_FIRST_TAIL_PRI(giant) = (unsigned long)split;
	pool->nr_free += PMDS_PER_PUD;
	pool->nr_total += PMDS_PER_PUD;
	spin_unlock_irq(&pool->lock);

	return 0;
}

static void ascend_huge_pool_merge_pages(unsigned int nid, unsigned int nr_pages);

/*
 * Last-resort replenish step before retrying a dequeue:
 * - a 2M (PMD_SIZE) request splits one free 1G page into 2M pages;
 * - a 1G (PUD_SIZE) request merges fully-free 2M pages back into a 1G page.
 * Failures are deliberately ignored here: the caller's subsequent dequeue
 * will simply fail again.  Any other page size is a caller bug.
 */
static inline void dequeue_page_prepare(bool try_hard, unsigned long page_size, int nid)
{
	if (!try_hard)
		return;

	if (page_size == PMD_SIZE)
		gigantic_page_split(nid);
	else if (page_size == PUD_SIZE)
		ascend_huge_pool_merge_pages(nid, 1);
	else
		BUG();	/* BUG() is the idiom for an unconditional BUG_ON(1) */
}

/*
 * Allocation strategy:
 * 1. First, attempt to allocate from the specified node (nid).
 * 2. If that fails, try allocating from the nodes specified in the nodemask.
 * 3. If allocation still fails, attempt to merge or split pages
 *    (the try_hard pass) and retry the same order once.
 */
struct page *ascend_huge_pool_alloc_page(unsigned long page_size,
			gfp_t gfp_mask, int nid, nodemask_t *nodemask)
{
	struct ascend_huge_pool *hpools;
	struct page *page;
	bool try_hard;
	int node;

	hpools = (page_size == PUD_SIZE) ? gigantic_pool(0) : huge_pool(0);

	for (try_hard = false; ; try_hard = true) {
		/* preferred node first */
		dequeue_page_prepare(try_hard, page_size, nid);
		page = dequeue_page_from_pool(&hpools[nid]);
		if (page)
			break;

		/* then every other allowed node */
		for_each_node_mask(node, *nodemask) {
			if (node == nid)
				continue;

			dequeue_page_prepare(try_hard, page_size, node);
			page = dequeue_page_from_pool(&hpools[node]);
			if (page)
				break;
		}

		/* stop after the try_hard pass, or as soon as we got a page */
		if (page || try_hard)
			break;
	}

	if (page)
		set_page_refcounted(page);

	return page;
}

/* Re-form @page as a 1G compound page and return it to @nid's 1G pool. */
static void ascend_merge_hugepage(unsigned int nid, struct page *page)
{
	struct ascend_huge_pool *gpool = gigantic_pool(nid);

	prep_merge_giant_page(page);

	spin_lock_irq(&gpool->lock);
	list_add(&page->lru, &gpool->free_list);
	gpool->nr_free++;
	gpool->nr_split--;
	spin_unlock_irq(&gpool->lock);
}

/*
 * Ascend huge page destructor: return @page to its per-node pool.
 *
 * A 2M page that belongs to a split 1G range also bumps the range's free
 * count; once all PMDS_PER_PUD siblings are free and the 2M pool holds
 * more than ASCEND_MERGE_THRESHOLD free pages, the siblings are pulled
 * back out and merged into a 1G page again.
 */
void ascend_huge_pool_free_page(struct page *page)
{
	int i;
	struct page *head;
	struct ascend_huge_pool *pool;
	int nid = page_to_nid(page);
	struct ascend_split_hugepage *split_page;

	VM_BUG_ON_PAGE(!PageHuge(page), page);
	if (page_size(page) == PUD_SIZE)
		pool = gigantic_pool(nid);
	else
		pool = huge_pool(nid);

	spin_lock_irq(&pool->lock);
	list_add(&page->lru, &pool->free_list);
	pool->nr_free++;
	if (page_size(page) == PMD_SIZE) {
		head = ASCEND_GIGANTIC_HEAD_PAGE(page);
		/* NULL when this 2M page does not come from a split 1G range */
		split_page = (struct ascend_split_hugepage *)head[1].private;
		if (!split_page) {
			spin_unlock_irq(&pool->lock);
			return;
		}
		split_page->nr_free++;
		/* merge only when it will not immediately force a re-split */
		if (split_page->nr_free == PMDS_PER_PUD
				&& pool->nr_free > ASCEND_MERGE_THRESHOLD) {
			/*
			 * NOTE(review): counters are adjusted by PAGES_PER_PMD
			 * while gigantic_page_split() added PMDS_PER_PUD; the
			 * two are equal on 4K-page arm64 but not in general —
			 * confirm PMDS_PER_PUD was intended here.
			 */
			pool->nr_free -= PAGES_PER_PMD;
			pool->nr_total -= PAGES_PER_PMD;
			for (i = 0; i < PAGES_PER_PUD; i += PAGES_PER_PMD) {
				struct page *huge = head + i;
				/* strip the 2M compound metadata from each piece */
				__ClearPageHead(huge);
				huge[1].compound_nr = 0;
				huge[1].compound_order = 0;
				list_del(&huge->lru);
			}
			list_del(&split_page->head_pages);
			spin_unlock_irq(&pool->lock);
			kfree(split_page);
			/*
			 * NOTE(review): the split pointer is cleared after the
			 * 2M pool lock is dropped — verify no reader can see
			 * the freed record in that window.
			 */
			ASCEND_GIGANTIC_FIRST_TAIL_PRI(page) = 0;
			ascend_merge_hugepage(nid, head);
			return;
		}
	}
	spin_unlock_irq(&pool->lock);
}

/* Splice @nr prepared giant pages from @page_list into @nid's 1G pool. */
static void ascend_huge_pool_add_pages(int nid, int nr, struct list_head *page_list)
{
	struct ascend_huge_pool *pool = gigantic_pool(nid);

	if (!nr)
		return;

	spin_lock_irq(&pool->lock);
	list_splice_tail(page_list, &pool->free_list);
	pool->nr_total += nr;
	pool->nr_free += nr;
	spin_unlock_irq(&pool->lock);
}

/*
 * Allocate from the hugetlb pool to ascend huge pool.
 *
 * Tries to pull @nr_pages 1G physically contiguous ranges from the buddy
 * allocator on @nid and enqueue them into the node's gigantic pool.
 * Returns the number of giant pages actually added.
 */
static int alloc_hugepage_for_ascend_huge_pool(unsigned int nid, unsigned int nr_pages)
{
	/* unsigned to match @nr_pages and avoid signed/unsigned comparison */
	unsigned int i, nr_fail;
	LIST_HEAD(page_list);

	for (i = 0, nr_fail = 0; i < nr_pages; i++) {
		struct page *page = alloc_contig_pages(PAGES_PER_PUD,
				/*
				 * GFP_KERNEL for reclaim
				 * __GFP_MOVABLE and __GFP_HIGHMEM to alloc movable memory
				 * __GFP_THISNODE to forbidden fallback to other node
				 */
				GFP_KERNEL | __GFP_MOVABLE | __GFP_HIGHMEM | __GFP_THISNODE,
				nid, NULL);
		if (!page) {
			/* %u: both counts are unsigned int */
			pr_warn("alloc %u giant page failed, available %u\n", nr_pages, i);
			break;
		}
		if (!make_new_giant_page(page)) {
			free_contig_range(page_to_pfn(page), PAGES_PER_PUD);
			pr_warn("Giant page can not be used due to unexpected inflated ref count\n");
			nr_fail++;
			continue;
		}
		list_add_tail(&page->lru, &page_list);
	}

	i -= nr_fail;
	ascend_huge_pool_add_pages(nid, i, &page_list);

	return i;
}

/*
 * Merge up to @nr_pages fully-free split 1G ranges on @nid back into the
 * gigantic pool.  The 2M pieces are unlinked under the 2M pool lock; the
 * compound-page rebuild itself happens afterwards without that lock.
 */
static void ascend_huge_pool_merge_pages(unsigned int nid, unsigned int nr_pages)
{
	int i;
	struct page *page;
	struct ascend_huge_pool *pool = huge_pool(nid);
	struct ascend_split_hugepage *split_page, *split_next;
	LIST_HEAD(page_list);

	if (nr_pages == 0)
		return;
	spin_lock_irq(&pool->lock);
	list_for_each_entry_safe(split_page, split_next, &pool->split_list, head_pages) {
		page = split_page->page;
		/* only ranges whose 2M pieces are all free can be merged */
		if (split_page->nr_free == PMDS_PER_PUD) {
			/*
			 * NOTE(review): counters are adjusted by PAGES_PER_PMD
			 * while gigantic_page_split() added PMDS_PER_PUD; the
			 * two are equal on 4K-page arm64 but not in general —
			 * confirm PMDS_PER_PUD was intended here.
			 */
			pool->nr_free -= PAGES_PER_PMD;
			pool->nr_total -= PAGES_PER_PMD;
			for (i = 0; i < PAGES_PER_PUD; i += PAGES_PER_PMD) {
				struct page *huge = page + i;
				/* strip the 2M compound metadata from each piece */
				__ClearPageHead(huge);
				huge[1].compound_nr = 0;
				huge[1].compound_order = 0;
				list_del(&huge->lru);
			}
			list_del(&split_page->head_pages);
			nr_pages--;
			/* defer the rebuild: collect candidates on a local list */
			list_add(&split_page->head_pages, &page_list);
			if (nr_pages == 0)
				break;
		}
	}
	spin_unlock_irq(&pool->lock);

	/* rebuild each candidate as a 1G page and free its tracking record */
	list_for_each_entry_safe(split_page, split_next, &page_list, head_pages) {
		page = split_page->page;
		ascend_merge_hugepage(nid, page);
		list_del(&split_page->head_pages);
		kfree(split_page);
		cond_resched();
	}
}

/*
 * Undo path for ascend_destroy_giant_page(): the first @offset base pages
 * (a multiple of PAGES_PER_PMD) already had their vmemmap re-allocated, so
 * re-free the vmemmap of each of those 2M chunks to restore the giant
 * page's optimized state.
 */
static inline void ascend_fallback_giant_page(struct page *page, int offset)
{
	int i;
	struct page *huge;
	struct hstate *h = size_to_hstate(PMD_SIZE);

	/*
	 * @offset is already a base-page index stepped by PAGES_PER_PMD in
	 * the caller.  The old condition (i * PAGES_PER_PMD < offset) scaled
	 * i a second time, so only the first 2M chunk was ever restored.
	 */
	for (i = 0; i < offset; i += PAGES_PER_PMD) {
		huge = page + i;
		hugetlb_vmemmap_free(h, huge);
	}
}

/*
 * Dissolve a free 2M pool huge page that contains failed base page @page.
 *
 * @head: the 2M huge page to dissolve
 * @page: the failing base page inside @head (or inside the surrounding
 *        1G range when called from the giant-page path)
 * @list: non-NULL only when called from ascend_disolve_giant_page_failure();
 *        it then carries the sibling 2M pages to enqueue into the 2M pool.
 *
 * Restores @head's vmemmap, fixes pool accounting, marks the bad base page
 * HWPoison and frees the remaining good pages back to the buddy allocator.
 * Returns 0 on success or a negative errno.
 */
static int ascend_disolve_huge_page_failure(struct page *head,
	struct page *page, struct list_head *list)
{
	int ret, nid = page_to_nid(page);
	struct hstate *h = size_to_hstate(PMD_SIZE);
	struct ascend_huge_pool *pool;
	struct ascend_split_hugepage *split;

	/* the vmemmap must be whole again before tail pages are touched */
	ret = hugetlb_vmemmap_alloc(h, head);
	if (unlikely(ret))
		return ret;

	pool = huge_pool(nid);
	spin_lock_irq(&pool->lock);
	if (list) { /* split from 1G page, enqueue 511 2M hugepage */
		list_splice_tail(list, &pool->free_list);
		pool->nr_free += PMDS_PER_PUD - 1;
		pool->nr_total += PMDS_PER_PUD - 1;
	} else { /* dequeue from 2M pool */
		list_del(&head->lru);
		pool->nr_free--;
		pool->nr_total--;
		/* drop the split record: its 1G range is being torn apart */
		split = (struct ascend_split_hugepage *)ASCEND_GIGANTIC_FIRST_TAIL_PRI(page);
		if (split)
			list_del(&split->head_pages);
	}
	spin_unlock_irq(&pool->lock);

	/* split is only assigned on the !list branch above */
	if (!list && split) {
		kfree(split);
		ASCEND_GIGANTIC_FIRST_TAIL_PRI(page) = 0;
		pool = gigantic_pool(nid);
		spin_lock_irq(&pool->lock);
		pool->nr_split--;
		spin_unlock_irq(&pool->lock);
	}

	/*
	 * The failing page is the 1G range's head page: it cannot be handed
	 * back to buddy (its struct page anchors the range), so pin it and
	 * poison it in place.
	 */
	if (head == ASCEND_GIGANTIC_HEAD_PAGE(page)) {
		page_ref_inc(head);
		SetPageHWPoison(page);
		/* head->flag will always be set to hwposion */
		return 0;
	}

	/* move the poison flag from the 2M head to the actually failed page */
	if (PageHWPoison(head) && page != head) {
		SetPageHWPoison(page);
		ClearPageHWPoison(head);
	}

	/* free the whole 2M range, then pull the bad page back out of buddy */
	set_page_refcounted(head);
	__free_pages(head, get_order(PMD_SIZE));
	if (take_page_off_buddy(page))
		page_ref_inc(page);
	else
		return -EBUSY;

	return 0;
}

/*
 * Dissolve a free 1G pool page containing failed base page @page: remove
 * it from the gigantic pool, re-prepare each 2M chunk as a standalone
 * huge page, then delegate the chunk holding @page to
 * ascend_disolve_huge_page_failure() with the siblings on @page_list.
 */
static int ascend_disolve_giant_page_failure(
	struct page *head, struct page *page)
{
	struct ascend_huge_pool *pool;
	int i, nid = page_to_nid(page);
	LIST_HEAD(page_list);
	struct page *huge, *p;

	pool = gigantic_pool(nid);
	spin_lock_irq(&pool->lock);
	list_del(&head->lru);
	/*
	 * NOTE(review): only nr_free is decremented here, while
	 * ascend_huge_page_dec() also drops nr_total — confirm the 1G
	 * nr_total is meant to keep counting the dissolved page.
	 */
	pool->nr_free--;
	spin_unlock_irq(&pool->lock);

	/* 2M-aligned chunk that contains the failing base page */
	huge = pfn_to_page(ALIGN_DOWN(page_to_pfn(page), PAGES_PER_PMD));
	for (i = 0; i < PAGES_PER_PUD; i += PAGES_PER_PMD) {
		p = head + i;
		make_new_huge_page(p);
		/* keep the failing chunk off the list; it is handled below */
		if (p != huge)
			list_add_tail(&p->lru, &page_list);
	}

	/* move the poison flag from the 1G head to the failing 2M chunk */
	if (PageHWPoison(head) && huge != head) {
		SetPageHWPoison(huge);
		ClearPageHWPoison(head);
	}

	return ascend_disolve_huge_page_failure(huge, page, &page_list);
}

/*
 * Entry point for dissolving a pool huge page that contains a failed
 * base page.  Returns 0 on success, -EBUSY when the page is still in
 * use, or -EFAULT for an unexpected page size.
 */
int ascend_huge_page_failure(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = -EFAULT;

	if (page_count(page))
		return -EBUSY;

	mutex_lock(&ascend_update_huge_pool_mutex);
	if (page_size(head) == PUD_SIZE)
		ret = ascend_disolve_giant_page_failure(head, page);
	else if (page_size(head) == PMD_SIZE)
		ret = ascend_disolve_huge_page_failure(head, page, NULL);
	mutex_unlock(&ascend_update_huge_pool_mutex);

	return ret;
}

/*
 * Release a free 1G pool page back to the buddy allocator: restore the
 * vmemmap of every 2M chunk, strip the compound metadata, then free the
 * base pages one by one.  On partial vmemmap failure the already-restored
 * chunks are rolled back via ascend_fallback_giant_page().
 */
static int ascend_destroy_giant_page(struct page *page)
{
	int i, ret;
	struct page *huge, *p;
	struct hstate *h = size_to_hstate(PMD_SIZE);

	if (unlikely(PageHWPoison(page))) {
		/* message typo fixed: "posioned" -> "poisoned" */
		pr_err("cannot destroy poisoned giant page: pfn=0x%lx\n", page_to_pfn(page));
		return -EBUSY;
	}

	for (i = 0; i < PAGES_PER_PUD; i += PAGES_PER_PMD) {
		huge = page + i;
		ret = hugetlb_vmemmap_alloc(h, huge);
		if (unlikely(ret)) {
			pr_err("cannot decrease 1G page, ret=%d\n", ret);
			/* roll back the chunks already restored above */
			ascend_fallback_giant_page(page, i);
			return ret;
		}
	}
	ClearPagePool(page);
	__ClearPageHead(page);
	page[1].compound_nr = 0;
	page[1].compound_order = 0;
	for (i = 0; i < PAGES_PER_PUD; i++) {
		p = page + i;
		set_page_count(p, 1);
		__free_page(p);
		cond_resched();
	}

	return 0;
}

/*
 * Remove one free giant page from @nid's 1G pool and return its memory
 * to the buddy allocator; re-enqueue the page if destruction fails.
 * Returns 0 on success, -ENOMEM when the pool has no free page, or the
 * error from ascend_destroy_giant_page().
 */
static int ascend_huge_page_dec(unsigned int nid)
{
	struct ascend_huge_pool *pool = gigantic_pool(nid);
	struct page *page;
	int ret;

	spin_lock_irq(&pool->lock);
	if (pool->nr_free == 0) {
		spin_unlock_irq(&pool->lock);
		return -ENOMEM;
	}
	page = list_first_entry(&pool->free_list, struct page, lru);
	list_del(&page->lru);
	pool->nr_total--;
	pool->nr_free--;
	spin_unlock_irq(&pool->lock);

	ret = ascend_destroy_giant_page(page);
	if (likely(!ret))
		return 0;

	/* destruction failed: put the page back and restore the counters */
	spin_lock_irq(&pool->lock);
	list_add(&page->lru, &pool->free_list);
	pool->nr_total++;
	pool->nr_free++;
	spin_unlock_irq(&pool->lock);

	return ret;
}

/*
 * Resize @nid's 1G pool to @nr_pages giant pages.  Growing allocates
 * contiguous memory; shrinking first merges free 2M pages back into 1G
 * pages when the free count is short, then destroys free giant pages one
 * by one until the target is met or a destruction fails.
 */
static int update_huge_page_pool(unsigned int nid, unsigned int nr_pages)
{
	int ret = 0;
	long diff;
	unsigned long nr_free;
	struct ascend_huge_pool *pool = gigantic_pool(nid);

	mutex_lock(&ascend_update_huge_pool_mutex);
	spin_lock_irq(&pool->lock);
	diff = (long)nr_pages - pool->nr_total;
	nr_free = pool->nr_free;
	spin_unlock_irq(&pool->lock);

	if (diff > 0) {
		ret = alloc_hugepage_for_ascend_huge_pool(nid, diff);
	} else if (diff < 0) {	/* style fix: space after "if" (checkpatch) */
		if (nr_free < abs(diff))
			ascend_huge_pool_merge_pages(nid, abs(diff) - nr_free);
		for (; diff != 0; diff++)
			if (ascend_huge_page_dec(nid))
				break;
	}
	mutex_unlock(&ascend_update_huge_pool_mutex);
	return ret;
}

/*
 * Parse a "<nid>:<nr_pages>[,<nid>:<nr_pages>]..." string and invoke
 * @func(nid, nr_pages) for each pair.  Stops at '\n' or NUL; returns 0
 * on success, -EINVAL on malformed input or an invalid node, or the
 * first negative value returned by @func.
 */
static int huge_control_parase_str(const char *s, int (*func)(unsigned, unsigned))
{
	int ret;
	int count = 0;
	unsigned int nid, nr_pages;

	while (*s && *s != '\n') {
		if (sscanf(s, "%u:%u%n", &nid, &nr_pages, &count) != 2) {
			pr_warn("invalid format\n");
			return -EINVAL;
		}

		if (nid >= MAX_NUMNODES || !node_online(nid)) {
			/* %u: nid is unsigned int */
			pr_warn("invalid nid %u\n", nid);
			return -EINVAL;
		}

		ret = func(nid, nr_pages);
		if (ret < 0)
			return ret;

		/* advance past the consumed pair and an optional separator */
		s += count;
		if (*s == ',')
			s++;
	}

	return 0;
}

/*
 * echo <nid>:<nr_1G_pages>[,<nid>:<nr_1G_pages>] > /proc/huge_control
 *
 * Copies the user buffer, applies each pair via update_huge_page_pool(),
 * and reports the full write size on success.
 */
ssize_t huge_control_proc_write(struct file *f, const char __user *ubuf, size_t size,
			  loff_t *_pos)
{
	char *kbuf;
	int ret;

	kbuf = memdup_user_nul(ubuf, size);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	ret = huge_control_parase_str(kbuf, update_huge_page_pool);
	kfree(kbuf);

	return ret < 0 ? ret : size;
}

/* Dump per-node 2M and 1G pool statistics for /proc/huge_control reads. */
static int huge_control_stat_show(struct seq_file *seq, void *offset)
{
	unsigned int nr_total, nr_free, nr_split;
	struct ascend_huge_pool *pool;
	int nid;

	for_each_online_node(nid) {
		/* snapshot each pool's counters under its lock */
		pool = huge_pool(nid);
		spin_lock_irq(&pool->lock);
		nr_total = pool->nr_total;
		nr_free = pool->nr_free;
		spin_unlock_irq(&pool->lock);
		seq_printf(seq, "node: %d\nHuge_2M_total: %u\nHuge_2M_free:  %u\n",
				nid, nr_total, nr_free);

		pool = gigantic_pool(nid);
		spin_lock_irq(&pool->lock);
		nr_total = pool->nr_total;
		nr_free = pool->nr_free;
		nr_split = pool->nr_split;
		spin_unlock_irq(&pool->lock);
		seq_printf(seq, "Huge_1G_total: %u\nHuge_1G_free:  %u\nHuge_1G_split: %u\n",
				nr_total, nr_free, nr_split);
	}

	return 0;
}

/* Bind the stats printer to the seq_file single-show machinery. */
static int huge_control_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, huge_control_stat_show, NULL);
}

/* File operations for /proc/huge_control (read stats, write resize pairs). */
static const struct proc_ops huge_control_proc_ops = {
	.proc_open	= huge_control_proc_open,
	.proc_read_iter	= seq_read_iter,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= huge_control_proc_write,
};

/*
 * Singly linked list of boot-reserved 1G chunks; each chunk stores the
 * pointer to the next one in its own first bytes.
 */
static void *bootmem_list;

/* Drain bootmem_list, turning every boot chunk into a pool giant page. */
static void __init update_pool_use_bootmem(void)
{
	while (bootmem_list) {
		void *chunk = bootmem_list;
		struct page *page = virt_to_page(chunk);
		LIST_HEAD(page_list);

		/* should not fail for bootmem */
		make_new_giant_page(page);
		list_add_tail(&page->lru, &page_list);
		ascend_huge_pool_add_pages(page_to_nid(page), 1, &page_list);
		adjust_managed_page_count(page, PAGES_PER_PUD);

		/* advance using the next-pointer embedded in this chunk */
		bootmem_list = *((void **)chunk);
	}
}

/* Per-node count of 1G boot reservations that failed and fall back to 2M. */
static int fallback_count[MAX_NUMNODES];

/*
 * Boot-time reservation: grab @nr_pages 1G-aligned chunks from memblock on
 * @nid and thread them onto bootmem_list for update_pool_use_bootmem().
 * A shortfall is recorded in fallback_count[] for the 2M fallback path.
 */
static int __init ascend_huge_pool_alloc_bootmem(unsigned int nid, unsigned int nr_pages)
{
	/* unsigned to match @nr_pages and avoid signed/unsigned comparison */
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		void *addr;
		phys_addr_t phys;

		phys = memblock_alloc_range_nid_flags(PUD_SIZE, PUD_SIZE, 0,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid, true, MEMBLOCK_HOTPLUG);
		if (!phys) {
			/* %u: nid and both counts are unsigned int */
			pr_warn("during boot, node: %u alloc %u giant page failed, available %u\n",
					nid, nr_pages, i);
			fallback_count[nid] = nr_pages - i;
			break;
		}

		addr = phys_to_virt(phys);
		*((void **)addr) = bootmem_list;
		bootmem_list = addr;
	}

	return 0;
}

/* Parse "ascend_huge_pool=<nid>:<nr>[,...]" and reserve the boot memory. */
static int __init ascend_huge_pool_setup(char *s)
{
	/* __setup handlers return non-zero when the option was consumed */
	return huge_control_parase_str(s, ascend_huge_pool_alloc_bootmem) == 0;
}
__setup("ascend_huge_pool=", ascend_huge_pool_setup);

/*
 * For each node whose 1G boot reservation partially failed, hand the
 * shortfall to the generic 2M hugetlb pool instead.
 */
static void __init ascend_huge_pool_fallback(void)
{
	int nid;
	struct hstate *h = size_to_hstate(PMD_SIZE);

	for_each_online_node(nid) {
		unsigned long nr_2m;
		nodemask_t nodes_allowed;

		if (!fallback_count[nid])
			continue;

		/*
		 * Explicit widening keeps the arithmetic and the %lu
		 * specifier correct; the old "%ld" printed what is an
		 * unsigned long product (PMDS_PER_PUD derives from the
		 * UL-typed PUD_SIZE) with a signed specifier.
		 */
		nr_2m = (unsigned long)fallback_count[nid] * PMDS_PER_PUD;
		init_nodemask_of_node(&nodes_allowed, nid);
		set_max_huge_pages(h, nr_2m, nid, &nodes_allowed);
		pr_info("node: %d, fallback to 2M hugepages count: %lu\n",
				nid, nr_2m);
	}
}

/*
 * Initialize every per-node pool, expose /proc/huge_control and seed the
 * 1G pools with memory reserved at boot.
 */
static int __init ascend_huge_pool_init(void)
{
	struct proc_dir_entry *entry;
	int type, node;

	for (type = 0; type < ASCEND_HUGE_POOL_NR; type++) {
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct ascend_huge_pool *pool = &ascend_huge_pools[type][node];

			pool->nr_total = 0;
			pool->nr_free = 0;
			/* nr_split and split_list share a union: init per type */
			if (type == ASCEND_HUGE_POOL_1G)
				pool->nr_split = 0;
			else
				INIT_LIST_HEAD(&pool->split_list);
			INIT_LIST_HEAD(&pool->free_list);
			spin_lock_init(&pool->lock);
			pool->type = type;
		}
	}

	entry = proc_create("huge_control", 0644, NULL, &huge_control_proc_ops);
	if (!entry)
		pr_err("create huge_control failed\n");

	update_pool_use_bootmem();
	ascend_huge_pool_fallback();

	return 0;
}
/* Use fs_initcall because ascend_huge_pool_fallback should be called after
 * hugetlbfs, which use subsys_initcall.  */
fs_initcall(ascend_huge_pool_init)
