// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 */

#include <linux/hugetlb.h>
#include <linux/memory_patrol.h>
#include "internal.h"

/*
 * Emit the free-range descriptors for every free hugetlb page on @nid
 * into seq_file @m, one encode_free_range() record per huge page.
 *
 * Only hstates whose order is at least a pageblock are reported, and
 * hardware-poisoned pages are skipped.  The whole walk runs under
 * hugetlb_lock so the free lists cannot change underneath us.
 */
static void get_free_hugetlb(struct seq_file *m, int nid)
{
	struct hstate *h;
	unsigned long flags;
	struct page *page;
	unsigned long region;
	int i;

	spin_lock_irqsave(&hugetlb_lock, flags);
	for (i = 0; i < hugetlb_max_hstate; i++) {
		h = &hstates[i];
		/* No free pages in this hstate on any node. */
		if (!h->free_huge_pages)
			continue;

		/* Only ranges of at least one pageblock are of interest. */
		if (h->order < pageblock_order)
			continue;

		list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
			/* Poisoned pages must not be handed to the patrol. */
			if (PageHWPoison(page))
				continue;

			region = encode_free_range(page_to_pfn(page), h->order);
			seq_write(m, &region, sizeof(region));
		}
	}
	spin_unlock_irqrestore(&hugetlb_lock, flags);
}

static bool is_page_managed_by_hugetlb(unsigned long pfn)
{
	return pfn_valid(pfn) && PageHuge(pfn_to_page(pfn));
}

/*
 * Validate that @pfn is the head of a free hugetlb page of @order.
 *
 * Return 0 on success, -EINVAL for an invalid pfn, -EBUSY when the page
 * is not a free huge page of the expected order, and -EHWPOISON for a
 * hardware-poisoned page.  NOTE(review): callers in this file invoke
 * this under hugetlb_lock — presumably required; confirm.
 */
static inline int check_valid_free_hpage(unsigned long pfn, unsigned int order)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return -EINVAL;

	page = pfn_to_page(pfn);
	if (!PageHuge(page))
		return -EBUSY;
	if (!HPageFreed(page))
		return -EBUSY;
	if (compound_order(page) != order)
		return -EBUSY;

	return PageHWPoison(page) ? -EHWPOISON : 0;
}

/*
 * Detach @nr_pages base pages worth of free huge pages, starting at the
 * head page at @pfn, from their hstate free lists so the memory patrol
 * can own them.
 *
 * The whole run is validated first (every constituent huge page must be
 * a free, unpoisoned huge page of the same order), then moved to the
 * active list with a reference taken, all under hugetlb_lock so the
 * check-then-take sequence is atomic.
 *
 * Returns 0 on success or a negative errno (-EINVAL on bad geometry,
 * -EBUSY/-EHWPOISON from validation, -ENOMEM when taking the pages
 * would eat into the reserved pool).
 */
static int take_hugetlb_off_hstate(unsigned long pfn, unsigned long nr_pages)
{
	struct hstate *h;
	struct page *page;
	unsigned long flags;
	unsigned int order;
	unsigned long nr_hpages, i;
	unsigned long hpage_size;
	unsigned long size = PAGE_SIZE * nr_pages;
	struct page *head_page = pfn_to_page(pfn);
	int ret = 0;

	spin_lock_irqsave(&hugetlb_lock, flags);

	if (!PageHeadHuge(head_page)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * page_size() returns unsigned long; storing it in an unsigned int
	 * would truncate to 0 for >= 4GB huge pages (e.g. 16GB on powerpc)
	 * and make the size / hpage_size below divide by zero.
	 */
	hpage_size = page_size(head_page);
	if (!IS_ALIGNED(size, hpage_size)) {
		ret = -EINVAL;
		goto out;
	}

	/* size_to_hstate() returns NULL when no hstate matches. */
	h = size_to_hstate(hpage_size);
	if (!h || h->order < pageblock_order) {
		ret = -EINVAL;
		goto out;
	}

	/* Every huge page in the run must be free, unpoisoned and of the
	 * same order as the head page. */
	order = compound_order(head_page);
	nr_hpages = size / hpage_size;
	for (i = 0; i < nr_hpages; i++) {
		ret = check_valid_free_hpage(pfn, order);
		if (ret)
			goto out;

		pfn += 1 << order;
	}

	/* Do not dip into the reserved pool. */
	if (h->free_huge_pages - nr_hpages < h->resv_huge_pages) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0, page = head_page; i < nr_hpages; ++i) {
		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[page_to_nid(page)]--;
		page = nth_page(page, 1 << order);
	}

out:
	spin_unlock_irqrestore(&hugetlb_lock, flags);
	return ret;
}

/*
 * Return a run of huge pages previously detached by
 * take_hugetlb_off_hstate() back to their hstate free lists.
 *
 * @pfn:      pfn of the head page of the first huge page in the run
 * @nr_pages: total number of base pages in the run
 *            (NOTE(review): assumed to be a multiple of the huge page
 *            size — not re-validated here; confirm against the caller)
 *
 * Hardware-poisoned huge pages are skipped, i.e. deliberately left off
 * the free lists.  Always returns 0.
 */
static int add_hugetlb_to_hstate(unsigned long pfn, unsigned long nr_pages)
{
	struct hstate *h;
	unsigned long flags, i;
	int nid;
	struct page *page;
	struct page *head_page = pfn_to_page(pfn);
	unsigned long nr_hpages = nr_pages / compound_nr(head_page);
	unsigned int order = compound_order(head_page);

	h = size_to_hstate(page_size(head_page));
	spin_lock_irqsave(&hugetlb_lock, flags);

	for (i = 0, page = head_page; i < nr_hpages; ++i) {
		nid = page_to_nid(page);

		/* Poisoned pages stay out of the free lists. */
		if (PageHWPoison(page))
			goto next;

		/*
		 * Drop the reference taken by set_page_refcounted() in
		 * take_hugetlb_off_hstate(); it must be the last one, so
		 * anything else is a bug.
		 */
		if (!page_ref_dec_and_test(page))
			VM_BUG_ON_PAGE(true, page);

		list_move(&page->lru, &h->hugepage_freelists[nid]);
		SetHPageFreed(page);
		h->free_huge_pages++;
		h->free_huge_pages_node[nid]++;
next:
		/* Advance to the head of the next huge page in the run. */
		page = nth_page(page, 1 << order);
	}

	spin_unlock_irqrestore(&hugetlb_lock, flags);

	return 0;
}

/*
 * Memory-patrol backend for hugetlb: report free hugetlb ranges, and
 * take pages off / put them back onto the hstate free lists on request.
 */
static struct scan_mem_operations scan_mem_hugetlb_ops = {
	.name = "hugetlb",
	.sprint_free_pages = get_free_hugetlb,
	.is_page_managed = is_page_managed_by_hugetlb,
	.alloc_pages_pfn = take_hugetlb_off_hstate,
	.free_pages_pfn = add_hugetlb_to_hstate,
};

/* Register the hugetlb backend with the memory patrol at late init. */
static int __init register_mem_patrol_for_hugetlb(void)
{
	return register_mem_patrol(&scan_mem_hugetlb_ops);
}
late_initcall(register_mem_patrol_for_hugetlb);
