/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 */

#ifndef _LINUX_MM_PATROL_H
#define _LINUX_MM_PATROL_H

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/node.h>
#include <linux/mm.h>

/**
 * struct scan_mem_operations - callbacks provided by a memory-scan backend
 * @name:			backend identifier string
 *				(NOTE(review): looks read-only — could likely be
 *				 const char *; confirm no backend mutates it)
 * @sprint_free_pages:		emit this backend's free-page information for
 *				node @nid into seq_file @m
 * @is_page_managed:		return true if the page at @pfn is managed by
 *				this backend
 * @alloc_pages_pfn:		take @nr_pages pages starting at @pfn out of the
 *				free pool; returns 0 or a negative errno
 *				(presumed kernel convention — verify backends)
 * @free_pages_pfn:		give @nr_pages pages starting at @pfn back to
 *				the free pool; same return convention as above
 * @alloc_pages_pfn_fd:		variant of @alloc_pages_pfn
 *				(NOTE(review): "_fd" semantics not visible
 *				 here — confirm against the implementation)
 */
struct scan_mem_operations {
	char *name;
	void (*sprint_free_pages)(struct seq_file *m, int nid);
	bool (*is_page_managed)(unsigned long pfn);
	int (*alloc_pages_pfn)(unsigned long pfn, unsigned long nr_pages);
	int (*free_pages_pfn)(unsigned long pfn, unsigned long nr_pages);
	int (*alloc_pages_pfn_fd)(unsigned long pfn, unsigned long nr_pages);
};

#ifdef CONFIG_MEMORY_PATROL
/* VMA ops installed on patrol scan mappings; defined in the patrol module. */
extern const struct vm_operations_struct scan_pages_vmops;
/* Register/unregister a backend's scan_mem_operations with the patrol core. */
int register_mem_patrol(struct scan_mem_operations *ops);
void unregister_mem_patrol(struct scan_mem_operations *ops);
/* Hook patrol sysfs/state up to (or tear it down from) a NUMA node device. */
void mem_patrol_register_node(struct node *node);
void mem_patrol_unregister_node(struct node *node);
static inline bool vma_is_patrol_vma(struct vm_area_struct *vma)
{
	return vma && (vma->vm_ops == &scan_pages_vmops);
}

/*
 * encode_free_range - pack a free range into a single unsigned long
 *
 * The pfn is shifted into the physical-address bits, and the low bits
 * carry a one-hot tag at position (order - pageblock_order).
 *
 * The tag is built with 1UL so the shift is performed in unsigned long
 * width: the previous int-width "1 <<" is undefined behavior once the
 * shift count reaches 31, before the widening to the return type.
 *
 * NOTE(review): this assumes order >= pageblock_order — a smaller order
 * would make the shift count negative (UB). Confirm callers guarantee it.
 */
static inline unsigned long encode_free_range(unsigned long pfn, int order)
{
	return (pfn << PAGE_SHIFT) | (1UL << (order - pageblock_order));
}

#else
/* Registration is refused when patrol support is compiled out. */
static inline int register_mem_patrol(struct scan_mem_operations *ops)
{
	return -EOPNOTSUPP;
}

/*
 * Stub added: it was missing, so any caller pairing register/unregister
 * failed to build with CONFIG_MEMORY_PATROL=n even though the register
 * stub exists. Unregistering is a no-op here.
 */
static inline void unregister_mem_patrol(struct scan_mem_operations *ops) {}

/* Node hook-up is a no-op without patrol support. */
static inline void mem_patrol_register_node(struct node *node) {}
static inline void mem_patrol_unregister_node(struct node *node) {}

/* No patrol VMAs can exist when the feature is compiled out. */
static inline bool vma_is_patrol_vma(struct vm_area_struct *vma)
{
	return false;
}
#endif /* CONFIG_MEMORY_PATROL */
#endif /* _LINUX_MM_PATROL_H */
