/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * Description: Header file for hpmm module.
 */
#ifndef _LINUX_HPMM_H
#define _LINUX_HPMM_H

#include <linux/mm.h>

#ifdef CONFIG_EULEROS_HYBRID_MM

#include <uapi/linux/hpmm.h>

/* Max bytes for a device serial-number buffer. */
#define MAX_SN_BYTE		20
/* Max segments in a node's linear-mapping table (see hpmm_node_info). */
#define MAX_PHY_SGE		32
/* Retry limit, presumably for take_pfn_off_buddy().
 * NOTE(review): "RETYR" is a typo for "RETRY" — renaming would break
 * existing users of this public macro, so only flagging it here. */
#define MAX_TAKE_PFN_RETYR	10
/* BBU metadata region size in MiB (consumed via MB_TO_PAGES below). */
#define BBU_META_MEMORY		34
#define MB_TO_BYTES(mb)		((mb) << 20UL)
/* MiB -> pages; << 8 assumes 4 KiB pages (1 MiB == 256 pages) —
 * TODO(review): confirm this is never built with a larger PAGE_SIZE. */
#define MB_TO_PAGES(mb)		((mb) << 8UL)
#define BBU_META_NR_PAGES	MB_TO_PAGES(BBU_META_MEMORY)
#define BBU_THP_MAPPING_OFFSET	(BBU_META_NR_PAGES)

/* Defined by the hpmm driver; compared against in is_file_hpmm(). */
extern struct file_operations hpmm_fops;

/*
 * Per-node description of the physical segments that back the hpmm
 * linear mapping. Entries 0..max_index-1 of the parallel arrays are
 * valid; each pairs a start physical address with its size.
 */
struct hpmm_node_info {
	/* linear mapping start phys addr */
	unsigned long phy_addr[MAX_PHY_SGE];
	/* linear mapping phys size */
	unsigned long phy_size[MAX_PHY_SGE];
	/* linear mapping seg num */
	unsigned long max_index;
};

/*
 * Callback table a hpmm backend provides for page/page-table lifecycle
 * management. All hooks operate on a VMA context; ownership of pages
 * passed to the free_* hooks transfers to the callee — TODO(review):
 * confirm against the implementations.
 */
struct hpmm_operations {
	/* Obtain a swap entry for @page. */
	swp_entry_t (*get_swap_page)(struct page *page);
	/* Release the swap entry previously obtained for @page. */
	void (*put_swap_page)(struct page *page, swp_entry_t entry);
	/* Allocate/free a page used as a page table for @vma at @addr. */
	struct page *(*alloc_pte_page)(struct vm_area_struct *vma, unsigned long addr, int order);
	void (*free_pte_page)(struct vm_area_struct *vma, unsigned long addr, struct page *page);
	/* Allocate/free a data page of @order for @vma at @addr. */
	struct page *(*alloc_page)(struct vm_area_struct *vma, unsigned long addr,
				   int order, gfp_t gfp);
	void (*free_page)(struct vm_area_struct *vma, unsigned long addr, struct page *page);
	/* hpmm functions frees user-level page tables of a process */
	void (*free_pgtable_range)(struct mmu_gather *tlb, unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling);
};

/*
 * One registered hpmm backend device: identity, its operations table,
 * and per-device state guarded by hpmm_lock — TODO(review): confirm
 * exactly which fields hpmm_lock protects.
 */
struct hpmm_device {
	/* Common field. */
	int mem_type;
	const char *name;
	struct hpmm_operations *hpmm_ops;
	int dev_num;
	/* hpmm_persist_device list */
	struct list_head p_dev;
	/* Private field. */
	unsigned long private;
	/* the process uuid */
	unsigned long pid_uuid;
	struct mutex hpmm_lock;
};

/*
 * Return true if @file is backed by the hpmm device (its f_op table is
 * hpmm_fops).
 *
 * Tolerates a NULL @file so callers need not pre-check; this also makes
 * the behavior consistent with the !CONFIG_EULEROS_HYBRID_MM stub,
 * which accepts any argument.
 */
static inline bool is_file_hpmm(struct file *file)
{
	return file && file->f_op == &hpmm_fops;
}

static inline bool is_hpmm_page(struct vm_area_struct *vma)
{
	return vma != NULL && !!(vma->vm_flags & VM_HYBRID_MM);
}

/*
 * Return the peer node of @preferred_nid (NODE_DATA(nid)->peer_node)
 * for hybrid-memory allocation fallback.
 *
 * NOTE(review): in the @nodemask branch the node_clear()/node_set()
 * pair re-sets the very bit it cleared, so the mask is unchanged on
 * return and both branches are effectively identical. Possibly the
 * intent was to replace @preferred_nid with the peer node in the mask
 * (node_set(ret_preferred_nid, ...)) — confirm against callers before
 * simplifying.
 */
static inline int update_preferred_nid(int preferred_nid, nodemask_t *nodemask)
{
	int ret_preferred_nid = preferred_nid;

	if (nodemask) {
		node_clear(preferred_nid, *nodemask);
		ret_preferred_nid = NODE_DATA(preferred_nid)->peer_node;
		node_set(preferred_nid, *nodemask);
	} else {
		ret_preferred_nid = NODE_DATA(preferred_nid)->peer_node;
	}

	return ret_preferred_nid;
}

/* Page-fault handler entry point for hpmm-backed VMAs. */
vm_fault_t hpmm_fault(struct vm_fault *vmf);
/* Free user page tables in [addr, end), bounded by floor/ceiling, for
 * persistent hpmm memory (mirrors free_pgd_range() semantics —
 * TODO(review): confirm). */
void hpmm_persist_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling);
/* Map [va_start, va_end) of @vma onto pfns starting at @start_pfn;
 * @recover presumably selects the restore-after-reboot path — verify. */
int hpmm_fault_linear_mapping(struct vm_area_struct *vma,
				 unsigned long va_start,
				 unsigned long va_end,
				 unsigned long start_pfn,
				 bool recover);
/* Look up the linear-mapping segment table for @node. */
struct hpmm_node_info *hpmm_get_node_info(int node);
void hpmm_free_reserved_persistent_memory(void);
/* Detach @nr_pages pages from node @nid's allocator; returns the first
 * page or NULL — TODO(review): confirm failure convention. */
struct page *take_pages_off_node(int nid, unsigned long nr_pages, gfp_t gfp_mask);
/* True if node @nid is persistent memory. */
bool is_node_pmem(int nid);
/* Isolate the @alloc_order block containing @pfn from the buddy
 * allocator (see MAX_TAKE_PFN_RETYR retry limit). */
int take_pfn_off_buddy(unsigned long pfn, unsigned int alloc_order, gfp_t gfp);
/* True when battery-backed-up (BBU) memory support is active. */
bool bbu_memory_on(void);
/* Validate an mmap request (flags/len/addr) against an hpmm file. */
bool check_file_hpmm(struct file *file, unsigned long flags,
		     unsigned long len, unsigned long addr);
void hpmm_create_debugfs(void);
void hpmm_remove_debugfs(void);
/* Decide whether @page should be skipped given prior result @ret —
 * TODO(review): semantics of @ret unclear from this header. */
int check_should_skip_page(struct page *page, int ret);

#else /* CONFIG_EULEROS_HYBRID_MM */
/*
 * Stubs for !CONFIG_EULEROS_HYBRID_MM builds: no file or VMA is ever
 * hpmm-owned, BBU memory is reported off, freeing reserved persistent
 * memory is a no-op, and node selection is passed through unchanged.
 */
static inline bool is_file_hpmm(struct file *file)
{
	return false;
}

static inline bool is_hpmm_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool bbu_memory_on(void)
{
	return false;
}

static inline void hpmm_free_reserved_persistent_memory(void)
{
}

/* Pass-through: without hybrid MM there is no peer node to redirect to. */
static inline int update_preferred_nid(int preferred_nid, nodemask_t *nodemask)
{
	return preferred_nid;
}
#endif /* CONFIG_EULEROS_HYBRID_MM */

#endif /* _LINUX_HPMM_H */
