/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SEMINIX_MM_H
#define SEMINIX_MM_H

#include <utils/utils.h>
#include <seminix/string.h>
#include <seminix/mm_types.h>
#include <seminix/mmzone.h>
#include <seminix/tcb.h>
#include <asm/memory.h>

struct mmu_gather;

#ifndef __pa_symbol
/* Physical address of a kernel-image symbol (RELOC_HIDE defeats optimizer
 * assumptions about the pointer's provenance). */
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
/* Linear-map virtual address backing the given struct page. */
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
/* Linear-map alias of a kernel-image symbol. */
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

#ifndef mm_zero_struct_page
/* Zero an entire struct page. */
#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
#endif

/* No highmem here: every page is reachable through the linear map. */
#define page_address(page) page_to_virt(page)

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
    int refs = page_ref_count(page);

    /* Dropping a reference nobody holds is a caller bug. */
    BUG_ON(refs == 0);

    return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
    /* Atomically take a reference only while the count is non-zero. */
    return page_ref_add_unless(page, 1, 0);
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
    /* -1 is the "no mappings" base value of _mapcount. */
    atomic_set(&(page)->_mapcount, -1);
}

/* Number of mappings of this page; undoes the -1 bias of _mapcount. */
static inline int page_mapcount(struct page *page)
{
    return atomic_read(&page->_mapcount) + 1;
}

/* Resolve a linear-map address to the head page of its (compound) page. */
static inline struct page *virt_to_head_page(const void *x)
{
    return compound_head(virt_to_page(x));
}

/* Allocation order of a compound page; 0 for any non-head page. */
static inline unsigned int compound_order(struct page *page)
{
    /* The order is stashed in the first tail page by set_compound_order(). */
    return PageHead(page) ? page[1].compound_order : 0;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
    /* Stored in the first tail page; read back by compound_order(). */
    page[1].compound_order = order;
}

/* Accessors for the page's opaque private data word. */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* Encode the zone id into page->flags (inverse of page_zone_id()). */
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
    unsigned long flags = page->flags & ~(ZONEID_MASK << NODES_PGOFF);

    page->flags = flags | ((zone & ZONEID_MASK) << NODES_PGOFF);
}

/*
 * Record the page's placement. @pfn is currently unused: this is a
 * single-node configuration, so only the zone needs encoding.
 */
static inline void set_page_links(struct page *page, enum zone_type zone,
    unsigned long pfn)
{
    set_page_zone(page, zone);
}

static inline int page_zone_id(const struct page *page)
{
    /* Extract the zone id encoded by set_page_zone(). */
    return (page->flags >> NODES_PGOFF) & ZONEID_MASK;
}

static inline struct zone *page_zone(const struct page *page)
{
    return &NODE_DATA()->node_zones[page_zone_id(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
    /* Single-node configuration: every page lives on the one node. */
    return NODE_DATA();
}

static inline void clean_page_space(struct page *page)
{
    /* Clear the GFP "space" bits recorded in page->flags. */
    page->flags &= ~(GFP_SPACE_MASK);
}

/* Record the allocation space in page->flags, defaulting to GFP_KERNEL. */
static inline void set_page_space(struct page *page, gfp_t gfp_mask)
{
    gfp_t space = gfp_mask & GFP_SPACE_MASK;

    /* No space requested: fall back to the kernel space bits. */
    if (space == 0)
        space = GFP_KERNEL & GFP_SPACE_MASK;

    clean_page_space(page);
    page->flags |= space;
}

static inline int page_space(struct page *page)
{
    /* Decode the space id recorded by set_page_space(). */
    return (int)((page->flags & GFP_SPACE_MASK) >> GFP_SPACE_SHIFT);
}

extern void free_compound_page(struct page *page);
extern void free_unref_page(struct page *page);

/* Release a page whose last reference is gone, via the right free path. */
static inline void __put_page(struct page *page)
{
    /* Compound pages need their dedicated teardown. */
    if (unlikely(PageCompound(page))) {
        free_compound_page(page);
        return;
    }

    free_unref_page(page);
}

/* Drop a reference; frees the page when the count reaches zero. */
static inline void put_page(struct page *page)
{
    struct page *head = compound_head(page);

    /* Refcounting always happens on the head page. */
    if (put_page_testzero(head))
        __put_page(head);
}

/* Take an extra reference; the caller must already hold one. */
static inline void get_page(struct page *page)
{
    struct page *head = compound_head(page);

    /*
     * Getting a normal page or the head of a compound page
     * requires an already-elevated page->_refcount.
     */
    BUG_ON(page_ref_count(head) <= 0);
    page_ref_inc(head);
}

/* Release all memory held by memblock into the buddy allocator */
extern void free_area_init_nodes(void);

/*
 * Unmap the given range from the kernel address space; used by free_initmem
 * to drop the reserved kernel mapping after the __init sections are released
 */
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
/*
 * Release all __init* sections to the buddy allocator
 */
extern void free_initmem(void);

/* Print information about all memory */
extern void mem_print_memory_info(void);

extern unsigned long nr_managed_pages(void);
extern unsigned long nr_zone_free_pages(struct zone *zone);
extern unsigned long nr_free_pages(void);
extern unsigned long nr_zone_percpu_cache_pages(struct zone *zone);
extern unsigned long nr_percpu_cache_pages(int cpu);
extern unsigned long nr_total_physpages(void);
extern unsigned long nr_totalreserve_pages(void);

extern struct mm_struct *mm_struct_create(void);

static inline void mmget(struct mm_struct *mm)
{
    /* Take a user reference; paired with mmput(). */
    atomic_inc(&mm->mm_users);
}

extern void __mmput(struct mm_struct *mm);

/* Drop a user reference taken by mmget(); last one tears the mm down. */
static inline void mmput(struct mm_struct *mm)
{
    if (!atomic_dec_and_test(&mm->mm_users))
        return;

    __mmput(mm);
}

extern void mm_set_task(struct mm_struct *mm, struct tcb *tsk);
extern void mm_clear_task(struct tcb *tsk);

#endif /* !SEMINIX_MM_H */
