/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SEMINIX_MM_TYPES_H
#define SEMINIX_MM_TYPES_H

#ifndef __ASSEMBLY__

#include <seminix/log2.h>
#include <seminix/rbtree.h>
#include <seminix/rwsem.h>
#include <seminix/page_types.h>
#include <asm/pgtable-types.h>
#include <asm/mmu.h>

/*
 * Reference-counted set of physical pages backing one or more VMAs
 * (see &struct vm_area_struct.pages).
 */
struct vma_pages {
    struct page **pages;    /* array of @nr_pages page pointers */
    atomic_t pages_ref;     /* refcount on this structure/array */
    int nr_pages;           /* number of entries in @pages */
    int page_shift;         /* log2 of the page size — presumably page_size == 1 << page_shift; verify against allocator */
    int page_size;          /* size of each page in bytes */
};

/*
 * A virtual memory area: one contiguous range of virtual addresses
 * within an address space, with uniform protection and flags.
 * VMAs are kept both on a sorted doubly-linked list and in an rbtree
 * rooted at mm_struct.mm_rb for O(log n) address lookup.
 */
struct vm_area_struct {
    unsigned long   vm_start;   /* start address of the range */
    unsigned long   vm_end;     /* end address — presumably exclusive (first byte after the area); confirm against VMA lookup code */

    /* Linked list of VMAs in the owning mm, sorted by address. */
    struct vm_area_struct *vm_next, *vm_prev;

    struct rb_node vm_rb;       /* node in mm_struct.mm_rb */
    unsigned long rb_subtree;   /* NOTE(review): augmented-rbtree metadata — exact meaning not visible here; confirm against rbtree augment callbacks */
    struct mm_struct *vm_mm;    /* address space this area belongs to */
    pgprot_t vm_page_prot;      /* page protection bits for this area */
    unsigned long vm_flags;     /* VMA flags (read/write/exec etc.) */

    struct vma_pages *pages;    /* backing pages, shared via pages_ref */
};

/*
 * An address space: the set of VMAs, the page tables, and the
 * bookkeeping shared by all threads that run in it.
 */
struct mm_struct {
    struct vm_area_struct *mmap;    /* head of the sorted VMA list */
    struct rb_root mm_rb;           /* rbtree of VMAs, keyed by address */
    u64 vmacache_seqnum;            /* bumped to invalidate per-task vmacaches (see struct vmacache) */
    unsigned long highest_vm_end;   /* highest vm_end among all VMAs */
    pgd_t *pgd;                     /* top-level page table for this mm */

    /**
     * @mm_users: The number of users including userspace.
     *
     * Use mmget()/mmget_not_zero()/mmput() to modify. When this
     * drops to 0 (i.e. when the task exits and there are no other
     * temporary reference holders), we also release a reference on
     * @mm_count (which may then free the &struct mm_struct if
     * @mm_count also drops to 0).
     */
    atomic_t mm_users;

    /**
     * @mm_count: The number of references to &struct mm_struct
     * (@mm_users count as 1).
     *
     * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
     * &struct mm_struct is freed.
     */
    atomic_t mm_count;

    atomic_long_t pgtables_bytes;	/* PTE page table pages */
    int map_count;			/* number of VMAs */
    spinlock_t page_table_lock;     /* protects page tables — presumably the coarse-grained fallback; confirm split-PTL policy */
    struct rw_semaphore mmap_sem;   /* serializes VMA list/tree modification vs. readers */

    spinlock_t  tl_lock;            /* protects @thread_list */
    struct list_head thread_list;  /* share all thread list */

    /* Architecture-specific MM context */
    mm_context_t context;

    /*
     * Count of in-flight TLB flush batches; see
     * inc_tlb_flush_pending() for the ordering rules.
     */
    atomic_t tlb_flush_pending;

    unsigned long start_brk, brk;   /* heap start and current break */
};

extern struct mm_struct init_mm;

/*
 * Initialize the TLB-flush-pending counter for a fresh mm.
 * Must be called before any inc/dec_tlb_flush_pending() on @mm.
 */
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
    atomic_set(&mm->tlb_flush_pending, 0);
}

/*
 * Announce that a batched TLB flush is about to begin on @mm.
 * Must be paired with dec_tlb_flush_pending() after the flush; the
 * increment must happen before the PTL-protected PTE modification
 * (see the ordering discussion below).
 */
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
    atomic_inc(&mm->tlb_flush_pending);
    /*
     * The only time this value is relevant is when there are indeed pages
     * to flush. And we'll only flush pages after changing them, which
     * requires the PTL.
     *
     * So the ordering here is:
     *
     *	atomic_inc(&mm->tlb_flush_pending);
     *	spin_lock(&ptl);
     *	...
     *	set_pte_at();
     *	spin_unlock(&ptl);
     *
     *				spin_lock(&ptl)
     *				mm_tlb_flush_pending();
     *				....
     *				spin_unlock(&ptl);
     *
     *	flush_tlb_range();
     *	atomic_dec(&mm->tlb_flush_pending);
     *
     * Because the increment is constrained by the PTL unlock, it thus
     * ensures that the increment is visible if the PTE modification is
     * visible. After all, if there is no PTE modification, nobody cares
     * about TLB flushes either.
     *
     * This very much relies on users (mm_tlb_flush_pending() and
     * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
     * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
     * locks (PPC) the unlock of one doesn't order against the lock of
     * another PTL.
     *
     * The decrement is ordered by the flush_tlb_range(), such that
     * mm_tlb_flush_pending() will not return false unless all flushes have
     * completed.
     */
}

/*
 * Announce that a batched TLB flush on @mm has completed.
 * Must only be called after the flush (e.g. flush_tlb_range()) is done.
 */
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
    /*
     * See inc_tlb_flush_pending().
     *
     * This cannot be smp_mb__before_atomic() because smp_mb() simply does
     * not order against TLB invalidate completion, which is what we need.
     *
     * Therefore we must rely on tlb_flush_*() to guarantee order.
     */
    atomic_dec(&mm->tlb_flush_pending);
}

/*
 * Return true if a batched TLB flush is in flight on @mm.
 * Caller must hold the PTL covering the PTE(s) of interest.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
    /*
     * Must be called after having acquired the PTL; orders against that
     * PTLs release and therefore ensures that if we observe the modified
     * PTE we must also observe the increment from inc_tlb_flush_pending().
     *
     * That is, it only guarantees to return true if there is a flush
     * pending for _this_ PTL.
     */
    return atomic_read(&mm->tlb_flush_pending);
}

/*
 * Return true if MORE THAN ONE batched TLB flush is in flight on @mm,
 * i.e. a flush concurrent with the caller's own (the caller's own
 * inc_tlb_flush_pending() accounts for the first count).
 */
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
    /*
     * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
     * for which there is a TLB flush pending in order to guarantee
     * we've seen both that PTE modification and the increment.
     *
     * (no requirement on actually still holding the PTL, that is irrelevant)
     */
    return atomic_read(&mm->tlb_flush_pending) > 1;
}

/*
 * The per task VMA cache array: a tiny direct-mapped cache of recently
 * used VMAs, avoiding the rbtree walk for repeated lookups.
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)     /* 4 slots */
#define VMACACHE_MASK (VMACACHE_SIZE - 1)       /* slot index mask */

struct vmacache {
    u64 seqnum;     /* copy of mm_struct.vmacache_seqnum; a mismatch means the cache is stale */
    struct vm_area_struct *vmas[VMACACHE_SIZE];     /* cached VMA pointers */
};

#endif /* !__ASSEMBLY__ */
#endif /* !SEMINIX_MM_TYPES_H */
