#include <seminix/pgtable.h>
#include <seminix/mm.h>
#include <seminix/mmap.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>

/* Clear every kernel PTE covering [addr, end) under @pmd. */
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
    pte_t *ptep = pte_offset_kernel(pmd, addr);

    for (;;) {
        /* Entries must be either empty or present; anything else is a bug. */
        pte_t old = ptep_get_and_clear(addr, ptep);

        WARN_ON(!pte_none(old) && !pte_present(old));
        addr += UTILS_PAGE_SIZE;
        if (addr == end)
            break;
        ptep++;
    }
}

/* Walk the pmds under @pud and clear all mappings in [addr, end). */
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
    pmd_t *pmd = pmd_offset(pud, addr);
    unsigned long next;

    do {
        next = pmd_addr_end(addr, end);
        /* A huge mapping is torn down by clearing the pmd entry itself. */
        if (pmd_clear_huge(pmd))
            continue;
        if (pmd_none_or_clear_bad(pmd))
            continue;
        vunmap_pte_range(pmd, addr, next);
    } while (pmd++, addr = next, addr != end);
}

/* Walk the puds under @p4d and clear all mappings in [addr, end). */
static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
    pud_t *pud = pud_offset(p4d, addr);
    unsigned long next;

    do {
        next = pud_addr_end(addr, end);
        /* A huge mapping is torn down by clearing the pud entry itself. */
        if (pud_clear_huge(pud))
            continue;
        if (pud_none_or_clear_bad(pud))
            continue;
        vunmap_pmd_range(pud, addr, next);
    } while (pud++, addr = next, addr != end);
}

/* Walk the p4ds under @pgd and clear all mappings in [addr, end). */
static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
    p4d_t *p4d = p4d_offset(pgd, addr);
    unsigned long next;

    do {
        next = p4d_addr_end(addr, end);
        p4d_clear_huge(p4d); /* return value intentionally ignored here */
        if (p4d_none_or_clear_bad(p4d))
            continue;
        vunmap_pud_range(p4d, addr, next);
    } while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to unmap_kernel_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
static void vunmap_range_noflush(unsigned long start, unsigned long end)
{
    unsigned long addr = start;
    unsigned long next;
    pgd_t *pgd = pgd_offset_k(addr);

    BUG_ON(addr >= end);
    /* Top-level walk over the kernel page table; see comment above. */
    do {
        next = pgd_addr_end(addr, end);
        if (!pgd_none_or_clear_bad(pgd))
            vunmap_p4d_range(pgd, addr, next);
    } while (pgd++, addr = next, addr != end);
}

/**
 * unmap_kernel_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @size: size in bytes of the VM area to unmap (so the unmapped range is
 *        [@addr, @addr + @size), end non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
    unsigned long end = addr + size;

    flush_cache_vunmap(addr, end);
    vunmap_range_noflush(addr, end);
    flush_tlb_kernel_range(addr, end);
}

/*
 * Clear all user PTEs in [addr, end) under @pmd, then free the pte table
 * page itself if the whole PMD-sized region it serves lies within
 * [floor, ceiling).
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
    unsigned long addr, unsigned long end,
    unsigned long floor, unsigned long ceiling)
{
    pte_t *pte;
    struct page *ptepage;
    unsigned long start;

    start = addr;
    pte = pte_offset_map(pmd, addr);
    do {
        pte_t ptent = ptep_get_and_clear(addr, pte);
        WARN_ON(!pte_none(ptent) && !pte_present(ptent));
    } while (pte++, addr += UTILS_PAGE_SIZE, addr != end);

    /* Same floor/ceiling dance as the upper levels; see free_pgd_range(). */
    start &= PMD_MASK;
    if (start < floor)
        return;
    if (ceiling) {
        ceiling &= PMD_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        return;

    /*
     * BUG FIX: the original looked up pte_offset_map(pmd, addr) with
     * addr == end and then freed pte_page(*pte) -- the page mapped by a
     * PTE that the loop above had already cleared. What must be freed is
     * the page backing the pte table itself, taken from the pmd entry
     * BEFORE it is cleared (pmd_clear() would erase the reference).
     * NOTE(review): assumes pmd_page() of a table entry yields the pte
     * table's struct page, as in Linux -- confirm against asm headers.
     */
    ptepage = pmd_page(*pmd);
    pmd_clear(pmd);
    pte_free_tlb(tlb, ptepage, start);
    mm_dec_nr_ptes(tlb->mm);
}

/*
 * Walk the pmds under @pud, tearing down the pte level for [addr, end),
 * then free the pmd table itself if the whole PUD-sized region it serves
 * lies within [floor, ceiling).
 */
static void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
    unsigned long addr, unsigned long end,
    unsigned long floor, unsigned long ceiling)
{
    pmd_t *pmd;
    unsigned long next;
    unsigned long start;

    start = addr;
    pmd = pmd_offset(pud, addr);
    do {
        next = pmd_addr_end(addr, end);

        if (pmd_clear_huge(pmd))
            continue;
        if (pmd_none_or_clear_bad(pmd))
            continue;
        /*
         * BUG FIX: the original passed 'end' here. Each pte table only
         * covers [addr, next); passing 'end' made free_pte_range() walk
         * its pte pointer past the table when the range spans several
         * pmds. Pass 'next', matching free_pud_range()/free_p4d_range().
         */
        free_pte_range(tlb, pmd, addr, next, floor, ceiling);
    } while (pmd++, addr = next, addr != end);

    /* Same floor/ceiling dance as the other levels; see free_pgd_range(). */
    start &= PUD_MASK;
    if (start < floor)
        return;
    if (ceiling) {
        ceiling &= PUD_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        return;

    pmd = pmd_offset(pud, start);
    pud_clear(pud);
    pmd_free_tlb(tlb, pmd, start);
    mm_dec_nr_pmds(tlb->mm);
}

/*
 * Walk the puds under @p4d, tearing down the pmd level for [addr, end),
 * then free the pud table itself if the whole P4D-sized region it serves
 * lies within [floor, ceiling).
 */
static void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
    unsigned long addr, unsigned long end,
    unsigned long floor, unsigned long ceiling)
{
    unsigned long base = addr;
    unsigned long next;
    pud_t *pud = pud_offset(p4d, addr);

    do {
        next = pud_addr_end(addr, end);

        if (pud_clear_huge(pud))
            continue;
        if (pud_none_or_clear_bad(pud))
            continue;
        free_pmd_range(tlb, pud, addr, next, floor, ceiling);
    } while (pud++, addr = next, addr != end);

    /* Same floor/ceiling dance as the other levels; see free_pgd_range(). */
    base &= P4D_MASK;
    if (base < floor)
        return;
    if (ceiling) {
        ceiling &= P4D_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        return;

    pud = pud_offset(p4d, base);
    p4d_clear(p4d);
    pud_free_tlb(tlb, pud, base);
    mm_dec_nr_puds(tlb->mm);
}

/*
 * Walk the p4ds under @pgd, tearing down the pud level for [addr, end),
 * then free the p4d table itself if the whole PGD-sized region it serves
 * lies within [floor, ceiling).
 */
static void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
    unsigned long addr, unsigned long end,
    unsigned long floor, unsigned long ceiling)
{
    unsigned long base = addr;
    unsigned long next;
    p4d_t *p4d = p4d_offset(pgd, addr);

    do {
        next = p4d_addr_end(addr, end);

        p4d_clear_huge(p4d); /* return value intentionally ignored here */
        if (p4d_none_or_clear_bad(p4d))
            continue;
        free_pud_range(tlb, p4d, addr, next, floor, ceiling);
    } while (p4d++, addr = next, addr != end);

    /* Same floor/ceiling dance as the other levels; see free_pgd_range(). */
    base &= PGDIR_MASK;
    if (base < floor)
        return;
    if (ceiling) {
        ceiling &= PGDIR_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        return;

    p4d = p4d_offset(pgd, base);
    pgd_clear(pgd);
    p4d_free_tlb(tlb, p4d, base);
}

static void free_pgd_range(struct mmu_gather *tlb,
    unsigned long addr, unsigned long end,
    unsigned long floor, unsigned long ceiling)
{
    pgd_t *pgd;
    unsigned long next;

    /*
     * The next few lines have given us lots of grief...
     *
     * Why are we testing PMD* at this top level?  Because often
     * there will be no work to do at all, and we'd prefer not to
     * go all the way down to the bottom just to discover that.
     *
     * Why all these "- 1"s?  Because 0 represents both the bottom
     * of the address space and the top of it (using -1 for the
     * top wouldn't help much: the masks would do the wrong thing).
     * The rule is that addr 0 and floor 0 refer to the bottom of
     * the address space, but end 0 and ceiling 0 refer to the top
     * Comparisons need to use "end - 1" and "ceiling - 1" (though
     * that end 0 case should be mythical).
     *
     * Wherever addr is brought up or ceiling brought down, we must
     * be careful to reject "the opposite 0" before it confuses the
     * subsequent tests.  But what about where end is brought down
     * by PMD_SIZE below? no, end can't go down to 0 there.
     *
     * Whereas we round start (addr) and ceiling down, by different
     * masks at different levels, in order to test whether a table
     * now has no other vmas using it, so can be freed, we don't
     * bother to round floor or end up - the tests don't need that.
     */

    addr &= PMD_MASK;
    if (addr < floor) {
        addr += PMD_SIZE;
        if (!addr)
            return;
    }
    if (ceiling) {
        ceiling &= PMD_MASK;
        if (!ceiling)
            return;
    }
    if (end - 1 > ceiling - 1)
        end -= PMD_SIZE;
    if (addr > end - 1)
        return;

    pgd = pgd_offset(tlb->mm->pgd, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd))
            continue;
        free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
    } while (pgd++, addr = next, addr != end);
}

/*
 * Free the page tables backing the vma list starting at @vma, bounded by
 * [floor, ceiling) so tables shared with untouched neighbours survive.
 */
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
    unsigned long floor, unsigned long ceiling)
{
    struct vm_area_struct *next;

    while (vma) {
        unsigned long addr = vma->vm_start;

        /*
         * Optimization: batch nearby (within PMD_SIZE) non-hugetlb
         * vmas into a single free_pgd_range() call.
         */
        next = vma->vm_next;
        while (next && !is_vm_hugetlb_page(next) &&
               next->vm_start <= vma->vm_end + PMD_SIZE) {
            vma = next;
            next = vma->vm_next;
        }

        free_pgd_range(tlb, addr, vma->vm_end,
            floor, next ? next->vm_start : ceiling);
        vma = next;
    }
}

/*
 * Populate base-page PTEs for [addr, end) starting at @phys_addr.
 * @max_page_shift is unused at this level (base pages only).
 * Returns 0 or -ENOMEM if the pte table cannot be allocated.
 */
static int remap_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
            unsigned long addr, unsigned long end,
            phys_addr_t phys_addr, unsigned int max_page_shift)
{
    u64 pfn = phys_addr >> UTILS_PAGE_SHIFT;
    pte_t *pte = pte_alloc_map(vma->vm_mm, pmd, addr);

    if (!pte)
        return -ENOMEM;
    do {
        /* Each slot must be empty before we install a mapping. */
        BUG_ON(!pte_none(*pte));

        set_pte_at(vma->vm_mm, addr, pte, pfn_pte(pfn, vma->vm_page_prot));
        pfn++;
        pte++;
        addr += UTILS_PAGE_SIZE;
    } while (addr != end);
    return 0;
}

/*
 * Try to install a single PMD-sized huge mapping for [addr, end).
 * Returns nonzero on success, 0 when a huge mapping is not possible
 * (caller falls back or fails).
 */
static int vmap_try_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
            unsigned long addr, unsigned long end,
            phys_addr_t phys_addr, unsigned int max_page_shift)
{
    if (max_page_shift < PMD_SHIFT ||
        !arch_vmap_pmd_supported(vma->vm_page_prot))
        return 0;

    /* The range and the physical address must exactly fit one PMD. */
    if ((end - addr) != PMD_SIZE ||
        !IS_ALIGNED(addr, PMD_SIZE) ||
        !IS_ALIGNED(phys_addr, PMD_SIZE))
        return 0;

    /* An existing pte table under this pmd must be reclaimable first. */
    if (pmd_present(*pmd) && !pmd_free_pte_page(vma->vm_mm, pmd, addr))
        return 0;

    return pmd_set_huge(pmd, phys_addr, vma->vm_page_prot);
}

/*
 * Map [addr, end) at the pmd level: huge pmds for hugetlb vmas that
 * request them, otherwise base pages via remap_pte_range().
 */
static inline int remap_pmd_range(struct vm_area_struct *vma, pud_t *pud,
            unsigned long addr, unsigned long end,
            phys_addr_t phys_addr, unsigned int max_page_shift)
{
    unsigned long next;
    pmd_t *pmd = pmd_alloc(vma->vm_mm, pud, addr);

    if (!pmd)
        return -ENOMEM;
    BUG_ON(pmd_trans_huge(*pmd));
    do {
        next = pmd_addr_end(addr, end);

        if (is_vm_hugetlb_page(vma) && vm_hugtlb_map_pmd(vma)) {
            /* Huge mapping was requested: no base-page fallback. */
            if (!vmap_try_huge_pmd(vma, pmd, addr, next, phys_addr,
                           max_page_shift))
                return -EINVAL;
        } else if (remap_pte_range(vma, pmd, addr, next, phys_addr,
                       max_page_shift)) {
            return -ENOMEM;
        }
    } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
    return 0;
}

/*
 * Try to install a single PUD-sized huge mapping for [addr, end).
 * Returns nonzero on success, 0 when a huge mapping is not possible.
 */
static int vmap_try_huge_pud(struct vm_area_struct *vma, pud_t *pud,
            unsigned long addr, unsigned long end,
            phys_addr_t phys_addr, unsigned int max_page_shift)
{
    if (max_page_shift < PUD_SHIFT ||
        !arch_vmap_pud_supported(vma->vm_page_prot))
        return 0;

    /* The range and the physical address must exactly fit one PUD. */
    if ((end - addr) != PUD_SIZE ||
        !IS_ALIGNED(addr, PUD_SIZE) ||
        !IS_ALIGNED(phys_addr, PUD_SIZE))
        return 0;

    /* An existing pmd table under this pud must be reclaimable first. */
    if (pud_present(*pud) && !pud_free_pmd_page(vma->vm_mm, pud, addr))
        return 0;

    return pud_set_huge(pud, phys_addr, vma->vm_page_prot);
}

/*
 * Map [addr, end) at the pud level: huge puds for hugetlb vmas that
 * request them, otherwise descend via remap_pmd_range().
 */
static inline int remap_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
            unsigned long addr, unsigned long end,
            phys_addr_t phys_addr, unsigned int max_page_shift)
{
    unsigned long next;
    pud_t *pud = pud_alloc(vma->vm_mm, p4d, addr);

    if (!pud)
        return -ENOMEM;
    do {
        next = pud_addr_end(addr, end);

        if (is_vm_hugetlb_page(vma) && vm_hugtlb_map_pud(vma)) {
            /* Huge mapping was requested: no fallback to lower levels. */
            if (!vmap_try_huge_pud(vma, pud, addr, next, phys_addr,
                           max_page_shift))
                return -EINVAL;
        } else if (remap_pmd_range(vma, pud, addr, next, phys_addr,
                       max_page_shift)) {
            return -ENOMEM;
        }
    } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
    return 0;
}

/*
 * Try to install a single P4D-sized huge mapping for [addr, end).
 * Returns nonzero on success, 0 when a huge mapping is not possible.
 */
static int vmap_try_huge_p4d(struct vm_area_struct *vma, p4d_t *p4d,
            unsigned long addr, unsigned long end,
            phys_addr_t phys_addr, unsigned int max_page_shift)
{
    if (max_page_shift < P4D_SHIFT ||
        !arch_vmap_p4d_supported(vma->vm_page_prot))
        return 0;

    /* The range and the physical address must exactly fit one P4D. */
    if ((end - addr) != P4D_SIZE ||
        !IS_ALIGNED(addr, P4D_SIZE) ||
        !IS_ALIGNED(phys_addr, P4D_SIZE))
        return 0;

    /* An existing pud table under this p4d must be reclaimable first. */
    if (p4d_present(*p4d) && !p4d_free_pud_page(vma->vm_mm, p4d, addr))
        return 0;

    return p4d_set_huge(p4d, phys_addr, vma->vm_page_prot);
}

/*
 * Map [addr, end) at the p4d level: huge p4ds for hugetlb vmas that
 * request them, otherwise descend via remap_pud_range().
 */
static inline int remap_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
    unsigned long addr, unsigned long end,
    phys_addr_t phys_addr, unsigned int max_page_shift)
{
    unsigned long next;
    p4d_t *p4d = p4d_alloc(vma->vm_mm, pgd, addr);

    if (!p4d)
        return -ENOMEM;
    do {
        next = p4d_addr_end(addr, end);

        if (is_vm_hugetlb_page(vma) && vm_hugtlb_map_p4d(vma)) {
            /* Huge mapping was requested: no fallback to lower levels. */
            if (!vmap_try_huge_p4d(vma, p4d, addr, next, phys_addr,
                           max_page_shift))
                return -EINVAL;
        } else if (remap_pud_range(vma, p4d, addr, next, phys_addr,
                       max_page_shift)) {
            return -ENOMEM;
        }
    } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
    return 0;
}

/*
 * Map [addr, end) to @phys_addr in @vma's page table, up to pages of
 * 1 << max_page_shift bytes. Flushes the cache range but leaves TLB
 * invalidation ("noflush") to the caller.
 */
static int vmap_range_noflush(struct vm_area_struct *vma,
    unsigned long addr, unsigned long end,
    phys_addr_t phys_addr, unsigned int max_page_shift)
{
    pgd_t *pgd = pgd_offset(vma->vm_mm->pgd, addr);
    unsigned long next;
    int err = 0;

    flush_cache_range(vma, addr, end);
    do {
        next = pgd_addr_end(addr, end);
        err = remap_p4d_range(vma, pgd, addr, next, phys_addr,
                      max_page_shift);
        if (err)
            break;
    } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

    return err;
}

/*
 * Build page tables for @vma, mapping its pages[] array one page (base
 * or huge, per vma->page_size) at a time. Returns 0 or a negative errno.
 */
int alloc_pgtables(struct vm_area_struct *vma)
{
    unsigned long addr, end, step;
    struct page **pages;
    int page_shift = UTILS_PAGE_SHIFT;
    int i, nr, err;

    if (!vma)
        return -EINVAL;

    if (is_vm_hugetlb_page(vma)) {
        /* Negative result is an errno from vm_hugetlb_pageshift(). */
        page_shift = vm_hugetlb_pageshift(vma->page_size);
        if (page_shift < 0)
            return page_shift;
    }

    addr = vma->vm_start;
    end = PAGE_ALIGN(vma->vm_end);
    pages = vma->pages;
    nr = (end - addr) >> UTILS_PAGE_SHIFT;
    BUG_ON(addr >= end);

    step = 1UL << page_shift;
    /* Advance i by the number of base pages each mapping consumes. */
    for (i = 0; i < nr; i += 1U << (page_shift - UTILS_PAGE_SHIFT)) {
        err = vmap_range_noflush(vma, addr, addr + step,
                     __pa(page_address(pages[i])), page_shift);
        if (err)
            return err;
        addr += step;
    }
    return 0;
}






#ifndef __PAGETABLE_P4D_FOLDED
/*
 * Allocate p4d page table.
 * We've already handled the fast-path in-line.
 */
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
    p4d_t *new = p4d_alloc_one(mm, address);

    if (!new)
        return -ENOMEM;

    spin_lock(&mm->page_table_lock);
    if (!pgd_present(*pgd)) {
        smp_wmb(); /* See comment in pmd_install() */
        pgd_populate(mm, pgd, new);
    } else {
        /* Lost the race: another thread already installed a p4d table. */
        p4d_free(mm, new);
    }
    spin_unlock(&mm->page_table_lock);
    return 0;
}
#endif /* __PAGETABLE_P4D_FOLDED */

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
{
    pud_t *new = pud_alloc_one(mm, address);

    if (!new)
        return -ENOMEM;

    spin_lock(&mm->page_table_lock);
    if (p4d_present(*p4d)) {
        /* Lost the race: another thread already installed a pud table. */
        pud_free(mm, new);
    } else {
        mm_inc_nr_puds(mm);
        smp_wmb(); /* See comment in pmd_install() */
        p4d_populate(mm, p4d, new);
    }
    spin_unlock(&mm->page_table_lock);
    return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
    pmd_t *new = pmd_alloc_one(mm, address);

    if (!new)
        return -ENOMEM;

    spin_lock(&mm->page_table_lock);
    if (pud_present(*pud)) {
        /* Lost the race: another thread already installed a pmd table. */
        pmd_free(mm, new);
    } else {
        mm_inc_nr_pmds(mm);
        smp_wmb(); /* See comment in pmd_install() */
        pud_populate(mm, pud, new);
    }
    spin_unlock(&mm->page_table_lock);
    return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */

/*
 * Publish the pte page *pte into @pmd under the page_table_lock.
 * On success *pte is set to NULL (ownership moved to the page table);
 * if the pmd was already populated, *pte is left for the caller to free.
 */
static void pmd_install(struct mm_struct *mm, pmd_t *pmd, struct page **pte)
{
    spin_lock(&mm->page_table_lock);
    if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
        mm_inc_nr_ptes(mm);
        /*
         * All pte-page setup (page clearing, pte lock init) must be
         * visible before the table is published in the pmd, so that
         * lockless page-table walkers never see a half-initialised
         * table.
         *
         * The read side is a chain of data-dependent loads; every
         * mainstream CPU except alpha orders those naturally (alpha
         * provides smp_rmb() in its page table accessors).
         */
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
        pmd_populate(mm, pmd, *pte);
        *pte = NULL;	/* ownership transferred to the page table */
    }
    spin_unlock(&mm->page_table_lock);
}

/* Slow path of pte_alloc(): allocate and install a pte table page. */
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
    struct page *new = pte_alloc_one(mm);

    if (!new)
        return -ENOMEM;

    pmd_install(mm, pmd, &new);
    /* pmd_install() NULLs 'new' on success; otherwise we raced and free it. */
    if (new)
        pte_free(mm, new);
    return 0;
}






/* Report a corrupt pgd entry and clear it so the walk can continue. */
void pgd_clear_bad(pgd_t *pgd)
{
    pgd_ERROR(*pgd);
    pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
/* Report a corrupt p4d entry and clear it so the walk can continue. */
void p4d_clear_bad(p4d_t *p4d)
{
    p4d_ERROR(*p4d);
    p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
/* Report a corrupt pud entry and clear it so the walk can continue. */
void pud_clear_bad(pud_t *pud)
{
    pud_ERROR(*pud);
    pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to upper
 * level even when folded
 */
/* Report a corrupt pmd entry and clear it so the walk can continue. */
void pmd_clear_bad(pmd_t *pmd)
{
    pmd_ERROR(*pmd);
    pmd_clear(pmd);
}

/*
 * change_pgtable - change protection over [start, end) of @vma to @newprot.
 *
 * NOTE(review): unimplemented stub. Currently a no-op that always returns
 * 0 (callers will observe "no pages changed"); @newprot and
 * @dirty_accountable are ignored until this is filled in.
 */
unsigned long change_pgtable(struct vm_area_struct *vma, unsigned long start,
               unsigned long end, pgprot_t newprot,
               int dirty_accountable)
{
    // TODO
    return 0;
}

/* Arch-level part of tlb_gather_mmu(): record the mm and full-mm status. */
static void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                unsigned long start, unsigned long end)
{
    tlb->mm = mm;

    /* Full-mm teardown iff the range is [0, ~0ul] (end + 1 wraps to 0). */
    tlb->fullmm = (start == 0) && (end == ~0UL);
}

/*
 * Begin an mmu_gather batch over [start, end) of @mm: record the mm,
 * reset the gathered range, and mark a TLB flush as pending so that
 * concurrent batchers know to flush (see tlb_finish_mmu()).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
            unsigned long start, unsigned long end)
{
    arch_tlb_gather_mmu(tlb, mm, start, end);
    __tlb_reset_range(tlb);
    inc_tlb_flush_pending(tlb->mm);
}

/* Flush the TLB for the gathered range (no page freeing batched here). */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
    tlb_flush_mmu_tlbonly(tlb);
}

/*
 * Final flush for an mmu_gather. When @force, discard whatever was
 * gathered and flush exactly [start, end) instead (used when parallel
 * PTE batching makes the gathered range untrustworthy).
 */
static void arch_tlb_finish_mmu(struct mmu_gather *tlb,
        unsigned long start, unsigned long end, bool force)
{
    if (force) {
        __tlb_reset_range(tlb);
        __tlb_adjust_range(tlb, start, end - start);
    }

    tlb_flush_mmu(tlb);
}

/*
 * Finish an mmu_gather batch started by tlb_gather_mmu(): perform the
 * final TLB flush over [start, end) and drop the pending-flush count.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
        unsigned long start, unsigned long end)
{
    /*
     * If there are parallel threads doing PTE changes on the same range
     * under a non-exclusive lock (e.g. mmap_sem read-side) but deferring
     * the TLB flush by batching, a thread with a stale TLB entry can fail
     * to flush it after observing pte_none|!pte_dirty; so flush the TLB
     * forcefully if we detect parallel PTE batching threads.
     */
    bool force = mm_tlb_flush_nested(tlb->mm);

    arch_tlb_finish_mmu(tlb, start, end, force);
    dec_tlb_flush_pending(tlb->mm);
}

/* Flush the TLB before a detached page-table page is handed back. */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
    tlb_flush_mmu_tlbonly(tlb);
}

/* Drop the reference on a detached page-table page, freeing it. */
static void tlb_remove_table_one(void *table)
{
    put_page((struct page *)table);
}

/*
 * Free a page-table page that was unlinked from the page tables:
 * invalidate the TLB first so no CPU can still be walking through it,
 * then release the page.
 */
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
    tlb_table_invalidate(tlb);
    tlb_remove_table_one(table);
}

/*
 * Translate a hugetlb page size in bytes to its page shift.
 * Returns the shift for a supported PMD/PUD/P4D size, -ENOMEM if the
 * size exceeds the largest contiguous allocation, or -EINVAL otherwise.
 */
int vm_hugetlb_pageshift(unsigned long size)
{
    if (size > MAX_ORDER_NR_PAGES * UTILS_PAGE_SIZE)
        return -ENOMEM;

    switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
    case PMD_SIZE:
        return arch_vmap_pmd_supported(__pgprot(0)) ? PMD_SHIFT : -EINVAL;
#endif
#ifndef __PAGETABLE_PUD_FOLDED
    case PUD_SIZE:
        return arch_vmap_pud_supported(__pgprot(0)) ? PUD_SHIFT : -EINVAL;
#endif
#ifndef __PAGETABLE_P4D_FOLDED
    case P4D_SIZE:
        return arch_vmap_p4d_supported(__pgprot(0)) ? P4D_SHIFT : -EINVAL;
#endif
    default:
        return -EINVAL;
    }
}
