#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/mmap_lock.h>
#include <linux/spinlock.h>

#include "inc/pt.h"

/*
 * validate_page_before_insert - sanity-check a page before it is mapped
 * into a user vma.
 *
 * Currently a stub: every page is accepted.  Kept as a separate hook so
 * that per-page restrictions can be added without touching insert_page().
 *
 * Return: 0 (always, for now).
 */
static int validate_page_before_insert(struct vm_area_struct *vma,
                                       struct page *page)
{
    (void)vma;   /* unused until real checks are added */
    (void)page;

    return 0;
}

/*
 * insert_page - map a single kernel page at @addr in @vma with protection
 * @prot.
 *
 * Validates the page, takes the PTE lock for the slot covering @addr,
 * performs the insertion, and drops the lock again.
 *
 * Return: 0 on success, a negative errno from validation or insertion,
 * or -ENOMEM if the page-table entry could not be obtained.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
                       struct page *page, pgprot_t prot)
{
    spinlock_t *ptl;
    pte_t *pte;
    int err;

    err = validate_page_before_insert(vma, page);
    if (err)
        return err;

    pte = get_locked_pte(vma->vm_mm, addr, &ptl);
    if (!pte)
        return -ENOMEM;

    err = insert_page_into_pte_locked(vma, pte, addr, page, prot);
    pte_unmap_unlock(pte, ptl);

    return err;
}

/*
 * __vm_map_pages - maps range of kernel pages into user vma
 * @vma: user vma to map to
 * @pages: pointer to array of source kernel pages
 * @num: number of pages in page array
 * @offset: user's requested vm_pgoff
 *
 * This allows drivers to map range of kernel pages into a user vma.
 * The zeropage is supported in some VMAs, see
 * vm_mixed_zeropage_allowed().
 *
 * On failure, pages already inserted are left in place (see the
 * vm_map_pages() kernel-doc for the caller's cleanup obligations).
 *
 * Return: 0 on success and error code otherwise.
 */
static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
                          unsigned long num, unsigned long offset)
{
    unsigned long count = vma_pages(vma);
    unsigned long uaddr = vma->vm_start;
    unsigned long i;
    int ret;

    /* Fail if the user requested offset is beyond the end of the object */
    if (offset >= num)
        return -ENXIO;

    /* Fail if the user requested size exceeds available object size */
    if (count > num - offset)
        return -ENXIO;

    /*
     * Use an unsigned long counter to match @count: an int counter would
     * be promoted to unsigned in the comparison and could overflow
     * (undefined behavior) for VMAs spanning more than INT_MAX pages.
     */
    for (i = 0; i < count; i++)
    {
        ret = vm_insert_page(vma, uaddr, pages[offset + i]);
        if (ret < 0)
            return ret;
        uaddr += PAGE_SIZE;
    }

    return 0;
}

/*
 * vm_insert_page - insert a single kernel page at @addr in a user vma.
 *
 * The address must lie inside [vm_start, vm_end).  On the first insertion
 * into a vma we assert that the caller holds the mmap lock (a successful
 * trylock means it was NOT held, hence the BUG_ON), reject PFNMAP vmas,
 * and mark the vma VM_MIXEDMAP before delegating to insert_page().
 *
 * Return: 0 on success, -EFAULT for an out-of-range address, or a
 * negative errno from insert_page().
 */
int vm_insert_page(struct vm_area_struct *vma, uintptr_t addr,
                   struct page *page)
{
    bool in_range = addr >= vma->vm_start && addr < vma->vm_end;

    if (!in_range)
        return -EFAULT;

    if (!(vma->vm_flags & VM_MIXEDMAP)) {
        /* First page inserted: caller must already hold the mmap lock. */
        BUG_ON(mmap_read_trylock(vma->vm_mm));
        BUG_ON(vma->vm_flags & VM_PFNMAP);
        vm_flags_set(vma, VM_MIXEDMAP);
    }

    return insert_page(vma, addr, page, vma->vm_page_prot);
}

/**
 * vm_map_pages - maps range of kernel pages starts with non zero offset
 * @vma: user vma to map to
 * @pages: pointer to array of source kernel pages
 * @num: number of pages in page array
 *
 * Maps an object consisting of @num pages, catering for the user's
 * requested vm_pgoff
 *
 * If we fail to insert any page into the vma, the function will return
 * immediately leaving any previously inserted pages present.  Callers
 * from the mmap handler may immediately return the error as their caller
 * will destroy the vma, removing any successfully inserted pages. Other
 * callers should make their own arrangements for calling unmap_region().
 *
 * Context: Process context. Called by mmap handlers.
 * Return: 0 on success and error code otherwise.
 */
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
                 unsigned long num)
{
    /* Honor the offset the user asked for via mmap()'s pgoff argument. */
    unsigned long pgoff = vma->vm_pgoff;

    return __vm_map_pages(vma, pages, num, pgoff);
}
