// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched/sysctl.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/override.h>
#include <linux/mm.h>

static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	/*
	 * Check that a mapping of @len bytes starting @pgoff pages into
	 * the file stays within the file's maximum mappable size.
	 */
	const u64 limit = file_mmap_size_max(file, inode);

	if (limit != 0 && len > limit)
		return false;

	/* Room left after the mapping itself, expressed in pages. */
	return pgoff <= ((limit - len) >> PAGE_SHIFT);
}

/*
 * Validate an mmap request and create the mapping via mmap_region().
 *
 * @file:     backing file, or NULL for an anonymous mapping
 * @addr:     target address (already prepared by the PAL layer in do_mmap())
 * @len:      page-aligned mapping length
 * @prot:     PROT_* bits requested by the caller
 * @flags:    MAP_* flags (MAP_SHARED/MAP_PRIVATE/MAP_NORESERVE/...)
 * @pgoff:    file offset in pages (rewritten for anonymous mappings)
 * @populate: out parameter, set to @len when the caller should pre-fault
 *            the range; left untouched otherwise
 * @uf:       userfaultfd unmap list, passed through to mmap_region()
 *
 * Returns the mapped address or a negative errno.
 *
 * NOTE(review): unlike upstream do_mmap(), vm_flags starts at 0 and is
 * never seeded from calc_vm_prot_bits()/calc_vm_flag_bits(), so the
 * VM_GROWSDOWN/VM_GROWSUP and VM_LOCKED tests below can never be true —
 * presumably vestigial from the upstream code; confirm that PROT_* is
 * honored elsewhere (e.g. by the PAL prepare step).
 */
static unsigned long __do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot, unsigned long flags,
			unsigned long pgoff, unsigned long *populate, struct list_head *uf)
{
	vm_flags_t vm_flags = 0;

	if (file) {
		struct inode *inode = file_inode(file);

		/* Reject offset/length combinations past the file's max size. */
		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* Writable shared mapping requires a writable file. */
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(file))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* Read-only file: degrade to a private-like mapping. */
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			fallthrough;
		case MAP_PRIVATE:
			/* Both mapping types need a readable file... */
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;

			/* ...backed by a driver that implements ->mmap(). */
			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	/* On success, tell the caller whether the range must be populated. */
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */
/*
 * Entry point for creating a user mapping.
 *
 * Sanitizes @len and @pgoff, asks the PAL layer to prepare the user
 * range, performs the actual mapping via __do_mmap(), then lets the PAL
 * layer commit or roll back the preparation depending on the outcome.
 *
 * Returns the mapped address or a negative errno; *@populate is set to
 * the length to pre-fault, or 0.
 */
unsigned long __override do_mmap(struct file *file, unsigned long addr,
		      unsigned long len, unsigned long prot, unsigned long flags,
		      unsigned long pgoff, unsigned long *populate, struct list_head *uf)
{
	unsigned long ret;
	void *priv = NULL;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	addr = liblinux_pal_usermap_prepare(addr, len, prot, flags, &priv);
	/*
	 * A NULL result previously escaped as return value 0, which
	 * IS_ERR_VALUE()-checking callers would mistake for a successful
	 * mapping at address 0 — report it as an allocation failure.
	 */
	if (!addr)
		return -ENOMEM;
	if (IS_ERR_VALUE(addr))
		return addr;

	ret = __do_mmap(file, addr, len, prot, flags, pgoff, populate, uf);

	/* if __do_mmap failed, will do unprepare */
	liblinux_pal_usermap_finish((const void *)priv, !IS_ERR_VALUE(ret));

	return ret;
}

/*
 * Build a transient vm_area_struct describing [@addr, @addr + @len) and,
 * for file mappings, invoke the file's ->mmap() handler on it.
 *
 * Returns @addr on success or a negative errno.
 *
 * NOTE(review): the vma is kzalloc'ed and unconditionally kfree'd before
 * returning, so it only exists for the duration of the ->mmap() call —
 * a driver that stores the vma pointer or relies on vm_ops callbacks
 * later would be left with a dangling reference; confirm the drivers
 * used through this path never retain it.
 */
unsigned long __override mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	int error = 0;
	struct vm_area_struct *vma;

	/* Temporary descriptor handed to the driver's ->mmap() hook. */
	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		if (!file->f_op || !file->f_op->mmap) {
			addr = -ENODEV;
			goto out;
		}

		error = file->f_op->mmap(file, vma);
	} else if (vm_flags & VM_SHARED) {
		/* Anonymous shared memory is not supported here. */
		error = -ENOSYS;
	}

	if (error) {
		/*
		 * Undo whatever the driver may have mapped.  NOTE(review):
		 * this also runs for the -ENOSYS branch above, where
		 * nothing was mapped — presumably liblinux_pal_vm_unmap()
		 * tolerates an unmapped range; verify.
		 */
		liblinux_pal_vm_unmap((const void *)vma->vm_start);
		addr = error;
	}

out:
	kfree(vma);
	return addr;
}

int __override vm_munmap(unsigned long start, size_t len)
{
	/* Unmapping is delegated wholesale to the PAL layer. */
	int rc = liblinux_pal_usermap_munmap(start, len);

	return rc;
}

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
			    unsigned long len, unsigned long prot,
			    unsigned long flag, unsigned long pgoff)
{
	/*
	 * Thin wrapper around do_mmap().  The populate count and the
	 * userfaultfd unmap list are required by do_mmap()'s signature
	 * but are not acted upon here.
	 */
	unsigned long populate;
	LIST_HEAD(uf);

	return do_mmap(file, addr, len, prot, flag, pgoff, &populate, &uf);
}

unsigned long __override vm_mmap(struct file *file, unsigned long addr,
		      unsigned long len, unsigned long prot,
		      unsigned long flag, unsigned long offset)
{
	const unsigned long aligned_len = PAGE_ALIGN(len);

	/*
	 * The byte offset must be page-aligned and must not wrap when the
	 * (page-aligned) length is added to it.
	 */
	if (unlikely(offset_in_page(offset) || offset + aligned_len < offset))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}

int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	int remap_flags;

	/* The virtual range and the physical base must be page-aligned. */
	if (addr >= end || !PAGE_ALIGNED(addr) || !PAGE_ALIGNED(end) ||
	    !PAGE_ALIGNED(phys_addr))
		return -EINVAL;

	/* Write-combining protections map to normal non-cacheable memory. */
	if (pgprot_val(prot) == pgprot_val(pgprot_writecombine(prot)))
		remap_flags = LIBLINUX_PAL_REMAP_NORMAL_NC;
	else
		remap_flags = LIBLINUX_PAL_REMAP_DEVICE;

	return liblinux_pal_vm_mmap(phys_addr, addr, end - addr,
				    PROT_READ | PROT_WRITE, remap_flags);
}
EXPORT_SYMBOL_GPL(ioremap_page_range);

/*
 * Stub find_vma(): ignores @mm and @addr and always returns a pointer to
 * a single static VMA descriptor marked readable/writable.
 *
 * NOTE(review): the static vma is shared by every caller and rewritten
 * on each call — not thread-safe and never describes a real mapping;
 * confirm callers only consult vm_flags/vm_page_prot transiently.
 */
struct vm_area_struct * __override find_vma(struct mm_struct *mm, unsigned long addr)
{
	static struct vm_area_struct vma;

	vma.vm_flags = VM_READ | VM_WRITE;
	vma.vm_page_prot = PAGE_KERNEL;
	return &vma;
}
