// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/elf.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#ifdef CONFIG_ASCEND_SHARE_POOL
#include <linux/share_pool.h>
#endif

#include <asm/cputype.h>

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * The range is acceptable only when a memory region without the
	 * MEMBLOCK_NOMAP attribute covers it in full.  In theory this can
	 * yield false negatives if the range spans distinct but adjacent
	 * regions that differ only in other attributes; few such attributes
	 * exist, however, and it is debatable whether /dev/mem read() calls
	 * should be able to traverse such boundaries at all.
	 */
	if (!memblock_is_region_memory(addr, size))
		return 0;

	return memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	/* One past the end of the requested mapping, as a physical address. */
	unsigned long end = (pfn << PAGE_SHIFT) + size;

	/* Allowed only while no bit outside the supported PHYS_MASK is set. */
	return (end & ~PHYS_MASK) == 0;
}

#ifdef CONFIG_ASCEND_PAGE_TABLE_MULTI_VIEW
/* A half-open-ish virtual address window [va_start, va_end]. */
struct ascend_vm_area {
	unsigned long va_start;
	unsigned long va_end;
};

/* True when [start, end] lies entirely inside @range. */
static inline bool check_range_valid(unsigned long start, unsigned long end,
		struct ascend_vm_area *range)
{
	return !(start < range->va_start || end > range->va_end);
}

/* True when [start, end] intersects @range in any way. */
static inline bool check_range_overlap(unsigned long start, unsigned long end,
		struct ascend_vm_area *range)
{
	bool starts_inside = start >= range->va_start && start < range->va_end;
	bool ends_inside = end > range->va_start && end <= range->va_end;
	bool spans_whole = start < range->va_start && end > range->va_end;

	return starts_inside || ends_inside || spans_whole;
}

/*
 * Validate [addr, addr + len] against the reserved multi-view windows.
 *
 * A MAP_NPTMV request must land entirely inside the window reserved for it
 * (the share-pool NC window when MAP_SHARE_POOL is also set, otherwise the
 * generic multi-view window); any other request must not overlap those
 * windows at all.  Returns true when the range is acceptable for @flags.
 */
static inline bool ascend_check_range_valid(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	struct ascend_vm_area range;

#ifdef CONFIG_ASCEND_SHARE_POOL
	/* Share-pool constraints apply only when the feature is active. */
	if (!sp_is_enabled())
		goto skip;

	range.va_start = MMAP_SHARE_POOL_NC_START;
	range.va_end = MMAP_SHARE_POOL_NC_END;

	/* Share-pool multi-view mappings must sit inside the NC window. */
	if ((flags & MAP_NPTMV) && (flags & MAP_SHARE_POOL))
		return check_range_valid(addr, addr + len, &range);

	/* Everything else must stay clear of the NC window... */
	if (check_range_overlap(addr, addr + len, &range))
		return false;
skip:
#endif
	/* ...and is then checked against the generic multi-view window. */
	range.va_start = N_PT_MULTI_VIEW_MMAP_START;
	range.va_end = N_PT_MULTI_VIEW_MMAP_END;

	if (flags & MAP_NPTMV)
		return check_range_valid(addr, addr + len, &range);

	return !check_range_overlap(addr, addr + len, &range);
}

/*
 * mmap() hook: reject address hints that conflict with the page-table
 * multi-view windows.
 *
 * Returns 0 when the mapping may proceed, -EINVAL when [addr, addr + len]
 * is not acceptable for the given @flags.
 */
int ascend_mmap_check(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	/* Nothing to enforce when the multi-view feature is off. */
	if (!ascend_pt_multi_view_enabled())
		return 0;

	/* No address hint supplied, so there is no placement to validate. */
	if (!addr)
		return 0;

	if (ascend_check_range_valid(addr, len, flags))
		return 0;

	return -EINVAL;
}

/*
 * Steer an unmapped-area search for MAP_NPTMV requests into the reserved
 * multi-view window (or the share-pool NC window when MAP_SHARE_POOL is
 * also set and the share pool is enabled).  @info is left untouched when
 * the feature is off or the request is not a multi-view one.
 */
void n_pt_multi_view_mmap_get_area(struct vm_unmapped_area_info *info,
				unsigned long flags)
{
	if (!ascend_pt_multi_view_enabled() || !(flags & MAP_NPTMV))
		return;

	/* Default target: the generic multi-view window. */
	info->low_limit = N_PT_MULTI_VIEW_MMAP_START;
	info->high_limit = N_PT_MULTI_VIEW_MMAP_END;
	info->flags |= VM_UNMAPPED_AREA_NC;

#ifdef CONFIG_ASCEND_SHARE_POOL
	/* Share-pool multi-view mappings use the NC share-pool window. */
	if (sp_is_enabled() && (flags & MAP_SHARE_POOL)) {
		info->low_limit = MMAP_SHARE_POOL_NC_START;
		info->high_limit = MMAP_SHARE_POOL_NC_END;
	}
#endif
}
#endif

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

#ifdef CONFIG_RTOS_STRICT_DEVMEM_IOMEM_CONTROL
#include <linux/rtos/strict_devmem.h>
#endif

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.  We mimic x86 here by
 * disallowing access to system RAM as well as device-exclusive MMIO regions.
 * This effectively disables read()/write() on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
#ifdef CONFIG_RTOS_STRICT_DEVMEM_IOMEM_CONTROL
	/* With RTOS strict-devmem control switched off, allow everything. */
	if (!is_strict_devmem_enable())
		return 1;
#endif
	/*
	 * Deny device-exclusive MMIO regions and system RAM; any other
	 * physical page may be accessed through /dev/mem.
	 */
	if (iomem_is_exclusive(pfn << PAGE_SHIFT) || page_is_ram(pfn))
		return 0;

	return 1;
}

#endif
