// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/shmem_fs.h>
#include <linux/tty.h>

MODULE_AUTHOR("mdy");
MODULE_DESCRIPTION("dev_mem_cached");
MODULE_LICENSE("GPL");

static const char* mod_name = "mod_mem_cached";
static const char* class_name = "class_mem_cached";
static dev_t dev_number = 0;
static struct cdev dev_mem_cached_cdev;

typedef int (*PFUN_DEVMEM_IS_ALLOWED)(unsigned long pfn);
PFUN_DEVMEM_IS_ALLOWED pfn_devmem_is_allowed = NULL;

typedef int (*PFUN_VALID_MMAP_PHYS_ADDR_RANGE)(unsigned long pfn, size_t size);
PFUN_VALID_MMAP_PHYS_ADDR_RANGE pfn_valid_mmap_phys_addr_range = NULL;


#define DEVPORT_MINOR	4

/* Bytes from @start to the end of its page, capped at @size. */
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long room = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return size < room ? size : room;
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/*
 * Generic fallback: a physical range is accessible only when it ends at or
 * below the top of kernel-managed RAM (__pa(high_memory)).
 */
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

/* Generic fallback: no architecture restriction on mmap of physical pages. */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
/* Ask the kernel's devmem policy (resolved via kallsyms at init) about @pfn. */
static inline int page_is_allowed(unsigned long pfn)
{
	return pfn_devmem_is_allowed(pfn);
}
/*
 * Walk [pfn, pfn + size/PAGE_SIZE) one page at a time and reject the whole
 * range if the strict-devmem policy denies any page in it.
 */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!pfn_devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
/* Without CONFIG_STRICT_DEVMEM every physical page may be mapped. */
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
/* Default no-op; architectures that remap /dev/mem pointers override this. */
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif


/*
 * Weak default: permit any physical mapping and leave @vma_prot untouched.
 * Architectures may override to veto a mapping or adjust its protection.
 */
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
#ifdef pgprot_noncached
/*
 * Decide whether the physical address @addr must be accessed uncached.
 * Returns non-zero when a cached mapping would be unsafe.
 */
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through a
	 * file pointer
	 * that was marked O_DSYNC will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

/*
 * Pick the page protection for a physical mapping: downgrade to an
 * uncached protection when uncached_access() demands it, otherwise keep
 * the caller-supplied @vma_prot unchanged.
 */
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	/*
	 * Widen before shifting: on 32-bit kernels with a 64-bit
	 * phys_addr_t (PAE/LPAE), "pfn << PAGE_SHIFT" would truncate the
	 * high bits.  Matches the cast used in mmap_mem_cached().
	 */
	phys_addr_t offset = (phys_addr_t)pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
/*
 * NOMMU: a "mapping" is the physical address itself, so validate the pfn
 * range and return the physical address for the requested page offset.
 */
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

/* zero-device mappings can only be copied, not shared, without an MMU */
static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

/* With an MMU any mapping type (shared or private) is acceptable. */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

/*
 * VMA ops for the mappings created below; .access lets debuggers reach the
 * mapped physical range on architectures with CONFIG_HAVE_IOREMAP_PROT.
 */
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem_cached(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* Does it even fit in phys_addr_t? */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!pfn_valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}


/* Gate opens of raw-memory devices behind CAP_SYS_RAWIO. */
static int open_port(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}

#define open_mem	open_port

/* Per-minor fops installed by memory_open() for the "mem_cached" minor. */
static const struct file_operations __maybe_unused mem_fops = {
	.mmap		= mmap_mem_cached,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

/*
 * Table of minors, indexed by minor number.  Only minor 1 ("mem_cached")
 * is populated; the designated initializer leaves minor 0 as a hole with
 * NULL .fops, which memory_open() rejects with -ENXIO.  Note that
 * ARRAY_SIZE(devlist) is therefore 2.
 */
static const struct memdev {
	const char *name;	/* device-node name under /dev */
	umode_t mode;		/* node permissions; 0 = driver-core default */
	const struct file_operations *fops;	/* per-minor fops */
	fmode_t fmode;		/* extra f_mode bits forced at open time */
} devlist[] = {
	 [1] = {"mem_cached", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
};

/*
 * Top-level open: look up the per-minor table entry, install its fops and
 * forced f_mode bits on the file, then chain to the per-minor open hook
 * when one exists.
 */
static int memory_open(struct inode *inode, struct file *filp)
{
	const struct memdev *entry;
	int minor = iminor(inode);

	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	entry = &devlist[minor];
	if (!entry->fops)
		return -ENXIO;

	filp->f_op = entry->fops;
	filp->f_mode |= entry->fmode;

	return entry->fops->open ? entry->fops->open(inode, filp) : 0;
}

/* fops bound to the char region; open dispatches to the per-minor fops. */
static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

/*
 * devtmpfs callback: report the per-minor node mode from devlist; return
 * NULL to keep the default node name.
 */
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	const struct memdev *entry = &devlist[MINOR(dev->devt)];

	if (mode && entry->mode)
		*mode = entry->mode;
	return NULL;
}

static struct class *mem_class;

/*
 * Module init.
 *
 * Resolve the unexported kernel helpers *first*, so a lookup failure
 * leaves nothing registered, then set up the char region, cdev, class and
 * device nodes.  Every later failure unwinds what was already registered
 * (the original returned early and leaked the region, cdev, class and
 * device nodes on the later error paths).
 */
static int __init mem_cached_init(void)
{
	int major;
	int minor;
	int result;
	int dev_count = ARRAY_SIZE(devlist);

#ifdef CONFIG_STRICT_DEVMEM
	/*
	 * Only range_is_allowed() (compiled under CONFIG_STRICT_DEVMEM)
	 * calls this pointer; looking it up unconditionally would make the
	 * module unloadable on kernels without devmem_is_allowed().
	 */
	pfn_devmem_is_allowed =
		(PFUN_DEVMEM_IS_ALLOWED)kallsyms_lookup_name("devmem_is_allowed");
	if (!pfn_devmem_is_allowed) {
		pr_err("mem_cached: cannot resolve devmem_is_allowed\n");
		return -EFAULT;
	}
#endif

	pfn_valid_mmap_phys_addr_range =
		(PFUN_VALID_MMAP_PHYS_ADDR_RANGE)kallsyms_lookup_name("valid_mmap_phys_addr_range");
	if (!pfn_valid_mmap_phys_addr_range) {
		pr_err("mem_cached: cannot resolve valid_mmap_phys_addr_range\n");
		return -EFAULT;
	}

	result = alloc_chrdev_region(&dev_number, 0, dev_count, mod_name);
	if (result < 0) {
		pr_err("mem_cached: unable to get major for memory devs\n");
		return result;
	}
	major = MAJOR(dev_number);

	cdev_init(&dev_mem_cached_cdev, &memory_fops);
	dev_mem_cached_cdev.owner = THIS_MODULE;	/* cdev_init() already set .ops */
	result = cdev_add(&dev_mem_cached_cdev, dev_number, dev_count);
	if (result < 0) {
		pr_err("mem_cached: cdev_add error\n");
		goto err_unregister;
	}

	mem_class = class_create(THIS_MODULE, class_name);
	if (IS_ERR(mem_class)) {
		pr_err("mem_cached: class_create error\n");
		result = PTR_ERR(mem_class);
		goto err_cdev;
	}
	mem_class->devnode = mem_devnode;

	for (minor = 1; minor < dev_count; minor++) {
		if (!devlist[minor].name)
			continue;

		/* Skip the /dev/port minor on arches without port I/O. */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(major, minor),
			      NULL, devlist[minor].name);
	}

	return 0;

err_cdev:
	cdev_del(&dev_mem_cached_cdev);
err_unregister:
	unregister_chrdev_region(dev_number, dev_count);
	return result;
}

/*
 * Module exit: tear down in reverse order of creation — device nodes,
 * class, cdev, then the reserved dev_t region.
 */
static void __exit mem_cached_exit(void)
{
	int dev_count = ARRAY_SIZE(devlist);
	int major = MAJOR(dev_number);
	int minor;

	for (minor = 1; minor < dev_count; minor++) {
		if (!devlist[minor].name)
			continue;

		/* Mirror init: the /dev/port minor was never created. */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_destroy(mem_class, MKDEV(major, minor));
	}

	class_destroy(mem_class);
	cdev_del(&dev_mem_cached_cdev);
	unregister_chrdev_region(dev_number, dev_count);
}

module_init(mem_cached_init);
module_exit(mem_cached_exit);
