/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2021.
 * Description: support cetartlb feature
 * Author: fanglinxu <fanglinxu@huawei.com>
 * Create: 2018-09-13
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/cetatlb.h>
#include <linux/pgtable.h>
#include <linux/printk.h>
#ifdef CONFIG_X86_64
#include <asm/page_types.h>
#include <asm/e820/api.h>
#include <linux/elf-randomize.h>
#endif

#ifdef CONFIG_RTOS_MTRC
#include <linux/mtrc_pat.h>
#endif

#ifdef CONFIG_RTOS_DEV_MMAP_MEM_TRACER
#include <linux/rtos_devmem.h>
#endif

/* arch-specific check of whether a physical range may be mapped (defined per-arch below) */
static int cetatlb_range_is_allowed(unsigned long pfn, unsigned long size, unsigned long flag);

/*
 * The following helpers mirror functionality from other kernel code.
 * NOTE(review): declared static here, but the per-arch definitions below omit
 * the static keyword; linkage stays internal (C11 6.2.2), yet the styles
 * should be unified.
 */
static pte_t *cetatlb_pte_alloc(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long addr, unsigned long sz);

#ifdef CONFIG_X86_64
static void cetatlb_pmd_share(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long addr, pud_t *pud);
#endif

/*
 * cetartlb_change_protection - apply @newprot to every huge mapping of @vma
 * in [@address, @end).
 *
 * Walks the range in steps of the vma's cetatlb huge-page size; each present
 * huge PTE is cleared and re-installed with the new protection (marked huge
 * and dirty).  Shared PMDs are unshared instead of modified in place.
 *
 * Returns the number of touched pages expressed in base PAGE_SIZE units.
 *
 * Locking: takes the backing file's i_mmap rwsem (write) and
 * mm->page_table_lock.  NOTE(review): presumably the caller holds mmap_lock,
 * as for mainline change_protection() — confirm against callers.
 */
unsigned long cetartlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	unsigned long hugesize = cetatlb_flags_to_size(vma->rtos_vm_flags);
	pte_t *ptep = NULL;
	pte_t pte;
	unsigned long pages = 0;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, end);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);
#ifdef CONFIG_KVM_PVIRT
	/* notifier calls are only compiled in to keep KVM shadow tables in sync */
	mmu_notifier_invalidate_range_start(&range);
#endif
	i_mmap_lock_write(vma->vm_file->f_mapping);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += hugesize) {
		ptep = huge_pte_offset(mm, address, hugesize);
		if (!ptep)
			continue;
		/* a shared PMD is detached rather than re-protected in place */
		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
			pages++;
			continue;
		}
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			/* clear-then-set so a concurrent hardware walk never
			 * observes a half-updated entry */
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			pte = pte_mkdirty(pte);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);

#ifdef CONFIG_KVM_PVIRT
	mmu_notifier_invalidate_range(mm, start, end);
#endif
	i_mmap_unlock_write(vma->vm_file->f_mapping);
#ifdef CONFIG_KVM_PVIRT
	mmu_notifier_invalidate_range_end(&range);
#endif
	return pages * hugesize / PAGE_SIZE;
}

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int cetartlb_on_working;
unsigned long long cetartlb_addr;
unsigned long long cetartlb_size;

/*
 * Parse the "cetartlb=<size>%<addr>" kernel parameter that describes the
 * physical region in which cetatlb mappings are permitted.
 *
 * NOTE(review): __setup() handlers conventionally return 1 when the option
 * has been consumed; this one returns 0 on both paths — confirm this is
 * intentional (newer kernels warn about unhandled parameters).
 */
static int __init cetartlb_param_parse(char *cmdline_cetartlb)
{
	char *str_start = cmdline_cetartlb;
	char *str_end = NULL;
	unsigned long long cetartlb_end;

	/* first field: region size, terminated by the '%' separator */
	cetartlb_size = simple_strtoull(str_start, &str_end, 0);
	if ('%' != *str_end)
		goto err;

	/* at least one character must follow the '%' separator */
	if (strlen(str_end) <= 1)
		goto err;

	/* second field: region base address */
	str_start = str_end + 1;
	cetartlb_addr = simple_strtoull(str_start, &str_end, 0);
	if (str_end == str_start)
		goto err;

	cetartlb_on_working = 1;
	cetartlb_end = cetartlb_addr + cetartlb_size;
	/*
	 * An overflowing cetartlb_addr + cetartlb_size is deliberately not
	 * rejected: a user may pass a very large size and still expect the
	 * range checks to succeed for the non-wrapped part of the range, so
	 * we only warn and keep the (wrapped) cut value.
	 */
	if (cetartlb_end < cetartlb_addr) {
		pr_err("cetartlb: cetartlb_addr + cetartlb_size > ULLONG_MAX\n"
			"cetartlb: max allowed phy addr using cut value: %llx\n",
			cetartlb_end);
	}
	return 0;
err:
	pr_err("cetartlb param error!\n");
	return 0;
}
__setup("cetartlb=", cetartlb_param_parse);

/*
 * cetatlb_range_is_allowed - ARM/ARM64: may the physical range starting at
 * pfn << PAGE_SHIFT with @length bytes be mapped through cetatlb?
 *
 * Without a "cetartlb=" region configured, only mappings carrying the
 * UIO-support flag are allowed; otherwise the range must lie entirely
 * inside the configured region.
 *
 * Returns 1 when allowed, 0 otherwise.
 */
static int cetatlb_range_is_allowed(unsigned long pfn, unsigned long length, unsigned long flag)
{
	unsigned long long phyaddr = (unsigned long long)pfn << PAGE_SHIFT;

	if (cetartlb_on_working == 0) {
		if ((flag & RTOS_VM_CETARTLB_SUPPORT_UIO) == 0)
			return 0;
		else
			return 1;
	}

	/*
	 * Fix: reject a caller range whose end wraps around ULLONG_MAX; the
	 * containment check below could otherwise see a small wrapped
	 * phyaddr + length and wrongly allow memory outside the region.
	 */
	if (phyaddr + length < phyaddr)
		return 0;

	if ((phyaddr < cetartlb_addr) || ((phyaddr + length) > (cetartlb_addr + cetartlb_size)))
		return 0;
	else
		return 1;
}
#endif

#ifdef CONFIG_X86_64
/*
 * cetatlb_range_is_allowed - x86_64: may the physical range starting at
 * pfn << PAGE_SHIFT with @length bytes be mapped through cetatlb?
 *
 * The range is allowed only when it lies entirely inside a single
 * E820_TYPE_RESERVED entry of the firmware memory map.  @flag is unused
 * on this architecture.
 *
 * Returns 1 when allowed, 0 otherwise.
 */
static int cetatlb_range_is_allowed(unsigned long pfn, unsigned long length, unsigned long flag)
{
	__u32 i;
	struct e820_entry *ei = NULL;
	unsigned long phyaddr = pfn << PAGE_SHIFT;

	/*
	 * Fix: reject a caller range whose end wraps around; the comparison
	 * inside the loop could otherwise see a small wrapped phyaddr+length
	 * and wrongly accept memory outside the reserved entry.
	 */
	if (phyaddr + length < phyaddr)
		return 0;

	for (i = 0; i < e820_table->nr_entries; i++) {
		ei = &e820_table->entries[i];

		/* area is not RESERVED type */
		if (ei->type != E820_TYPE_RESERVED) {
			ei = NULL;
			continue;
		}

		if ((phyaddr >= ei->addr) && (phyaddr+length <= ei->addr+ei->size))
			break;

		ei = NULL;
	}

	return (ei == NULL) ? 0 : 1;
}
#endif

/*
 * remap_cetatlb_range - install huge special PTEs mapping @size bytes of
 * physical memory (starting at @pfn) into @vma at @addr.
 *
 * The range is covered with entries of the vma's cetatlb huge-page size;
 * any trailing remainder smaller than one huge page is silently ignored
 * (callers validate the length beforehand in get_unmapped_area_mem()).
 *
 * Returns 0 on success or -ENOMEM when a page table could not be allocated.
 * NOTE(review): on a mid-range allocation failure the already-installed
 * entries are left in place — confirm callers tear the vma down on error.
 */
int remap_cetatlb_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, struct file *file)
{
	pte_t new_pte;
	pte_t *ptep = NULL;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long hugesize = cetatlb_flags_to_size(vma->rtos_vm_flags);
	unsigned long sizerec = size;
#ifdef CONFIG_RTOS_DEV_MMAP_MEM_TRACER
	unsigned long start_addr = addr;
	unsigned long start_pfn = pfn;
#endif

	/* cetatlb needn't support remap, so we don't call flush_cache_range here */
	while (sizerec >= hugesize) {
		ptep = cetatlb_pte_alloc(vma, mm, addr, hugesize);
		if (!ptep) {
			pr_err("huge_pte_alloc failed!\n");
			return -ENOMEM;
		}

		new_pte = pte_mkdirty(pfn_pte(pfn, vma->vm_page_prot));
		new_pte = pte_mkyoung(new_pte);
		new_pte = pte_mkhuge(new_pte);
		new_pte = pte_mkspecial(new_pte);

		set_huge_pte_at(mm, addr, ptep, new_pte);
		addr += hugesize;
		sizerec -= hugesize;
		pfn += hugesize >> PAGE_SHIFT;
	}
#ifdef CONFIG_RTOS_DEV_MMAP_MEM_TRACER
	/*
	 * Fix: record the START of the mapping.  The loop above advanced
	 * addr/pfn to the end of the range, so the tracer previously logged
	 * the end address and end pfn instead of the mapped region.
	 */
	mmap_mem_cetartlb_tracer(file, start_addr, size, start_pfn);
#endif

	return 0;
}

/*
 * Flag the vma as a raw PFN I/O mapping and install its huge PTEs.
 * The mapping is pinned (no expand, no coredump) and excluded from
 * copy_mm() via VM_DONTCOPY.  @addr and @pfn are ignored; the vma's own
 * vm_start and vm_pgoff are used, as in the original implementation.
 */
static int cetatlb_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, struct file *file)
{
	unsigned long extra_flags = VM_IO | VM_PFNMAP | VM_DONTEXPAND |
			VM_DONTDUMP | VM_DONTCOPY;

	vma->vm_flags |= extra_flags;

	return remap_cetatlb_range(vma, vma->vm_start, vma->vm_pgoff, size, file);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/* fallback: a physical range is valid when it ends inside lowmem */
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	/* NOTE(review): addr + count may wrap for huge counts — confirm
	 * callers bound count before this check */
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

/* fallback: no arch-specific restriction on mmap'able physical pfns */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

/*
 * read() on a cetatlb device is deliberately unsupported; mappings are the
 * only access path, so this always returns -ENOSYS.
 */
static ssize_t ceta_read_mem(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	return -ENOSYS;
}

/*
 * write() on a cetatlb device is deliberately unsupported; mappings are the
 * only access path, so this always returns -ENOSYS.
 */
static ssize_t ceta_write_mem(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	return -ENOSYS;
}

/* cetatlb only supports shared mappings; returns the raw VM_MAYSHARE bit
 * (non-zero == shared), not a normalised 0/1 boolean */
static inline int is_shared_mapping(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}

/*
 * Find a free, ceta_size-aligned user address for a mapping of @len bytes.
 *
 * Starts at the caller's hint (aligned up), falling back to
 * TASK_UNMAPPED_BASE (randomised on x86_64 when PF_RANDOMIZE is set) when
 * no hint was given, then scans upward one huge page at a time for a gap
 * in the vma tree.
 *
 * On success *addr holds the chosen address and 1 is returned; otherwise
 * 0 is returned.  @vma is only used as scratch storage.
 */
static int get_valid_addr(unsigned long *addr, unsigned long len,
		unsigned long ceta_size, struct vm_area_struct *vma,
		struct mm_struct *mm)
{
	unsigned long candidate;
#ifdef CONFIG_X86_64
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();
#endif
	candidate = ALIGN(*addr, ceta_size);
	/*
	 * In non-fixed mode a zero hint means "anywhere": start the scan at
	 * the conventional unmapped base instead of address zero.
	 */
	if (candidate == 0) {
#ifdef CONFIG_X86_64
		candidate = ALIGN(TASK_UNMAPPED_BASE + random_factor, ceta_size);
#else
		candidate = ALIGN(TASK_UNMAPPED_BASE, ceta_size);
#endif
	}

	for (; candidate <= TASK_SIZE - len; candidate += ceta_size) {
		vma = find_vma(mm, candidate);
		if (!vma || candidate + len <= vma->vm_start) {
			*addr = candidate;
			return 1;
		}
	}

	*addr = candidate;
	return 0;
}

/*
 * get_unmapped_area_mem - common get_unmapped_area implementation for the
 * cetatlb devices.
 *
 * Validates the request (shared mapping only; len, pgoff and addr aligned
 * to the huge-page size described by @page_mask) and either honours
 * MAP_FIXED or searches for a free aligned address via get_valid_addr().
 *
 * @page_mask: mask of the in-page bits for the device's huge-page size
 * @ceta_size: the device's huge-page size in bytes
 * @page_shift: currently unused — kept for interface stability
 *
 * Returns the chosen address or a negative errno.
 */
static unsigned long get_unmapped_area_mem(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		unsigned long page_mask, unsigned long ceta_size, int page_shift)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;

#ifdef CONFIG_RTOS_MTRC
	/* refuse regions that overlap an MTRC-managed area */
	unsigned long error = is_mtrc_region_conflict(pgoff, len);
	if (error) {
		pr_err("[PAT]Error,mmap region[pgoff:%lx len:%lx] is conflict with mtrc. cat /proc/mtrc get more info.\n",
			pgoff, len);
		return -EINVAL;
	}
#endif

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_PRIVATE) {
		pr_err("CetaTLB doesn't support MAP_PRIVATE mode!\n");
		return -EINVAL;
	}

	if (len & ~page_mask) {
		pr_err("CetaTLB received invalid len!\n");
		return -EINVAL;
	}

	if ((pgoff << PAGE_SHIFT) & ~page_mask) {
		pr_err("CetaTLB received invalid pgoff!\n");
		return -EINVAL;
	}

	if (flags & MAP_FIXED) {
		/* fixed mappings are used as-is, only alignment is enforced */
		if (addr & ~page_mask) {
			pr_err("CetaTLB received invalid addr!\n");
			return -EINVAL;
		}
		return addr;
	}

	if (get_valid_addr(&addr, len, ceta_size, vma, mm))
		return addr;

	pr_err("CetaTLB can't get a usable VMA	by the passing addr!\n");
	return -EINVAL;
}
/*
 * follow_phys_huge - translate a user address inside a cetatlb vma to its
 * physical address and page protection.
 *
 * Counterpart of mainline follow_phys() for this driver's huge mappings.
 * The @prot out-parameter is a raw pgprot value on x86_64 and a pgprot_t on
 * ARM/ARM64, hence the dual prototype.
 *
 * Returns 0 on success; -EINVAL when the vma is not an IO/PFN mapping, no
 * PTE is present, or FOLL_WRITE was requested on a read-only entry.
 */
#ifdef CONFIG_X86_64
static int follow_phys_huge(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys)
#else
static int follow_phys_huge(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, pgprot_t *prot, resource_size_t *phys)
#endif
{
	int ret = -EINVAL;
	pte_t *ptep = NULL;
	pte_t pte;
	spinlock_t *ptl = NULL;
	unsigned long hugesize = cetatlb_flags_to_size(vma->rtos_vm_flags);

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;

	ptep = huge_pte_offset(vma->vm_mm, address, hugesize);
	if (!ptep)
		goto out;

	/* huge entries are protected by the mm-wide page_table_lock here */
	ptl = &vma->vm_mm->page_table_lock;
	spin_lock(ptl);
	pte = huge_ptep_get((pte_t *)ptep);
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

#ifdef CONFIG_X86_64
	*prot = pgprot_val(pte_pgprot(pte));
#else
	*prot = __pgprot(pgprot_val(__pte_pgprot(pte)));
#endif

	/* combine the huge-frame base with the offset within the huge page
	 * (address bits below the huge size but above 4K) */
	if (hugesize == CETARTLB_PAGESIZE_2M)
		*phys = ((resource_size_t)pte_pfn(pte) << PAGE_SHIFT)
				| ((resource_size_t)(address & (CETARTLB_PAGESIZE_2M - 0x1000)));
	if (hugesize == CETARTLB_PAGESIZE_1G)
		*phys = ((resource_size_t)pte_pfn(pte) << PAGE_SHIFT)
				| ((resource_size_t)(address & (CETARTLB_PAGESIZE_1G - 0x1000)));

	ret = 0;
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return ret;
}

#ifdef CONFIG_ARM
/*
 * vm_ops->access implementation (ARM): read/write up to one 4K page of a
 * huge cetatlb mapping on behalf of ptrace/core-dump style access.
 *
 * Resolves the physical address through follow_phys_huge(), ioremaps a
 * single 4K page with a cache attribute matching the original mapping,
 * then copies through the temporary mapping.
 *
 * Returns the number of bytes transferred (clamped to the page boundary)
 * or a negative errno.
 */
static int alwayse_4k_access_phys(struct vm_area_struct *vma, unsigned long addr,
		void *buf, int len, int write)
{
	resource_size_t phys_addr = 0;
	pgprot_t prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);

	if (follow_phys_huge(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	/* Pick the ioremap flavour from the PMD memory-type bits.
	 * NOTE(review): all three arms mask with PMD_SECT_WB while comparing
	 * against different constants — this relies on PMD_SECT_BUFFERED and
	 * PMD_SECT_UNCACHED being sub-fields of the WB bits; confirm. */
	if ((prot & PMD_SECT_WB) == PMD_SECT_WB)
		maddr = ioremap_cache(phys_addr, PAGE_SIZE);
	else if ((prot & PMD_SECT_WB) == PMD_SECT_BUFFERED)
		maddr = ioremap_wc(phys_addr, PAGE_SIZE);
	else if ((prot & PMD_SECT_WB) == PMD_SECT_UNCACHED)
		maddr = ioremap(phys_addr, PAGE_SIZE);
	else
		return -EINVAL;
	if (!maddr)
		return -ENOMEM;

	/* never cross the 4K page that was mapped */
	if (len > (PAGE_SIZE - offset))
		len = PAGE_SIZE - offset;

	if (write) {
		if (copy_to_kernel_nofault(maddr + offset, buf, len)) {
			iounmap(maddr);
			return -EPERM;
		}
	} else {
		memcpy_fromio(buf, maddr + offset, len);
	}

	iounmap(maddr);

	return len;
}
#endif

#ifdef CONFIG_ARM64
/*
 * vm_ops->access implementation (ARM64): read/write up to one 4K page of a
 * huge cetatlb mapping.
 *
 * Resolves the physical address through follow_phys_huge(), then rebuilds
 * the protection keeping only the memory-attribute index and forcing a
 * non-executable (PXN|UXN), writable kernel mapping for the temporary
 * 4K ioremap.
 *
 * Returns the number of bytes transferred (clamped to the page boundary)
 * or a negative errno.
 */
static int alwayse_4k_access_phys(struct vm_area_struct *vma, unsigned long addr,
		void *buf, int len, int write)
{
	resource_size_t phys_addr = 0;
	unsigned long _prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);
	pgprot_t prot = __pgprot(_prot);
	if (follow_phys_huge(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	/* keep the cacheability attribute, force kernel RW non-exec */
	_prot = pgprot_val(prot);
	_prot = (_prot & PMD_ATTRINDX_MASK)
			| (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE);
	prot = __pgprot(_prot);
	maddr = __ioremap(phys_addr, PAGE_SIZE, prot);
	if (!maddr)
		return -ENOMEM;

	/* never cross the 4K page that was mapped */
	if (len > (PAGE_SIZE - offset))
		len = PAGE_SIZE - offset;

	if (write) {
		if (copy_to_kernel_nofault(maddr + offset, buf, len)) {
			iounmap(maddr);
			return -EPERM;
		}
	} else {
		memcpy_fromio(buf, maddr + offset, len);
	}

	iounmap(maddr);

	return len;
}
#endif

#ifdef CONFIG_X86_64
/*
 * vm_ops->access implementation (x86_64): read/write up to one 4K page of a
 * huge cetatlb mapping.
 *
 * Resolves the physical address through follow_phys_huge(); the PSE (huge
 * page) bit is stripped from the protection because only a single 4K page
 * is ioremapped.
 *
 * Returns the number of bytes transferred (clamped to the page boundary)
 * or a negative errno.
 * NOTE(review): unlike the ARM variants, the write path uses memcpy_toio
 * without a nofault guard — confirm the asymmetry is intentional.
 */
static int alwayse_4k_access_phys(struct vm_area_struct *vma, unsigned long addr,
		void *buf, int len, int write)
{
	resource_size_t phys_addr = 0;
	unsigned long prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);

	if (follow_phys_huge(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	/* mapping a single 4K page, so drop the huge-page bit */
	prot = prot & ~_PAGE_PSE;
	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
	if (!maddr)
		return -ENOMEM;

	/* never cross the 4K page that was mapped */
	if (len > (PAGE_SIZE - offset))
		len = PAGE_SIZE - offset;

	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);

	iounmap(maddr);

	return len;
}
#endif

/* vm_ops for cetatlb vmas: only ptrace-style access (one 4K page at a
 * time) is provided, and only when the arch supports ioremap_prot */
static const struct vm_operations_struct ceta_mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = alwayse_4k_access_phys
#endif
};

/* get_unmapped_area hook for the 1G cetatlb device */
static unsigned long get_unmapped_area_mem_1g(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return get_unmapped_area_mem(file, addr, len, pgoff, flags,
			CETARTLB_PAGE_MASK_1G, CETARTLB_PAGESIZE_1G, CETARTLB_SHIFT_1G);
}

/* get_unmapped_area hook for the 2M cetatlb device; exported for other modules */
unsigned long get_unmapped_area_mem_2M(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return get_unmapped_area_mem(file, addr, len, pgoff, flags,
			CETARTLB_PAGE_MASK_2M, CETARTLB_PAGESIZE_2M, CETARTLB_SHIFT_2M);
}
EXPORT_SYMBOL(get_unmapped_area_mem_2M);

/*
 * ceta_mmap_mem - common mmap implementation for the cetatlb devices.
 *
 * Validates the requested protection and physical range, derives the page
 * protection from the arch's phys_mem_access_prot(), tags the vma with the
 * huge-page-size flag for @hugesize, and installs the huge PTEs.
 *
 * Returns 0 on success or a negative errno.
 */
static int ceta_mmap_mem(struct file *file, struct vm_area_struct *vma, unsigned long hugesize)
{
	size_t size = vma->vm_end - vma->vm_start;

	/*
	 * VM_READ VM_WRITE VM_EXEC must be set one
	 * at least when !PROT_NONE. Here not support PROT_NONE
	 */
	if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		return -EINVAL;

#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
#endif

	/* arch-specific allow-list of physical ranges */
	if (!cetatlb_range_is_allowed(vma->vm_pgoff, size, vma->rtos_vm_flags))
		return -EPERM;

#ifdef CONFIG_X86_64
	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
			&vma->vm_page_prot))
		return -EINVAL;
#endif

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
			size,
			vma->vm_page_prot);
	vma->vm_ops = &ceta_mmap_mem_ops;
	/*
	 * RTOS_VM_CETARTLB_2M, RTOS_VM_CETARTLB_16M and RTOS_VM_CETARTLB_1G
	 * are mutually exclusive, so we reset all the bits before the new
	 * assignment.
	 */
	vma->rtos_vm_flags &= ~(RTOS_VM_CETARTLB_2M | RTOS_VM_CETARTLB_16M | RTOS_VM_CETARTLB_1G);
	vma->rtos_vm_flags |= cetartlb_size_to_flags(hugesize);
#ifdef CONFIG_RTOS_MTRC
	mtrc_sync_vmflags(vma, vma->vm_pgoff, size);
#endif

	return cetatlb_remap_pfn_range(vma,
			vma->vm_start,
			vma->vm_pgoff,
			size,
			file);
}

/* mmap handler for the 1G cetatlb device node */
static int ceta_mmap_mem_1G(struct file *file, struct vm_area_struct *vma)
{
	return ceta_mmap_mem(file, vma, CETARTLB_PAGESIZE_1G);
}

/* mmap handler for the 2M cetatlb device node; exported for other modules */
int ceta_mmap_mem_2M(struct file *file, struct vm_area_struct *vma)
{
	return ceta_mmap_mem(file, vma, CETARTLB_PAGESIZE_2M);
}
EXPORT_SYMBOL(ceta_mmap_mem_2M);

/*
 * lseek() on a cetatlb device is deliberately unsupported, so this always
 * returns -ENOSYS.
 */
static loff_t ceta_memory_lseek(struct file *file, loff_t offset, int orig)
{
	return -ENOSYS;
}

/* Opening a cetatlb device grants raw physical-memory mapping, so it is
 * restricted to tasks holding CAP_SYS_RAWIO. */
static int ceta_open_mem(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	return 0;
}

/* file_operations for the cetatlb 1G device (read/write/lseek are stubs;
 * mmap is the only real access path) */
static const struct file_operations mem_fops_1g = {
	.llseek		= ceta_memory_lseek,
	.read		= ceta_read_mem,
	.write		= ceta_write_mem,
	.mmap		= ceta_mmap_mem_1G,
	.open		= ceta_open_mem,
	.get_unmapped_area = get_unmapped_area_mem_1g,
};

/* file_operations for the cetatlb 2M device (read/write/lseek are stubs;
 * mmap is the only real access path) */
static const struct file_operations mem_fops_2m = {
	.llseek		= ceta_memory_lseek,
	.read		= ceta_read_mem,
	.write		= ceta_write_mem,
	.mmap		= ceta_mmap_mem_2M,
	.open		= ceta_open_mem,
	.get_unmapped_area = get_unmapped_area_mem_2M,
};

/* non-zero when @file is an opened cetatlb 2M device */
int is_file_cetartlb_2m(struct file *file)
{
	return file->f_op == &mem_fops_2m;
}

/* non-zero when @file is an opened cetatlb 1G device */
int is_file_cetartlb_1g(struct file *file)
{
	return file->f_op == &mem_fops_1g;
}

/*
 * Per-minor device table; index 0 is intentionally unused so valid minors
 * start at 1.  @fmode carries extra f_mode bits for the opened file
 * (FMODE_UNSIGNED_OFFSET) and @mode an optional devtmpfs node mode.
 */
static const struct ceta_memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} ceta_devlist[] = {
#ifdef CONFIG_X86_64
	[1] = { "cetatlb1G", 0, &mem_fops_1g, FMODE_UNSIGNED_OFFSET },
	[2] = { "cetatlb2M", 0, &mem_fops_2m, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_ARM
	/* ARM (non-LPAE) only supports the 2M device */
	[1] = { "cetatlb2M", 0, &mem_fops_2m, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_ARM64
#ifdef CONFIG_RTOS_CETARTLB_1G
	[1] = { "cetatlb2M", 0, &mem_fops_2m, FMODE_UNSIGNED_OFFSET },
	[2] = { "cetatlb1G", 0, &mem_fops_1g, FMODE_UNSIGNED_OFFSET }
#else
	[1] = { "cetatlb2M", 0, &mem_fops_2m, FMODE_UNSIGNED_OFFSET },
#endif
#endif
};

/*
 * ceta_memory_open - open() dispatcher for the "cetatlbmem" chrdev.
 *
 * Looks the minor up in ceta_devlist, swaps in the device-specific fops,
 * applies the per-device fmode bits and forwards to the device's own open()
 * (the CAP_SYS_RAWIO check).  Mirrors drivers/char/mem.c:memory_open().
 *
 * Returns 0 on success, -ENXIO for an unknown minor, or the device open()'s
 * error.
 */
static int ceta_memory_open(struct inode *inode, struct file *filp)
{
	unsigned int minor;
	const struct ceta_memdev *dev = NULL;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(ceta_devlist))
		return -ENXIO;

	dev = &ceta_devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	/*
	 * Fix: ceta_devlist declares FMODE_UNSIGNED_OFFSET in @fmode but it
	 * was never applied to the opened file; without this the field is
	 * dead and offsets are still treated as signed.
	 */
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

/* top-level fops for the chrdev major: open() re-routes to the per-minor fops */
static const struct file_operations memory_fops = {
	.open = ceta_memory_open,
};

/* class->devnode callback: report the optional per-device node mode from
 * ceta_devlist; the node name is left at the default (NULL return). */
static char *ceta_mem_devnode(struct device *dev, umode_t *mode)
{
	const struct ceta_memdev *memdev = &ceta_devlist[MINOR(dev->devt)];

	if (mode && memdev->mode)
		*mode = memdev->mode;

	return NULL;
}

/* NOTE(review): appears unused in this file — confirm before removing */
#define HPAGE_RESV_UNMAPPED (1UL << 1)

#ifdef CONFIG_ARM
/*
 * cetatlb_pte_alloc (ARM): return the page-table slot that will hold a huge
 * entry for @addr of size @sz, allocating intermediate tables as needed.
 *
 * LPAE: a PGDIR_SIZE request maps at the pgd level, otherwise a pmd is
 * allocated.  Non-LPAE: the pmd already exists inside the pgd page, so the
 * slot is simply located.  Returns NULL on allocation failure.
 */
pte_t *cetatlb_pte_alloc(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
#ifndef CONFIG_ARM_LPAE
	pmd_t *pmd;
#endif
	pte_t *hpte = NULL;

	pgd = pgd_offset(mm, addr);
#ifdef CONFIG_ARM_LPAE
	if (sz == PGDIR_SIZE)
		return pgd;
	else
		hpte = (pte_t *)pmd_alloc(mm, (pud_t *)pgd, addr);
#else
	pmd = pmd_offset((pud_t *)pgd, addr);
	hpte = (pte_t *)pmd;
#endif
	return hpte;
}
#endif

#ifdef CONFIG_ARM64
/*
 * cetatlb_pte_alloc (ARM64): return the page-table slot that will hold a
 * huge entry for @addr of size @sz, allocating intermediate tables as
 * needed.
 *
 * With 4+ page-table levels a PUD_SIZE request maps at the pud level and
 * anything else at the pmd level; with 3 levels a PGDIR_SIZE request maps
 * at the pgd level.  Returns NULL on allocation failure.
 */
pte_t *cetatlb_pte_alloc(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long addr, unsigned long sz)
{
#if CONFIG_PGTABLE_LEVELS > 3
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE)
			pte = (pte_t *)pud;
		else
			pte = (pte_t *)pmd_alloc(mm, pud, addr);
	}
	return pte;
#else
	pgd_t *pgd;
	pte_t *hpte = NULL;

	pgd = pgd_offset(mm, addr);
	if (sz == PGDIR_SIZE)
		return (pte_t *)pgd;
	else
		hpte = (pte_t *)pmd_alloc(mm, (pud_t *)pgd, addr);
	return hpte;
#endif
}
#endif


#ifdef CONFIG_X86_64
/*
 * cetatlb_pte_alloc (x86_64): return the page-table slot that will hold a
 * huge entry for @addr of size @sz (PUD_SIZE or PMD_SIZE only), allocating
 * intermediate tables as needed.
 *
 * For a PMD-sized request an attempt is first made to share the pmd page
 * with another mapping of the same file (see cetatlb_pmd_share()).
 * Returns NULL on allocation failure.
 */
pte_t *cetatlb_pte_alloc(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			/* try to reuse another vma's pmd page first */
			if (pud_none(*pud))
				cetatlb_pmd_share(vma, mm, addr, pud);
			pte = (pte_t *) pmd_alloc(mm, pud, addr);
		}
	}
	/* the slot must be empty or already a huge entry — never a table */
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

/*
 * cetatlb_pmd_share - try to share a pmd page with another vma mapping the
 * same file range (x86_64 counterpart of hugetlb pmd sharing).
 *
 * Scans all other vmas mapping the same file offset; if one already has a
 * populated huge PTE at the shareable address, its pmd page is referenced
 * and installed into @pud.  If a race populated @pud meanwhile, the extra
 * reference is dropped instead.  Failure to share is not an error — the
 * caller simply allocates its own pmd.
 *
 * Locking: takes the file's i_mmap rwsem (write) and mm->page_table_lock.
 */
static void cetatlb_pmd_share(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	struct vm_area_struct *svma = NULL;
	unsigned long saddr;
	pte_t *spte = NULL;
	unsigned long hugesize = cetatlb_flags_to_size(vma->rtos_vm_flags);

	if (!rtlb_vma_shareable(vma, addr))
		return;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = rtlb_page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr, hugesize);
			if (spte) {
				/* pin the donor pmd page before installing it */
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((uintptr_t)spte & PAGE_MASK));
	else
		/* lost the race: someone else populated the pud — drop our ref */
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	i_mmap_unlock_write(mapping);
}
#endif


/*
 * __unmap_cetatlbpage_range - clear every huge PTE of @vma in [start, end).
 *
 * Shared pmds are unshared instead of cleared.  @ref_page is currently
 * unused (kept for interface parity with mainline hugetlb unmapping).
 * Caller must hold the file's i_mmap rwsem (see unmap_cetatlbpage_range()).
 */
static void __unmap_cetatlbpage_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep = NULL;
	/* right now vma->rtos_vm_flags is 0x00000002 or 0x00000008 */
	unsigned long sz = cetatlb_flags_to_size(vma->rtos_vm_flags);
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, end);

	mmu_notifier_invalidate_range_start(&range);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep)
			continue;

		/* a shared pmd is detached, not cleared entry-by-entry */
		if (huge_pmd_unshare(mm, vma, &address, ptep))
			continue;

		huge_ptep_get_and_clear(mm, address, ptep);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	/* just used by KVM to notify mmu */
	mmu_notifier_invalidate_range_end(&range);
}

/* Locked wrapper around __unmap_cetatlbpage_range(): serialises against
 * other mappers of the same file via the i_mmap rwsem. */
void unmap_cetatlbpage_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, struct page *ref_page)
{
	struct address_space *mapping = vma->vm_file->f_mapping;

	i_mmap_lock_write(mapping);
	__unmap_cetatlbpage_range(vma, start, end, ref_page);
	i_mmap_unlock_write(mapping);
}

/*
 * cetatlb_dev_init - module init: register the "cetatlbmem" chrdev major
 * and create one device node per ceta_devlist entry (minors start at 1).
 *
 * Returns 0 on success or a negative errno; on failure every device that
 * was already created is destroyed, the class removed and the chrdev
 * unregistered.
 */
static int cetatlb_dev_init(void)
{
	unsigned int minor;
	unsigned int minor_destroy;
	struct device *class_dev = NULL;
	struct class *ceta_mem_class = NULL;
	int cetatlb_major;
	int ret = 0;

	cetatlb_major = register_chrdev(0, "cetatlbmem", &memory_fops);
	if (cetatlb_major < 0) {
		pr_err("unable to get major %d for memory devs\n", cetatlb_major);
		/* fix: propagate the real errno instead of a bare -1 */
		return cetatlb_major;
	}

	ceta_mem_class = class_create(THIS_MODULE, "cetatlbmem");
	if (IS_ERR(ceta_mem_class)) {
		ret = PTR_ERR(ceta_mem_class);
		goto error_class_create;
	}

	ceta_mem_class->devnode = ceta_mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(ceta_devlist); minor++) {
		if (!ceta_devlist[minor].name)
			continue;

		class_dev = device_create(ceta_mem_class,
				NULL, MKDEV((unsigned int)cetatlb_major, minor),
				NULL, ceta_devlist[minor].name);
		if (unlikely(IS_ERR(class_dev))) {
			ret = PTR_ERR(class_dev);
			goto error_device_create;
		}
	}
	return 0;

error_device_create:
	/* unwind only the devices created before the failure */
	for (minor_destroy = 1; minor_destroy < minor; minor_destroy++) {
		if (!ceta_devlist[minor_destroy].name)
			continue;
		device_destroy(ceta_mem_class, MKDEV((unsigned int)cetatlb_major, minor_destroy));
	}
	class_destroy(ceta_mem_class);
error_class_create:
	unregister_chrdev(cetatlb_major, "cetatlbmem");
	return ret;
}

module_init(cetatlb_dev_init);
