/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022.
 * Description: support for RTOS_MEM_HUGEPAGE_SHARED_LIB
 * Author: liusongtang
 * Create: 2022-08-26
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/rtos_mm_types.h>
#include <internal/rtos_hugepage_sharedlib.h>
#include <uapi/asm-generic/mman-common.h>

/*
 * Return the vma covering @addr if it is eligible for splitting:
 * a private (non-VM_SHARED), file-backed hugetlb mapping that was
 * tagged with RTOS_VM_HUGEPAGE_SHARED_LIB at mmap time.
 * Returns NULL when no such vma exists at @addr.
 * Caller must hold mmap_lock (find_vma requirement).
 */
static struct vm_area_struct *find_vma_need_split(struct mm_struct *mm,
						  unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma)
		return NULL;
	if (!(vma->rtos_vm_flags & RTOS_VM_HUGEPAGE_SHARED_LIB))
		return NULL;
	if (!is_vm_hugetlb_page(vma) || !vma->vm_file)
		return NULL;
	if (vma->vm_flags & VM_SHARED)
		return NULL;

	return vma;
}

/*
 * Retry a failed hugetlb fault on an RTOS shared-lib mapping by splitting
 * the huge-page vma into a normal vma and faulting the range in page by
 * page with CoW semantics.
 *
 * @mm:    faulting task's mm (mmap_lock held for read on entry and exit)
 * @addr:  faulting address
 * @fault: result of the original hugetlb fault; returned unchanged when
 *         this path does not apply or cannot recover
 * @flags: FAULT_FLAG_* flags from do_page_fault
 * @regs:  fault-time registers, forwarded to handle_mm_fault
 *
 * Returns the final vm_fault_t for do_page_fault to act on.
 */
vm_fault_t retry_page_fault(struct mm_struct *mm, unsigned long addr,
			    vm_fault_t fault, unsigned int flags,
			    struct pt_regs *regs)
{
	int error;
	unsigned long start;
	unsigned long end;
	unsigned long cur_addr;
	struct hstate *h;
	vm_fault_t ret = 0;
	struct vm_area_struct *vma = find_vma_need_split(mm, addr);

	if (!vma)
		return fault;

	h = hstate_vma(vma);
	start = addr & huge_page_mask(h);
	end = start + huge_page_size(h);

	/*
	 * hugetlb_fault might have partially filled the page table;
	 * use zap_page_range() to clear the original hugetlb page table
	 * entries before converting the range to normal pages.
	 */
	zap_page_range(vma, start, huge_page_size(h));

	/*
	 * mmap_read_lock has already been taken in do_page_fault,
	 * and hugetlb_fault never return VM_FAULT_RETRY, so mmap_lock will not be released.
	 * Therefore, we have to release mmap_lock here.
	 * Any behaviour which results in changes to the vma->vm_flags needs to
	 * take mmap_lock for writing.
	 */
	mmap_read_unlock(mm);
	mmap_write_lock(mm);

	/*
	 * The vma may have been unmapped or replaced by another thread while
	 * mmap_lock was dropped above; the old pointer must not be trusted.
	 * Re-validate before touching vm_flags.
	 */
	vma = find_vma_need_split(mm, addr);
	if (!vma) {
		mmap_write_unlock(mm);
		mmap_read_lock(mm);
		return fault;
	}

	/* finally, use mprotect_fixup to split vma and update vm stat. */
	error = mprotect_fixup(vma, &vma->vm_prev, start, end,
			       vma->vm_flags & ~VM_HUGETLB);
	mmap_write_unlock(mm);

	/* mmap_lock will be release in do_page_fault */
	mmap_read_lock(mm);
	/* no need to restore vma, because task will exit when do_page_fault return VM_FAULT_SIGBUS */
	if (error)
		return fault;

	/*
	 * The write lock was dropped before the read lock was taken, so the
	 * vma pointer is stale once more; look it up again under the read
	 * lock before faulting the pages in.
	 */
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return fault;

	/*
	 * handle all pages fault using cow to avoid do_read_fault
	 * do_read_fault will add page mapcount, but the page is a tail page
	 * in a huge page. This will cause huge page mapcount conflict.
	 * In this case, handle_mm_fault will not return VM_FAULT_RETRY,
	 * because read page from huge page will not do I/O,
	 * so we don't support retry handle_mm_fault.
	 */
	flags &= ~FAULT_FLAG_ALLOW_RETRY;
	for (cur_addr = start; cur_addr < end; cur_addr += PAGE_SIZE) {
		ret = handle_mm_fault(vma, cur_addr, flags, regs);
		if (ret & VM_FAULT_ERROR) {
			pr_err("%s: fault 0x%lx is triggered by %s, error=0x%x\n",
			       __func__, cur_addr, current->comm, ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Post-mmap hook: mark the freshly mapped vma as an RTOS shared-lib
 * hugepage mapping when the caller asked for one.
 *
 * @mm:   target mm (caller must hold mmap_lock, a find_vma requirement)
 * @flag: the mmap flags the mapping was created with
 * @ret:  the mmap return value — the mapped address on success, or an
 *        error value (IS_ERR_VALUE) on failure
 *
 * Returns false only when the mapping should exist but no vma is found
 * at @ret; true otherwise (including the not-applicable cases).
 */
bool check_shared_hugepage(struct mm_struct *mm, unsigned long flag, unsigned long ret)
{
	struct vm_area_struct *vma;
	struct rtos_mm_struct *rtos_mm;

	/*
	 * Do not look up a vma unless mmap actually succeeded and the RTOS
	 * shared-lib flag was requested — @ret is an error code, not an
	 * address, when IS_ERR_VALUE(ret) holds.
	 */
	if (IS_ERR_VALUE(ret) || !(flag & MAP_RTOS_HUGEPAGE_SHARED_LIB))
		return true;

	vma = find_vma(mm, ret);
	if (!vma)
		return false;

	vma->rtos_vm_flags |= RTOS_VM_HUGEPAGE_SHARED_LIB;
	rtos_mm = mm_to_rtos_mm(mm);
	rtos_mm->hugepage_shared = RTOS_MM_HUAGEPAGE_SHARED;
	return true;
}

/*
 * Fault handler for private RTOS shared-lib hugepage vmas: locate the
 * backing huge page in the file's page cache, lock it, and hand back the
 * tail page that covers the faulting address.
 *
 * Returns VM_FAULT_LOCKED with vmf->page set on success, VM_FAULT_SIGBUS
 * when the page cannot be found, and BUGs on a vma that should never have
 * been routed here.
 */
vm_fault_t rtos_hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping;
	struct page *head;
	struct hstate *h;
	unsigned long haddr;
	pgoff_t hindex;
	pgoff_t subpage;

	/* Only private RTOS shared-lib hugepage vmas may reach this handler. */
	if (!(vma->rtos_vm_flags & RTOS_VM_HUGEPAGE_SHARED_LIB) ||
	    (vma->vm_flags & VM_SHARED)) {
		BUG();
		return 0;
	}

	h = hstate_vma(vma);
	haddr = vmf->address & huge_page_mask(h);
	/* see vma_hugecache_offset: page-cache index of the huge page */
	hindex = ((haddr - vma->vm_start) >> huge_page_shift(h)) +
		 (vma->vm_pgoff >> huge_page_order(h));

	mapping = vma->vm_file->f_mapping;
	head = find_lock_page(mapping, hindex);
	if (!head) {
		pr_err("[%s] try to find and lock page failed!\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* Point vmf->page at the subpage for the exact faulting address. */
	subpage = (vmf->address - haddr) >> PAGE_SHIFT;
	vmf->page = head + subpage;

	return VM_FAULT_LOCKED;
}
