// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <linux/kvm_host.h>
#include <linux/pgtable.h>

#include "hotupgrade_vpmem.h"

/*
 * Resolve a PFN for a remapped (VM_IO/VM_PFNMAP) vPMEM mapping from an
 * already-looked-up PTE or PMD entry.
 *
 * Exactly one of @ptep / @pmdp is expected to be non-NULL; if both are NULL
 * the lookup failed and *@r is set to -EFAULT with *@pfn = KVM_PFN_ERR_FAULT.
 *
 * Returns true when a write fault hit a read-only entry (caller should bail
 * out with *@pfn = KVM_PFN_ERR_RO_FAULT), false otherwise.
 */
bool hva_to_pfn_remapped_vpmem(unsigned long addr, bool *write_fault, bool *writable,
				kvm_pfn_t *pfn, pte_t *ptep, pmd_t *pmdp, int *r)
{
	bool entry_writable;

	if (!ptep && !pmdp) {
		/* Neither level produced an entry: treat as a plain fault. */
		*r = -EFAULT;
		*pfn = KVM_PFN_ERR_FAULT;
		return false;
	}

	entry_writable = ptep ? pte_write(*ptep) : pmd_write(*pmdp);

	/* A write fault against a read-only entry cannot be satisfied here. */
	if (*write_fault && !entry_writable) {
		*pfn = KVM_PFN_ERR_RO_FAULT;
		return true;
	}

	if (writable)
		*writable = entry_writable;

	if (ptep)
		*pfn = pte_pfn(*ptep);
	else
		/* PMD maps a huge range: add the page offset within the PMD. */
		*pfn = pmd_pfn(*pmdp) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

	return false;
}

/*
 * Translate a host virtual address into a PFN for vPMEM hot-upgrade.
 *
 * Only VM_IO/VM_PFNMAP VMAs are resolved (via hva_to_pfn_remapped, retried
 * on -EAGAIN); any other mapping — or no mapping at all — yields
 * KVM_PFN_ERR_FAULT. For non-remapped VMAs that could be faulted in,
 * *@async is set so the caller can retry asynchronously.
 *
 * @atomic and @async are mutually exclusive.
 */
kvm_pfn_t vpmem_hva_to_pfn(unsigned long addr, bool atomic, bool *async,
						   bool write_fault, bool *writable)
{
	kvm_pfn_t pfn = 0;
	struct vm_area_struct *vma;
	int r;

	/* Caller must not request both atomic and asynchronous resolution. */
	BUG_ON(atomic && async);

	mmap_read_lock(current->mm);
	for (;;) {
		vma = find_vma_intersection(current->mm, addr, addr + 1);

		if (!vma) {
			pfn = KVM_PFN_ERR_FAULT;
			break;
		}

		if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
			/* Normal mapping: signal the caller to fault it in async. */
			if (async && vma_is_valid(vma, write_fault))
				*async = true;
			pfn = KVM_PFN_ERR_FAULT;
			break;
		}

		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
		if (r == -EAGAIN)
			continue;	/* Mapping changed under us: look it up again. */
		if (r < 0)
			pfn = KVM_PFN_ERR_FAULT;
		break;
	}
	mmap_read_unlock(current->mm);

	return pfn;
}
