/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022.
 * Description: add stage2 memory managemet
 * Author: lilinjie8 <lilinjie8@huawei.com>
 * Create: 2022-01-24
 */

#include "../../virt/hpvisor/dfx.h"
#include "mmu.h"

#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_inline.h>

#include <asm/kvm_mmu.h>
#include <asm/pgtable.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#define PTE_ATTRINDX_SHIFT 2
#define HPVISOR_SLOT_ID_START (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

/*
 * Result of a stage-1 walk for one mapping granule; consumed when
 * installing the corresponding stage-2 mapping.
 */
struct hpvisor_page_info {
	kvm_pfn_t pfn;			/* physical frame backing the hva */
	size_t page_size;		/* granule size at the level the stage-1 walk ended (PAGE/PMD/PUD/...) */
	enum kvm_pgtable_prot prot;	/* stage-2 protection derived from the stage-1 descriptor */
};

/*
 * Resolve the mm_struct owning @addr: TTBR0 (user) addresses belong to the
 * current task's mm, TTBR1 (kernel) addresses to init_mm.  Logs an error and
 * returns NULL for addresses in neither range.
 */
static struct mm_struct *get_current_mm(unsigned long addr)
{
	struct mm_struct *mm;

	if (is_ttbr0_addr(addr))
		mm = current->mm;
	else
		mm = is_ttbr1_addr(addr) ? &init_mm : NULL;

	if (mm == NULL)
		hpvisor_err("address [%016lx] mm is NULL\n", addr);
	return mm;
}

/*
 * Descend one level: return the PGD slot for @addr, or NULL when the walk
 * ends here, in which case *@pte receives the raw descriptor and *@sz the
 * granule size at this level.
 */
static pgd_t *get_pgd_level(struct mm_struct *mm, u64 addr, pte_t *pte, size_t *sz)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pgd_t entry = READ_ONCE(*pgdp);

	if (!pgd_none(entry) && !pgd_bad(entry))
		return pgdp;

	*pte = __pte(pgd_val(entry));
	*sz = PGDIR_SIZE;
	return NULL;
}

/*
 * Descend one level: return the P4D slot for @addr, or NULL when the walk
 * ends here (raw descriptor in *@pte, granule size in *@sz).
 */
static p4d_t *get_p4d_level(pgd_t *pgd, u64 addr, pte_t *pte, size_t *sz)
{
	p4d_t *p4dp = p4d_offset(pgd, addr);
	p4d_t entry = READ_ONCE(*p4dp);

	if (!p4d_none(entry) && !p4d_bad(entry))
		return p4dp;

	*pte = __pte(p4d_val(entry));
	*sz = P4D_SIZE;
	return NULL;
}

/*
 * Descend one level: return the PUD slot for @addr, or NULL when the walk
 * ends here (raw descriptor in *@pte, granule size in *@sz).
 */
static pud_t *get_pud_level(p4d_t *p4d, u64 addr, pte_t *pte, size_t *sz)
{
	pud_t *pudp = pud_offset(p4d, addr);
	pud_t entry = READ_ONCE(*pudp);

	if (!pud_none(entry) && !pud_bad(entry))
		return pudp;

	*pte = __pte(pud_val(entry));
	*sz = PUD_SIZE;
	return NULL;
}

/*
 * Descend one level: return the PMD slot for @addr, or NULL when the walk
 * ends here (raw descriptor in *@pte, granule size in *@sz).
 */
static pmd_t *get_pmd_level(pud_t *pud, u64 addr, pte_t *pte, size_t *sz)
{
	pmd_t *pmdp = pmd_offset(pud, addr);
	pmd_t entry = READ_ONCE(*pmdp);

	if (!pmd_none(entry) && !pmd_bad(entry))
		return pmdp;

	*pte = __pte(pmd_val(entry));
	*sz = PMD_SIZE;
	return NULL;
}

/*
 * Walk the current stage-1 page tables for @addr.  On success, *@pte holds
 * the leaf descriptor and *@page_size (if non-NULL) the mapping granule at
 * the level where the walk terminated (PGDIR/P4D/PUD/PMD/PAGE size).
 *
 * Returns 0 on success, -EFAULT when no mm owns @addr or the resolved
 * descriptor does not have PTE_VALID set.
 *
 * Fixes vs. previous revision: the PTE-level local was named "pte" and
 * shadowed the @pte output parameter (renamed to ptep); corrected the
 * "invalied" typo in the error message.
 */
static int get_stage1_pte_and_page_size(u64 addr, pte_t *pte, size_t *page_size)
{
	size_t sz;
	pte_t pte_val = {0};
	struct mm_struct *mm = get_current_mm(addr);

	if (!mm)
		return -EFAULT;
	do {
		pgd_t *pgd = NULL;
		p4d_t *p4d = NULL;
		pud_t *pud = NULL;
		pmd_t *pmd = NULL;
		pte_t *ptep = NULL;

		pgd = get_pgd_level(mm, addr, &pte_val, &sz);
		if (!pgd)
			break;
		p4d = get_p4d_level(pgd, addr, &pte_val, &sz);
		if (!p4d)
			break;
		pud = get_pud_level(p4d, addr, &pte_val, &sz);
		if (!pud)
			break;
		pmd = get_pmd_level(pud, addr, &pte_val, &sz);
		if (!pmd)
			break;

		ptep = pte_offset_map(pmd, addr);
		pte_val = *ptep;
		sz = PAGE_SIZE;
		pte_unmap(ptep);
	} while (0);

	if (!(pte_val(pte_val) & PTE_VALID)) {
		hpvisor_err("hva:0x%llx get pte:0x%llx invalid\n", addr, pte_val(pte_val));
		return -EFAULT;
	}
	*pte = pte_val;
	if (page_size)
		*page_size = sz;
	return 0;
}

static void try_flush_cache(const struct hpvisor_page_info *p_info)
{
	/* device pfn not need flush dcache */
	if (!(p_info->prot & KVM_PGTABLE_PROT_DEVICE))
		__clean_dcache_guest_page(p_info->pfn, p_info->page_size);
	/* only executable pfn need flush icache */
	if (p_info->prot & KVM_PGTABLE_PROT_X)
		__invalidate_icache_guest_page(p_info->pfn, p_info->page_size);
}

#ifdef CONFIG_RTOS_HAL_KVM_MEM_EXTENSION
/*
 * Variant of try_flush_cache() for reserved-memory pages, addressed by
 * their host virtual address instead of pfn (uses the __reserved_* cache
 * maintenance helpers).
 */
static void cached_pfn_try_flush_cache(const struct hpvisor_page_info *p_info, unsigned long hva)
{
	/* device pfn not need flush dcache */
	if (!(p_info->prot & KVM_PGTABLE_PROT_DEVICE))
		__reserved_clean_dcache_guest_page(hva, p_info->page_size);
	/* only executable pfn need flush icache */
	if (p_info->prot & KVM_PGTABLE_PROT_X)
		__reserved_invalidate_icache_guest_page(hva, p_info->page_size);
}
#endif

/*
 * Scan memslot ids in [start, end) and return the first id that has no
 * memslot installed, or -ENOMEM when every id in the range is occupied.
 */
static int find_unused_slot_range(struct kvm *kvm, u16 start, u16 end)
{
	int id;

	for (id = start; id < end; id++) {
		if (id_to_memslot(kvm_memslots(kvm), id) == NULL)
			return id;
	}
	return -ENOMEM;
}

/*
 * Install a KVM user memory region describing sm->hva -> sm->gpa of
 * sm->size bytes, picking a free slot id in the hpvisor-private range.
 * If a memslot already covers sm->gpa, succeed only when its flags match
 * (HPVISOR_MEM_SETUP_NOW excluded from the comparison).
 * Called with kvm->slots_lock NOT held; takes it internally.
 * return 0, the operation is successful. return negative, the operation is failed.
 */
static int set_memory_region(struct kvm *kvm, const struct hpvisor_s2_map *sm)
{
	int ret;
	struct kvm_userspace_memory_region region;
	struct kvm_memory_slot *memslot = NULL;
	/* next-fit hint for slot id allocation; protected by slots_lock */
	static u16 id_start_cache = HPVISOR_SLOT_ID_START;

	mutex_lock(&kvm->slots_lock);
	memslot = gfn_to_memslot(kvm, gpa_to_gfn(sm->gpa));
	if (memslot) {
		/* region already registered: flags (minus SETUP_NOW) must agree */
		if ((memslot->flags & ~HPVISOR_MEM_SETUP_NOW) != (sm->flags & ~HPVISOR_MEM_SETUP_NOW)) {
			hpvisor_err("memslot flags:0x%x is not equal to flags:0x%x\n", memslot->flags, sm->flags);
			ret = -EINVAL;
		} else {
			ret = 0;
		}
		goto unlock;
	}

	/* search from the hint first, then wrap around to the range start */
	ret = find_unused_slot_range(kvm, id_start_cache, KVM_MEM_SLOTS_NUM);
	if (ret < 0)
		ret = find_unused_slot_range(kvm, HPVISOR_SLOT_ID_START, id_start_cache);
	if (ret < 0)
		goto unlock;
	id_start_cache = ret + 1;

	region.slot = ret;
	region.flags = sm->flags;
	region.guest_phys_addr = sm->gpa;
	region.memory_size = sm->size;
	region.userspace_addr = sm->hva;
	ret = __kvm_set_memory_region(kvm, &region);
unlock:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}

/*
 * Derive the stage-2 mapping attributes for @hva by inspecting its stage-1
 * descriptor.  Fills @p_info with pfn, granule size and stage-2 protection.
 * @flags is the memslot flags (KVM_MEM_READONLY forces read-only);
 * @vm_flags is the backing VMA's vm_flags (VM_WRITE allows write for
 * user-space addresses even if the stage-1 pte is currently read-only,
 * e.g. clean/CoW pages).
 * Returns 0 on success, -EFAULT when the stage-1 walk fails.
 */
static int hpvisor_get_page_info(hva_t hva, struct hpvisor_page_info *p_info, u32 flags, unsigned long vm_flags)
{
	pte_t pte;
	size_t sz;
	int attr_index;
	enum kvm_pgtable_prot pte_prot = KVM_PGTABLE_PROT_R;

	if (get_stage1_pte_and_page_size(hva, &pte, &sz))
		return -EFAULT;

	/* writable if stage-1 pte is writable, or the user VMA permits writes */
	if (!(pte_val(pte) & PTE_RDONLY) || (is_ttbr0_addr(hva) && (vm_flags & VM_WRITE)))
		pte_prot |= KVM_PGTABLE_PROT_W;
	/* memslot-level read-only overrides everything above */
	if (flags & KVM_MEM_READONLY)
		pte_prot &= ~KVM_PGTABLE_PROT_W;
	if (!(pte_val(pte) & PTE_UXN))
		pte_prot |= KVM_PGTABLE_PROT_X;
	/* map stage-1 MAIR attribute index to a device/normal stage-2 attribute */
	attr_index = (pte_val(pte) & PTE_ATTRINDX_MASK) >> PTE_ATTRINDX_SHIFT;
	if (attr_index >= MT_DEVICE_nGnRnE && attr_index <= MT_DEVICE_GRE)
		pte_prot |= KVM_PGTABLE_PROT_DEVICE;

	p_info->page_size = sz;
	p_info->pfn = pte_pfn(pte);
	p_info->prot = pte_prot;
	return 0;
}

/*
 * Map the range [sm->gpa, sm->gpa + sm->size) into the guest's stage-2
 * page tables, one stage-1 granule at a time.  For each granule: top up the
 * vcpu's MMU page cache, resolve the backing pfn/prot from stage-1,
 * re-resolve under mmu_lock until the mmu-notifier sequence is stable,
 * perform cache maintenance, then install the stage-2 mapping.
 * Called with vcpu->kvm->mm mmap lock NOT held; takes it for reading.
 * Returns 0 on success (including an empty range) or a negative errno.
 *
 * Fix vs. previous revision: ret was uninitialized and returned unchanged
 * when sm->size == 0 (loop body never executed).
 */
static int hpvisor_pgtable_stage2_map(struct kvm_vcpu *vcpu, const struct hpvisor_s2_map *sm,
	unsigned long vm_flags)
{
	int ret = 0;	/* sm->size == 0 must report success, not garbage */
	hva_t hva;
	gpa_t gpa;
	int dfx_flag;
	unsigned long mmu_seq;
	struct hpvisor_page_info page_info = { 0, 0, KVM_PGTABLE_PROT_R };
	struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_page_cache;

	mmap_read_lock(vcpu->kvm->mm);
	for (gpa = sm->gpa, hva = sm->hva; gpa < sm->gpa + sm->size; gpa += page_info.page_size, hva += page_info.page_size) {
		ret = kvm_mmu_topup_memory_cache(cache, kvm_mmu_cache_min_pages(vcpu->kvm));
		if (ret != 0) {
			hpvisor_err("kvm_mmu_topup_memory_cache failed, ret:%d, gpa:0x%llx\n", ret, gpa);
			break;
		}
		mmu_seq = vcpu->kvm->mmu_notifier_seq;
		ret = hpvisor_get_page_info(hva, &page_info, sm->flags, vm_flags);
		if (ret != 0)
			break;
		dfx_flag = 0;
		spin_lock(&vcpu->kvm->mmu_lock);
		/* retry the stage-1 lookup until no notifier invalidation raced with it */
		while (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
			spin_unlock(&vcpu->kvm->mmu_lock);
			mmu_seq = vcpu->kvm->mmu_notifier_seq;
			ret = hpvisor_get_page_info(hva, &page_info, sm->flags, vm_flags);
			if (ret != 0) {
				mmap_read_unlock(vcpu->kvm->mm);
				return ret;
			}
			if (!dfx_flag) {
				/* log the retry only once per granule to avoid flooding */
				hpvisor_info("mmu notifier check failed, ret:%d, gpa:0x%llx hva:0x%lx pfn:0x%llx\n",
					ret, gpa, hva, page_info.pfn);
				dfx_flag = 1;
			}
			spin_lock(&vcpu->kvm->mmu_lock);
		}
		kvm_get_pfn(page_info.pfn);
		if (pfn_valid(page_info.pfn))
			try_flush_cache(&page_info);
#ifdef CONFIG_RTOS_HAL_KVM_MEM_EXTENSION
		if (sm->flags & KVM_RESERVED_MEM)
			cached_pfn_try_flush_cache(&page_info, hva);
#endif
		ret = kvm_pgtable_stage2_map(vcpu->kvm->arch.mmu.pgt, gpa,
			page_info.page_size, __pfn_to_phys(page_info.pfn), page_info.prot, cache);
		spin_unlock(&vcpu->kvm->mmu_lock);
		if (ret != 0) {
			kvm_release_pfn_clean(page_info.pfn);
			hpvisor_err("stage2 map failed ret:%d, gpa:0x%llx hva:0x%lx pfn:0x%llx\n",
				ret, gpa, hva, page_info.pfn);
			break;
		}
		kvm_set_pfn_accessed(page_info.pfn);
		/* kvm_pgtable_stage2_map() will get_page() again, here we need to release one. */
		kvm_release_pfn_clean(page_info.pfn);
		hpvisor_stage2_show_pte(vcpu->kvm->arch.mmu.pgt, gpa);
	}
	mmap_read_unlock(vcpu->kvm->mm);
	return ret;
}

/*
 * True when a VM_IO/VM_PFNMAP VMA is nonetheless mapped with Normal
 * (cacheable) memory attributes, i.e. a cached pfnmap region.
 * Fix: previous revision returned integer 0 from a bool function.
 */
static bool is_cached_pfnmap_memory(unsigned long vm_flags, pgprot_t vm_page_prot)
{
	if (!(vm_flags & (VM_IO | VM_PFNMAP)))
		return false;
	return (pgprot_val(vm_page_prot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
}

/*
 * True when @hva is backed by a reserved (non-pfn_valid) frame that is
 * mapped with Normal (cacheable) attributes.  Takes the current mm's mmap
 * lock for reading around the stage-1 walk.
 */
static bool is_cached_reserved_memory(unsigned long hva, pgprot_t vm_page_prot)
{
	pte_t pte;
	bool cached = false;

	mmap_read_lock(current->mm);
	if (!get_stage1_pte_and_page_size(hva, &pte, NULL))
		cached = !pfn_valid(pte_pfn(pte)) &&
			((pgprot_val(vm_page_prot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL));
	mmap_read_unlock(current->mm);
	return cached;
}

/*
 * Tear down the stage-2 mapping described by @sm.
 * - Kernel (TTBR1) hvas and uncached VM_IO/VM_PFNMAP VMAs have no memslot:
 *   just unmap the stage-2 range directly.
 * - Otherwise validate that the memslot covering sm->gpa matches sm's
 *   size and hva exactly, then delete the memslot (which unmaps stage-2
 *   as a side effect).
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int hpvisor_do_stage2_unmap(struct kvm *kvm, const struct hpvisor_s2_map *sm)
{
	hva_t hva;
	int ret = -EINVAL;
	gfn_t gfn = gpa_to_gfn(sm->gpa);
	struct kvm_memory_slot old = {0};
	struct kvm_memory_slot *memslot = NULL;
	struct kvm_userspace_memory_region mem = {0};
	struct vm_area_struct *vma = NULL;
	unsigned long vm_flags = VM_NONE;
	pgprot_t vm_page_prot = { 0 };

	/* kernel addresses never went through set_memory_region(): unmap directly */
	if (is_ttbr1_addr(sm->hva)) {
		hpvisor_unmap_stage2_range(kvm, sm->gpa, sm->size);
		return 0;
	}
	mmap_read_lock(current->mm);
	vma = find_vma_intersection(current->mm, sm->hva, sm->hva + 1);
	if (vma == NULL) {
		mmap_read_unlock(current->mm);
		hpvisor_err("vma not exist! hva:0x%llx, gpa:0x%llx, size:%x\n", sm->hva, sm->gpa, sm->size);
		return -EINVAL;
	}
	if (vma->vm_end - sm->hva < sm->size) {
		hpvisor_err("vma not match with hva and size! m_start is %lx, vm_end is %lx, hva is %llx, m_size:%u\n",
			vma->vm_start, vma->vm_end, sm->hva, sm->size);
		mmap_read_unlock(current->mm);
		return -EINVAL;
	}
	/* snapshot VMA attributes so the mmap lock can be dropped early */
	vm_flags = vma->vm_flags;
	vm_page_prot = vma->vm_page_prot;
	mmap_read_unlock(current->mm);

	/* uncached device/pfnmap regions were mapped without a memslot */
	if ((vm_flags & (VM_IO | VM_PFNMAP)) && !is_cached_pfnmap_memory(vm_flags, vm_page_prot)) {
		hpvisor_unmap_stage2_range(kvm, sm->gpa, sm->size);
		return 0;
	}

	mutex_lock(&kvm->slots_lock);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot) {
		hpvisor_err("get memslot failed for gfn:0x%llx\n", gfn);
		goto out;
	}
	/* refuse partial unmaps: the request must cover the whole memslot */
	if ((memslot->npages << PAGE_SHIFT) != sm->size) {
		hpvisor_err("memslot size:0x%lx is not equal sm->size:0x%x, hva:0x%llx, gpa:0x%llx\n",
			(memslot->npages << PAGE_SHIFT), sm->size, sm->hva, sm->gpa);
		goto out;
	}
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (hva != sm->hva) {
		hpvisor_err("find hva:0x%lx of gpa:0x%llx is not equal sm->hva:0x%llx\n",
			hva, sm->gpa, sm->hva);
		goto out;
	}

	old = *memslot;
	ret = hpvisor_delete_memslot(kvm, &mem, &old, memslot->as_id);
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}

/*
 * Entry point for a stage-2 map request.
 * For user (TTBR0) hvas: validate the backing VMA and register a memslot
 * (normal memory always; cached pfnmap memory only when the mem-extension
 * config is on).  Then, if HPVISOR_MEM_SETUP_NOW is set, populate the
 * stage-2 tables eagerly; otherwise mapping is deferred to fault time.
 * Kernel (TTBR1) hvas must be set up eagerly and get no memslot.
 * Returns 0 on success or a negative errno; on eager-map failure the
 * partially installed range is unmapped before returning.
 */
int hpvisor_arch_do_stage2_map(struct kvm *kvm, struct hpvisor_s2_map *sm)
{
	int ret = 0;
	struct kvm_vcpu *vcpu = NULL;
	struct vm_area_struct *vma = NULL;
	unsigned long vm_flags = VM_NONE;
	pgprot_t vm_page_prot = { 0 };

	if (kvm == NULL || sm == NULL) {
		hpvisor_err("kvm:0x%llx or sm:0x%llx is null\n", (u64)kvm, (u64)sm);
		return -EINVAL;
	}
	if (is_ttbr1_addr(sm->hva) && !(sm->flags & HPVISOR_MEM_SETUP_NOW)) {
		hpvisor_err("kernel hva:0x%llx must be setup now !\n", sm->hva);
		return -EINVAL;
	}
	/* eager mapping needs a vcpu for its MMU page cache */
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu == NULL && (sm->flags & HPVISOR_MEM_SETUP_NOW)) {
		hpvisor_err("at least one vcpu must be created for stage2 setup now !\n");
		return -EINVAL;
	}
	if (is_ttbr0_addr(sm->hva)) {
		mmap_read_lock(current->mm);
		vma = find_vma_intersection(current->mm, sm->hva, sm->hva + 1);
		if (unlikely(!vma)) {
			mmap_read_unlock(current->mm);
			hpvisor_err("Failed to find VMA for hva 0x%llx\n", sm->hva);
			return -EFAULT;
		}
		if (vma->vm_end - sm->hva < sm->size) {
			hpvisor_err("vma not match with hva and size! m_start is %lx, vm_end is %lx, hva is %llx\n",
				vma->vm_start, vma->vm_end, sm->hva);
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}
		/* snapshot VMA attributes so the mmap lock can be dropped early */
		vm_flags = vma->vm_flags;
		vm_page_prot = vma->vm_page_prot;
		mmap_read_unlock(current->mm);
		/* normal memory gets a memslot; device/pfnmap regions do not */
		if (!(vm_flags & (VM_IO | VM_PFNMAP)))
			ret = set_memory_region(kvm, sm);

#ifdef CONFIG_RTOS_HAL_KVM_MEM_EXTENSION
		/* cached pfnmap memory also gets a memslot (mutually exclusive
		 * with the branch above, which requires !(VM_IO | VM_PFNMAP)) */
		if (is_cached_pfnmap_memory(vm_flags, vm_page_prot)) {
			if ((sm->flags & HPVISOR_MEM_SETUP_NOW) && is_cached_reserved_memory(sm->hva, vm_page_prot))
				sm->flags |= KVM_RESERVED_MEM;
			ret = set_memory_region(kvm, sm);
		}
#endif
		if (ret < 0) {
			hpvisor_err("set memory region failed, ret:%d\n", ret);
			return ret;
		}
	}
	if (!(sm->flags & HPVISOR_MEM_SETUP_NOW))
		return 0;
	ret = hpvisor_pgtable_stage2_map(vcpu, sm, vm_flags);
	if (ret != 0)
		hpvisor_do_stage2_unmap(kvm, sm);
	return ret;
}

/*
 * ioctl handler: copy a struct hpvisor_s2_map from user space and perform
 * the stage-2 mapping it describes.  Returns 0 or a negative errno.
 *
 * Fix: the __user sparse annotation was written "(void *__user)", which
 * annotates the pointer object instead of the pointed-to address space;
 * the correct kernel form is "(void __user *)".
 */
int hpvisor_arch_ioctl_stage2_map(struct kvm *kvm, u64 arg)
{
	struct hpvisor_s2_map sm;

	/* copy_from_user() re-checks access; this early check only adds the log */
	if (unlikely(!access_ok((void __user *)arg, sizeof(struct hpvisor_s2_map)))) {
		hpvisor_err("arg:0x%llx error\n", arg);
		return -EFAULT;
	}
	if (copy_from_user(&sm, (void __user *)arg, sizeof(struct hpvisor_s2_map)))
		return -EFAULT;

	hpvisor_mmu_debug_common("hva:0x%llx gpa:0x%llx size:0x%x flags:%x\n", sm.hva, sm.gpa, sm.size, sm.flags);
	return hpvisor_arch_do_stage2_map(kvm, &sm);
}

/*
 * ioctl handler: copy a struct hpvisor_s2_map from user space and tear
 * down the stage-2 mapping it describes.  Returns 0 or a negative errno.
 *
 * Fix: "(void *__user)" corrected to the conventional "(void __user *)"
 * so sparse attributes the pointed-to address space, not the pointer.
 */
int hpvisor_arch_ioctl_stage2_unmap(struct kvm *kvm, u64 arg)
{
	struct hpvisor_s2_map sm;

	/* copy_from_user() re-checks access; this early check only adds the log */
	if (unlikely(!access_ok((void __user *)arg, sizeof(struct hpvisor_s2_map)))) {
		hpvisor_err("arg:0x%llx error\n", arg);
		return -EFAULT;
	}
	if (copy_from_user(&sm, (void __user *)arg, sizeof(struct hpvisor_s2_map)))
		return -EFAULT;

	hpvisor_mmu_debug_common("hva:0x%llx, gpa:0x%llx, size:0x%x flags:0x%x\n", sm.hva, sm.gpa, sm.size, sm.flags);
	return hpvisor_do_stage2_unmap(kvm, &sm);
}
