// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/file.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>

#include "hot_replace.h"
#ifdef CONFIG_EULEROS_VIRTUAL
#include "kvm_mm.h"
#endif

/*
 * Function-pointer types matching unexported kernel mm helpers. The
 * actual addresses are resolved at init time through kallsyms (see
 * kvm_hot_replace_init() below).
 */
typedef int (*anon_vma_clone_fn)(struct vm_area_struct *vma,
				 struct vm_area_struct *pvma);
typedef int (*copy_page_range_fn)(struct mm_struct *dst_mm,
				  struct mm_struct *src_mm,
				  struct vm_area_struct *vma);
typedef int (*split_vma_fn)(struct mm_struct *,
			    struct vm_area_struct *,
			    unsigned long, int);

/*
 * Resolved helper addresses; valid only after kvm_hot_replace_init()
 * returns 0. NOTE(review): copy_page_range_ptr is resolved but not
 * called anywhere in this file — presumably used elsewhere; confirm.
 */
static anon_vma_clone_fn anon_vma_clone_ptr ____cacheline_aligned;
static copy_page_range_fn copy_page_range_ptr;
static split_vma_fn split_vma_ptr;

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
/*
 * Count the transparent-huge-page pmd entries under @pud for the range
 * [start, end).
 *
 * @vm_mm and @vma are unused at this level; they are kept so all three
 * walker levels share a parallel signature.
 *
 * NOTE(review): the pmd_none()/pmd_bad() test sits after the huge-page
 * check and immediately before the loop's continuation expression, so
 * its "continue" has no effect on the count — effectively dead code.
 */
static unsigned long check_pmd_ok(struct mm_struct *vm_mm, struct vm_area_struct *vma,
			pud_t *pud, unsigned long start, unsigned long end)
{
	unsigned long next;
	pmd_t *pmd;
	unsigned long pmd_cnt = 0;

	pmd = pmd_offset(pud, start);
	do {
		next = pmd_addr_end(start, end);
		if (pmd_trans_huge(*pmd)) {
			/* a huge pmd must span exactly one HPAGE_PMD_SIZE chunk */
			VM_BUG_ON(next-start != HPAGE_PMD_SIZE);
			++pmd_cnt;
		}
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			continue;
	} while (pmd++, start = next, start != end);
	return pmd_cnt;
}

/*
 * Walk the pud entries under @pgd for [start, end) and return the total
 * number of transparent-huge-page pmds found beneath the present puds.
 *
 * NOTE(review): pud_cnt is maintained but never returned or read.
 */
static unsigned long check_pud_ok(struct mm_struct *vm_mm, struct vm_area_struct *vma,
			pgd_t *pgd, unsigned long start, unsigned long end)
{
	unsigned long ret = 0;
	unsigned long next;
	pud_t *pud;
	unsigned long pud_cnt = 0;

	pud = pud_offset(pgd, start);
	do {
		next = pud_addr_end(start, end);
		if (pud_none(*pud) || pud_bad(*pud))
			continue;
		ret += check_pmd_ok(vm_mm, vma, pud, start, next);
		++pud_cnt;
	} while (pud++, start = next, start != end);

	return ret;
}

/*
 * Top of the walker: count the transparent-huge-page pmds mapped by
 * @vm_mm in [start, end).
 *
 * NOTE(review): pgd_cnt is maintained but never returned or read.
 * NOTE(review): pud_offset() is fed a pgd_t * directly, which assumes
 * the p4d level is folded on this kernel — TODO confirm for 5-level
 * paging configurations.
 */
static unsigned long check_pgd_ok(struct mm_struct *vm_mm, struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	unsigned long ret = 0;
	unsigned long next;
	pgd_t *pgd;
	unsigned long pgd_cnt = 0;

	pgd = pgd_offset(vm_mm, start);
	do {
		next = pgd_addr_end(start, end);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			continue;
		ret += check_pud_ok(vm_mm, vma, pgd, start, next);
		++pgd_cnt;
	} while (pgd++, start = next, start != end);

	return ret;
}

/* extend dst's pmd_huge_pte list */
static struct list_head *extend_dst_pmd_huge_pte_list(struct mm_struct *dst,
			struct list_head *iter, struct list_head *head)
{
	struct list_head *first, *last, *dst_head;
	pgtable_t dst_pgtable;

	first = head->next;
	last = iter;
	iter = iter->next;

	if (!dst->pmd_huge_pte) {
		last->next = first;
		first->prev = last;
		dst->pmd_huge_pte = list_entry(first, struct page, lru);
	} else {
		dst_pgtable = dst->pmd_huge_pte;
		dst_head = &dst_pgtable->lru;
		/* insert first--...--last before dst_head */
		dst_head->prev->next = first;
		first->prev = dst_head->prev;
		dst_head->prev = last;
		last->next = dst_head;
	}
	return iter;
}

/*
 * Move the pgtable pages deposited for @src's huge pmds inside
 * [ram_start, ram_end) from src->pmd_huge_pte onto dst->pmd_huge_pte.
 *
 * Only compiled when !USE_SPLIT_PMD_PTLOCKS, i.e. when deposits live on
 * a single per-mm list guarded by mm->page_table_lock; both locks are
 * taken here, src before dst.
 *
 * NOTE(review): assumes the first ram_cnt entries of src's deposit list
 * are exactly the ones backing the RAM range — TODO confirm deposit
 * ordering guarantees this.
 *
 * Returns the number of entries moved (or the last counter value when
 * bailing out early).
 */
static unsigned long sync_huge_pmd_lru_list(struct mm_struct *src, struct mm_struct *dst,
			unsigned long ram_start, unsigned long ram_end)
{
	struct list_head old_head;
	struct list_head *iter, *head;
	struct vm_area_struct *vma;
	unsigned long cnt = 0;
	unsigned long no_ram_cnt = 0, ram_cnt = 0;

	spin_lock(&src->page_table_lock);
	spin_lock(&dst->page_table_lock);

	/* classify src's huge pmds: inside the RAM block vs outside it */
	for (vma = src->mmap; vma; vma = vma->vm_next) {
		cnt = check_pgd_ok(src, vma, vma->vm_start, vma->vm_end);
		if (cnt) {
			if (vma->vm_start >= ram_end || vma->vm_end < ram_start)
				no_ram_cnt += cnt;
			else
				ram_cnt += cnt;
		}
	}

	/* if no huge pmd tables in src ram */
	if (!ram_cnt || !src->pmd_huge_pte)
		goto done;

	/* give the (headless) deposit ring a temporary head to iterate from */
	INIT_LIST_HEAD(&old_head);
	list_add(&old_head, &(src->pmd_huge_pte)->lru);

	head = &old_head;
	cnt = 0;
	/* advance iter to the ram_cnt-th deposit entry */
	list_for_each(iter, head) {
		++cnt;
		if (cnt == ram_cnt)
			break;
	}

	/* list shorter than expected: leave everything untouched */
	if (cnt != ram_cnt)
		goto done;

	pr_info("sync_huge_pmd_lru: no_ram_cnt:%lu ram_cnt:%lu cnt:%lu\n",
		no_ram_cnt, ram_cnt, cnt);

	iter = extend_dst_pmd_huge_pte_list(dst, iter, head);

	/* make src's new pmd_huge_pte list */
	iter->prev = head;
	head->next = iter;
	if (!list_empty(head)) {
		/* drop the temporary head; remainder stays with src */
		list_del(&old_head);
		src->pmd_huge_pte = list_entry(iter, struct page, lru);
	} else {
		src->pmd_huge_pte = NULL;
	}

done:
	spin_unlock(&dst->page_table_lock);
	spin_unlock(&src->page_table_lock);
	return cnt;
}
#endif

/*
 * kprobe trick to recover the address of kallsyms_lookup_name()
 * (presumably not exported to modules on this kernel — the probe is
 * registered only long enough to read its resolved address).
 */
static struct kprobe kprobe_event = {
	.symbol_name = "kallsyms_lookup_name"
};
typedef unsigned long (*kallsyms_lookup_name_t)(const char *);

/*
 * Resolve the unexported mm helpers this module needs via the
 * kallsyms_lookup_name() kprobe trick.
 *
 * Return: 0 on success; a negative errno if the probe cannot be
 * registered or any required symbol is missing. On failure the
 * *_ptr function pointers must not be used.
 */
int kvm_hot_replace_init(void)
{
	kallsyms_lookup_name_t kallsyms_lookup_name;
	int rc;

	/*
	 * The original code ignored register_kprobe() failure; on failure
	 * kprobe_event.addr is not resolved and the indirect call below
	 * would dereference a NULL/stale pointer.
	 */
	rc = register_kprobe(&kprobe_event);
	if (rc < 0) {
		pr_err("kvm: register kprobe for kallsyms_lookup_name failed %d\n", rc);
		return rc;
	}
	kallsyms_lookup_name = (kallsyms_lookup_name_t)kprobe_event.addr;
	unregister_kprobe(&kprobe_event);

	if (!kallsyms_lookup_name) {
		pr_err("kvm: kallsyms_lookup_name address not resolved\n");
		return -ENOENT;
	}

	anon_vma_clone_ptr = (anon_vma_clone_fn)kallsyms_lookup_name("anon_vma_clone");
	if (!anon_vma_clone_ptr) {
		pr_err("kvm: look up anon_vma_clone failed\n");
		return -ENOENT;
	}

	copy_page_range_ptr = (copy_page_range_fn)kallsyms_lookup_name("copy_page_range");
	if (!copy_page_range_ptr) {
		pr_err("kvm: look up copy_page_range failed\n");
		return -ENOENT;
	}

	split_vma_ptr = (split_vma_fn)kallsyms_lookup_name("split_vma");
	if (!split_vma_ptr) {
		pr_err("kvm: look up split_vma failed\n");
		return -ENOENT;
	}

	pr_info("kvm: kernel hotreplace state %d\n", hotreplace);
	return 0;
}

/* must be called with src_mm->mmap_sem & dst_mm->mmap_sem held */
static struct hotreplace_chain *kvm_hot_replace_alloc_chain(struct mm_struct *src_mm,
			struct mm_struct *dst_mm, hotreplace_chain_fn func)
{
	struct hotreplace_chain *hrc;

	if (!src_mm->hrc) {
		hrc = kzalloc(sizeof(struct hotreplace_chain), GFP_KERNEL);
		if (!hrc) {
			pr_err("kvm: alloc hotreplace chain failed\n");
			return NULL;
		}
		spin_lock_init(&hrc->lock);
	} else {
		hrc = src_mm->hrc;
		pr_info("kvm: reuse source mm's chain obj\n");
	}

	hrc->mm_master = src_mm;
	hrc->mm_slaver = dst_mm;
	hrc->func = func;

	if (src_mm->hrc != hrc) {
		if (src_mm->hrc)
			pr_warn("kvm: covered mm hotreplace chain object\n");
		src_mm->hrc = hrc;
		atomic_inc(&hrc->refcount);
	}
	if (dst_mm->hrc != hrc) {
		if (dst_mm->hrc)
			pr_warn("kvm: covered mm hotreplace chain object\n");
		dst_mm->hrc = hrc;
		atomic_inc(&hrc->refcount);
	}

	return hrc;
}

/* Role of an mm_struct within its hotreplace_chain. */
enum hot_replace_role {
	HOT_REPLACE_ROLE_MASTER,	/* chain's mm_master (source side) */
	HOT_REPLACE_ROLE_SLAVER,	/* chain's mm_slaver (destination side) */
	HOT_REPLACE_ROLE_NR		/* mm not referenced by the chain */
};

static int kvm_hot_replace_get_mm_role(struct mm_struct *mm)
{
	if (mm->hrc->mm_master == mm)
		return HOT_REPLACE_ROLE_MASTER;

	if (mm->hrc->mm_slaver == mm)
		return HOT_REPLACE_ROLE_SLAVER;

	return HOT_REPLACE_ROLE_NR;
}

/*
 * This's safe because:
 *  1. only one could hold the chain's spinlock
 *  2. acct operation is atomic
 */
static void kvm_hot_replace_sync_stat(struct mm_struct *from, struct mm_struct *to)
{
	long val;
	int i;

	if (!from || !to)
		return;

#ifdef CONFIG_MMU
	val = atomic_long_read(&from->pgtables_bytes);
	atomic_long_add(val, &to->pgtables_bytes);
	atomic_long_sub(val, &from->pgtables_bytes);
#endif

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		val = atomic_long_read(&from->rss_stat.count[i]);
		if (val) {
			atomic_long_add(val, &to->rss_stat.count[i]);
			atomic_long_sub(val, &from->rss_stat.count[i]);
		}
	}
}

/*
 * Detach @curr_mm from its hotreplace chain: push its accounting (RSS and
 * pgtable bytes) over to the peer mm, clear its slot in the chain, drop
 * its chain reference and free the chain when the last reference goes.
 *
 * Safe to call with a NULL or chain-less mm (no-op).
 */
static void kvm_hot_replace_sync_rss_and_ptes(struct mm_struct *curr_mm)
{
	struct hotreplace_chain *chain;

	if (!curr_mm || !curr_mm->hrc)
		return;

	chain = curr_mm->hrc;
	spin_lock(&chain->lock);
	switch (kvm_hot_replace_get_mm_role(curr_mm)) {
	case HOT_REPLACE_ROLE_MASTER:
		/* master goes away: hand its accounting to the slaver */
		kvm_hot_replace_sync_stat(chain->mm_master,
					  chain->mm_slaver);
		chain->mm_master = NULL;
		break;
	case HOT_REPLACE_ROLE_SLAVER:
		/* slaver goes away: hand its accounting back to the master */
		kvm_hot_replace_sync_stat(chain->mm_slaver,
					  chain->mm_master);
		chain->mm_slaver = NULL;
		break;
	default:
		pr_err("kvm: unknown mm role.\n");
		break;
	}
	curr_mm->hrc = NULL;
	spin_unlock(&chain->lock);

	/* last mm detached frees the chain */
	if (atomic_dec_and_test(&chain->refcount))
		kfree(chain);
}


static struct kvm *kvm_get_source_kvm(struct kvm *kvm, int fd)
{
	int found = 0;
	struct fd f;
	struct kvm *src_kvm;
	struct kvm *iter;

	f = fdget(fd);
	if (!f.file)
		return NULL;

	src_kvm = f.file->private_data;
	if (IS_ERR_OR_NULL(src_kvm) || src_kvm == kvm)
		goto cleanup;

	mutex_lock(&kvm_lock);
	list_for_each_entry(iter, &vm_list, vm_list) {
		if (iter == src_kvm) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&kvm_lock);

	if (!found)
		goto cleanup;

	kvm_get_kvm(src_kvm);
	fdput(f);
	return src_kvm;

cleanup:
	fdput(f);
	return NULL;
}

/*
 * Share @src's top-level page tables with @dst over [start, end): every
 * present src pgd entry whose backing page is not already flagged
 * PageHotreplace is copied into dst's pgd, flagged, and its page gains a
 * reference so neither mm can free it while the other still maps it.
 *
 * Returns the number of pgd entries newly shared (a count, not an errno).
 * Caller holds both mmap write locks.
 */
static int kvm_hot_replace_share_vma_internal(struct mm_struct *dst, struct mm_struct *src,
			unsigned long start, unsigned long end)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	struct page *page;
	int rc = 0;

	dst_pgd = pgd_offset(dst, start);
	src_pgd = pgd_offset(src, start);
	do {
		next = pgd_addr_end(start, end);
		if (pgd_none(*src_pgd) || unlikely(pgd_bad(*src_pgd)))
			continue;
		page = hotreplace_pgd_page(*src_pgd);
		/* already shared by a previous call: skip */
		if (PageHotreplace(page))
			continue;
		/* work but ugly */
#ifdef CONFIG_X86
		clone_pgd_range(dst_pgd, src_pgd, 1);
#else
		*dst_pgd = *src_pgd;
#endif
		SetPageHotreplace(page);
		page_ref_inc(page);	/* pin: both mms now reference this table */
		++rc;
	} while (dst_pgd++, src_pgd++, start = next, start != end);

	return rc;
}

/*
 * Validate a share-ram-block request: hotreplace must be enabled in this
 * kernel and the source/destination guest addresses must be identical.
 * Returns 0 when acceptable, -EOPNOTSUPP or -EINVAL otherwise.
 */
static int kvm_hot_replace_share_ram_block_judge(struct kvm_share_ram_block *ram)
{
	if (!hotreplace) {
		pr_err("kvm: kernel not support hotreplace\n");
		return -EOPNOTSUPP;
	}

	if (ram->src_addr == ram->dst_addr)
		return 0;

	pr_err("kvm: invalid addr src:0x%llx != dst:0x%llx\n",
		ram->src_addr, ram->dst_addr);
	return -EINVAL;
}

/*
 * Walk [start, end) vma-by-vma, making @kvm's address space share the
 * source VM's memory: split the destination vma so it matches the source
 * vma's extent, clone the anon_vma chain, then share the page tables via
 * kvm_hot_replace_share_vma_internal().
 *
 * @dst/@src are the vmas covering @start in each mm on entry; both mmap
 * write locks must be held by the caller.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int kvm_hot_replace_share_ram_block_rc(unsigned long start, unsigned long end,
			struct vm_area_struct *dst, struct vm_area_struct *src,
			struct kvm *kvm, struct kvm *src_kvm)
{
	int rc = 0;

	while (start < end) {
		/* dest vma must fully cover the source vma */
		if (src->vm_end > dst->vm_end) {
			pr_err("kvm: invalid source vma 0x %lx(src_end:0x %lx > dst_end:0x %lx)\n",
				src->vm_start, src->vm_end, dst->vm_end);
			rc = -EINVAL;
			break;
		} else if (src->vm_end < dst->vm_end) {
			/* trim dest vma down to the source vma's boundary */
			rc = split_vma_ptr(kvm->mm, dst, src->vm_end, 0);
			pr_info("kvm: split dest vma 0x %lx 0x %lx -> 0x %lx %d\n",
				dst->vm_start, dst->vm_end, src->vm_end, rc);
			if (rc)
				break;
		}

		rc = anon_vma_clone_ptr(dst, src);
		pr_info("kvm: clone anon_vma 0x %lx-0x %lx 0x %lx-0x %lx 0x %lu %d\n",
			src->vm_start, src->vm_end, dst->vm_start, dst->vm_end,
			src->vm_flags, rc);
		if (rc)
			break;
		dst->anon_vma = src->anon_vma;
		if (dst->anon_vma)
			dst->anon_vma->num_active_vmas++;
		if (src->vm_flags & VM_NORESERVE)
			dst->vm_flags |= VM_NORESERVE;

		rc = kvm_hot_replace_share_vma_internal(kvm->mm, src_kvm->mm,
							src->vm_start, src->vm_end);
		pr_info("kvm: share_vma src_mm dst_mm: 0x %lx-0x %lx 0x %lx-0x %lx %d\n",
			src->vm_start, src->vm_end,
			dst->vm_start, dst->vm_end, rc);
		/* share_vma_internal returns a shared-entry count, not an errno */
		rc = 0;

		start = src->vm_end;
		if (start >= end)
			break;

		/* advance both cursors to the vmas covering the next address */
		src = find_vma(src_kvm->mm, start);
		dst = find_vma(kvm->mm, start);
		if (!src || !dst) {
			pr_err("kvm: invalid vma start addr 0x %lx\n", start);
			rc = -EINVAL;
			break;
		}
	}
	return rc;
}

/*
 * Entry point for the share-ram-block operation: make destination VM
 * @kvm share the RAM block described by @ram with the source VM
 * identified by ram->src_vm_fd.
 *
 * Validates the request, links the two mms with a hotreplace chain,
 * checks that both vmas line up with the requested block, shares the
 * page tables, and (when deposits are per-mm) migrates the huge-pmd
 * deposit list.
 *
 * Lock order is src mm before dst mm, matching sync_huge_pmd_lru_list().
 *
 * Returns 0 on success or a negative errno.
 */
int kvm_hot_replace_share_ram_block(struct kvm *kvm,
					struct kvm_share_ram_block *ram)
{
	int rc = 0;
	unsigned long ram_start, ram_end;
	struct kvm *src_kvm;
	struct vm_area_struct *dst, *src;

	rc = kvm_hot_replace_share_ram_block_judge(ram);
	if (rc)
		return rc;

	/* takes a reference on the source kvm; dropped before returning */
	src_kvm = kvm_get_source_kvm(kvm, ram->src_vm_fd);
	if (!src_kvm) {
		pr_err("kvm: invalid source kvm file %d\n", ram->src_vm_fd);
		return -EBADF;
	}

	mmap_write_lock(src_kvm->mm);
	mmap_write_lock(kvm->mm);

	if (!kvm_hot_replace_alloc_chain(src_kvm->mm, kvm->mm,
		(hotreplace_chain_fn)kvm_hot_replace_sync_rss_and_ptes)) {
		rc = -ENOMEM;
		goto unlock;
	}

	/* source vma must start exactly at the requested address */
	src = find_vma(src_kvm->mm, ram->src_addr);
	if (!src || ram->src_addr != src->vm_start) {
		pr_err("kvm: invalid source addr 0x %llx\n", ram->src_addr);
		rc = -EINVAL;
		goto unlock;
	}

	/* dest vma must match the requested block exactly (start and size) */
	dst = find_vma(kvm->mm, ram->dst_addr);
	if (!dst || ram->dst_addr != dst->vm_start ||
		ram->size != (dst->vm_end - dst->vm_start)) {
		pr_err("kvm: invalid dest addr 0x %llx\n", ram->dst_addr);
		rc = -EINVAL;
		goto unlock;
	}

	ram_start = dst->vm_start;
	ram_end = dst->vm_end;

	/* dest must not have anon mappings yet; we adopt the source's */
	if (dst->anon_vma) {
		pr_err("kvm: dst->anon_vma must be NULL!!!\n");
		rc = -EINVAL;
		goto unlock;
	}

	rc = kvm_hot_replace_share_ram_block_rc(dst->vm_start, dst->vm_end, dst, src, kvm, src_kvm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	if (!rc)
		sync_huge_pmd_lru_list(src_kvm->mm, kvm->mm, ram_start, ram_end);
#endif

unlock:
	mmap_write_unlock(kvm->mm);
	mmap_write_unlock(src_kvm->mm);

	kvm_put_kvm(src_kvm);
	return rc;
}

/*
 * Update @kvm's skip_notifier flag under the mmu lock (taken via the
 * EulerOS wrapper when available, the raw spinlock otherwise).
 * Always returns 0.
 */
int kvm_hot_replace_set_skip_notifier(struct kvm *kvm, int skip_notifier)
{
#ifdef CONFIG_EULEROS_VIRTUAL
	KVM_MMU_LOCK(kvm);
	kvm->skip_notifier = skip_notifier;
	KVM_MMU_UNLOCK(kvm);
#else
	spin_lock(&kvm->mmu_lock);
	kvm->skip_notifier = skip_notifier;
	spin_unlock(&kvm->mmu_lock);
#endif
	return 0;
}
