// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Debug for BUG_ON in migration_entry_to_page()
 */
#include <linux/mmdebug.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

/*
 * Static branch gating the debug path below. Default off (false key), so
 * the fast path costs a single no-op/jump; flipped on at boot via the
 * "migdfx" early parameter handled by
 * parse_migration_lockless_debug_enabled().
 */
DEFINE_STATIC_KEY_FALSE(migration_lockless_debug_enabled);

/*
 * Handler for the "migdfx" early boot parameter: the mere presence of the
 * parameter enables migration lockless debugging; any value is ignored.
 * Returns 0 to signal the parameter was consumed successfully.
 */
static int __init parse_migration_lockless_debug_enabled(char *str)
{
	static_branch_enable(&migration_lockless_debug_enabled);
	return 0;
}
early_param("migdfx", parse_migration_lockless_debug_enabled);

/*
 * migration_lockless_debug - dump diagnostic state for an unlocked
 * migration target page.
 * @mm:    the faulting task's mm (VMA list is walked for the dump)
 * @pte:   the pte that decoded to a migration entry
 * @entry: the migration swap entry derived from @pte
 *
 * Called ahead of migration_entry_to_page() to capture context when the
 * BUG_ON(!PageLocked(...)) there would fire: if the entry's target page is
 * not locked, dump the page, the raw pte/entry values, all-CPU backtraces
 * and the mm's VMA list. No-op unless the "migdfx" early parameter
 * enabled the static branch.
 *
 * Note: not marked inline -- this is an externally-visible function
 * defined in a .c file; the compiler decides inlining on its own.
 */
void migration_lockless_debug(struct mm_struct *mm, pte_t pte, swp_entry_t entry)
{
	struct vm_area_struct *vma;
	struct page *page, *head;

	if (!debug_migration_lockless_enabled())
		return;

	/* For migration entries, swp_offset() encodes the target pfn. */
	page = pfn_to_page(swp_offset(entry));
	head = compound_head(page);
	if (likely(PageLocked(head)))
		return;

	pr_err("Dump from migration_entry_to_page BUG_ON:\n");
	pr_err("page: %px, head:%px\n", page, head);
	dump_page(page, "migration_entry_to_page BUG_ON");
	pr_err("pte val:%llx, swap entry:%lx\n", (unsigned long long)pte_val(pte),
			entry.val);
	trigger_all_cpu_backtrace();
	/*
	 * handle_mm_fault() already asks to hold the mm semaphore, so the
	 * VMA list can be walked here without taking mmap_lock ourselves.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		pr_err("vma %px start %px end %px\n"
			"next %px prev %px mm %px\n"
			"prot %lx anon_vma %px vm_ops %px\n"
			"pgoff %lx file %px private_data %px\n"
			"flags: %#lx(%pGv)\n",
			vma, (void *)vma->vm_start,
			(void *)vma->vm_end, vma->vm_next,
			vma->vm_prev, vma->vm_mm,
			(unsigned long)pgprot_val(vma->vm_page_prot),
			vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
			vma->vm_file, vma->vm_private_data,
			vma->vm_flags, &vma->vm_flags);
}
