// SPDX-License-Identifier: GPL-2.0-only

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <memory/vmalloc_debuginfo.h>
#include <linux/kernel_fault_notifier.h>
#include "internal.h"

/* Set by the "dmesg_show_vmallocinfo" boot parameter; gates the merged
 * per-caller vmalloc usage dump in vmallocinfo_show().
 */
bool vmallocinfo_debug_is_open;
/* Set by the "vmalloc_oom_panic" boot parameter; makes
 * vmalloc_oom_notify_fault() panic (after notifying the kernel fault
 * framework, if configured) when vmalloc space is exhausted.
 */
bool vmalloc_oom_panic_enabled;

/*
 * Boot-parameter handler for "dmesg_show_vmallocinfo": enable the
 * vmallocinfo dump produced by vmallocinfo_show().  The argument string
 * is ignored; presence of the parameter alone turns the feature on.
 */
static int __init vmallocinfo_debug(char *str)
{
	vmallocinfo_debug_is_open = true;
	return 1;	/* non-zero: parameter consumed */
}
__setup("dmesg_show_vmallocinfo", vmallocinfo_debug);

/*
 * Boot-parameter handler for "vmalloc_oom_panic": panic on vmalloc
 * exhaustion (see vmalloc_oom_notify_fault()).  The argument string is
 * ignored; presence of the parameter alone turns the feature on.
 */
static int __init vmalloc_oom_panic(char *str)
{
	vmalloc_oom_panic_enabled = true;
	return 1;	/* non-zero: parameter consumed */
}
__setup("vmalloc_oom_panic", vmalloc_oom_panic);

/*
 * Link @vn into @root.  The tree is kept ordered by node size in
 * DESCENDING order (larger sizes sort left), so an in-order walk from
 * rb_first() visits the biggest consumers first.  Duplicate sizes are
 * allowed; ties fall to the right.
 */
static void insert_vmallocinfo_node(struct rb_root *root, struct vmallocinfo_node *vn)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;
	struct vmallocinfo_node *entry;

	while (*link != NULL) {
		parent = *link;
		entry = rb_entry(parent, struct vmallocinfo_node, rb_node);
		/* bigger sizes go left so rb_first() is the largest */
		if (vn->size > entry->size)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&vn->rb_node, parent, link);
	rb_insert_color(&vn->rb_node, root);
}

/*
 * Hook invoked when vmalloc runs out of virtual address space.
 *
 * When the "vmalloc_oom_panic" boot parameter is set, report the fault
 * to the kernel fault notifier (if that framework is configured in) and
 * panic.  Panicking lets the product locate the module that exhausted
 * virtual memory and lets the system recover automatically by reboot,
 * without customer perception.  Otherwise this is a no-op.
 */
void vmalloc_oom_notify_fault(void)
{
	if (likely(!vmalloc_oom_panic_enabled))
		return;

#ifdef CONFIG_EULEROS_KERNEL_FAULT_NOTIFIER
	notify_kernel_fault(KERNEL_FAULT_VMALLOC_OOM);
#endif
	panic("vmalloc oom!");
}

/*
 * Dump a merged, size-sorted summary of vmap areas to the kernel log.
 *
 * Only active when the "dmesg_show_vmallocinfo" boot parameter set
 * vmallocinfo_debug_is_open.  Areas are aggregated into one node per
 * caller (NORMAL_VM_AREA) plus a single bucket for all vm_map_ram areas
 * (those with no struct vm_struct), ordered by total size descending,
 * and the largest VMALLOC_INFO_TOP_N entries are printed.
 *
 * At most VMALLOC_INFO_MAX_COUNT vmap areas are examined, and the scan
 * also stops early (silently truncating the statistics) if a node
 * allocation fails.
 */
void vmallocinfo_show(void)
{
	struct rb_root merge_vmap_area_root = RB_ROOT;
	struct vmap_area *va;
	struct vmallocinfo_node *tmp_node;
	struct vmallocinfo_node *n = NULL;
	unsigned long tmp_size;
	unsigned int tmp_type;
	const void *tmp_caller;
	struct rb_node *node;
	bool find_caller;
	int print_cnt = 0;
	int stat_cnt = 0;

	if (!vmallocinfo_debug_is_open)
		return;

	/* NOTE(review): lock_vmap_area() comes from "internal.h"; presumably
	 * it keeps vmap_area_list stable for this walk — confirm there.
	 */
	lock_vmap_area();
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!va->vm) {
			/* No vm_struct: a vm_map_ram area; it has no caller. */
			tmp_caller = NULL;
			tmp_size = va->va_end - va->va_start;
			tmp_type = VM_MAP_RAM;
		} else {
			tmp_caller = va->vm->caller;
			tmp_size = va->vm->size;
			tmp_type = NORMAL_VM_AREA;
		}
		/* Linear scan for an existing node to merge into: match on
		 * caller for NORMAL_VM_AREA, or on type alone for the single
		 * VM_MAP_RAM bucket.  (The tree is keyed by size, so it cannot
		 * be searched by caller any faster than this.)
		 */
		find_caller = false;
		for (node = rb_first(&merge_vmap_area_root); node; node = rb_next(node)) {
			tmp_node = rb_entry(node, struct vmallocinfo_node, rb_node);
			if (tmp_type == tmp_node->type) {
				if ((tmp_type == NORMAL_VM_AREA
					&& tmp_node->caller == tmp_caller)
					|| tmp_type != NORMAL_VM_AREA) {
					find_caller = true;
					break;
				}
			}
		}
		if (find_caller) {
			/* Size is the sort key, so the node must be removed
			 * and re-inserted after its size changes to keep the
			 * tree ordered.
			 */
			rb_erase(&tmp_node->rb_node, &merge_vmap_area_root);
			tmp_node->size += tmp_size;
		} else {
			/* GFP_ATOMIC: we cannot sleep under the vmap area lock. */
			tmp_node = kmalloc(sizeof(struct vmallocinfo_node), GFP_ATOMIC);
			if (tmp_node == NULL)
				break;
			tmp_node->caller = tmp_caller;
			tmp_node->size = tmp_size;
			tmp_node->type = tmp_type;
		}
		insert_vmallocinfo_node(&merge_vmap_area_root, tmp_node);
		/* Cap the number of vmap areas examined, not merged nodes. */
		if (++stat_cnt == VMALLOC_INFO_MAX_COUNT)
			break;
	}
	unlock_vmap_area();

	pr_warn("=====================VMALLOCINFO START=======================\n");
	pr_warn("Number of statistics: %d\n", stat_cnt);
	/* The tree sorts larger sizes first (see insert_vmallocinfo_node),
	 * so this in-order walk prints the top consumers.
	 */
	for (node = rb_first(&merge_vmap_area_root); node; node = rb_next(node)) {
		tmp_node = rb_entry(node, struct vmallocinfo_node, rb_node);
		if (tmp_node->type == NORMAL_VM_AREA)
			pr_warn("[vmallocinfo]%pS: %lu\n", tmp_node->caller,
					tmp_node->size);
		else
			pr_warn("[vmallocinfo]vm_map_ram: %lu\n", tmp_node->size);
		if (++print_cnt == VMALLOC_INFO_TOP_N)
			break;
	}
	pr_warn("======================VMALLOCINFO END========================\n");
	/* Post-order traversal is safe for freeing every node without
	 * rebalancing the tree.
	 */
	rbtree_postorder_for_each_entry_safe(tmp_node, n,
				&merge_vmap_area_root, rb_node)
		kfree(tmp_node);
}
