/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2022.
 * Description: This is one function of RSM, using notifier mechanism.
 *		Monitoring OOM: when an OOM event happens, RSM will printk some information for OOM analysis,
 *		such as vmallocinfo and slabinfo (SLUB); more may be added in the future.
 * Author: fanglinxu <fanglinxu@huawei.com>
 * Create: 2018-08-25
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include "slab.h"

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#include <linux/rwsem.h>
#include <linux/nodemask.h>
#endif

#include <linux/spinlock_types.h>
#include <linux/rtos_oom_extend.h>

#include "oom_slab_vmalloc.h"

#define KSYM_NAME_LEN 128
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
			2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)

static struct seq_file g_seq_file;
#define INFO_SEQ_FILE_BUF_MAX 4096
static unsigned char seq_file_buf[INFO_SEQ_FILE_BUF_MAX];
/*
 * seq_file_buf_init - reset the static seq_file and its backing buffer.
 *
 * Uses a single static seq_file + static buffer so the seq_* helpers can
 * format output without allocating memory (we are likely already under
 * OOM pressure when this runs). Not safe for concurrent callers: there is
 * only one global buffer.
 *
 * Return: pointer to the (re)initialized global seq_file.
 */
struct seq_file *seq_file_buf_init(void)
{
	struct seq_file *p = &g_seq_file;

	memset(p, 0, sizeof(*p));
	memset(seq_file_buf, 0, sizeof(seq_file_buf));
	p->private = current;
	p->buf = (char *)seq_file_buf;
	p->size = sizeof(seq_file_buf);
	return p;
}

/*
 * seq_to_printk - flush the accumulated seq_file buffer to the kernel log.
 * @m: seq_file whose buffer holds formatted output.
 *
 * Print exactly m->count bytes via "%.*s": when the buffer has been filled
 * to capacity there is no terminating NUL, so a plain "%s" could read past
 * the formatted data.
 */
void seq_to_printk(const struct seq_file *m)
{
	if (m->buf != NULL && m->count != 0)
		pr_info("%.*s", (int)m->count, m->buf);
}

/*
 * oom_show_vmallocinfo - show vmallocinfo when oom.
 *
 * Delegates the actual dump to oom_vmallocinfo_show(); the output format
 * follows mm/vmalloc.c -> s_show (the /proc/vmallocinfo formatter).
 * The Start/End markers delimit the dump in the kernel log.
 */
static void oom_show_vmallocinfo(void)
{
	pr_info("Vmallocinfo Start >>>>>>>>>>>>>>>>>>>>\n");
	oom_vmallocinfo_show();
	pr_info("Vmallocinfo End <<<<<<<<<<<<<<<<<<<<\n\n");
}

#ifdef CONFIG_SLAB
/*
 * NOTE(review): struct slab_rcu and struct slab duplicate private
 * definitions from mm/slab.c so this module can interpret SLAB internals.
 * Their layout must stay in sync with the kernel version this module is
 * built against — do not reorder or resize fields here.
 */
struct slab_rcu {
	struct rcu_head head;		/* RCU callback head used for deferred freeing */
	struct kmem_cache *cachep;	/* presumably the owning cache — mirrors mm/slab.c */
	void *addr;
};

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from an general cache.
 * Slabs are chained into three list: fully used, partial, fully free slabs.
 */

struct slab {
	union {
		struct {
			struct list_head list;
			unsigned long colouroff;
			void *s_mem;		/* including colour offset */
			unsigned int inuse;	/* num of objs active in slab */
			kmem_bufctl_t free;
			unsigned short nodeid;
		};
		/* RCU-freeing variant overlays the management fields above. */
		struct slab_rcu _slab_cover_slab_rcu;
	};
};

/*
 * NOTE(review): mirror of the per-CPU object cache defined privately in
 * mm/slab.c — layout must match the target kernel version exactly.
 */
struct array_cache {
	unsigned int avail;		/* number of objects currently cached */
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

/*
 * oom_show_slabinfo - show slabinfo when oom (SLAB backend).
 *
 * Uses mutex_trylock() so the dump is skipped rather than blocking when
 * slab_mutex is already held — we may be called from an OOM context where
 * sleeping on the lock is undesirable.
 */
static void oom_show_slabinfo(void)
{
	struct kmem_cache *cachep = NULL;

	if (!mutex_trylock(&slab_mutex)) {
		/* Name the lock we actually tried, for accurate diagnostics. */
		pr_info("[SLAB]mutex_trylock(slab_mutex) failed, ignore slabinfo.\n");
		return;
	}

	pr_info("[SLAB]Slabinfo Start >>>>>>>>>>>>>>>>>>>>\n");
	/* Header matches the /proc/slabinfo column layout for SLAB. */
	pr_info("# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	pr_info(" : tunables <limit> <batchcount> <shared>");
	pr_info(" : slabdata <active_slabs> <num_slabs> <shared_avail> : globalstat : cpustat\n");

	list_for_each_entry(cachep, &slab_caches, list) {
		s_slabinfo(cachep);
	}
	pr_info("Slabinfo End <<<<<<<<<<<<<<<<<<<<\n\n");
	mutex_unlock(&slab_mutex);
}
#endif /* CONFIG_SLAB */

/* SLUB's slabinfo. */
#ifdef CONFIG_SLUB
#ifdef CONFIG_SLUB_DEBUG

#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static unsigned long count_partial(struct kmem_cache_node *n,
					int (*get_count)(struct page *))
{
	unsigned long flags;
	unsigned long x = 0;
	struct page *page = NULL;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, lru)
		x += get_count(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}

/* count_free - number of unused object slots on one slab page. */
static int count_free(struct page *page)
{
	int free_objs = page->objects - page->inuse;

	return free_objs;
}

/*
 * s_slubinfo - print one cache's slabinfo line (SLUB backend).
 * @cachep: cache to report on.
 *
 * Walks every online node, summing slab/object counts from the per-node
 * kmem_cache_node statistics; free objects are counted from the partial
 * lists only (full slabs have no free objects).
 */
static void s_slubinfo(struct kmem_cache *cachep)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(cachep, node);

		if (!n)
			continue;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	/*
	 * Columns that /proc/slabinfo prints but that are constant zero
	 * under SLUB (the tunables block etc.) are omitted here.
	 */
	pr_info("%-17s %6lu %6lu %6u %4u %4u  : slabdata %6lu %6lu\n",
			cachep->name, nr_objs - nr_free,
			nr_objs, cachep->size,
			oo_objects(cachep->oo),
			(1U << (unsigned int)oo_order(cachep->oo)),
			nr_slabs, nr_slabs);
}

/*
 * oom_show_slubinfo - show slabinfo when oom (SLUB backend).
 *
 * Uses mutex_trylock() so the dump is skipped rather than blocking when
 * slab_mutex is contended during OOM handling.
 */
static void oom_show_slubinfo(void)
{
	struct kmem_cache *cachep = NULL;

	if (!mutex_trylock(&slab_mutex)) {
		/* Name the lock we actually tried, for accurate diagnostics. */
		pr_info("[SLUB]mutex_trylock(slab_mutex) failed, ignore slubinfo.\n");
		return;
	}

	pr_info("[SLUB]Slabinfo Start >>>>>>>>>>>>>>>>>>>>\n");
	pr_info("# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	pr_info(" : slabdata <active_slabs> <num_slabs>\n");
	list_for_each_entry(cachep, &slab_caches, list) {
		s_slubinfo(cachep);
	}
	pr_info("Slabinfo End <<<<<<<<<<<<<<<<<<<<\n\n");
	mutex_unlock(&slab_mutex);
}
#endif
#endif

/*
 * oom_show_vmalloc_slab - OOM notifier handler body.
 *
 * Dumps vmallocinfo and the active slab allocator's slabinfo to the
 * kernel log to aid post-mortem OOM analysis.
 */
void oom_show_vmalloc_slab(void)
{
	/* Show vmallocinfo */
	oom_show_vmallocinfo();
#ifdef CONFIG_SLAB
	/* Show SLAB's slabinfo */
	oom_show_slabinfo();
#endif
#if defined(CONFIG_SLUB) && defined(CONFIG_SLUB_DEBUG)
	/*
	 * oom_show_slubinfo() is only compiled when both CONFIG_SLUB and
	 * CONFIG_SLUB_DEBUG are set; guard the call with the same condition
	 * as the definition site.
	 */
	oom_show_slubinfo();
#endif
}
EXPORT_SYMBOL(oom_show_vmalloc_slab);
