/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2014-2019.
 * Description: idump parse vma
 * Author: nixiaoming
 * Create: 2014-6-18
 */

#include <linux/binfmts.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/coredump.h>
#include <linux/idump.h>
#include <linux/sched/task_stack.h>
#include "vma_idump.h"

#if defined(CONFIG_RTOS_IDUMP_FORCE) || defined(CONFIG_RTOS_IDUMP_TRIGGER)
/*
 * get_dump_page_task - pin one user page of @task at @addr for dumping
 * @task: task whose address space is walked
 * @addr: user virtual address to look up
 *
 * Returns the pinned page (FOLL_GET reference held, caller must put_page())
 * or NULL on failure. FOLL_FORCE | FOLL_DUMP mirror get_dump_page() so
 * non-readable and zero mappings are handled the coredump way.
 */
struct page *get_dump_page_task(struct task_struct *task, unsigned long addr)
{
	struct mm_struct *mm = task->mm;
	struct page *page = NULL;
	int locked = 1;
	int ret;

	/* kernel threads and already-exited tasks have no mm to walk */
	if (!mm)
		return NULL;
	if (mmap_read_lock_killable(mm))
		return NULL;
	ret = get_user_pages_remote(mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET,
			&page, NULL, &locked);
	/* gup may drop mmap_lock internally; only unlock if still held */
	if (locked)
		mmap_read_unlock(mm);
	return (ret == 1) ? page : NULL;
}
#endif

static int vma_is_got_dyn(const struct vm_area_struct *vma, const struct idump_elf_info_struct *got_dyn)
{
	if (got_dyn->got_vaddr >= vma->vm_start && got_dyn->got_vaddr < vma->vm_end)
		return true;
	else if (got_dyn->dyn_vaddr >= vma->vm_start && got_dyn->dyn_vaddr < vma->vm_end)
		return true;
	else
		return false;
}

/*
 * Compute the page-aligned dump window inside @vma that covers the GOT
 * and/or .dynamic sections recorded in @got_dyn (needed by gdb).
 * Caller guarantees at least one of the two sections starts in this vma
 * (see vma_is_got_dyn()).
 */
static void dump_got_dyn(struct vma_dump_region *region, struct idump_elf_info_struct *got_dyn,
		struct vm_area_struct *vma)
{
	/* Neutral bounds: a section outside the vma leaves lo/hi untouched. */
	unsigned long lo = vma->vm_end;
	unsigned long hi = 0;

	if (got_dyn->got_vaddr >= vma->vm_start && got_dyn->got_vaddr < vma->vm_end) {
		lo = min(lo, got_dyn->got_vaddr);
		/* clamp the section end to the vma end */
		hi = max(hi, min(got_dyn->got_vaddr + got_dyn->got_size, vma->vm_end));
	}
	if (got_dyn->dyn_vaddr >= vma->vm_start && got_dyn->dyn_vaddr < vma->vm_end) {
		lo = min(lo, got_dyn->dyn_vaddr);
		hi = max(hi, min(got_dyn->dyn_vaddr + got_dyn->dyn_size, vma->vm_end));
	}

	region->start = idump_align_down(lo, PAGE_SIZE);
	region->size = idump_align_up(hi - region->start, PAGE_SIZE);
}

/*
 * If @vma contains the process's main stack (mm->start_stack), fill in
 * @stack_region (esp taken from the thread-group leader) and return 1;
 * otherwise return 0.
 */
static int vma_is_main_stack(struct task_struct *task,
		struct vm_area_struct *vma, struct vma_stack_region *stack_region)
{
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *leader = task->group_leader;

	if (vma->vm_start > mm->start_stack || vma->vm_end < mm->start_stack)
		return 0;

	if (unlikely(leader->stack == NULL)) {
		pr_info("The stack of group_leader is NULL\n");
		stack_region->esp = 0;
	} else {
		stack_region->esp = KSTK_ESP(leader);
	}
	stack_region->start = vma->vm_start;
	stack_region->end = vma->vm_end;
	stack_region->task = task;
	return 1;
}

/*
 * Check whether @vma holds the stack of any thread participating in the dump.
 * On a match, fill @stack_region with the thread's esp/start/end/task and
 * return true; otherwise return false.
 *
 * The thread list is walked via the core_state dumper chain. With the idump
 * config options enabled, the chain may come either from the regular
 * mm->core_state or from the idump-private rtos_mm copy (trigger/force dump
 * without a normal coredump in flight).
 */
static int vma_is_thread_stack(struct task_struct *task,
		struct vm_area_struct *vma, struct vma_stack_region *stack_region)
{
	struct rtos_task_struct *rtos_task;
	int ret = false;
	struct core_thread *ct;
#if defined(CONFIG_RTOS_IDUMP_FORCE) || defined(CONFIG_RTOS_IDUMP_TRIGGER)
	struct rtos_mm_struct *rtos_mm;
#endif

	/* RCU protects the core_thread list traversal below. */
	rcu_read_lock();
#if defined(CONFIG_RTOS_IDUMP_FORCE) || defined(CONFIG_RTOS_IDUMP_TRIGGER)
	rtos_mm = mm_to_rtos_mm(task->mm);
	/* Prefer the regular coredump thread list; fall back to the idump one. */
	ct = task->mm->core_state ?  &task->mm->core_state->dumper : &rtos_mm->idump.core_state->dumper;
	for (; ct; ct = ct->next) {
		if (ct->task == NULL)
			continue;
		if (task->mm->core_state == NULL) {
			/* Skip a duplicate of the list head later in the idump chain. */
			struct core_thread *first_thread = &rtos_mm->idump.core_state->dumper;
			if (ct != first_thread && ct->task == first_thread->task)
				continue;
		}
#else
	for (ct = &task->mm->core_state->dumper; ct; ct = ct->next) {
#endif
		rtos_task = task_to_rtos_task(ct->task);
		/* Preferred check: the recorded stack top lies inside this vma. */
		if ((vma->vm_start <= rtos_task->idump.stack_start) && (vma->vm_end >= rtos_task->idump.stack_start)) {
			stack_region->esp = KSTK_ESP(ct->task);
			stack_region->start = vma->vm_start;
			/* Dump only up to the (page-aligned) recorded stack top. */
			stack_region->end = idump_align_up(rtos_task->idump.stack_start, PAGE_SIZE);
			stack_region->task = ct->task;
			ret = true;
			goto done;
		} else if ((vma->vm_start <= KSTK_ESP(ct->task)) && (vma->vm_end >= KSTK_ESP(ct->task))) {
			/* if task do dump after fork/vfork but no execv, t->stack_start == 0 */
			stack_region->esp = KSTK_ESP(ct->task);
			stack_region->start = vma->vm_start;
			stack_region->end = vma->vm_end;
			stack_region->task = ct->task;
			ret = true;
			goto done;
		}
	}
done:
	rcu_read_unlock();
	return ret;
}

/*
 * Return true and fill @stack_region when @vma is a thread stack or the
 * main process stack. Thread stacks are checked first.
 */
static int vma_is_stack(struct task_struct *task,
		struct vm_area_struct *vma, struct vma_stack_region *stack_region)
{
	memset(stack_region, 0, sizeof(*stack_region));

	return vma_is_thread_stack(task, vma, stack_region) ||
		vma_is_main_stack(task, vma, stack_region);
}

static void vma_dump_stack(struct vma_dump_region *dump_region,
	struct vma_stack_region *stack_region, unsigned int idump_stacksize)
{
	if (idump_stacksize == IDUMP_FULL_STACK) {
		dump_region->start = stack_region->start;
	} else if (stack_region->start <= stack_region->esp &&
			stack_region->end >= stack_region->esp) {
		dump_region->start = idump_align_down(stack_region->esp, PAGE_SIZE);
	} else { /* stack  over flow */
		dump_region->start = stack_region->start;
		dump_region->size = stack_region->end - dump_region->start;
		pr_info("[idump/coredump] tgid=%d task[%d %s], out of the stack, dump size:%lx\n",
			stack_region->task->tgid, stack_region->task->pid,
			stack_region->task->comm, dump_region->size);
		return;
	}

	if (IDUMP_FULL_STACK == idump_stacksize ||
			IDUMP_USED_STACK == idump_stacksize)
		dump_region->size = stack_region->end - dump_region->start;
	else
		dump_region->size = min(idump_stacksize * PAGE_SIZE,
				stack_region->end - dump_region->start);
}

/* True when @vma overlaps the brk heap interval [start_brk, brk]. */
static int vma_is_heap(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	return (vma->vm_start <= mm->brk) && (vma->vm_end >= mm->start_brk);
}

/*
 * Return true when the first bytes mapped at vma->vm_start are the ELF
 * magic (\x7fELF). Used to spot vmas that map an ELF image header.
 */
static int vma_is_elf(struct vm_area_struct *vma)
{
	u32 word = 0;
	char *header;
	struct page *page;
	/* Doing it this way gets the constant folded by GCC. */
	union {
		u32 cmp;
		char elfmag[SELFMAG];
	} magic;
	BUILD_BUG_ON(sizeof(word) != SELFMAG);
	magic.elfmag[EI_MAG0] = ELFMAG0;
	magic.elfmag[EI_MAG1] = ELFMAG1;
	magic.elfmag[EI_MAG2] = ELFMAG2;
	magic.elfmag[EI_MAG3] = ELFMAG3;

	page = follow_page(vma, vma->vm_start, FOLL_FORCE); /* FOLL_FORCE: get_user_pages read/write w/o permission */
	if (page && !IS_ERR(page)) {
		header = kmap(page);
		if (header == NULL)
			return false;

		/*
		 * memcpy, not strncpy: the magic is raw bytes, not a NUL
		 * terminated string, and strncpy would stop early at any
		 * NUL byte in the mapping.
		 */
		memcpy(&word, header, SELFMAG);
		kunmap(page);
	}

	/* word stays 0 (no match) when the page could not be looked up. */
	return word == magic.cmp;
}

static unsigned int read_elf_head(struct file *file)
{
	struct elfhdr elf_head;
	int ret;
	loff_t pos = 0;

	ret = kernel_read(file, &elf_head, sizeof(elf_head), &pos);
	if (ret != sizeof(elf_head))
		return ET_NONE;

	return (unsigned int)elf_head.e_type;
}

#define ANON	0
#define LIBEXEC 1
#define LIBDATA 2
#define BINEXEC 3
#define BINDATA 4
#define MAPPED_DATAFILE 5
#define VMA_IS_SOCKET	6
#define BIN_FILE 	7 /* BINEXEC+BINDATA */
#define UNKNOWN_FILE_TYPE 8

static int vma_file_type(struct vm_area_struct *vma, struct task_struct *task)
{
	struct mm_struct *mm = vma->vm_mm;
	struct inode *inode;
	int bin_file = 0;

	if (!vma->vm_file)
		return ANON;

	/*
	 * On the ppc architecture, there is a crossover between
	 * the code segment and the data segment interval.
	 */
	if ((mm->start_code < vma->vm_end) && (mm->end_code > vma->vm_start))
		bin_file = BINEXEC;

	/*
	 * While GNU ld provides only one RW RW segment,
	 * LLVM lld employs two explicit RW PT_LOAD segments.
	 * As a result, only the second RW PT_LOAD segment is
	 * identified as BINDATA, and the first RW PT_LOAD
	 * is identified as LIBDATA. Therefore,
	 * vma->vm_file == mm->exe_file is added
	 * to the judgment logic of BINDATA.
	 */
	if (((mm->start_data < vma->vm_end) && (mm->end_data > vma->vm_start))
		|| ((vma->vm_file == mm->exe_file) && !(vma->vm_flags & VM_EXEC)))
		bin_file += BINDATA;
	if (bin_file)
		return bin_file;

	inode = file_inode(vma->vm_file);
	if (S_ISSOCK(inode->i_mode))
		return VMA_IS_SOCKET;
	if (!S_ISREG(inode->i_mode))
		return UNKNOWN_FILE_TYPE;

	/*
	 * readelf -h  vma->vm_file get elf type
	 * BIN file is ET_EXEC 2
	 * lib file is ET_DYN 3
	 */
	if (read_elf_head(vma->vm_file) == ET_DYN) {
		if ((vma->vm_flags & VM_EXEC) && (vma_is_elf(vma) == true))
			return LIBEXEC;
		else
			return LIBDATA;
	}
	return MAPPED_DATAFILE;
}

/*
 * Return true when a library-type @vma is backed by the dynamic loader
 * (filename starts with "ld" and contains ".so").
 */
static int vma_is_ldso(int type, struct vm_area_struct *vma)
{
	const char *name;

	if (vma->vm_file == NULL)
		return false;
	if (type != LIBDATA && type != LIBEXEC)
		return false;

	name = vma->vm_file->f_path.dentry->d_iname;
	/* Sample: arm&arm64 ld-linux*so.3 ppc ld.so.1 ppc64 ld64.so.1 */
	return (strncmp(name, "ld", 2) == 0) && (strstr(name, ".so") != NULL);
}

/*
 * Decide whether an anonymous @vma belongs to the dynamic loader: walk
 * backwards through preceding vmas until the first file-backed one that
 * classifies as a library (LIBDATA/LIBEXEC); the anon vma is treated as
 * ld.so data iff that library is the loader itself. Other file-backed
 * vmas (e.g. mapped data files) are skipped and the walk continues.
 * @task is only forwarded to vma_file_type().
 */
static int vma_is_ld_anon(struct vm_area_struct *vma, struct task_struct *task)
{
	int type;
	struct vm_area_struct *tmpvma = vma;
	struct vm_area_struct *prev = NULL;
	struct mm_struct *mm = vma->vm_mm;

	for (;;) {
		/* only the prev output is used; the lookup result is ignored */
		find_vma_prev(mm, tmpvma->vm_start, &prev);
		if (prev && prev->vm_file) {
			type = vma_file_type(prev, task);
			if ((type == LIBDATA) || (type == LIBEXEC)) /* is lib so */
				return vma_is_ldso(type, prev);
		}
		if (!prev)
			return false;
		tmpvma = prev;
	}

	/* not reached: the loop above always returns */
	return false;
}

#define idump_filter(type) (mm_flags & (1UL << MMF_SDUMP_##type))
/*
 * Debug names indexed by the vma file-type constants above
 * (ANON..UNKNOWN_FILE_TYPE); keep the order in sync with the #defines.
 */
char *str_vma_file_type[] = {
	"ANON",
	"LIBEXEC",
	"LIBDATA",
	"BINEXEC",
	"BINDATA",
	"MAPPED_DATAFILE",
	"SOCKET",
	"BIN_FILE",
	"unknown_file_type",
};

/* Record an empty dump window for @vma (nothing will be written out). */
static inline void dump_nothing(const struct vm_area_struct *vma, struct vma_dump_region *out)
{
	idump_filter_debug("\tN\tempty\n");
	out->size = 0;
	out->start = vma->vm_start;
}

/* Record a dump window covering the whole of @vma. */
static inline void dump_all(const struct vm_area_struct *vma, struct vma_dump_region *out)
{
	out->start = vma->vm_start;
	out->size = vma->vm_end - out->start;
}

/*
 * Basic eligibility filter: reject vmas that must never be dumped.
 * Check order is preserved so the debug trace reports the same reason.
 */
static bool need_dump_vma(const struct vm_area_struct *vma)
{
	const unsigned long flags = vma->vm_flags;

	if (flags & VM_DONTDUMP) {
		idump_filter_debug("\tN\tVM_DONTDUMP\n");
		return false;
	}

	/* no dump can't read vma */
	if (!(flags & VM_READ)) {
		idump_filter_debug("\tN\t!VM_READ\n");
		return false;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (flags & VM_IO) {
		idump_filter_debug("\tN\tVM_IO\n");
		return false;
	}

	return true;
}

/*
 * Decide whether a file-backed @vma should be fully dumped, according to
 * its classification and the per-type filter bits in @mm_flags.
 * Returns non-zero to dump the whole vma. ld.so is always dumped.
 */
static int dump_file(struct vm_area_struct *vma, struct task_struct *task,
		unsigned long mm_flags)
{
	int type = vma_file_type(vma, task);
	int retval = 0;

	idump_filter_debug("type:%s\tfile_name=%s", str_vma_file_type[type],
			   vma->vm_file->f_path.dentry->d_iname);

	/* ld.so must dump */
	if (vma_is_ldso(type, vma)) {
		idump_filter_debug("\tY\tvma_is_ldso\n");
		return 1;
	}

	if (type == BINDATA) {
		retval = idump_filter(BIN_DATA);
		if (retval)
			idump_filter_debug("\tY\tBIN_DATA\n");
	} else if (type == BINEXEC) {
		retval = idump_filter(BIN_EXEC);
		if (retval)
			idump_filter_debug("\tY\tBIN_EXEC\n");
	} else if (type == BIN_FILE) {
		/* code and data segments overlap: either filter bit suffices */
		retval = (idump_filter(BIN_DATA) || idump_filter(BIN_EXEC));
		if (retval)
			idump_filter_debug("\tY\tBIN_FILE\n");
	} else if (type == LIBDATA) {
		retval = idump_filter(LIB_DATA);
		if (retval)
			idump_filter_debug("\tY\tLIB_DATA\n");
	} else if (type == LIBEXEC) {
		retval = idump_filter(LIB_EXEC);
		if (retval)
			idump_filter_debug("\tY\tLIB_EXEC\n");
	} else if (type == MAPPED_DATAFILE) {
		retval = idump_filter(MAPPED_DATAFILE);
		if (retval)
			idump_filter_debug("\tY\tMAPPED_DATAFILE\n");
	} else {
		retval = 0;
		idump_filter_debug("\tN\tunknown_file_type\n");
	}

	return retval;
}

/* return: 1 if the dump is finished, 0 otherwise */
static int dump_others(struct vm_area_struct *vma, struct task_struct *task,
		unsigned long mm_flags, struct rtos_mm_struct *rtos_mm,
		struct vma_dump_region *region)
{
	struct vma_stack_region stack_region;

	/* anon vma may include stack, so dump anon before stack */
	if (idump_filter(ANON)) {
		idump_filter_debug("\tY\tANON\n");
		goto whole;
	}

	if (vma_is_stack(task, vma, &stack_region)) {
		if (idump_filter(TASK_STACK)) {
			vma_dump_stack(region, &stack_region, rtos_mm->idump.stacksize);
			return 1;
		}
		idump_filter_debug("\tN\tTASK_STACK\n");
		goto empty;
	}

	if (vma_is_heap(vma)) {
		if (idump_filter(HEAP)) {
			idump_filter_debug("\tY\tHEAP\n");
			goto whole;
		}
		idump_filter_debug("\tN\tHEAP\n");
		goto empty;
	}

	/* dump anaonymous vma between ld.so */
	if (vma_is_ld_anon(vma, task)) {
		idump_filter_debug("\tY\tvma_is_ld_anon\n");
		goto whole;
	}

	return 0;
empty:
	dump_nothing(vma, region);
	return 1;
whole:
	dump_all(vma, region);
	return 1;
}

/*
 * Top-level smart-dump policy for one vma: fill @region with the window
 * to dump. Order matters: always-dump vmas first, then basic eligibility,
 * then file-backed handling (full dump or GOT/.dynamic window), then
 * anonymous handling, and finally a first-page ELF_HEADERS fallback for
 * anything still undecided. Default is an empty region.
 */
static void vma_dump(struct vm_area_struct *vma, struct task_struct *task,
		unsigned long mm_flags, struct vma_dump_region *region)
{
	int retval;
	struct mm_struct *mm = vma->vm_mm;
	struct idump_elf_info_struct *got_dyn;
	struct rtos_mm_struct *rtos_mm;

	if (always_dump_vma(vma)) {
		idump_filter_debug("\tY\talways_dump\n");
		goto whole;
	}

	if (unlikely(!mm)) {
		idump_filter_debug("\tN\tvma->mm==NULL\n");
		goto empty;
	}
	rtos_mm = mm_to_rtos_mm(mm);

	if (!need_dump_vma(vma))
		goto empty;

	if (vma->vm_file) { /* vma is file */
		retval = dump_file(vma, task, mm_flags);
		if (retval)
			goto whole;
		/* got dyn for gdb */
		got_dyn = &rtos_mm->idump.elf_info;
		if (vma_is_got_dyn(vma, got_dyn)) {
			dump_got_dyn(region, got_dyn, vma);
			return;
		}
		/* otherwise fall through to the ELF_HEADERS check below */
	} else { /* stack heap and other anonymous */
		if (dump_others(vma, task, mm_flags, rtos_mm, region))
			return;
	}

	/* Undecided vma: optionally keep just the first page if it is an ELF header. */
	if ((idump_filter(ELF_HEADERS)) && (vma->vm_pgoff == 0) &&
	    (vma_is_elf(vma)) && (vma_file_type(vma, task) != UNKNOWN_FILE_TYPE)) {
		region->start = vma->vm_start;
		region->size = PAGE_SIZE;
		idump_filter_debug("\tY\tELF_HEADERS\n");
		return;
	}

empty:
	dump_nothing(vma, region);
	return;
whole:
	dump_all(vma, region);
}

/*
 * Coredump hook: when smart-dump filtering is active, replace the generic
 * vma metadata (@m) with the idump-computed dump window for @vma.
 */
void idump_vma_metadata_fixup(struct task_struct *task, struct vm_area_struct *vma,
	struct core_vma_metadata *m, struct coredump_params *cprm)
{
	struct rtos_coredump_params *rtos_cprm = coredump_params_to_rtos(cprm);
	struct vma_dump_region region;

	if (rtos_cprm->dump_filter_type != FILTER_SMART_DUMP)
		return;

	vma_dump(vma, task, cprm->mm_flags, &region);
	m->start = region.start;
	m->dump_size = region.size;
}

#undef idump_filter
