/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024.
 * Description: Support dumping ftrace log to non-volatile memory.
 * Author: Chen Zechuan
 * Create: 2023/9/13
 */

/*
 * When true, the per-cpu minimum-size check of the raw dump memory is
 * deferred until dump time (see check_raw_dump_mem() and
 * write_reserved_mem()).  Set once during early boot, read-only after init.
 */
static bool delay_chk_sz __ro_after_init;
/*
 * early_param handler for "ttrace_szdelaychk".
 *
 * @str: parameter value from the command line (unused; the mere presence
 *       of the parameter selects the behavior).
 *
 * Only enables the delayed size check when
 * CONFIG_RTOS_ADVANCED_FTRACE_DELAY_SIZE_CHECK is built in; otherwise it
 * re-asserts the default of false.  Always returns 0 (parameter consumed).
 */
static int __init delay_check_sz_setup(char *str)
{
#ifdef CONFIG_RTOS_ADVANCED_FTRACE_DELAY_SIZE_CHECK
	delay_chk_sz = true;
#else
	delay_chk_sz = false;
#endif
	return 0;
}
early_param("ttrace_szdelaychk", delay_check_sz_setup);

/*
 * Validate the [addr, addr + size) physical range and map it for raw
 * trace dumping.
 *
 * @addr:      physical base address of the reserved dump memory
 * @size:      size of the region in bytes
 * @res_vaddr: out parameter; on success receives the kernel virtual
 *             address of the write-combined mapping
 *
 * On success the region is claimed via request_mem_region() (except on
 * powerpc, see comment below) and mapped with ioremap_wc(); the caller
 * owns both and must iounmap()/release_mem_region() them later (see
 * raw_dump_mem_write()).
 *
 * Returns 0 on success or a negative errno:
 *   -EINVAL  bad address/size (zero, too large, too small, or wrapping)
 *   -EFAULT  range not inside a reserved area (x86 + PMFS only)
 *   -EBUSY   request_mem_region() failed
 *   -ENOMEM  ioremap_wc() failed
 */
static int
check_raw_dump_mem(phys_addr_t addr, unsigned long size, unsigned long *res_vaddr)
{
	void *vaddr = NULL;

	/* NOTE(review): on 32-bit, unsigned long cannot exceed UINT_MAX,
	 * so this guard is only effective on 64-bit builds. */
	if (size > UINT_MAX)
		return -EINVAL;

	/* each cpu space must have at least a PAGE_SIZE */
	if ((!delay_chk_sz) && (size < (PAGE_SIZE * board_cpu_num)))
		return -EINVAL;

	if (!addr || !size)
		return -EINVAL;
	/* check whether overflow */
	if (addr + (phys_addr_t)size < addr)
		return -EINVAL;

	/* check whether the memory is reserved */
#ifdef CONFIG_RTOS_MEMORY_PMFS
#ifdef CONFIG_X86
	if (!mem_range_in_reserve_area((u64)addr, (u64)(addr + size)))
		return -EFAULT;
#endif
#endif
#ifndef CONFIG_PPC
	/*
	 * Powerpc don't support discontiguous System Ram.
	 * When reserved memory of trace is in System Ram, it will cause
	 * following situations:
	 * 1. If the no-map attribute is added into reserved-memory node
	 *    in dts file, only first contiguous memory region of System
	 *    Ram will be used;
	 * 2. If the no-map attribute is not added into reserved-memory node
	 *    in dts file or if use /memreserve/ to reserve memory from System
	 *    Ram, the System Ram fails to register into the resource.
	 * To ensure ttrace reliability in powerpc, disable request_mem_region
	 * before ioremap for ttrace in powerpc.
	 */
	if (!request_mem_region(addr, size, "raw_dump_mem")) {
		pr_err("ttrace request mem region failed\n");
		return -EBUSY;
	}
#endif
	vaddr = ioremap_wc(addr, size);
	/* undo the region claim if mapping failed (no claim on powerpc) */
#ifndef CONFIG_PPC
	if (!vaddr)
		release_mem_region(addr, size);
#endif
	if (!vaddr)
		return -ENOMEM;

	*res_vaddr = (uintptr_t)vaddr;
	return 0;
}

/*
 * Read handler for the raw_dump_mem control file.
 *
 * Reports the currently configured physical base address and size of the
 * raw dump region as "<addr> <size>\n".
 *
 * Returns the number of bytes copied to @ubuf (via
 * simple_read_from_buffer()), 0 at EOF, or a negative errno.
 */
static ssize_t
raw_dump_mem_read(struct file *filp, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	/* %lu: raw_dump_size is unsigned long (was %ld, a sign mismatch) */
	r = snprintf(buf, sizeof(buf), "%pa %lu\n", &raw_dump_addr, raw_dump_size);
	/* snprintf returns the would-be length; clamp in case of truncation */
	if (r >= (int)sizeof(buf))
		r = sizeof(buf) - 1;
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

#define PARSE_BUF_MAX 64
/*
 * Parse a "<addr> <size>" pair from a user-supplied, NUL-terminated
 * buffer.
 *
 * @buf:      input text, e.g. "0x80000000 0x100000"
 * @buf_size: capacity of @buf; rejected if larger than PARSE_BUF_MAX
 * @addr:     out: parsed physical address (any base accepted by
 *            simple_strtoull with base 0)
 * @size:     out: parsed region size
 *
 * Only spaces/tabs may separate the two numbers, and nothing but an
 * optional trailing newline may follow the size.  Returns true when the
 * input matches this shape, false otherwise.
 */
static bool parse_raw_dump_mem(char *buf, int buf_size, phys_addr_t *addr,
				unsigned long *size)
{
	char *cur = buf;
	char *end = buf;

	if (buf_size > PARSE_BUF_MAX)
		return false;

	*addr = simple_strtoull(cur, &end, 0);

	/* skip the separator; anything but space/tab before the next
	 * hex digit makes the input invalid */
	for (cur = end; !isxdigit(*cur); cur++) {
		if (*cur != ' ' && *cur != '\t')
			return false;
	}

	*size = simple_strtoul(cur, &end, 0);

	/* allow only an optional trailing newline after the size */
	return (*end == '\0') || (*end == '\n');
}

/*
 * Write handler for the raw_dump_mem control file.
 *
 * Accepts "<addr> <size>" text, validates and maps the new region via
 * check_raw_dump_mem(), then atomically (under dump_mem_mutex) swaps it
 * in for the previous one, unmapping/releasing the old mapping if any,
 * and zeroes the new region.
 *
 * Returns the number of bytes consumed on success, or a negative errno
 * (-EINVAL on parse/validation failure, -EFAULT on a failed user copy,
 * or whatever check_raw_dump_mem() reported).
 */
static ssize_t
raw_dump_mem_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[PARSE_BUF_MAX];
	phys_addr_t addr;
	unsigned long vaddr;
	unsigned long size;
	int ret = -EINVAL;

	/* leave room for the terminating NUL below */
	if (cnt >= sizeof(buf))
		return ret;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	if (!parse_raw_dump_mem(buf, sizeof(buf), &addr, &size))
		return ret;

	/* claims + maps the new region; must be undone on later failure */
	ret = check_raw_dump_mem(addr, size, &vaddr);
	if (ret)
		return ret;

	down(&dump_mem_mutex);

	/* tear down a previously configured region before replacing it */
	if (raw_dump_vaddr) {
		iounmap((void *)raw_dump_vaddr);
		/* powerpc never claimed the region, see check_raw_dump_mem() */
#ifndef CONFIG_PPC
		release_mem_region(raw_dump_addr, raw_dump_size);
#endif
	}

	raw_dump_addr = addr;
	raw_dump_size = size;
	raw_dump_vaddr = vaddr;

	/* start each configured region from a clean slate */
	memset((void *)raw_dump_vaddr, 0, size);

	ret = cnt;

	up(&dump_mem_mutex);
	return ret;
}

/* File operations for the raw_dump_mem tracefs control file. */
static const struct file_operations raw_dump_mem_fops = {
	.open           = tracing_open_generic,
	.read           = raw_dump_mem_read,
	.write          = raw_dump_mem_write,
};

/* Values accepted by the coredump_disable control file. */
#define COREDUMP_DISABLE_ON 1
#define COREDUMP_DISABLE_OFF 0

/* Whether the signal_generate tracepoint probe is currently registered;
 * transitions are serialized by coredump_disable_mutex. */
static int coredump_disable = COREDUMP_DISABLE_OFF;
static DEFINE_SEMAPHORE(coredump_disable_mutex);

/*
 * signal_generate tracepoint probe (registered by coredump_disable_write).
 *
 * Disables ttrace as soon as a coredump-causing signal is generated, so
 * the trace buffer is preserved around the crash.  Signals that do not
 * trigger a coredump are ignored.
 */
static void
probe_signal_generate(void *ignore, int sig, struct kernel_siginfo *info, struct task_struct *task,
		int group, int result)
{
	if (!sig_kernel_coredump(sig))
		return;

	pr_info("============disable ttrace=============\n");
	ttrace_disable(sig, task);
}

/*
 * Write handler for the coredump_disable control file.
 *
 * Accepts "1" (COREDUMP_DISABLE_ON) to register probe_signal_generate()
 * on the signal_generate tracepoint, so ttrace is disabled when a
 * coredump-causing signal is raised, and "0" (COREDUMP_DISABLE_OFF) to
 * unregister the probe again.  State transitions are serialized by
 * coredump_disable_mutex; writing the current state is a no-op.
 *
 * Returns the number of bytes consumed on success, or a negative errno
 * (-EFAULT on a failed user copy, -EINVAL on bad input, or the error
 * from register_trace_signal_generate()).
 */
static ssize_t
coredump_disable_write(struct file *file, const char __user *ubuf,
			size_t count, loff_t *offs)
{
	char buffer[32];
	unsigned long val;
	int ret;

	/* leave room for the terminating NUL below */
	if (count >= sizeof(buffer))
		return -EINVAL;

	/* was -EINVAL; -EFAULT is the convention for a failed user copy
	 * (consistent with raw_dump_mem_write above) */
	if (copy_from_user(buffer, ubuf, count))
		return -EFAULT;

	/* NUL-terminate; no memset needed since [0, count) was just
	 * overwritten and kstrtoul stops at the terminator */
	buffer[count] = 0;

	ret = kstrtoul(buffer, 0, &val);
	if (ret < 0)
		return ret;

	down(&coredump_disable_mutex);
	switch (val) {
	case COREDUMP_DISABLE_OFF:
		if (coredump_disable != COREDUMP_DISABLE_OFF) {
			unregister_trace_signal_generate(probe_signal_generate, NULL);
			/* wait for in-flight probes before declaring it off */
			tracepoint_synchronize_unregister();
			coredump_disable = COREDUMP_DISABLE_OFF;
		}
		break;
	case COREDUMP_DISABLE_ON:
		if (coredump_disable != COREDUMP_DISABLE_ON) {
			ret = register_trace_signal_generate(probe_signal_generate, NULL);
			if (ret)
				goto out_err;
			coredump_disable = COREDUMP_DISABLE_ON;
		}
		break;
	default:
		ret = -EINVAL;
		goto out_err;
	}

	ret = count;

out_err:
	up(&coredump_disable_mutex);
	return ret;
}

/*
 * Read handler for the coredump_disable control file.
 *
 * Reports the current state ("0\n" or "1\n") of the coredump-triggered
 * ttrace-disable feature.
 */
static ssize_t
coredump_disable_read(struct file *file, char __user *ubuf,
			size_t count, loff_t *ppos)
{
	char tmp[10];
	int len;

	/* snprintf's return is the string length here; tmp always fits */
	len = snprintf(tmp, sizeof(tmp), "%d\n", coredump_disable);
	return simple_read_from_buffer(ubuf, count, ppos, tmp, len);
}

/* File operations for the coredump_disable tracefs control file. */
static const struct file_operations coredump_disable_fops = {
	.read   = coredump_disable_read,
	.write  = coredump_disable_write
};

static int write_reserved_mem(struct trace_array *tr, void *vaddr,
			unsigned long size, void **bpage, int fiq)
{
	int ret = 0;
	void *cpu_addr = NULL;
	int nr_page;
	int cpu;
	int idx;
	int log_index = 0;
	int tatol_page;
	int mean_page;
	int left_page;

	if (!*bpage || !vaddr)
		return -ENOMEM;

	tatol_page = size / PAGE_SIZE;
	if (delay_chk_sz) {
		board_cpu_num = num_online_cpus();
		if (tatol_page < board_cpu_num)
			pr_info("%s pages %d cpu_num %d\n", __func__, tatol_page, board_cpu_num);
	}
	mean_page = tatol_page / board_cpu_num;
	left_page = tatol_page % board_cpu_num;
	cpu_addr = vaddr;

	for_each_tracing_cpu(cpu) {
		if (!cpu_online(cpu))
			continue;
		nr_page = (log_index < left_page) ? mean_page + 1 : mean_page;
		if (nr_page == 0)
			break;
		idx = 0;

		do {
			if (!fiq)
				trace_access_lock(cpu);

			ret = ring_buffer_read_page(tr->array_buffer.buffer,
				bpage, PAGE_SIZE, cpu, 0);

			if (!fiq)
				trace_access_unlock(cpu);

			if (ret < 0)
				break;
#ifdef CONFIG_RTOS_PPC_DCBZ_ON_IOADDR_BUGFIX
			generic_memcpy(cpu_addr + idx * PAGE_SIZE, *bpage, PAGE_SIZE);
#else
			memcpy(cpu_addr + idx * PAGE_SIZE, *bpage, PAGE_SIZE);
#endif
			idx = (idx + 1) % nr_page;
		} while (1);

		log_index++;
		cpu_addr = (void *)(cpu_addr + nr_page * PAGE_SIZE);
	}

	return 0;
}

/*
 * trace_panic_mem_dump() is called by the panic handler.
 * When system panic occurs, will dump ttrace fiq data.
 */
static int trace_panic_mem_dump(struct notifier_block *this,
				unsigned long event, void *unused)
{
	int ret;
	/*
	 * Avoid writing the reserved memory twice when both a panic and
	 * an FIQ watchdog reset occur (the dump routine runs only once,
	 * see fiq_dump_one).
	 */
	ret = ftrace_sched_switch_dump_fiq();
	if (ret)
		pr_warn("write reserved mem: make sure reserved mem had configure? returned %d\n", ret);

	return NOTIFY_DONE; /* on panic, don't care return value */
}

/* Panic notifier that dumps the ttrace buffers to reserved memory. */
static struct notifier_block trace_panic_mem_dump_notifier = {
	.notifier_call  = trace_panic_mem_dump,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

/*
 * Sanity-check that a dump can proceed: a valid trace array and a
 * configured, mapped raw dump region.
 *
 * Returns 0 when dumping may proceed, -EINVAL otherwise.
 */
static int should_ftrace_dump(struct trace_array *tr, void *vaddr,
				unsigned long size)
{
	if (!tr)
		return -EINVAL;

	if (!(vaddr && size)) {
		/* %lu: raw_dump_size is unsigned long (was %ld); message
		 * grammar fixed ("did not configured") and string kept on
		 * one line so it stays grep-able */
		pr_err("The raw dump memory is not configured, raw_dump_addr = %pa, raw_dump_size = %lu.\n",
			&raw_dump_addr, raw_dump_size);
		return -EINVAL;
	}

	return 0;
}

/*
 * Dump the sched_switch trace data to the reserved memory from task
 * context.
 *
 * Turns the sched_switch tracer off first so the buffer is stable,
 * then copies every cpu's ring buffer into the mapped raw dump region
 * via write_reserved_mem() (with locking enabled, fiq = 0).
 *
 * Returns 0 on success or a negative errno from the validation, tracer
 * shutdown, page allocation, or copy steps.
 */
int ftrace_sched_switch_dump_task(void)
{
	int ret;
	void *bpage = NULL;
	void *vaddr = (void *)raw_dump_vaddr;
	unsigned long size = raw_dump_size;
	struct trace_array *tr = &global_trace;

	ret = should_ftrace_dump(tr, vaddr, size);
	if (ret)
		return ret;

	/* stop tracing before reading the buffer */
	ret = kernel_sched_switch(0);
	if (ret) {
		pr_err("Turn off sched switch tracer failure(ret:%d).\n", ret);
		return ret;
	}

	bpage = ring_buffer_alloc_read_page(tr->array_buffer.buffer, 0);
	if (!bpage)
		return -ENOMEM;

	ret = write_reserved_mem(tr, vaddr, size, &bpage, 0);

	/* bpage is non-NULL here (checked above), so free unconditionally;
	 * the redundant `if (bpage)` guard was removed */
	ring_buffer_free_read_page(tr->array_buffer.buffer, 0, bpage);

	return ret;
}
EXPORT_SYMBOL(ftrace_sched_switch_dump_task);

/* Guards against dumping the reserved memory more than once (e.g. both
 * the panic notifier and an FIQ path may call the dump). */
atomic_t fiq_dump_one = ATOMIC_INIT(0);

/*
 * Dump the sched_switch trace data to reserved memory from FIQ/panic
 * context.
 *
 * Unlike the task-context variant, this path must not take locks or
 * allocate: it reuses the pre-allocated swap_page_fiq and calls
 * write_reserved_mem() with fiq = 1 (no trace_access_lock).  Only the
 * first caller ever performs the dump; later calls return 0 immediately.
 *
 * Returns 0 on success (or on a repeated call), negative errno on
 * validation failure or missing swap page.
 */
int ftrace_sched_switch_dump_fiq(void)
{
	int ret;
	void *vaddr = (void *)raw_dump_vaddr;
	unsigned long size = raw_dump_size;
	struct trace_array *tr = &global_trace;

	/* this only run once in fiq */
	if (atomic_inc_return(&fiq_dump_one) > 1)
		return 0;

	ret = should_ftrace_dump(tr, vaddr, size);
	if (ret)
		return ret;

	if (!swap_page_fiq)
		return -ENOMEM;

	/* turn off ring buffer */
	tracing_off();

	ret = write_reserved_mem(tr, vaddr, size, &swap_page_fiq, 1);

	return ret;
}
EXPORT_SYMBOL(ftrace_sched_switch_dump_fiq);

/*
 * sched_switch tracer control.
 *
 * @val: KERNEL_SCHED_SWITCH_ENABLE to (re)start tracing, or
 *       KERNEL_SCHED_SWITCH_DISABLE to stop it.
 *
 * Returns 0 on success, -EINVAL for any other value.
 */
int kernel_sched_switch(int val)
{
	struct trace_array *tr = &global_trace;

	if (val == KERNEL_SCHED_SWITCH_DISABLE) {
		if (tracing_is_on())
			tracing_off();
		return 0;
	}

	if (val != KERNEL_SCHED_SWITCH_ENABLE)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	if (!tracing_is_on())
		tracing_on();

	/* restart the current tracer if it provides a start hook */
	if (tr->current_trace->start)
		tr->current_trace->start(tr);

	mutex_unlock(&trace_types_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kernel_sched_switch);

/*
 * Disable the sched_switch tracer when a coredump-causing signal is
 * delivered (called from probe_signal_generate()).
 *
 * @sig: signal number that triggered the coredump
 * @t:   task the signal was sent to
 *
 * Logs sender/receiver details only for the first disable (while
 * ttrace_disable_cnt is still zero), so repeated coredumps don't spam
 * the log.
 */
void ttrace_disable(int sig, struct task_struct *t)
{
	int ret;

	if (!atomic_read(&ttrace_disable_cnt))
		pr_err("%16s pid=%d send signal %d to %16s pid=%d on cpu %d\n",
			current->comm, current->pid, sig, t->comm, t->pid,
			smp_processor_id());

	/*
	 * Thread get coredump, it means that thread or system
	 * is in exception status, close the sched_switch tracer.
	 * Only update ttrace_disable_cnt on successfully disabling ftrace.
	 */
	ret = kernel_sched_switch(0);
	if (ret < 0)
		pr_err("Fail to close ftrace sched_switch tracer(ret:%d)\n", ret);
	else
		atomic_set(&ttrace_disable_cnt, 1);
}
