// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
 * Description: Adapt for kasan
 * Author: Huawei OS Kernel Lab
 * Create: Tue Oct 19 17:57:08 2021
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <liblinux/pal.h>

#include "slab.h"
#include "hm_kasan.h"

static inline struct task_struct *kasan_get_current(void)
{
	/* The PAL keeps the owning task_struct as per-thread private data. */
	void *thread_data = liblinux_pal_thread_get_my_data();

	return (struct task_struct *)thread_data;
}

/*
 * Re-enable / suppress KASAN checking for the current task.
 *
 * kasan_depth is a nesting counter: checks and reports are performed only
 * while it is zero (see check_memory_region() and kasan_report() below).
 * Following the upstream KASAN contract, kasan_disable_current() therefore
 * increments the counter and kasan_enable_current() decrements it.  The
 * previous code had the operators swapped, which only behaved correctly by
 * relying on unsigned wraparound (0 - 1 == UINT_MAX != 0).
 */
void kasan_enable_current(void)
{
	current->kasan_depth--;
}

void kasan_disable_current(void)
{
	current->kasan_depth++;
}

static __always_inline void check_memory_region(unsigned long addr, size_t size, bool write,
							unsigned long ret_ip)
{
	struct task_struct *tsk = kasan_get_current();

	/* Skip the check while this task has KASAN suppressed (depth != 0). */
	if (tsk != NULL && tsk->kasan_depth != 0U)
		return;

	liblinux_pal_kasan_check(addr, size, write, ret_ip);
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	/* Validate a read of @size bytes at @p; always reports success. */
	unsigned long addr = (unsigned long)p;

	check_memory_region(addr, size, false, _RET_IP_);
	return true;
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	/* Validate a write of @size bytes at @p; always reports success. */
	unsigned long addr = (unsigned long)p;

	check_memory_region(addr, size, true, _RET_IP_);
	return true;
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	/* The whole destination range is written, so check it as a write. */
	unsigned long dest = (unsigned long)addr;

	check_memory_region(dest, len, true, _RET_IP_);
	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
/*
 * Interposed memmove(): validate both ranges, then delegate to the
 * architecture implementation.  @src is checked as a read and @dest as
 * a write; overlapping ranges are handled by __memmove() itself.
 */
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);
	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	unsigned long from = (unsigned long)src;
	unsigned long to = (unsigned long)dest;

	/* Reads the entire source range and writes the entire destination. */
	check_memory_region(from, len, false, _RET_IP_);
	check_memory_region(to, len, true, _RET_IP_);
	return __memcpy(dest, src, len);
}

static inline void kasan_poison_shadow(const void *address, size_t size)
{
	/* A zero-sized region has no shadow to update. */
	if (size == 0)
		return;

	liblinux_pal_kasan_poison(address, size, KASAN_POISON);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	/* A zero-sized region has no shadow to update. */
	if (size == 0)
		return;

	liblinux_pal_kasan_poison(address, size, KASAN_UNPOISON);
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	/* Unpoison the stack region from its base up to (not including) @sp. */
	void *stack_base = task_stack_page(task);
	size_t used = sp - stack_base;

	kasan_unpoison_shadow(stack_base, used);
}

void kasan_unpoison_task_stack(struct task_struct *task)
{
	if (task == NULL)
		return;

	/* Unpoison the whole stack: base up to base + THREAD_SIZE. */
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/* Round down to the base of the THREAD_SIZE-aligned stack area. */
	unsigned long stack_base = (unsigned long)watermark & ~(THREAD_SIZE - 1);

	kasan_unpoison_shadow((const void *)stack_base,
			      (unsigned long)watermark - stack_base);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	/* Highmem pages have no shadow mapping to maintain. */
	if (PageHighMem(page))
		return;

	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	/* Highmem pages have no shadow mapping to maintain. */
	if (PageHighMem(page))
		return;

	kasan_poison_shadow(page_address(page), PAGE_SIZE << order);
}

static inline unsigned int optimal_redzone(unsigned int object_size)
{
	/*
	 * Choose the smallest redzone such that object + redzone still fits
	 * the matching size class (64B ... 64KB); anything larger gets the
	 * maximum 2KB redzone.
	 */
	static const unsigned int class_size[] = {
		64, 128, 512, 4096, 1 << 14, 1 << 15, 1 << 16,
	};
	static const unsigned int redzone_size[] = {
		16, 32, 64, 128, 256, 512, 1024,
	};
	size_t i;

	for (i = 0; i < sizeof(class_size) / sizeof(class_size[0]); i++) {
		if (object_size <= class_size[i] - redzone_size[i])
			return redzone_size[i];
	}

	return 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags)
{
	/* Grow each object by a redzone sized for this cache and mark it. */
	unsigned int redzone = optimal_redzone(cache->object_size);

	*size += redzone;
	*flags |= SLAB_KASAN;
}

void kasan_poison_slab(struct page *page)
{
	/* Poison the entire (possibly compound) slab page range. */
	size_t slab_bytes = PAGE_SIZE << compound_order(page);

	kasan_poison_shadow(page_address(page), slab_bytes);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	/* Make exactly the object payload accessible. */
	size_t payload = cache->object_size;

	kasan_unpoison_shadow(object, payload);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	/* Poison the payload rounded up to shadow granularity. */
	size_t poisoned = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);

	kasan_poison_shadow(object, poisoned);
}

/* No per-object metadata is kept in this port; return the object as-is. */
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	(void)cache;
	return (void *)object;
}

/*
 * Annotate a large (page-allocator backed) kmalloc allocation:
 * [ptr, ptr + size) becomes accessible, the tail of the page span up to
 * the shadow-granularity boundary becomes a poisoned redzone.
 * Returns @ptr unchanged (NULL passes through untouched).
 */
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	struct page *page;

	(void)flags;
	if (ptr == NULL)
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);
	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start);

	return (void *)ptr;
}

/*
 * Handle freeing of a large (page-allocator backed) kmalloc chunk.
 * A valid chunk must start at its head page; anything else is reported
 * as an invalid free.  NOTE(review): the report does not abort — the
 * poisoning below still runs, using virt_to_page(ptr) rather than the
 * head page; confirm this is intended for misaligned frees.
 */
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	struct page *page = virt_to_page(ptr);

	if (ptr != page_address(virt_to_head_page(ptr)))
		liblinux_pal_kasan_report((unsigned long)ptr, 0, false, ip);

	/* Poison the freed span so stale accesses are caught. */
	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page));
}

/*
 * Poison a slab object on free and flag invalid frees.
 *
 * A pointer that is not an object boundary within its slab is reported
 * as an invalid free.  Objects in SLAB_TYPESAFE_BY_RCU caches are left
 * accessible, since readers may legitimately touch them until the grace
 * period ends.  @quarantine is unused in this port, and false is always
 * returned so the caller proceeds with the actual free.
 */
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	unsigned long rounded_up_size;

	(void)quarantine;
	if (nearest_obj(cache, virt_to_head_page(object), object) != object) {
		liblinux_pal_kasan_report((unsigned long)object, 0, false, ip);
	} else if ((cache->flags & SLAB_TYPESAFE_BY_RCU) == 0) {
		/* Probe one byte first: an already-poisoned (double-freed)
		 * object triggers a report here before we re-poison it. */
		check_memory_region((unsigned long)object, 1, false, ip);
		rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
		kasan_poison_shadow(object, rounded_up_size);
	}

	return false;
}

/*
 * Poison memory released through a kfree()-style path.
 *
 * For page-backed allocations @ptr must be the page start; otherwise the
 * free is reported as invalid and nothing is poisoned.  Slab objects are
 * delegated to __kasan_slab_free().
 */
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page = virt_to_head_page(ptr);

	if (PageSlab(page) == 0) {
		if (ptr != page_address(page)) {
			liblinux_pal_kasan_report((unsigned long)ptr, 0, false, ip);
			return;
		}
		kasan_poison_shadow(ptr, page_size(page));
	} else {
		(void)__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

/*
 * Annotate a slab allocation: [object, object + size) becomes accessible
 * and the remainder of the object payload (up to shadow granularity)
 * becomes a poisoned redzone.  @flags and @keep_tag are unused in this
 * port.  Returns @object unchanged (NULL passes through untouched).
 */
void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
				gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	(void)flags;
	(void)keep_tag;
	if (object == NULL)
		return NULL;

	redzone_start = round_up((unsigned long)(object + size), KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
					KASAN_SHADOW_SCALE_SIZE);
	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start);

	return (void *)object;
}

/* Public kmalloc hook: thin wrapper around __kasan_kmalloc(). */
void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				  size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

/*
 * Re-annotate an object after krealloc().  Page-backed allocations and
 * slab objects are dispatched to the matching annotation helper; the
 * ZERO_SIZE_PTR sentinel passes through untouched.
 */
void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (object == ZERO_SIZE_PTR)
		return (void *)object;

	page = virt_to_head_page(object);
	if (PageSlab(page) == 0)
		return kasan_kmalloc_large(object, size, flags);

	return __kasan_kmalloc(page->slab_cache, object, size, flags, true);
}

/* Slab allocation hook: unpoison the whole object payload. */
void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}


/* Slab free hook: always returns false (object is never quarantined). */
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip)
{
	return __kasan_slab_free(s, object, ip, true);
}

/*
 * The functions below are no-op stubs: module/vmalloc shadow management,
 * zero-shadow ranges, slab metadata and the multi-shot report switch are
 * all unused in this liblinux-based port.
 */
int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask)
{
	(void)addr;
	(void)size;
	(void)gfp_mask;
	return 0;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	(void)vm;
}

int kasan_add_zero_shadow(void *start, unsigned long size)
{
	(void)start;
	(void)size;
	return 0;
}

void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	(void)start;
	(void)size;
}

/* No redzone/metadata is stored alongside objects in this port. */
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	(void)cache;
	return 0;
}

/* Multi-shot reporting is not configurable here; report "was disabled". */
bool kasan_save_enable_multi_shot(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);

void kasan_restore_multi_shot(bool enabled)
{
	(void)enabled;
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);

bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	struct task_struct *tsk = kasan_get_current();
	/* Reports are squelched while the task's KASAN depth is raised. */
	bool suppressed = (tsk != NULL && tsk->kasan_depth != 0);

	if (!suppressed)
		liblinux_pal_kasan_report(addr, size, is_write, ip);

	return true;
}

/* Called by instrumented code before noreturn functions; nothing to do here. */
void __asan_handle_no_return(void)
{
}
EXPORT_SYMBOL(__asan_handle_no_return);

/*
 * Instrumentation entry points emitted by the compiler for fixed-size
 * accesses.  Each size produces __asan_load<size>/__asan_store<size>
 * plus the _noabort variants, which are plain aliases because this port
 * never aborts on a report.
 */
#define DEFINE_ASAN_LOAD_STORE(size)						\
	void __asan_load##size(unsigned long addr)				\
	{									\
		check_memory_region(addr, size, false, _RET_IP_);		\
	}									\
	EXPORT_SYMBOL(__asan_load##size);					\
	__alias(__asan_load##size)						\
	void __asan_load##size##_noabort(unsigned long);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);				\
	void __asan_store##size(unsigned long addr)				\
	{									\
		check_memory_region(addr, size, true, _RET_IP_);		\
	}									\
	EXPORT_SYMBOL(__asan_store##size);					\
	__alias(__asan_store##size)						\
	void __asan_store##size##_noabort(unsigned long);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

/* Variable-size access instrumentation entry points (compiler-emitted). */
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

/* _noabort variant is an alias: this port never aborts on a report. */
__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

static void register_global(struct kasan_global *global)
{
	/* Unpoison the global's live bytes, poison its trailing redzone. */
	size_t live = global->size;
	size_t aligned = round_up(live, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, live);
	kasan_poison_shadow(global->beg + aligned,
			    global->size_with_redzone - aligned);
}

/*
 * Register @size globals from the compiler-emitted table, annotating the
 * shadow for each.  The loop counter is size_t: the previous int counter
 * caused a signed/unsigned comparison against @size and could not cover
 * its full range.
 */
void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

/* Globals stay annotated for the lifetime of the image; nothing to undo. */
void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
	(void)globals;
	(void)size;
}
EXPORT_SYMBOL(__asan_unregister_globals);

/*
 * Annotate an alloca() of @size bytes at @addr (compiler-emitted call).
 * Layout: a left redzone of KASAN_ALLOCA_REDZONE_SIZE bytes precedes
 * @addr; the object itself is unpoisoned; padding up to the next
 * KASAN_ALLOCA_REDZONE_SIZE boundary plus a right redzone is poisoned.
 */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	/* Only the partial last shadow granule needs explicit unpoisoning. */
	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE);
	kasan_poison_shadow(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE);
}
EXPORT_SYMBOL(__asan_alloca_poison);

void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	/* The stack grows down, so a valid range has top at or below bottom. */
	if (likely(stack_top && stack_top <= stack_bottom))
		kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);
