// SPDX-License-Identifier: GPL-2.0 or GPL-3.0
// Copyright © 2018-2019 Ariadne Devos
/* sHT -- allocate pages from the kernel */

#include <sHT/block.h>
#include <sHT/compiler.h>
#include <sHT/nospec.h>
#include <sHT/test.h>
#include <sHT/index.h>

#ifdef _WIN32
# include <memoryapi.h>
# include <winnt.h>
#endif


void *
sHT_block_alloc(size_t size)
{
	/* TODO: consider large-page support? */

#ifdef _WIN32
	return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE,
		PAGE_READWRITE);
#else
	/* A zero @var{size} is deliberately not rejected here: mmap(2)
	   refuses it on its own, and array allocation is most likely not
	   conditional. */
	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS
# ifdef MAP_POPULATE
		/* Pre-fault the pages now instead of on first access. */
		| MAP_POPULATE
# endif
# ifdef MAP_UNINITIALIZED
		/* Skip zeroing where the kernel allows it. */
		| MAP_UNINITIALIZED
# endif
		;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, map_flags, -1, 0);
#endif
}

void
sHT_block_free(void *block, size_t size)
{
	/** Return the @var{size}-byte block @var{block}, previously
	    obtained from @code{sHT_block_alloc}, to the kernel.
	    Halts when the kernel reports the region was not mapped
	    (which usually indicates a double-free or a corrupt pointer). */
#ifdef _WIN32
	/* Mingw header files are inconsistent in the type of BOOL.
	   Pick something safe. */
	long ret = VirtualFree(block, 0, MEM_RELEASE);
	/* VirtualFree returns nonzero on success and zero on failure,
	   so only a *zero* result signals an invalid free. */
	if (sHT_zero_p(ret))
		sHT_halt("freeing unallocated memory (perhaps a double-free?)");
#else
	/* Check that @var{block} actually points to something. On systems
	   with a MMU, this will probably catch many double-free cases, as the
	   pointer then doesn't point to anything. On architectures that do
	   not allow unaligned access, this may catch pointer corruptions. */
	sHT_hide_var(block);
	sHT_depend(block, *(int *) block);
	sHT_hide_var(block);
	/* munmap returns -1 on failure; halt then, mirroring the
	   Windows branch's detection of invalid frees. */
	if (sHT_nonzero_p(munmap(block, size)))
		sHT_halt("freeing unallocated memory (perhaps a double-free?)");
#endif
}

_Bool
sHT_block_alloc_batch(size_t n, void *dest[], const size_t sizes[])
{
	/** Allocate @var{n} memory blocks, storing a fresh block of
	    @code{sizes[i]} bytes into @code{dest[i]} for each index.
	    Returns 1 on allocation failure -- after freeing the blocks
	    allocated so far -- and 0 (through @code{sHT_depending})
	    on success.

	    @var{i} is supposedly-dependent upon all tried allocations.
	    The return value is supposedly-dependent upon @var{i} when it
	    is zero (such that on despeculation, the allocated objects are
	    freshly allocated).

	    Using @var{i} instead of a variable holding the return value
	    reduces register -- and therefore stack -- usage.

	    Speculative double-allocation is not problematic. */
	size_t i;
	sHT_index_iterate(i, n) {
		void *val = sHT_block_alloc(sizes[i]);
		/* NOTE(review): assumes sHT_block_alloc reports failure as
		   sHT_BLOCK_ALLOC_FAILED (mmap's MAP_FAILED / VirtualAlloc's
		   NULL) -- confirm against <sHT/block.h>. */
		if (sHT_eq_pointer(val, sHT_BLOCK_ALLOC_FAILED))
			goto oom;
		/* Record the supposed dependency before publishing the
		   pointer, so a despeculated @var{i} covers it. */
		sHT_depend(i, val);
		dest[i] = val;
	}
	return sHT_depending(0, i);
oom:
	/* Only the first @var{i} entries of @var{dest} were written, so
	   free exactly those. Speculative OOMs or memory leaks are not
	   problematic, so do not track supposed dependencies or
	   despeculate. */
	sHT_block_free_batch(i, dest, sizes);
	return 1;
}

void
sHT_block_free_batch(const size_t n, void *const dest[], const size_t sizes[])
{
	/** Free the @var{n} blocks @code{dest[0] .. dest[n - 1]}, where
	    @code{dest[i]} was allocated with @code{sizes[i]} bytes.

	    Free in reverse order of allocation, as the fresher entries are
	    more likely to be in the cache than old. */
	size_t i = n;
	while (!sHT_zero_p(i--)) {
		/* When a block has been unmapped, its address may be reused
		   for a new block. So don't double free, as concurrency is
		   allowed. */
		sHT_despeculate();
		/* No index masking on the index `i` is necessary, because of
		   the despeculation above. */
		sHT_block_free(dest[i], sizes[i]);
	}
}
