// SPDX-License-Identifier: GPL-2.0 or GPL-3.0
// Copyright © 2018-2019 Ariadne Devos
// sHT -- allocate fixed-size objects

#include <stdint.h>
#include <stddef.h>

#include <sHT/block.h>
#include <sHT/bugs.h>
#include <sHT/compiler.h>
#include <sHT/nospec.h>
#include <sHT/resource.h>
#include <sHT/vector.h>
#include <sHT/test.h>
#include <sHT/index.h>

struct sHT_objcache
{
	/* Number of object slots in @code{elems} that were initialised by
	   @code{sHT_objcache_bless} (via @code{sHT_init_elems}). */
	size_t capacity;
	/* Size of a single object, in bytes. */
	size_t elem_size;
	/* Number of objects currently handed out by @code{sHT_alloc}.
	   Invariant: used <= capacity (asserted in sHT_alloc / sHT_free). */
	size_t used;
	_Alignas(sHT_vector_align)
	/* Its actual size is a multiple of @var{sHT_vector_align}, to
	   simplify SIMD code. */
	void *elems[];
	/* And after that, capacity * elem_size untyped bytes */
};

/* The cache header lives at the start of an sHT block, so block alignment
   must satisfy the header's alignment requirement. */
_Static_assert(sHT_BLOCK_ALIGN % _Alignof(struct sHT_objcache) == 0, "objcache must be at start of page");
_Static_assert(sHT_vector_align >= _Alignof(max_align_t), "increase alignment");

/* Number of @code{void *} slots per @var{sHT_vector_align}-sized chunk of
   @code{elems}.  A power of two (both operands are). */
#define sHT_OBJCACHE_UNIT (sHT_vector_align/sizeof(void *))

static size_t
sHT_objcache_round_capacity(size_t capacity)
{
	/* Round @var{capacity} up to the next multiple of
	   @var{sHT_OBJCACHE_UNIT}, so that capacity * sizeof(void *) is a
	   multiple of @var{sHT_vector_align}.  The unit is a power of two,
	   so a single add-and-mask suffices.
	   Branch-free: no branch mispredictions are possible. */
	size_t mask = (size_t) (sHT_OBJCACHE_UNIT - 1);
	return (capacity + mask) & ~mask;
}

/* Compute into @var{*dest} the number of bytes needed for a cache of
   @var{capacity} objects of @var{elem_size} bytes each:
   header + rounded-capacity pointer slots + rounded-capacity * elem_size
   object bytes.
   Returns true on size_t overflow; @var{*dest} is then meaningless. */
static _Bool
sHT_objcache_size(size_t *dest, size_t capacity, size_t elem_size)
{
	_Bool overflow;
	/* Avoid overflow later on. */
	/* Rounding up wraps exactly when capacity > SIZE_MAX -
	   sHT_OBJCACHE_UNIT + 1 (no larger multiple of the unit fits in a
	   size_t), so detect that here, before the wrap happens. */
	overflow = sHT_gt(capacity, SIZE_MAX - sHT_OBJCACHE_UNIT + 1);
	capacity = sHT_objcache_round_capacity(capacity);
	size_t calc;
	/* TODO: hypothetical Spectre issues */
	/* TODO: other compilers than GCC */
	/* Per object: one pointer slot plus the object bytes; every
	   arithmetic step is checked for overflow. */
	overflow |= __builtin_add_overflow(sizeof(void *), elem_size, &calc);
	overflow |= __builtin_mul_overflow(calc, capacity, &calc);
	overflow |= __builtin_add_overflow(calc, sizeof(struct sHT_objcache), &calc);
	*dest = calc;
	return overflow;
}

/* For each of @var{n} requested caches, compute into size[i] the byte
   count needed for capacity[i] objects of elem_size[i] bytes each.
   Returns 0 if any computation overflowed, otherwise the loop index
   after iterating (presumably @var{n}).
   NOTE(review): with n == 0 the return value is also 0, so a caller
   cannot tell "empty batch" from "overflow" -- presumably callers never
   pass n == 0; verify. */
size_t
sHT_objcache_size_batch(size_t n, size_t size[], const size_t capacity[], const size_t elem_size[])
{
	/* TODO: Spectre mitigations for hypothetical issue
	   (sHT_test_hidden2?) */
	_Bool overflow = 0;
	size_t i;
	sHT_index_iterate(i, n) {
		overflow |= sHT_objcache_size(&size[i], capacity[i], elem_size[i]);
	}
	if (sHT_nonzero_p(overflow))
		i = 0;
	return i;
}


/* Inlined into @var{sHT_objcache_bless} */
/* Point each of the @var{n} slots of @code{cache->elems} at its object:
   objects are laid out contiguously in @var{objects}, @var{elem_size}
   bytes apart.  @var{n} must be positive.
   Returns the number of slots initialised -- @var{n} architecturally,
   possibly less (but never 0) on a speculative execution. */
static size_t
sHT_init_elems(struct sHT_objcache *cache, size_t n, size_t elem_size, char *objects)
{
	size_t i = 0;
	/* Always do at least one loop, such that @var{i} cannot be 0 after
	   the loop in a speculative execution. That would cause
	   @code{cache->capacity} to be set to zero, which would break Spectre
	   mitigations in @code{sHT_alloc}. */
	do {
		/* If @var{n} were 0, there would be a speculative
		   out-of-bounds access. That's why the capacity must be
		   positive. */
		i = sHT_index_nospec(i, n);
		cache->elems[i] = objects + i * elem_size;
		i++;
	} while (i < n);
	return i;
}

/* Initialise the memory region @var{cache} (at least as large as
   @code{sHT_objcache_size} computed for the same arguments) as an empty
   object cache of @var{n} objects of @var{elem_size} bytes each.
   @var{n} must be positive.  Returns @var{cache}. */
static struct sHT_objcache *
sHT_objcache_bless(struct sHT_objcache *cache, size_t n, size_t elem_size)
{
	/* @code{(void) sHT_fix_alignment(&elem_size);} */
	/* @code{sHT_objcache_size} reserves pointer slots for the *rounded*
	   capacity, and the struct layout promises the object bytes start
	   after a multiple of @var{sHT_vector_align} worth of slots.  Using
	   the unrounded @var{n} here would start the objects inside the
	   padding slots and break that alignment; the rounded offset still
	   fits, since only n <= rounded-capacity objects follow. */
	char *objects = (char *) (cache->elems + sHT_objcache_round_capacity(n));
	/* Because of strict aliasing, and perhaps out-of-bound accesses */
	sHT_hide_var(objects);
	/* (After sHT_init_elems)
	   The 'continue' branch on @code{i < n} may have been speculatively
	   ignored, but @code{cache->elems[...]}, a pointer, may be returned
	   by @var{sHT_alloc}.

	   We do, however, have a variable that holds something less than or
	   equal to the capacity: @var{i}. @var{i} is not an
	   over-approximation, even on a speculative execution. An
	   under-approximation is safe, as long as it is positive. */
	n = sHT_init_elems(cache, n, elem_size, objects);
	cache->capacity = n;
	cache->elem_size = elem_size;
	cache->used = 0;
	return cache;
}

/* Initialise each of the @var{n} memory regions cache[i] as an object
   cache of capacity[i] objects of elem_size[i] bytes each (see
   @code{sHT_objcache_bless}).  Returns the loop index after iterating
   (presumably @var{n}). */
size_t
sHT_objcache_bless_batch(size_t n, void *cache[], const size_t capacity[], const size_t elem_size[])
{
	size_t i;
	sHT_index_iterate(i, n) {
		struct sHT_objcache *c = sHT_objcache_bless(cache[i], capacity[i], elem_size[i]);
		/* NOTE(review): presumably this makes the loop index
		   data-dependent on the blessing, for speculation hygiene --
		   confirm against <sHT/nospec.h>. */
		sHT_depend(i, c);
	}
	return i;
}

/* Take one object out of @var{cache}.
   Returns NULL when the cache is exhausted (used == capacity). */
void *
sHT_alloc(struct sHT_objcache *cache)
{
	sHT_assert(cache->used <= cache->capacity);
	if (sHT_eq(cache->used, cache->capacity))
		return NULL;
	/* Clamp the slot index in case the exhaustion check above was
	   speculatively bypassed. */
	size_t i = sHT_index_nospec(cache->used++, cache->capacity);
	/* This may speculatively allocate an already-allocated object,
	   which is documented in <sHT/resource.h>. */
	return cache->elems[i];
}

/* Return @var{object} to @var{cache}.  The cache must have at least one
   object outstanding (allocated by @code{sHT_alloc} and not yet freed). */
void
sHT_free(struct sHT_objcache *cache, void *object)
{
	sHT_assert(cache->used > 0);
	sHT_assert(cache->used <= cache->capacity);
	/* Clamp the slot index in case the assertions above were
	   speculatively bypassed. */
	size_t i = sHT_index_nospec(--cache->used, cache->capacity);
	/* On a speculative execution this may free a speculatively
	   double-allocated object, which is documented in
	   <sHT/resource.h>. */
	cache->elems[i] = object;
}

/* Test whether @var{cache} has no objects left to hand out, i.e.
   whether the next @code{sHT_alloc} would return NULL. */
_Bool
sHT_cache_exhausted_p(struct sHT_objcache *cache)
{
	size_t outstanding = cache->used;
	size_t limit = cache->capacity;
	return sHT_eq(outstanding, limit);
}
