/* shttpd - allocate all kinds of stuff
   Copyright (C) 2018 Ariadne Devos

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

#include <stdint.h>
#include <stddef.h>
#include <sHT/bugs.h>
#include <sHT/compiler.h>
#include <sHT/nospec.h>
#include <sys/mman.h>
#include <sHT/resource.h>
#include <sHT/vector.h>

/* A fixed-capacity object cache, backed by one private anonymous
   mapping (see @var{sHT_alloc_cache}).  Memory layout: this header,
   then @var{capacity} freelist slots (@var{elems}), then
   @code{capacity * elem_size} bytes of object storage.

   @var{elems} is used as a stack of free-object pointers: slots
   @code{[used, capacity)} point to free objects; slots below
   @var{used} are stale (their objects have been handed out). */
struct sHT_object_cache
{
	/* Total length of the mapping in bytes, as passed to munmap by
	   @var{sHT_free_cache}. */
	size_t size;
	/* Number of slots in @var{elems}.  Always positive (required by
	   the speculation reasoning in @var{sHT_init_elems}). */
	size_t capacity;
	/* Size of one object in bytes, already rounded up to a multiple
	   of @code{_Alignof(max_align_t)} by @var{sHT_alloc_cache}. */
	size_t elem_size;
	/* Number of objects currently handed out by @var{sHT_alloc}. */
	size_t used;
	_Alignas(sHT_vector_align)
	/* Its actual size is a multiple of @var{sHT_vector_align}, to
	   simplify SIMD code. */
	void *elems[];
	/* And after that, capacity * elem_size untyped bytes */
};

#ifndef MAP_POPULATE
# define MAP_POPULATE 0
#endif

/* May speculatively be incorrect. */
/* Round @var{*size} up to the next multiple of
   @code{_Alignof(max_align_t)}.  Returns 1 (and leaves @var{*size}
   untouched) when the rounded value would not fit in a size_t,
   0 on success.
   May speculatively be incorrect. */
static _Bool
sHT_fix_alignment(size_t *size)
{
	/* TODO: consider bit ops or sHT_modulo_nospec */
	size_t remainder = *size % _Alignof(max_align_t);
	if (remainder == 0)
		return 0;
	size_t padding = _Alignof(max_align_t) - remainder;
	/* Equivalent to padding > SIZE_MAX - *size: detect wrap-around
	   before performing the addition. */
	if (sHT_unlikely(*size > SIZE_MAX - padding))
		return 1;
	*size += padding;
	return 0;
}

/* May speculatively be incorrect. */
/* Compute the number of bytes needed for a cache of @var{n} objects of
   @var{elem_size} bytes each:
     sizeof(struct sHT_object_cache)
       + fix_alignment(n * sizeof(void *)) + n * elem_size
   Returns 0 when any intermediate computation overflows (a valid cache
   can never have size 0, since the header itself is non-empty).
   May speculatively be incorrect. */
__attribute__((const))
static inline size_t
sHT_cache_size(size_t n, size_t elem_size)
{
	size_t objects_bytes, freelist_bytes, payload, total;
	/* __builtin_{mul,add}_overflow are GCC/Clang builtins performing
	   overflow-checked arithmetic; they return 1 on overflow. */
	_Bool bad = __builtin_mul_overflow(n, elem_size, &objects_bytes);
	bad |= __builtin_mul_overflow(n, sizeof(void *), &freelist_bytes);
	/* Align the freelist's end so the object storage that follows it
	   is suitably aligned for any object type. */
	bad |= sHT_fix_alignment(&freelist_bytes);
	bad |= __builtin_add_overflow(freelist_bytes, objects_bytes, &payload);
	bad |= __builtin_add_overflow(sizeof(struct sHT_object_cache), payload, &total);
	return sHT_unlikely(bad) ? 0 : total;
}

/* Inlined into sHT_alloc_cache */
static size_t
sHT_init_elems(struct sHT_object_cache *cache, size_t n, size_t elem_size, char *objects)
{
	size_t i = 0;
	/* Always do at least one loop, such that @var{i} cannot be 0 after
	   the loop in a speculative execution. That would cause
	   @code{cache->capacity} to be set to zero, which would break Spectre
	   mitigations in @code{sHT_alloc}. */
	do {
		/* If @var{n} were 0, there would be a speculative
		   out-of-bounds access. That's why the capacity must be
		   positive. */
		i = sHT_index_nospec(i, n);
		cache->elems[i] = objects + i * elem_size;
		i++;
	} while (i < n);
	return i;
}

/* Turn a freshly mmap'ed region of at least
   @code{sHT_cache_size(n, elem_size)} bytes into an initialised, empty
   object cache.  @var{n} must be positive; @var{elem_size} must be
   positive and already rounded up by @var{sHT_fix_alignment} (the
   caller, @var{sHT_alloc_cache}, does this).
   Returns @var{cache}, with all @var{n} objects free. */
static struct sHT_object_cache *
sHT_objcache_bless(struct sHT_object_cache *cache, size_t n, size_t elem_size)
{
	/* has already been done by @var{sHT_alloc_cache} */
	/* @code{(void) sHT_fix_alignment(&elem_size);} */
	/* Object storage begins right after the @var{n} freelist slots. */
	char *objects = (char *) (cache->elems + n);
	/* Because of strict aliasing, and perhaps out-of-bound accesses */
	sHT_hide_var(objects);
	cache->size = sHT_cache_size(n, elem_size);
	/* (After sHT_init_elems)
	   The 'continue' branch on @code{i < n} may have been speculatively
	   ignored, but @code{cache->elems[...]}, a pointer, may be returned
	   by @var{sHT_alloc}.

	   We do, however, have a variable that holds something less than or
	   equal to the capacity: @var{i}. @var{i} is not an
	   over-approximation, even on a speculative execution. An
	   under-approximation is safe, as long as it is positive. */
	n = sHT_init_elems(cache, n, elem_size, objects);
	cache->capacity = n;
	cache->elem_size = elem_size;
	cache->used = 0;
	return cache;
}

/* Create a cache of @var{n} objects of @var{elem_size} bytes each,
   backed by a single private anonymous mapping.  Both arguments must
   be positive; @var{elem_size} is rounded up to a multiple of
   @code{_Alignof(max_align_t)}.
   Returns NULL when the total size would overflow or mmap fails;
   otherwise an empty cache, to be released with @var{sHT_free_cache}. */
struct sHT_object_cache *
sHT_alloc_cache(size_t n, size_t elem_size)
{
	sHT_require(n > 0);
	sHT_require(elem_size > 0);
	/* Overflow is caught by sHT_cache_size. */
	(void) sHT_fix_alignment(&elem_size);
	/* TODO: huge pages? */
	/* TODO: MAP_UNINITIALIZED on embedded devices? */
	size_t size = sHT_cache_size(n, elem_size);
	if (sHT_unlikely(size == 0))
		return NULL;
	sHT_despeculate(size);

	void *bytes = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (sHT_unlikely(bytes == MAP_FAILED))
		return NULL;
	/* @var{size} might be large, so the elements and pointers can be in a
	   page mapped for something else if the branch above has been
	   speculatively ignored. @var{sHT_index_nospec} won't work here. */
	sHT_despeculate(bytes);
	struct sHT_object_cache *cache = bytes;
	return sHT_objcache_bless(cache, n, elem_size);
}

/* Release the whole mapping backing @var{cache}.  Every object handed
   out by @var{sHT_alloc} on this cache becomes invalid, as does
   @var{cache} itself. */
void
sHT_free_cache(struct sHT_object_cache *cache)
{
	size_t mapping_length = cache->size;
	(void) munmap(cache, mapping_length);
}

/* Take a free object out of @var{cache}.
   Returns a pointer to @code{cache->elem_size} bytes of storage, or
   NULL when all objects are in use.  Return the object with
   @var{sHT_free} on the same cache. */
void *
sHT_alloc(struct sHT_object_cache *cache)
{
	sHT_assert(cache->used <= cache->capacity);
	if (cache->used == cache->capacity)
		return NULL;
	/* Clamp the index so a mispredicted exhaustion branch above
	   cannot read past the freelist. */
	size_t i = sHT_index_nospec(cache->used++, cache->capacity);
	/* This may speculatively allocate an already-allocated object,
	   which is documented in <sHT/resource.h>. */
	return cache->elems[i];
}

/* Return @var{object}, previously obtained from @var{sHT_alloc} on the
   same @var{cache}, to the freelist.  At least one object must be
   outstanding. */
void
sHT_free(struct sHT_object_cache *cache, void *object)
{
	sHT_assert(cache->used > 0);
	sHT_assert(cache->used <= cache->capacity);
	size_t i = sHT_index_nospec(--cache->used, cache->capacity);
	/* This may speculatively free an object that was itself only
	   speculatively (double-)allocated, which is documented in
	   <sHT/resource.h> */
	cache->elems[i] = object;
}

/* Test whether @var{cache} has no free objects left, i.e. whether the
   next @var{sHT_alloc} on it would return NULL. */
_Bool
sHT_cache_exhausted_p(struct sHT_object_cache *cache)
{
	size_t outstanding = cache->used;
	size_t limit = cache->capacity;
	return outstanding == limit;
}
