// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only
// Copyright © 2019 Ariadne Devos
/* sHT -- fixed-size memory allocation */

#ifndef _sHT_RESOURCE_H
#define _sHT_RESOURCE_H

#include <stddef.h>

/** Object caches

  s2's object caches hand out pre-allocated objects of fixed size. They
  are akin to slabs and object pools, but subtly different.

  A slab's memory is allocated on-demand[SLAB], whereas in a cache, it
  is done in advance, when s2 boots or loads a module. That seems to
  be the only conceptual difference, although this causes many
  repercussions in the implementation.

  Why this inflexibility? If s2 is the only application, it can allocate
  as it pleases. If there are other applications that allocate memory
  on-demand, s2 services could be unpredictably unavailable, which is not
  reliable -- but s2 must be, up to a configurable value of load.

  (This could be less rigid. E.g., the object cache may be grown
  opportunistically for non-critical tasks. But s2 allows for spawning
  new, independent but cooperating processes, so this is not a problem.
  In fact, this forms automatic tests.)

  [SLAB]: The Slab Allocator: An Object-Caching Kernel Memory Allocator
  by Jeff Bonwick, Sun Microsystems.

  It is assumed that there is no shortage of virtual memory.

  * Usage

  Caches are initialised with @var{sHT_objcache_bless_batch}. It does not
  allocate memory itself, rather, it takes a block of memory to use.

  @var{sHT_objcache_size_batch} calculates how large the blocks must be.

  @var{sHT_alloc} tries to allocate an object from a cache.
  @var{sHT_free} frees the object, making it available for later operations.

  Caches do not have to be freed. While in use, however, they require the
  block to remain available. In particular, the objects are allocated from
  the block. */

struct sHT_objcache;

/** Calculate the size of the memory blocks the object caches need

  @var{n}: the number of caches to calculate the size for, positive
  @var{size}: a writable array to put the calculated cache size in, in the
    same order as @var{capacity} and @var{elem_size}, of length @var{n} and
    not accessed concurrently
  @var{capacity}: an array of the numbers of distinct objects that can be
    allocated from the cache, of length @var{n}, readable and unchanging
  @var{elem_size}: an array of the sizes of each object that can be
    allocated from the cache, of length @var{n}, readable and unchanging

  The return value is non-negative and less than or equal to @var{n}. Name
  it @var{i}. At least @var{i} elements of @var{size} are set, although they
  may speculatively be incorrect. On a non-speculative execution, @var{i}
  being less than @var{n} signifies an overflow condition, implying the
  hypothetical memory block isn't allocatable.

  Once @var{i} is despeculated, if it is equal to @var{n}, the elements of
  @var{size} are correct.

  (Strictly speaking, a memory block could span multiple caches. Would that
  be a good idea? Fewer syscalls, less TLB pressure, less padding overhead,
  higher portability, but also less bug detection and more predictability
  for attackers.) */
size_t
sHT_objcache_size_batch(size_t n, size_t size[], const size_t capacity[], const size_t elem_size[]);

/** Bless some memory blocks into caches

  @var{n}: the number of blocks to bless into caches, positive
  @var{cache}: a readable, unchanging array of distinct memory blocks
    / caches-to-be, in the same order as @var{capacity} and @var{elem_size},
    that can be written to, read from after writing, and are not accessed
    concurrently, of size returned by @var{sHT_objcache_size_batch}...
  @var{capacity}: the capacity of each cache, unchanging and readable
    for the duration of this call, disjoint from @var{cache}
  @var{elem_size}: the size of an element in each cache, unchanging and
    readable for the duration of this call, disjoint from @var{cache}

  The return value is non-negative and less than or equal to @var{n}. Name
  it @var{i}. At least @var{i} elements of @var{cache} are set, although they
  may speculatively be incorrect. On a non-speculative execution, @var{i} is
  @var{n}. */
size_t
sHT_objcache_bless_batch(size_t n, void *cache[], const size_t capacity[], const size_t elem_size[]);

/** Try to allocate a fresh object from an object cache.

    @var{cache}: the object cache to utilise

    The object will have the size the cache was blessed with (its
    @var{elem_size} entry passed to @var{sHT_objcache_bless_batch}),
    and at least standard alignment (i.e. @code{_Alignof(max_align_t)}).

    This procedure does not know about signals or POSIX asynchronous
    cancellation.

    This may fail by returning @code{NULL} if the object cache is exhausted.
    Else, return a fresh object. On a speculative execution, the object may
    not be fresh: it may share memory with an already-allocated object of the
    cache. If this is a problem in your use case, you can use
    @var{sHT_despeculate}. */
__attribute__((warn_unused_result))
__attribute__((nonnull (1)))
void *
sHT_alloc(struct sHT_objcache *cache);

/** Free an object that belongs to a certain cache.

    @var{cache}: the object cache @var{object} was allocated from
    @var{object}: the object to free

    The object must have been allocated by @var{sHT_alloc} with the same
    cache argument. The object will become dangling.
    @var{object} speculatively being NULL is also allowed, although that
    speculatively messes up statistics.

    This procedure does not know about signals or POSIX asynchronous
    cancellation.

    This cannot fail in any way. */
__attribute__((nonnull (1)))
void
sHT_free(struct sHT_objcache *cache, void *object);

/** Test if a cache is exhausted.

    @var{cache}: the object cache to test

    This does not mutate anything.

    This procedure does not know about signals or POSIX asynchronous
    cancellation.

    This cannot fail in any way. Return 1 if exhausted, 0 otherwise.
    An incorrect boolean may be returned on a speculative execution. */
__attribute__((nonnull (1)))
__attribute__((pure))
_Bool
sHT_cache_exhausted_p(struct sHT_objcache *cache);

#endif
