/* shttpd - make sure each task can make some progress
   Copyright (C) 2018 Ariadne Devos

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

#ifndef _sHT_SCHEDULING_H
#define _sHT_SCHEDULING_H

#include <sHT/bugs.h>
#include <sHT/compiler.h>
#include <sHT/nospec.h>
/* For sHT_TASK_SCHEDULED */
#include <sHT/task.h>
#include <sHT/test.h>

#include <stddef.h>

/** A FIFO queue of tasks.

  A task can only be in one queue at a time,
  because of the @var{.next} pointer embedded inside.
  But on the upside, no memory has to be allocated separately!

  Queued tasks have the @var{sHT_TASK_QUEUED} flag set.
  It is set by @var{sHT_qadd} and @var{sHT_qadd_try},
  it should not be set by any other function. */
struct sHT_task_queue
{
	/** The first task, or @code{NULL} if the queue is empty.
	  Tasks are taken from the front. */
	struct sHT_task *head;
	/** A pointer to @code{t->next}, where @code{t} is the last task
	  in the queue, or @var{head}, if the queue is empty.
	  New tasks are added to the end.
	  The last task, if any, has no 'next' pointer and is reachable
	  within a finite number of steps from @var{head} through 'next'. */
	struct sHT_task **tailp;
};

/** Initialise a task queue to the empty state.

  @var{q}: the queue to initialise.

  @var{q} may not be accessed concurrently. */
static inline void
sHT_task_queue_init(struct sHT_task_queue *q)
{
	/* An empty queue has no head, and its tail pointer
	   refers to the head slot itself. */
	q->head = NULL;
	q->tailp = &q->head;
}

/** Append a task to the back of the queue.

  @var{q}: any valid queue
  @var{task}: a non-queued task

  The task may not already be in the queue.

  Modifies @code{q->tailp}, @code{*q->tailp}
  and accesses the flag @var{sHT_TASK_QUEUED} in @var{task}.

  These may not be accessed concurrently. */
__attribute__((nonnull (1, 2)))
static inline void
sHT_qadd(struct sHT_task_queue *q, struct sHT_task *task)
{
	sHT_require(!(task->flags & sHT_TASK_QUEUED));
	task->flags |= sHT_TASK_QUEUED;
	/* Link the task into the slot the tail pointer refers to, then
	   advance the tail pointer past the new last task.  (The queue
	   invariants are temporarily violated in between.) */
	struct sHT_task **slot = q->tailp;
	*slot = task;
	q->tailp = &task->next;
}

/** Append a task to the back of the queue, unless it is already queued.

  @var{q}: any valid queue
  @var{task}: a task. If queued, it must belong to @var{q}.

  Modifies @code{q->tailp}, @code{*q->tailp}
  and accesses the flag @var{sHT_TASK_QUEUED} in @var{task}.

  These may not be accessed concurrently. */
__attribute__((nonnull (1, 2)))
static inline void
sHT_qadd_try(struct sHT_task_queue *q, struct sHT_task *task)
{
	if (!sHT_and_any(task->flags, sHT_TASK_QUEUED)) {
		task->flags |= sHT_TASK_QUEUED;
		/* Queue invariants are temporarily violated here. */
		*q->tailp = task;
		q->tailp = &task->next;
	}
}

/** Try to take a single task from the front of the queue

  @var{q}: any valid queue
  @var{nil}: code to perform if the queue is empty
  @var{some}: code to perform on the first task, after it is removed

  @var{nil} and @var{some} receive @var{q} as their first argument.
  They are called in tail position and their behaviour is not part of
  this specification.

  NOTE(review): the @var{sHT_TASK_QUEUED} flag is *not* cleared here --
  presumably @var{some} is responsible for that; confirm, as a task
  removed with the flag still set cannot be re-added with @var{sHT_qadd}.

  The queue and @var{.next} pointers may not be accessed concurrently. */
__attribute__((nonnull (1, 2, 3)))
__attribute__((always_inline))
static inline void
sHT_qtake(struct sHT_task_queue *q, void (* nil)(struct sHT_task_queue *), void (* some)(struct sHT_task_queue *, struct sHT_task *))
{
	struct sHT_task *first = q->head;
	if (sHT_null_p(first)) {
		nil(q);
		return;
	}
	/* Unlink the first task from the front. */
	struct sHT_task *next = q->head = first->next;
	if (sHT_null_p(next))
		/* @code{q->tailp} points to @code{first->next},
		   but that task is about to be removed, leaving
		   an empty queue -- reset the tail pointer. */
		q->tailp = &q->head;
	some(q, first);
}

#if 0

/** Convenience typedef for @var{sHT_perform_all} */
typedef void (sHT_task_perform)(void *cookie, struct sHT_task *);

/** Perform all tasks that have been queued in @var{deck}.

  @var{deck} must be non-free and concurrent scheduling and descheduling is
  forbidden.

  Let all tasks belonging to @var{deck}, even invalid, unscheduled and removed
  tasks, be U, which may contain duplicates. Let all scheduled tasks in
  @var{deck} be S. (S is a subset of U, and both are multisets.)
  Compose P, a -- possibly infinite -- list of tasks, such that every element
  of P is in U (but not necessarily having the same multiplicity) and on a
  non-speculative execution, there is a bijection between P and S.

  (Informally speaking, P are all tasks that will actually be executed.
  Ignoring Spectre, it corresponds to all tasks that have been scheduled.
  However, as the Spectre is real, it is possible that, temporarily, tasks will
  be executed multiple times and old tasks are 'recycled'.)

  Apply @var{f} to each element of P (the first argument is @var{cookie}).
  @var{f} is limited in operations to @var{deck}. It may be abandoned.
  Let abandonment be:
  - a cancellation within this function call, unhandled by @var{f}
  - a decision of @var{f} to abandon @var{deck}
  - a non-local return by @var{f}, skipping this function call

  Abandonment implies @var{deck} may be freed or have its capacity read.
  Non-abandonment implies @var{deck} may have its capacity read and that
  tasks may be scheduled. */
__attribute__((nonnull (2, 3)))
__attribute__((always_inline))
static inline void
sHT_perform_all(void *cookie, struct sHT_task_deque *deck, sHT_task_perform *f)
{
	/* @var{f} may schedule extra tasks, so remember the number of tasks
	   to perform. */
	size_t n_todo = deck->n;
	for (size_t i = 0; sHT_gt(n_todo, i); i++) {
		size_t j = deck->head + i;
		/* Make sure it points to a task belonging to @var{deck},
		   to take control of Spectre's branch. */
		j = sHT_modulo_nospec(j, deck->capacity);
		struct sHT_task *task = deck->tasks[j];
		task->flags &= ~sHT_TASK_SCHEDULED;
		f(cookie, task);
	}
	deck->head = sHT_modulo_nospec(deck->head + n_todo, deck->capacity);
	deck->n -= n_todo;
}
#endif

/** A non-negative identifier representing a set of kernel resources to watch
  for IO readiness. On *BSD, this is a kqueue(2); on Linux, this is an epoll(7)
  instance. */
typedef int sHT_watch_set;

/** Negative error codes returned by @var{sHT_alloc_watch_set}. */
enum sHT_watch_set_alloc_error
{
	/** The per-process limit on watch sets would have been exceeded. */
	sHT_WATCH_SET_ALLOC_EMFILE = -1,
	/** The system-wide limit on watch sets would have been exceeded. */
	sHT_WATCH_SET_ALLOC_ENFILE = -2,
	/** Memory limits would have been exceeded. */
	sHT_WATCH_SET_ALLOC_ENOMEM = -3,
};

/** Try to allocate a watch set.

  There may be speculative file descriptor leaks. This function may be
  performed concurrently. If a call to this function is interrupted by
  POSIX asynchronous cancellation, some file descriptors may be leaked.

  If the return value is despeculated and it is non-negative, it is the
  identifier of the watch set as a @var{sHT_watch_set}.
  If the return value is despeculated and it is negative, it is a negative
  @var{sHT_watch_set_alloc_error} error code. */
int
sHT_alloc_watch_set(void);

/** Free the watch set @var{watches}.

  @var{watches}: the possibly non-empty watch set to free.

  @var{watches} may not be accessed concurrently. If a call to this function
  is interrupted by POSIX asynchronous cancellation, @var{watches} may be
  leaked.

  There may be speculative file descriptor leaks.

  For the avoidance of doubt: @var{watches} must be valid. As such, it must
  have been despeculated at some point in time. Double-freeing is not allowed.

  This cannot fail in any way. */
void
sHT_free_watch_set(sHT_watch_set watches);

/** Result codes returned by @var{sHT_install_edge_trigger}. */
enum sHT_watch_set_install
{
	/** The file descriptor has been put into the watch set. */
	sHT_WATCH_SET_INSTALL_DONE = 0,
	/** The watch could not be allocated due to memory limits. */
	sHT_WATCH_SET_INSTALL_ENOMEM = 1,
	/** A maximum on the number of watches would have been exceeded
	  if the watch were allocated */
	sHT_WATCH_SET_INSTALL_LIMIT = 2,
};

/** Try to asynchronously wait for any readiness change on a file descriptor.

  @var{watches}: the watch set to put @var{fd} in.
  @var{fd}: the pollable file descriptor to register (e.g. a watch set or
  socket). Linux does not allow polling on regular files.
  @var{task}: the task to continue later, after a change in readiness.

  @var{watches} may not be accessed concurrently and may be modified.
  If a call to this function is cancelled, freeing becomes the only operation
  allowed upon @var{watches}.

  @var{fd} may not already be in @var{watches} and creating loops is not
  allowed.

  The return value may speculatively be incorrect. There may be speculative
  memory leaks.

  If the return value is despeculated, it is a valid
  @var{sHT_watch_set_install} indicating the result. */
__attribute__((warn_unused_result))
__attribute__((nonnull (3)))
enum sHT_watch_set_install
sHT_install_edge_trigger(sHT_watch_set watches, int fd, struct sHT_task *task);

/** Stop asynchronous waiting for a readiness change on a file descriptor.

  @var{watches}: the watch set to remove @var{fd} from.
  @var{fd}: the file descriptor to remove

  @var{watches} may not be accessed concurrently and may be modified.
  If a call to this function is cancelled, freeing becomes the only operation
  allowed upon @var{watches}.

  @var{fd} must be in @var{watches}.

  There may be speculative memory leaks and the removal may speculatively be
  ignored. This cannot fail in any way. */
void
sHT_delete_edge_trigger(sHT_watch_set watches, int fd);

#endif
