// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright © 2018-2019 Ariadne Devos

/* Extracted and transcluded from <sHT/nospec.h>. */

/* Branchless bounds mask: sets *maskp to all-ones when pos < length,
   and to 0 otherwise, without a conditional branch the CPU could
   mispredict (cf. Linux's array_index_mask_nospec()).

   "cmp %1,%2; sbb %0,%0": in AT&T syntax, "cmp %1,%2" computes
   pos - length (%1 = length, %2 = pos) and sets the carry flag
   exactly when pos < length, unsigned. "sbb %0,%0" then yields
   mask <- (mask - mask) - CF, i.e. mask <- -(pos < length).
   (unsigned, so pos, length > SSIZE_MAX is allowed).

   maskp  - pointer to the output mask ("=&r": register, early-clobber)
   pos    - index to test ("r": register)
   length - exclusive upper bound ("rm": register or memory)
   Clobbers only the condition codes ("cc").

   NOTE(review): no operand-size suffix on cmp/sbb — the assembler
   infers it from the register operands, so pos and length should be
   the same width.

   Alternatively, one could do a compare, setcc, and decrement, but
   that's an instruction longer. */
#define _sHT_index_mask(maskp, pos, length) \
	__asm__("cmp %1,%2; sbb %0,%0" : "=&r" (*(maskp)) : "rm" (length), "r" (pos) : "cc")

/* Stop speculative execution at this point by issuing an "lfence".
   This sequence is what Intel tells us to do, and what Linux can do.
   It also has some effect upon memory ordering.

   TODO: check its correctness. Linux sometimes does "mfence"
   instead, why? Something to do with AMD / Intel differences.

   "volatile" and the "memory" clobber are for paranoia, to keep the
   compiler from reordering or eliminating the barrier (Linux does
   this too). Although in my tests (GCC 6.3.0, GCC 8.3.0, Clang 6.0),
   even without any of these nothing happens ... */
#define _sHT_speculation_barrier() \
	__asm__ volatile("lfence" : : : "memory")
