/* SPDX-License-Identifier: LGPL-3.0-or-later */
/* Copyright (C) 2014 Stony Brook University */

#ifndef _SHIM_INTERNAL_H_
#define _SHIM_INTERNAL_H_

#include <stdarg.h>
#include <stdbool.h>
#include <stdnoreturn.h>

#include "api.h"
#include "assert.h"
#include "atomic.h"
#include "log.h"
#include "pal.h"
#include "pal_error.h"
#include "shim_defs.h"
#include "shim_internal-arch.h"
#include "shim_tcb.h"
#include "shim_types.h"

void* shim_init(int argc, void* args);

/* important macros and static inline functions */

extern int g_log_level;

extern const PAL_CONTROL* g_pal_control;

// TODO(mkow): We should make it cross-object-inlinable, ideally by enabling LTO, less ideally by
// pasting it here and making `inline`, but our current linker scripts prevent both.
void shim_log(int level, const char* fmt, ...) __attribute__((format(printf, 2, 3)));

#if 0
#define DEBUG_BREAK_ON_FAILURE() DEBUG_BREAK()
#else
#define DEBUG_BREAK_ON_FAILURE() do {} while (0)
#endif

#define BUG()                                           \
    do {                                                \
        log_error("BUG() " __FILE__ ":%d", __LINE__);   \
        DEBUG_BREAK_ON_FAILURE();                       \
        die_or_inf_loop();                              \
    } while (0)

#define DEBUG_HERE()                                             \
    do {                                                         \
        log_debug("%s (" __FILE__ ":%d)", __func__, __LINE__);   \
    } while (0)

/*!
 * \brief High-level syscall emulation entrypoint.
 *
 * \param context CPU context at syscall entry.
 *
 * Emulates the syscall given the entry \p context.
 */
noreturn void shim_emulate_syscall(PAL_CONTEXT* context);
/*!
 * \brief Restore the CPU context.
 *
 * \param context CPU context to restore.
 *
 * This function restores the given \p context. It is only called on returning from a syscall, so
 * it does not need to be reentrant (there is no such thing as nested syscalls), but it cannot
 * assume that the CPU context is the same as at the entry to the syscall (e.g. sigreturn, or signal
 * handling may change it).
 */
noreturn void return_from_syscall(PAL_CONTEXT* context);
/*!
 * \brief Restore the context after clone/fork.
 *
 * \param context LibOS context to restore.
 *
 * Restores LibOS \p context after a successful clone or fork.
 */
noreturn void restore_child_context_after_clone(struct shim_context* context);
/*!
 * \brief Creates a signal frame
 *
 * \param context CPU context
 * \param siginfo signal to be delivered
 * \param handler pointer to the user app signal handler
 * \param restorer pointer to the restorer function
 * \param should_use_altstack `true` - use alternative stack if possible, `false` - never use it
 * \param old_mask old signal mask (to be stored in the signal frame)
 *
 * Creates a signal frame on the user app stack (either normal or alternative stack, depending on
 * \p should_use_altstack and the currently used stack). Arranges \p context so that restoring it jumps
 * to \p handler with appropriate arguments and returning from \p handler will jump to \p restorer,
 * (which usually just calls `sigreturn` syscall). On most (all?) architectures old \p context,
 * \p siginfo and \p old_mask are saved into the signal frame.
 */
void prepare_sigframe(PAL_CONTEXT* context, siginfo_t* siginfo, void* handler, void* restorer,
                      bool should_use_altstack, __sigset_t* old_mask);
/*!
 * \brief Restart a syscall
 *
 * \param context CPU context
 * \param syscall_nr syscall number
 *
 * Arranges \p context so that upon return to it redoes the \p syscall_nr syscall.
 */
void restart_syscall(PAL_CONTEXT* context, uint64_t syscall_nr);
/*!
 * \brief Restores a sigreturn context
 *
 * \param context original CPU context
 * \param[out] new_mask new signal mask
 *
 * Restores CPU context in an architecture-specific way. On entry to this function \p context holds
 * initial CPU context and this function extracts signal frame (generated by `prepare_sigframe`)
 * and restores it into \p context. The signal mask extracted from the signal frame is written into
 * \p new_mask.
 */
void restore_sigreturn_context(PAL_CONTEXT* context, __sigset_t* new_mask);
/*!
 * \brief Emulate a syscall
 *
 * \param context CPU context
 *
 * If the current instruction pointer in \p context points to a syscall instruction, arrange
 * \p context so that the syscall is emulated.
 * Returns `true` if it was a syscall instruction (hence a syscall will be emulated).
 * Note that this function merely changes context, so the actual emulation is done upon returning
 * to that context.
 * Used e.g. in Linux-SGX Pal to handle `syscall` instruction.
 */
bool maybe_emulate_syscall(PAL_CONTEXT* context);
/*!
 * \brief Handle a signal
 *
 * \param context CPU context
 * \param old_mask_ptr pointer to the old signal mask
 *
 * If there is a signal to be handled, this function arranges its delivery using `prepare_sigframe`.
 * If \p old_mask_ptr is not `NULL`, it is stored into the signal frame, otherwise the current
 * signal mask is used.
 * Returns `true` if a not-ignored signal was handled (hence \p context was changed), `false`
 * otherwise.
 *
 * XXX: Signals are delivered only during transition from LibOS to the user app, so a situation is
 * possible when a signal is queued, but is not delivered for an arbitrary amount of time. There are
 * two distinct situations when this can happen:
 * 1) A blocking host-level syscall was issued and the signal arrived at any point before it and
 *    after the thread entered LibOS code. In such case the host syscall can block indefinitely.
 * 2) The signal arrived in the middle of or after `handle_signal`. In such case delivery of this
 *    signal is delayed until the next syscall is issued or another signal arrives.
 */
bool handle_signal(PAL_CONTEXT* context, __sigset_t* old_mask_ptr);

/*!
 * \brief Translate PAL error code into UNIX error code.
 *
 * The sign of the error code is preserved.
 */
long pal_to_unix_errno(long err);

void warn_unsupported_syscall(unsigned long sysno);
void debug_print_syscall_before(unsigned long sysno, ...);
void debug_print_syscall_after(unsigned long sysno, ...);

/*
 * These events have counting semaphore semantics:
 * - `set_event(e, n)` increases value of the semaphore by `n`,
 * - `wait_event(e)` decreases value by 1 (blocking if it's 0),
 * - `clear_event(e)` decreases value to 0, without blocking - this operation is not atomic.
 * Note that using `clear_event` probably requires external locking to avoid races.
 */
/*!
 * \brief Initialize event \p e by opening its backing PAL pipe.
 *
 * Returns 0 on success, negative UNIX error code on failure.
 */
static inline int create_event(AEVENTTYPE* e) {
    int ret = DkStreamOpen(URI_PREFIX_PIPE, PAL_ACCESS_RDWR, 0, 0, 0, &e->event);
    return pal_to_unix_errno(ret);
}

/*! \brief Returns the PAL handle backing event \p e (NULL if not created). */
static inline PAL_HANDLE event_handle(AEVENTTYPE* e) {
    PAL_HANDLE handle = e->event;
    return handle;
}

/*! \brief Close the PAL handle backing event \p e and mark the event as destroyed (idempotent). */
static inline void destroy_event(AEVENTTYPE* e) {
    PAL_HANDLE handle = e->event;
    if (!handle)
        return;
    e->event = NULL;
    DkObjectClose(handle); // TODO: handle errors
}

/*!
 * \brief Increase the value of event semaphore \p e by \p n.
 *
 * \param e event to signal
 * \param n how many waiters to wake (semaphore increment)
 *
 * Writes \p n zero bytes into the backing pipe (possibly in several chunks), retrying on
 * interrupts. Returns 0 on success, negative UNIX error code on failure.
 */
static inline int set_event(AEVENTTYPE* e, size_t n) {
    /* TODO: this should be changed into an assert, once we make sure it does not happen (old
     * version handled it). */
    if (!e->event) {
        return -EINVAL;
    }

    /* Fixed-size buffer instead of a `char bytes[n]` VLA: `n` is caller-controlled and a large
     * value would overflow the stack. Only the total byte count matters to the semaphore
     * semantics, so writing in chunks is equivalent. */
    char zeros[64] = { 0 };
    while (n > 0) {
        size_t size = n < sizeof(zeros) ? n : sizeof(zeros);
        int ret = DkStreamWrite(e->event, 0, &size, zeros, NULL);
        if (ret < 0) {
            if (ret == -PAL_ERROR_INTERRUPTED || ret == -PAL_ERROR_TRYAGAIN) {
                continue;
            }
            return pal_to_unix_errno(ret);
        }
        if (size == 0) {
            /* This should never happen. */
            return -EINVAL;
        }
        n -= size;
    }

    return 0;
}

/*!
 * \brief Decrease the value of event semaphore \p e by 1, blocking if it is 0.
 *
 * Consumes one byte from the backing pipe, retrying on `-EINTR`/`-EAGAIN`. Returns the final
 * (translated) status from the read; `-ENODATA` if the pipe reported end-of-stream.
 */
static inline int wait_event(AEVENTTYPE* e) {
    /* TODO: this should be changed into an assert, once we make sure it does not happen (old
     * version handled it). */
    if (!e->event) {
        return -EINVAL;
    }

    while (1) {
        char buf;
        size_t count = 1;
        int ret = DkStreamRead(e->event, 0, &count, &buf, NULL, 0);
        if (ret < 0) {
            ret = pal_to_unix_errno(ret);
        } else if (count == 0) {
            ret = -ENODATA;
        }
        /* XXX(borysp): I think we should actually return both of these. */
        if (ret != -EINTR && ret != -EAGAIN) {
            return ret;
        }
    }
}

/*!
 * \brief Drain all pending wakeups from event \p e, i.e. reset the semaphore value to 0.
 *
 * Non-blocking: polls with zero timeout and reads until the backing pipe is empty. Not atomic
 * with respect to concurrent `set_event` calls - callers probably need external locking (see the
 * note above `create_event`). Returns 0 on success, negative UNIX error code on failure.
 */
static inline int clear_event(AEVENTTYPE* e) {
    /* TODO: this should be changed into an assert, once we make sure it does not happen (old
     * version handled it). */
    if (!e->event) {
        return -EINVAL;
    }

    while (1) {
        PAL_HANDLE handle = e->event;
        PAL_FLG ievent = PAL_WAIT_READ;
        PAL_FLG revent = 0;

        /* Zero timeout: we only check whether there is anything left to read, never block. */
        int ret = DkStreamsWaitEvents(1, &handle, &ievent, &revent, /*timeout=*/0);
        if (ret < 0) {
            if (ret == -PAL_ERROR_INTERRUPTED) {
                continue;
            } else if (ret == -PAL_ERROR_TRYAGAIN) {
                /* Nothing ready to read - the event is fully drained. */
                break;
            }
            return pal_to_unix_errno(ret);
        }

        /* Even if `revent` has `PAL_WAIT_ERROR` marked, let `DkStreamRead()` report the error
         * below. */
        assert(revent);

        char bytes[100];
        size_t n = sizeof(bytes);
        ret = DkStreamRead(e->event, 0, &n, bytes, NULL, 0);
        if (ret < 0) {
            if (ret == -PAL_ERROR_INTERRUPTED) {
                continue;
            } else if (ret == -PAL_ERROR_TRYAGAIN) {
                /* This should not happen, since we polled above... */
                break;
            }
            return pal_to_unix_errno(ret);
        } else if (n == 0) {
            /* This should not happen, something closed the handle? */
            return -ENODATA;
        }
    }

    return 0;
}

/* reference counter APIs */
#define REF_GET(ref)        __atomic_load_n(&(ref).counter, __ATOMIC_SEQ_CST)
/* Note: no trailing semicolon (callers supply it), so the macro expands to a single statement and
 * stays safe inside unbraced `if`/`else` bodies; `count` is parenthesized against operator
 * precedence surprises. */
#define REF_SET(ref, count) __atomic_store_n(&(ref).counter, (count), __ATOMIC_SEQ_CST)

/*!
 * \brief Atomically increment reference counter \p ref.
 *
 * Returns the new (post-increment) value. Asserts that the counter was non-negative.
 */
static inline int64_t __ref_inc(REFTYPE* ref) {
    /* A single atomic RMW replaces the previous load + CAS retry loop: unconditional increment
     * needs no compare-exchange, and `fetch_add` returns the old value for the sanity check. */
    int64_t old = __atomic_fetch_add(&ref->counter, 1, __ATOMIC_SEQ_CST);
    assert(old >= 0);
    return old + 1;
}

#define REF_INC(ref) __ref_inc(&(ref))

/*!
 * \brief Atomically decrement reference counter \p ref.
 *
 * Returns the new (post-decrement) value. Dropping a counter that is already 0 is a fatal bug.
 * The decrement must use compare-exchange (not a plain fetch-sub) because it is conditional on
 * the counter being non-zero.
 */
static inline int64_t __ref_dec(REFTYPE* ref) {
    int64_t old;
    do {
        old = __atomic_load_n(&ref->counter, __ATOMIC_SEQ_CST);
        if (!old) {
            log_error("Fail: Trying to drop reference count below 0");
            BUG();
            return 0;
        }
    } while (!__atomic_compare_exchange_n(&ref->counter, &old, old - 1, /*weak=*/false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
    return old - 1;
}

#define REF_DEC(ref) __ref_dec(&(ref))

#ifndef __alloca
#define __alloca __builtin_alloca
#endif

#define ALLOC_ALIGNMENT         (g_pal_control->alloc_align)
#define IS_ALLOC_ALIGNED(x)     IS_ALIGNED_POW2(x, ALLOC_ALIGNMENT)
#define IS_ALLOC_ALIGNED_PTR(x) IS_ALIGNED_PTR_POW2(x, ALLOC_ALIGNMENT)
#define ALLOC_ALIGN_DOWN(x)     ALIGN_DOWN_POW2(x, ALLOC_ALIGNMENT)
#define ALLOC_ALIGN_UP(x)       ALIGN_UP_POW2(x, ALLOC_ALIGNMENT)
#define ALLOC_ALIGN_DOWN_PTR(x) ALIGN_DOWN_PTR_POW2(x, ALLOC_ALIGNMENT)
#define ALLOC_ALIGN_UP_PTR(x)   ALIGN_UP_PTR_POW2(x, ALLOC_ALIGNMENT)

void* __system_malloc(size_t size);
void __system_free(void* addr, size_t size);

#define system_malloc __system_malloc
#define system_free   __system_free

extern void* migrated_memory_start;
extern void* migrated_memory_end;

/*! \brief Returns whether \p mem lies in the half-open migrated-memory range
 *  `[migrated_memory_start, migrated_memory_end)`. */
static inline bool memory_migrated(void* mem) {
    return migrated_memory_start <= mem && mem < migrated_memory_end;
}

extern void* __load_address;
extern void* __load_address_end;

extern const char** migrated_envp;

struct shim_handle;
int init_brk_from_executable(struct shim_handle* exec);
int init_brk_region(void* brk_region, size_t data_segment_size);
void reset_brk(void);
int init_loader(void);
int init_rlimit(void);

bool is_user_memory_readable(const void* addr, size_t size);
bool is_user_memory_writable(const void* addr, size_t size);
bool is_user_string_readable(const char* addr);

uint64_t get_rlimit_cur(int resource);
void set_rlimit_cur(int resource, uint64_t rlim);

int object_wait_with_retry(PAL_HANDLE handle);

void _update_epolls(struct shim_handle* handle);
void delete_from_epoll_handles(struct shim_handle* handle);
/*!
 * \brief Check if next `epoll_wait` with `EPOLLET` should trigger for this handle
 *
 * \param handle handle to check
 * \param ret return value from last operation
 * \param in `true` if last operation was of input type (e.g. `read`)
 * \param was_partial `true` if last operation did not fill the whole buffer
 *
 * This function should be called after each in/out operation on \p handle that is epoll-able,
 * i.e. that you can wait on using `epoll` syscall. \p ret should be the return value from that
 * operation, \p in should indicate if that was an input operation (e.g. `read` or `accept`) or
 * output (e.g. `write`), \p was_partial - if that operation was done partially (e.g. `read` did not
 * fill the whole buffer).
 *
 * This `EPOLLET` emulation is not entirely identical with Linux. Unfortunately we cannot implement
 * it ideally without PAL support, but adding such support is impossible on PALs other than native
 * Linux. Due to this we have a "hacky" approach: if a handle is successfully waited for on some
 * epoll instance using `EPOLLET` semantics, it's marked as not epollet-ready and then is not
 * included in further `EPOLLET` waits. To mark it back as epollet-ready, an operation must be
 * observed, which either returns `-EAGAIN` (non-blocking operation which cannot be completed atm)
 * or a partial operation (e.g. `read` not filling the whole buffer). The idea is that application
 * cannot assume that all data was processed (hence expect next `EPOLLET` wait not to hang), until
 * it sees one of the above happening.
 */
void maybe_epoll_et_trigger(struct shim_handle* handle, int ret, bool in, bool was_partial);

void* allocate_stack(size_t size, size_t protect_size, bool user);
int init_stack(const char** argv, const char** envp, const char*** out_argp, elf_auxv_t** out_auxv);

#endif /* _SHIM_INTERNAL_H_ */
