/*
 * util.h
 *
 *  Created on: 2023年7月12日
 *      Author: fjx
 */

#ifndef UTIL_UTIL_H_
#define UTIL_UTIL_H_




#include <errno.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include "ds_endian.h"
//#include <byteswap.h>
//#include <sys/uio.h>
//#include <ell/cleanup.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * l_container_of - given @ptr, a pointer to @member inside a @type,
 * return the address of the enclosing @type object.  The __typeof__
 * assignment produces a diagnostic when @ptr does not match the
 * member's type; the pragmas silence the -Wcast-align warning from
 * the (type *) cast.  Uses offsetof() from <stddef.h>.
 */
#define l_container_of(ptr, type, member) ({        \
_Pragma("GCC diagnostic push")            \
_Pragma("GCC diagnostic ignored \"-Wcast-align\"")      \
    const __typeof__(((type *) 0)->member) *__mptr = (ptr); \
    (type *)((char *) __mptr - offsetof(type, member)); \
_Pragma("GCC diagnostic pop")           \
  })

/* Two-level stringification: L_STRINGIFY(FOO) macro-expands FOO first,
 * then turns the expansion into a string literal. */
#define L_STRINGIFY(val) L_STRINGIFY_ARG(val)
#define L_STRINGIFY_ARG(contents) #contents

/* Evaluate @condition once; if it is true (expected to be rare, hence
 * __builtin_expect), log a warning with file/function/condition text
 * via l_warn() -- declared elsewhere in the project -- and yield the
 * boolean result so the macro can be used directly inside an if (). */
#define L_WARN_ON(condition) __extension__ ({       \
    bool r = !!(condition);         \
    if (__builtin_expect(r, 0))       \
      l_warn("WARNING: %s:%s() condition %s failed",  \
        __FILE__, __func__,     \
        #condition);        \
    r;              \
  })

/* Pointer <-> integer conversions routed through (u)intptr_t.
 * NOTE(review): the *_TO_UINT/*_TO_INT forms truncate on LP64 targets
 * where pointers are 64-bit and int is 32-bit; they are only lossless
 * for pointers that originally held an int-sized value. */
#define L_PTR_TO_UINT(p) ((unsigned int) ((uintptr_t) (p)))
#define L_UINT_TO_PTR(u) ((void *) ((uintptr_t) (u)))

#define L_PTR_TO_INT(p) ((int) ((intptr_t) (p)))
#define L_INT_TO_PTR(u) ((void *) ((intptr_t) (u)))

/*
 * Read/write a value of the pointed-to type at a possibly unaligned
 * address.  Wrapping the value in a packed struct forces the compiler
 * to emit an access that is legal at any alignment, instead of a plain
 * (potentially trapping/UB) misaligned load or store.
 */
#define L_GET_UNALIGNED(ptr) __extension__  \
({            \
  struct __attribute__((packed)) {  \
            __typeof__(*(ptr)) __v;   \
  } *__p = (__typeof__(__p)) (ptr); \
  __p->__v;       \
})

#define L_PUT_UNALIGNED(val, ptr)   \
do {            \
  struct __attribute__((packed)) {  \
    __typeof__(*(ptr)) __v;   \
  } *__p = (__typeof__(__p)) (ptr); \
  __p->__v = (val);     \
} while(0)


/*
 * Endianness conversion macros: no-ops for the native byte order, a
 * byte swap otherwise.  Both branches use the project's
 * __bswap16/32/64 helpers from ds_endian.h.
 *
 * Fixed: the big-endian branch previously expanded to bswap_16/32/64
 * from <byteswap.h>, but that include is commented out above, so the
 * branch could not compile; it now uses the same ds_endian.h helpers
 * as the little-endian branch.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN

#define L_LE16_TO_CPU(val) (val)
#define L_LE32_TO_CPU(val) (val)
#define L_LE64_TO_CPU(val) (val)
#define L_CPU_TO_LE16(val) (val)
#define L_CPU_TO_LE32(val) (val)
#define L_CPU_TO_LE64(val) (val)
#define L_BE16_TO_CPU(val) __bswap16(val)
#define L_BE32_TO_CPU(val) __bswap32(val)
#define L_BE64_TO_CPU(val) __bswap64(val)
#define L_CPU_TO_BE16(val) __bswap16(val)
#define L_CPU_TO_BE32(val) __bswap32(val)
#define L_CPU_TO_BE64(val) __bswap64(val)
#elif __BYTE_ORDER == __BIG_ENDIAN
#define L_LE16_TO_CPU(val) __bswap16(val)
#define L_LE32_TO_CPU(val) __bswap32(val)
#define L_LE64_TO_CPU(val) __bswap64(val)
#define L_CPU_TO_LE16(val) __bswap16(val)
#define L_CPU_TO_LE32(val) __bswap32(val)
#define L_CPU_TO_LE64(val) __bswap64(val)
#define L_BE16_TO_CPU(val) (val)
#define L_BE32_TO_CPU(val) (val)
#define L_BE64_TO_CPU(val) (val)
#define L_CPU_TO_BE16(val) (val)
#define L_CPU_TO_BE32(val) (val)
#define L_CPU_TO_BE64(val) (val)
#else
#error "Unknown byte order"
#endif

/* Pre-C99 compilers lack the `inline` keyword; map it to GCC's
 * __inline__ so the static inline helpers below still compile. */
#if __STDC_VERSION__ <= 199409L
#define inline __inline__
#endif

/* Read the single byte stored at @ptr. */
static inline uint8_t l_get_u8(const void *ptr)
{
  const uint8_t *src = ptr;

  return src[0];
}

/* Store the byte @val at @ptr. */
static inline void l_put_u8(uint8_t val, void *ptr)
{
  uint8_t *dst = ptr;

  dst[0] = val;
}

/* Read a host-endian uint16_t from a possibly unaligned @ptr. */
static inline uint16_t l_get_u16(const void *ptr)
{
  uint16_t value;

  memcpy(&value, ptr, sizeof(value));
  return value;
}

/* Store a host-endian uint16_t at a possibly unaligned @ptr. */
static inline void l_put_u16(uint16_t val, void *ptr)
{
  memcpy(ptr, &val, sizeof(val));
}

/* Read a host-endian uint32_t from a possibly unaligned @ptr. */
static inline uint32_t l_get_u32(const void *ptr)
{
  uint32_t value;

  memcpy(&value, ptr, sizeof(value));
  return value;
}

/* Store a host-endian uint32_t at a possibly unaligned @ptr. */
static inline void l_put_u32(uint32_t val, void *ptr)
{
  memcpy(ptr, &val, sizeof(val));
}

/* Read a host-endian uint64_t from a possibly unaligned @ptr. */
static inline uint64_t l_get_u64(const void *ptr)
{
  uint64_t value;

  memcpy(&value, ptr, sizeof(value));
  return value;
}

/* Store a host-endian uint64_t at a possibly unaligned @ptr. */
static inline void l_put_u64(uint64_t val, void *ptr)
{
  memcpy(ptr, &val, sizeof(val));
}

/* Read a host-endian int16_t from a possibly unaligned @ptr. */
static inline int16_t l_get_s16(const void *ptr)
{
  int16_t value;

  memcpy(&value, ptr, sizeof(value));
  return value;
}

/* Read a host-endian int32_t from a possibly unaligned @ptr. */
static inline int32_t l_get_s32(const void *ptr)
{
  int32_t value;

  memcpy(&value, ptr, sizeof(value));
  return value;
}

/* Read a host-endian int64_t from a possibly unaligned @ptr. */
static inline int64_t l_get_s64(const void *ptr)
{
  int64_t value;

  memcpy(&value, ptr, sizeof(value));
  return value;
}

/* Read a little-endian 16-bit value from @ptr (any alignment),
 * returned in host byte order. */
static inline uint16_t l_get_le16(const void *ptr)
{
  const uint8_t *b = ptr;

  return (uint16_t) (b[0] | (b[1] << 8));
}

/* Read a big-endian 16-bit value from @ptr (any alignment),
 * returned in host byte order. */
static inline uint16_t l_get_be16(const void *ptr)
{
  const uint8_t *b = ptr;

  return (uint16_t) ((b[0] << 8) | b[1]);
}

/* Read a little-endian 32-bit value from @ptr (any alignment),
 * returned in host byte order. */
static inline uint32_t l_get_le32(const void *ptr)
{
  const uint8_t *b = ptr;

  return ((uint32_t) b[3] << 24) | ((uint32_t) b[2] << 16) |
         ((uint32_t) b[1] << 8) | b[0];
}

/* Read a big-endian 32-bit value from @ptr (any alignment),
 * returned in host byte order. */
static inline uint32_t l_get_be32(const void *ptr)
{
  const uint8_t *b = ptr;

  return ((uint32_t) b[0] << 24) | ((uint32_t) b[1] << 16) |
         ((uint32_t) b[2] << 8) | b[3];
}

/*
 * Read a 24-bit big-endian value from @ptr, returned in host order.
 * Fixed: the previous implementation loaded a full 32-bit word and
 * shifted, which read one byte past the 3-byte field -- an
 * out-of-bounds access when the field sits at the end of a buffer.
 * Read exactly the 3 bytes that belong to the field.
 */
static inline uint32_t l_get_be24(const void *ptr)
{
  const uint8_t *b = ptr;

  return ((uint32_t) b[0] << 16) | ((uint32_t) b[1] << 8) | b[2];
}

/* Read a little-endian 64-bit value from @ptr (any alignment),
 * returned in host byte order. */
static inline uint64_t l_get_le64(const void *ptr)
{
  const uint8_t *b = ptr;

  return ((uint64_t) b[7] << 56) | ((uint64_t) b[6] << 48) |
         ((uint64_t) b[5] << 40) | ((uint64_t) b[4] << 32) |
         ((uint64_t) b[3] << 24) | ((uint64_t) b[2] << 16) |
         ((uint64_t) b[1] << 8) | b[0];
}

/* Read a big-endian 64-bit value from @ptr (any alignment),
 * returned in host byte order. */
static inline uint64_t l_get_be64(const void *ptr)
{
  const uint8_t *b = ptr;

  return ((uint64_t) b[0] << 56) | ((uint64_t) b[1] << 48) |
         ((uint64_t) b[2] << 40) | ((uint64_t) b[3] << 32) |
         ((uint64_t) b[4] << 24) | ((uint64_t) b[5] << 16) |
         ((uint64_t) b[6] << 8) | b[7];
}

/* Store @val at @ptr (any alignment) in little-endian byte order. */
static inline void l_put_le16(uint16_t val, void *ptr)
{
  uint8_t *b = ptr;

  b[0] = (uint8_t) val;
  b[1] = (uint8_t) (val >> 8);
}

/*
 * Store @val at @ptr (any alignment) in big-endian byte order.
 * Fixed: the parameter was declared `const void *ptr` even though the
 * function writes through it (the const was immediately cast away);
 * it is now `void *` like every other l_put_* helper.
 */
static inline void l_put_be16(uint16_t val, void *ptr)
{
  uint8_t *b = ptr;

  b[0] = (uint8_t) (val >> 8);
  b[1] = (uint8_t) val;
}

/* Store @val at @ptr (any alignment) in little-endian byte order. */
static inline void l_put_le32(uint32_t val, void *ptr)
{
  uint8_t *b = ptr;

  b[0] = (uint8_t) val;
  b[1] = (uint8_t) (val >> 8);
  b[2] = (uint8_t) (val >> 16);
  b[3] = (uint8_t) (val >> 24);
}

/* Store @val at @ptr (any alignment) in big-endian byte order. */
static inline void l_put_be32(uint32_t val, void *ptr)
{
  uint8_t *b = ptr;

  b[0] = (uint8_t) (val >> 24);
  b[1] = (uint8_t) (val >> 16);
  b[2] = (uint8_t) (val >> 8);
  b[3] = (uint8_t) val;
}
/*
 * Store the low 24 bits of @val at @ptr in big-endian byte order.
 * Fixed: the previous implementation stored a full 32-bit word
 * (val << 8 via l_put_be32), which wrote one byte past the 3-byte
 * field and clobbered it with zero -- an out-of-bounds write when the
 * field sits at the end of a buffer.  Write exactly 3 bytes.
 */
static inline void l_put_be24(uint32_t val, void *ptr)
{
  uint8_t *b = ptr;

  b[0] = (uint8_t) (val >> 16);
  b[1] = (uint8_t) (val >> 8);
  b[2] = (uint8_t) val;
}
/* Store @val at @ptr (any alignment) in little-endian byte order. */
static inline void l_put_le64(uint64_t val, void *ptr)
{
  uint8_t *b = ptr;
  unsigned int i;

  for (i = 0; i < 8; i++)
    b[i] = (uint8_t) (val >> (8 * i));
}

/* Store @val at @ptr (any alignment) in big-endian byte order. */
static inline void l_put_be64(uint64_t val, void *ptr)
{
  uint8_t *b = ptr;
  unsigned int i;

  for (i = 0; i < 8; i++)
    b[i] = (uint8_t) (val >> (8 * (7 - i)));
}

/* Declare a variable whose address is passed to auto_free() when it
 * goes out of scope (GCC cleanup attribute).  auto_free is expected to
 * be provided elsewhere in the project -- TODO confirm. */
#define L_AUTO_FREE_VAR(vartype,varname) \
  vartype varname __attribute__((cleanup(auto_free)))

/* Number of elements in a true array; invalid on pointers/parameters. */
#define L_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))


/**
 * l_new:
 * @type: type of structure
 * @count: amount of structures
 *
 * Allocates and zero-initializes an array of @count objects of @type
 * via l_malloc() (declared elsewhere in the project; presumably it
 * aborts on failure, since __p is used without a NULL check -- TODO
 * confirm).  NOTE(review): __n * __s can overflow size_t for very
 * large counts; consider an explicit overflow guard.
 *
 * Returns: pointer to allocated memory
 **/
#define l_new(type, count)      \
  (type *) (__extension__ ({    \
    size_t __n = (size_t) (count);  \
    size_t __s = sizeof(type);  \
    void *__p;      \
    __p = l_malloc(__n * __s);  \
    memset(__p, 0, __n * __s);  \
    __p;        \
  }))







const char *l_util_get_debugfs_path(void);

/* "Temporary failure retry": evaluate @expression repeatedly while it
 * fails with -1/EINTR (interrupted system call), yielding the final
 * result as a long int.  Uses errno from <errno.h>. */
#define L_TFR(expression)                          \
  (__extension__                                   \
    ({ long int __result;                          \
       do __result = (long int) (expression);      \
       while (__result == -1L && errno == EINTR);  \
       __result; }))

/*
 * Generic set-membership helper: evaluates to true when @val compares
 * equal -- per the @cmp expression, which may reference __v and
 * __elems[__i] -- to any of the variadic elements.  @val is evaluated
 * exactly once (captured in __v).
 */
#define _L_IN_SET_CMP(val, type, cmp, ...) __extension__ ({   \
    const type __v = (val);         \
    const typeof(__v) __elems[] = {__VA_ARGS__};    \
    unsigned int __i;         \
    static const unsigned int __n = L_ARRAY_SIZE(__elems);  \
    bool __r = false;         \
    for (__i = 0; __i < __n && !__r; __i++)     \
      __r = (cmp);          \
    __r;              \
  })

/* Warning: evaluates all set elements even after @val has matched one */
#define L_IN_SET(val, ...)  \
  _L_IN_SET_CMP((val), __auto_type, __v == __elems[__i], ##__VA_ARGS__)

/* String-set membership: an element matches on pointer equality or,
 * when both sides are non-NULL, on strcmp() == 0; a NULL @val only
 * matches a NULL element. */
#define L_IN_STRSET(val, ...)           \
  _L_IN_SET_CMP((val), char *, __v == __elems[__i] ||   \
        (__v && __elems[__i] &&     \
         !strcmp(__v, __elems[__i])), ##__VA_ARGS__)

/*
 * Taken from https://github.com/chmike/cst_time_memcmp, adding a volatile to
 * ensure the compiler does not try to optimize the constant time behavior.
 * The code has been modified to add comments and project specific code
 * styling.
 * This specific piece of code is subject to the following copyright:
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2015 Christophe Meessen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * This function performs a secure memory comparison of two buffers of size
 * bytes, representing an integer (byte order is big endian). It returns
 * a negative, zero or positive value if a < b, a == b or a > b respectively.
 */
/*
 * Constant-time comparison of the @size-byte big-endian integers at
 * @a and @b.  Returns <0, 0 or >0 for a < b, a == b, a > b, and 0 when
 * @size is 0.  The volatile accesses and branch-free mask update keep
 * both the run time and the memory-access pattern independent of the
 * buffer contents.
 */
static inline int l_secure_memcmp(const void *a, const void *b,
          size_t size)
{
  const volatile uint8_t *aa = a;
  const volatile uint8_t *bb = b;
  int res = 0, diff, mask;

  /*
   * We will compare all bytes, starting with the less significant. When
   * we find a non-zero difference, we update the result accordingly.
   */
  if (size > 0) {
    /*
     * The following couple of lines can be summarized as a
     * constant time/memory access version of:
     * if (diff != 0) res = diff;
     *
     * From the previous operation, we know that diff is in
     * [-255, 255]
     *
     * The following figure show the possible value of mask, based
     * on different cases of diff:
     *
     * diff  |   diff-1   |   ~diff    | ((diff-1) & ~diff) |  mask
     * ------|------------|------------|--------------------|------
     *   < 0 | 0xFFFFFFXX | 0x000000YY |     0x000000ZZ     |   0
     *  == 0 | 0xFFFFFFFF | 0xFFFFFFFF |     0xFFFFFFFF     | 0xF..F
     *  > 0  | 0x000000XX | 0xFFFFFFYY |     0x000000ZZ     |   0
     *
     * Hence, the mask allows to keep res when diff == 0, and to
     * set res to diff otherwise.
    */
    do {
      --size;
      diff = aa[size] - bb[size];
      mask = (((diff - 1) & ~diff) >> 8);
      res = (res & mask) | diff;
    } while (size != 0);
  }

  return res;
}

bool l_memeq(const void *field, size_t size, uint8_t byte);
bool l_secure_memeq(const void *field, size_t size, uint8_t byte);

/* True iff every one of the @size bytes at @field is zero;
 * delegates to l_memeq(). */
static inline bool l_memeqzero(const void *field, size_t size)
{
  const uint8_t zero_byte = 0;

  return l_memeq(field, size, zero_byte);
}

/*
 * Branch-free select: copy @len bytes from @left into @out when
 * @select_left is true, otherwise from @right.  The choice is made
 * with a byte mask and XOR blending so that neither the branch taken
 * nor the memory-access pattern depends on @select_left.
 */
static inline void l_secure_select(bool select_left,
        const void *left, const void *right,
        void *out, size_t len)
{
  const uint8_t *lhs = left;
  const uint8_t *rhs = right;
  uint8_t *dst = out;
  /* 0xFF when select_left, 0x00 otherwise. */
  const uint8_t keep = (uint8_t) -(!!select_left);
  size_t idx = 0;

  while (idx < len) {
    dst[idx] = rhs[idx] ^ ((lhs[idx] ^ rhs[idx]) & keep);
    idx++;
  }
}


/*!
 * \brief XORs two data lines, storing the result in line1.
 *
 * \param [IN]  line1  1st data line to be XORed
 * \param [IN]  line2  2nd data line to be XORed
 * \param [IN]  size   Number of elements in line1
 *
 * \param [OUT] result XOR( line1, line2 ) result stored in line1
 */
void XorDataLine( uint8_t *line1, uint8_t *line2, int32_t size );

void orDataLine( uint8_t *line1, uint8_t *line2, int32_t size );

#ifdef __cplusplus
}
#endif




#endif /* UTIL_UTIL_H_ */
