/**
 * xarray.c
 * xarray 用户程序版本简易实现
 * 
 * 竹影半墙
 * 2023.08
 */
#include "bsw_xarray.h"
#include "bsw_auto_compiler.h"

/**
 * bsw_xarray_s.head bit layout:
 *  [63]    allocated from the xy_ca page cache (see xy_palloc)
 *  [62]    allocated dynamically with malloc (see xy_malloc)
 *  [61..0] pointer to the top-level node
 * bsw_xarray_s.ca:
 *  [0] page cache for interior nodes
 *  [1] page cache for leaf nodes
*/
#define BSW_XY_TOP_BITS     (62)
#define BSW_XY_TOP_MASK     ((1ull<<BSW_XY_TOP_BITS) - 1)
#define BSW_XY_CACHE_SHIFT  (63)
#define BSW_XY_HEAP_SHIFT   (62)
/* size of an xarray header plus its two embedded page caches */
#define BSW_XY_HEAPCA_LEN   \
    (sizeof(bsw_xarray_s)+ (sizeof(bsw_pageca_s)<<1))
/* accessors for the interior-node and leaf-node page caches */
#define bsw_xy_canode(xy)   ( (xy)->ca )
#define bsw_xy_caleaf(xy)   ( (xy)->ca+1 )

/**
 * bsw_xy_node_s slot-count and length helpers.
 * BSW_XY_LEVEL_EXP = log2(BSW_XY_SLOTS)
 */
#define BSW_XY_LEVEL_EXP (4)
#define BSW_XY_SLOTS_MOD (BSW_XY_SLOTS-1)   /* mask extracting a slot index */
#define BSW_XY_LEAF_LEN  (sizeof(bsw_xy_node_s) + sizeof(bsw_xy_leaf_t))
#define BSW_XY_LEAF_VLEN (sizeof(bsw_xy_leaf_t))

#if 0
#endif
typedef struct bsw_xy_xrange bsw_xy_xrange_s;

/* Descriptor for one boundary scan over the tree (lowest or highest
 * occupied index) — see xys_xrange. */
struct bsw_xy_xrange {
    uint64_t *x;    /* where the resulting boundary index is stored */
    int direct;     /* slot index to start scanning from */
    int step;       /* +1 scans slots upward, -1 downward */
};

/* one-shot claim of the range computation via compare-and-swap */
#define bsw_xys_xrange_not(v,o,n) bsw_cas(v,o,n)
/* defines the two scan descriptors: lower bound (x) and upper bound (x_max) */
#define bsw_xys_xrange_define(xs, rg)           \
    bsw_xy_xrange_s x##rg[] = {                 \
        {&xs->x,     0,           1},       \
        {&xs->x_max, BSW_XY_SLOTS-1, -1},   \
    };
#define BSW_XY_CACHE_MAX (2)

#if 0
#endif
/* True when the top node must not collapse into its first child:
 * it is a leaf, its slot 0 is empty, or more than one slot is in use. */
static bsw_inline bool xy_cant_shrink(bsw_xy_node_s *n)
{
    if (n->l_exp == 0)
        return true;
    if (n->slot[0] == NULL)
        return true;
    return n->s_use != 1;
}

/* Clear the presence bitmap of a leaf node; no-op for interior nodes. */
static bsw_inline void xy_leaf_zero(bsw_xy_node_s *n)
{
    if (n->l_exp)
        return;
    memset(n->s_value, 0, BSW_XY_LEAF_VLEN);
}

/*
 * Convert a timespec to nanoseconds.
 * Widen tv_sec before multiplying: on targets where time_t is 32-bit
 * the product would overflow (signed overflow is UB) before being
 * converted to the 64-bit return type.
 */
static bsw_inline uint64_t xy_ts_to_ns(struct timespec ts)
{
    return (uint64_t)ts.tv_sec*1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Store the top-level node pointer into head, preserving the flag bits. */
static bsw_inline 
void xy_top_set(bsw_xarray_s *xy, bsw_xy_node_s *n)
{
    uint64_t flags = xy->head & ~BSW_XY_TOP_MASK;

    xy->head = flags | (uintptr_t)n;
}

/* Extract the top-level node pointer from head (flag bits masked off). */
static bsw_inline 
bsw_xy_node_s * xy_top_get(bsw_xarray_s *xy)
{
    uint64_t p = xy->head & BSW_XY_TOP_MASK;

    return (bsw_xy_node_s *)p;
}

/* Mark the xarray as allocated from the xy_ca page cache (bit 63). */
static bsw_inline void xy_cache_set(bsw_xarray_s *xy)
{
    xy->head |= 1ull << BSW_XY_CACHE_SHIFT;
    return ;
}

/* Was this xarray allocated from the xy_ca page cache? */
static bsw_inline bool xy_cache_get(bsw_xarray_s *xy)
{
    return (bool)(xy->head & (1ull << BSW_XY_CACHE_SHIFT));
}

/* Mark the xarray as allocated with malloc (bit 62). */
static bsw_inline void xy_heap_set(bsw_xarray_s *xy)
{
    xy->head |= 1ull << BSW_XY_HEAP_SHIFT;
    return ;
}

/* Was this xarray allocated with malloc? */
static bsw_inline bool xy_heap_get(bsw_xarray_s *xy)
{
    return (bool)(xy->head & (1ull << BSW_XY_HEAP_SHIFT));
}

/* Set the presence bit for `slot` in a leaf node's value bitmap. */
static bsw_inline void xy_leaf_set(bsw_xy_node_s *n, uint8_t slot)
{
    *n->s_value |= 1ull << slot;
    return ;
}

/* Test the presence bit for `slot` in a leaf node's value bitmap. */
static bsw_inline bool xy_leaf_get(bsw_xy_node_s *n, uint8_t slot)
{
    return (bool)(*n->s_value & (1ull << slot));
}

/* Clear the presence bit for `slot` in a leaf node's value bitmap. */
static bsw_inline void xy_leaf_clear(bsw_xy_node_s *n, uint8_t slot)
{
    *n->s_value &= ~(1ull << slot);
    return ;
}

/* True when the cursor currently sits on a leaf whose slot bit is set,
 * i.e. a value (possibly a stored NULL) exists at the cursor index. */
static bsw_inline bool xys_leaf_get(bsw_xy_state_s *xs)
{
    bsw_xy_node_s *n = xs->node;

    if (!n || n->l_exp)
        return false;
    return (bool)(*n->s_value & (1ull << xs->s_idx));
}

#if 0
#endif
/* Index into the lock/unlock dispatch tables below. */
enum xy_lock_e
{
    XY_LOCKR = 0,   /* reader lock */
    XY_LOCKW = 1,   /* writer lock */
    XY_LOCKN = 2    /* no locking */
};

/* No-op lock operations backing the XY_LOCKN (lockless) mode. */
static bsw_inline int
xy_lock_none  (bsw_lock_rw_t *lock){return 0;}
static bsw_inline int
xy_unlock_none(bsw_lock_rw_t *lock){return 0;}

/* Dispatch tables indexed by enum xy_lock_e. */
static bsw_xy_lock_f  xy_locke[] = {
    bsw_lock_rw_rd,
    bsw_lock_rw_wr,
    xy_lock_none
};
static bsw_xy_lock_f xy_unlocke[] = {
    bsw_lock_rw_un,
    bsw_lock_rw_un,
    xy_unlock_none
};

/* Acquire the xarray lock in the mode selected by `ei`. */
static bsw_inline int 
xy_lock(bsw_xarray_s *xy, enum xy_lock_e ei)
{
    return xy_locke[ei](&xy->lock);
}

/* Release the xarray lock for the mode selected by `ei`. */
static bsw_inline int 
xy_unlock(bsw_xarray_s *xy, enum xy_lock_e ei)
{
    return xy_unlocke[ei](&xy->lock);
}

#if 0
#endif
/* Process-wide page cache handing out bsw_xarray_s objects together
 * with their two embedded page caches (BSW_XY_HEAPCA_LEN bytes each). */
static bsw_pageca_s xy_ca = {
    {
        .size = BSW_XY_HEAPCA_LEN,
        .max  = (1u<<12)/BSW_XY_HEAPCA_LEN+1,   /* objects per 4 KiB page */
        .num  = 0,
        .head = NULL
    },
    NULL, NULL, 0
};
/* Initialize the global cache before main() runs. */
__attribute__((constructor))
static void xy_ca_init(void)
{
    bsw_pca_init(&xy_ca);
    return ;
}

/* Tear down the global cache after main() returns. */
__attribute__((destructor))
static void xy_ca_close(void)
{
    bsw_pca_close(&xy_ca);
    return ;
}

/*
 * Allocate an xarray (plus its two embedded page caches) from the
 * global xy_ca cache.  The whole object is zeroed and the cache bit
 * is recorded in head.  Returns NULL when the cache is exhausted.
 */
static bsw_inline bsw_xarray_s * xy_palloc(void)
{
    bsw_xarray_s *xy = (bsw_xarray_s *)bsw_pca_alloc(&xy_ca);

    if (bsw_unlikely(xy == NULL))
        return NULL;
    memset(xy, 0, bsw_pca_size(&xy_ca));
    xy_cache_set(xy);
    return xy;
}

/*
 * Allocate a bare xarray header from the heap.  Only head is
 * initialized; the heap bit marks it for free() in bsw_xy_close.
 */
static bsw_inline bsw_xarray_s * xy_malloc(void)
{
    bsw_xarray_s *xy = (bsw_xarray_s *)malloc(sizeof(bsw_xarray_s));

    if (bsw_unlikely(xy == NULL))
        return NULL;
    xy->head = 0;
    xy_heap_set(xy);
    return xy;
}

/*
 * Resolve the xarray object: use the caller-supplied one when given,
 * otherwise draw from the page cache (unless caching is disabled),
 * otherwise fall back to a plain heap allocation.
 */
static bsw_xarray_s *
xy_tryalloc(bsw_xarray_s *xy, unsigned flag)
{
    bsw_xarray_s *a = xy;

    if (bsw_unlikely(a != NULL)) {
        a->head = 0;
        return a;
    }
    if (flag != BSW_XY_CACHE_NONE) {
        a = xy_palloc();
        if (a != NULL)
            return a;
    }
    return xy_malloc();
}

/*
 * For a "full" preallocation covering indices [0, ca]: pack the
 * interior-node count into the high 32 bits and the leaf count into
 * the low 32 bits of the returned value.
 */
static uint64_t xy_cache_full(uint64_t ca)
{
    uint32_t leaves, nodes = 0;
    int le = 0;

    while ((ca>>le) >= BSW_XY_SLOTS) {
        /* shift in 64-bit: le can exceed 31 for very large ca, and
         * shifting a 32-bit 1u that far is undefined behavior */
        nodes += (uint32_t)(1ull<<le);
        le += BSW_XY_LEVEL_EXP;
    }
    leaves = ca / BSW_XY_SLOTS + 1;
    return ((uint64_t)nodes<<32) | leaves;
}

/*
 * Translate the init flag into a packed (nodes<<32)|leaves count:
 * FULL derives it from the index ceiling `ca`, MAX takes `ca`
 * verbatim, anything else requests no preallocation.
 */
static uint64_t 
xy_cache_calc(bsw_xarray_s *xy, unsigned flag, uint64_t ca)
{
    if (flag == BSW_XY_CACHE_FULL)
        return xy_cache_full(ca);
    if (flag == BSW_XY_CACHE_MAX)
        return ca;
    return 0;
}

/*
 * Preallocate the per-xarray node/leaf page caches:
 * ca[0] holds interior nodes, ca[1] leaf nodes (with bitmap payload).
 *
 * Only xarrays obtained through xy_palloc carry zeroed ca[] storage,
 * and only those ever consult it in xy_node_alloc.  For a malloc'd or
 * caller-supplied header the ca area is uninitialized —
 * NOTE(review): judging by BSW_XY_HEAPCA_LEN it may not even be
 * allocated — so writing it here would corrupt memory; skip instead.
 */
static void 
xy_node_cache_init(bsw_xarray_s *xy, unsigned flag, uint64_t ca)
{
    int i;
    bsw_pageca_s *pa;
    uint64_t calc;

    if (!xy_cache_get(xy))
        return ;
    calc = xy_cache_calc(xy, flag, ca);
    if (!calc)
        return ;
    for (i = 0; i < BSW_XY_CACHE_MAX; ++i) {
        pa = xy->ca + i;
        bsw_pca_app(pa,
            !i ? sizeof(bsw_xy_node_s) : BSW_XY_LEAF_LEN,
            !i ? (uint32_t)(calc>>32):(uint32_t)calc);
        bsw_pca_init(pa);
    }
    return ;
}

#if 0
#endif
/*
 * Allocate one tree node.  le != 0 requests an interior node,
 * le == 0 a leaf node (which carries the bitmap payload after the
 * header).  Cached xarrays draw from their embedded page caches,
 * uncached ones from malloc.
 */
static bsw_xy_node_s * xy_node_alloc(bsw_xarray_s *xy, uint8_t le)
{
    bsw_pageca_s *pa;

    if (!xy_cache_get(xy)) {
        size_t size = le ? sizeof(bsw_xy_node_s) : BSW_XY_LEAF_LEN;
        return (bsw_xy_node_s *)malloc(size);
    }
    pa = le ? bsw_xy_canode(xy) : bsw_xy_caleaf(xy);
    return (bsw_xy_node_s *)bsw_pca_alloc(pa);
}

/*
 * Allocate and initialize a node at level exponent `le`, parented to
 * the cursor's current node.  On failure records -1 in the cursor's
 * error state and returns NULL.
 */
static bsw_xy_node_s *
xys_alloc(bsw_xy_state_s *xs, uint8_t le)
{
    bsw_xy_node_s *n = xy_node_alloc(xs->xy, le);

    if (bsw_unlikely(n == NULL)) {
        bsw_xys_error_set(xs, -1);
        return NULL;
    }
    memset(n->slot, 0, sizeof(n->slot));
    n->parent = xs->node;
    n->l_exp  = le;
    n->s_use  = 0;
    xy_leaf_zero(n);
    return n;
}

/* Return a node to its page cache, or to the heap for uncached xarrays. */
static void xys_free(bsw_xy_state_s *xs, bsw_xy_node_s *n)
{
    bsw_xarray_s *xy = xs->xy;

    if (!xy_cache_get(xy))
        free(n);
    else if (n->l_exp)
        bsw_pca_free(bsw_xy_canode(xy), n);
    else
        bsw_pca_free(bsw_xy_caleaf(xy), n);
}

#if 0
#endif
/*
 * Step the cursor into node `n`: record the node and the slot index
 * selected by the cursor's target index, then return that slot's
 * content (a child node, a stored value, or NULL).
 */
static void * 
xys_descend(bsw_xy_state_s *xs, bsw_xy_node_s *n)
{
    unsigned idx = (unsigned)(xs->x >> n->l_exp) & BSW_XY_SLOTS_MOD;

    xs->node  = n;
    xs->s_idx = idx;
    return n->slot[idx];
}

/*
 * Highest index coverable by a subtree rooted at level exponent
 * n->l_exp.  The shift is done in 64-bit: n->l_exp can exceed 31 on
 * tall trees, where a 32-bit left shift would be undefined behavior.
 */
static bsw_inline uint64_t xys_xmax(bsw_xy_node_s *n)
{
    return ((uint64_t)BSW_XY_SLOTS << n->l_exp) - 1;
}

/*
 * Compute the level exponent one step above the current top `t`
 * (where new construction starts), storing the top in *n.  With an
 * empty tree (*n == NULL) the result corresponds to the minimal root
 * that covers index x.
 */
static int 
xy_xlevel_exp(uint64_t x, bsw_xy_node_s *t, bsw_xy_node_s **n)
{
    int le;

    *n = t;
    if (t)
        return t->l_exp + BSW_XY_LEVEL_EXP;
    for (le = 0; (x>>le) >= BSW_XY_SLOTS; le += BSW_XY_LEVEL_EXP)
        ;
    return le + BSW_XY_LEVEL_EXP;
}

/*
 * Grow the tree upward until the top node covers index xs->x; each
 * new root adopts the previous top in slot 0.  Returns the level
 * exponent one step above the (possibly new) top — where xys_create
 * starts descending — or -1 on allocation failure (the error is also
 * recorded in xs by xys_alloc).  With an empty tree nothing is
 * allocated here and xs->node stays NULL.
 * NOTE(review): if xys_alloc fails mid-growth, roots allocated in
 * earlier iterations are not recorded in xy->head and appear to leak
 * — confirm intended behavior on OOM.
 */
static int xys_expand(bsw_xy_state_s *xs, bsw_xy_node_s *t)
{
    int le;
    bsw_xy_node_s *n;
    uint64_t x = xs->x;

    xs->node = NULL;    /* new roots get parent == NULL in xys_alloc */
    le = xy_xlevel_exp(x, t, &n);
    if (!n)
        return le;
    while (x > xys_xmax(t)) {
        n = xys_alloc(xs, le);
        if (bsw_unlikely(!n))
            return -1;
        n->slot[0] = t;
        n->s_use  += 1;
        t->parent = n;
        t->n_idx  = 0;

        t = n;
        le += BSW_XY_LEVEL_EXP;
    }
    xy_top_set(xs->xy, n);
    xs->node = n;
    return le;
}

/*
 * Attach freshly-allocated node `n` at parent slot `s`.  `s` may
 * alias &xy->head, in which case n becomes the new top; otherwise the
 * slot lives inside n->parent and the parent's bookkeeping (child
 * index, use count) is updated.
 */
static void 
xys_parent_update(bsw_xarray_s *xy, void **s, bsw_xy_node_s *n)
{
    void **start;
    bsw_xy_node_s *p;

    /* top-level attachment must go through xy_top_set so the flag
     * bits in head stay intact */
    if (&xy->head == (void*)s) {
        xy_top_set(xy, n);
        return ;
    }

    *s = n;
    p  = n->parent;
    start = (void**)p->slot;
    n->n_idx = s - start;   /* child index recovered from slot offset */
    p->s_use += 1;
    return ;
}

/*
 * Walk (and build) the path from the top of the tree down to the leaf
 * slot for index xs->x, allocating missing nodes on the way.  On
 * success the cursor ends on the leaf (xs->node / xs->s_idx) and the
 * current slot content is returned.  On allocation failure the error
 * is recorded in xs and NULL is returned (partially built path
 * remains in place).
 */
static void * xys_create(bsw_xy_state_s *xs)
{
    bsw_xy_node_s *n;
    bsw_xy_node_s *c;
    void **s;
    int le;
    bsw_xarray_s *xy = xs->xy;

    /* make sure the top covers xs->x first */
    le = xys_expand(xs, xy_top_get(xy));
    if (bsw_xys_error(xs))
        return NULL;
    s = (void**)&xy->head;  /* slot holding the node we are about to visit */
    c = xy_top_get(xy);

    while (le > 0) {
        le -= BSW_XY_LEVEL_EXP;
        if (c)
            n = c;                      /* path segment already exists */
        else if ((n=xys_alloc(xs,le)))
            xys_parent_update(xy,s,n);  /* fill the missing link */
        else
            break;                      /* OOM: error already recorded */
        c = xys_descend(xs, n);
        s = (void**)(n->slot + xs->s_idx);
    }
    return c;
}

/* Current CLOCK_MONOTONIC time in nanoseconds. */
static bsw_inline uint64_t xys_timens(void)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return xy_ts_to_ns(now);
}

/*
 * Cooperative back-off for long traversals: once t_max_ns has elapsed
 * since *t_last, drop the lock (mode `lei`), sleep for t_sleep_ns,
 * and re-acquire it so other users can make progress.  *t_last is
 * advanced to the approximate wake-up time.  Disabled when either
 * budget in xs is zero.
 */
static void 
xys_sleep(bsw_xy_state_s *xs, uint64_t *t_last, enum xy_lock_e lei)
{
    struct timespec ts;
    bsw_xarray_s *xy;
    uint64_t tns;
    uint64_t t_max_ns = xs->t_max_ns;
    uint64_t t_sleep_ns = xs->t_sleep_ns;

    if (!t_sleep_ns || !t_max_ns)
        return ;

    if (*t_last+t_max_ns <= (tns=xys_timens())) {
        xy = xs->xy;
        xy_unlock(xy, lei);

        ts.tv_sec  = t_sleep_ns / 1000000000;
        ts.tv_nsec = t_sleep_ns % 1000000000;
        nanosleep(&ts, NULL);

        /* next back-off is measured from (pre-sleep time + sleep) */
        *t_last = tns + xy_ts_to_ns(ts);
        xy_lock(xy, lei);
    }
    return ;
}

/*
 * A slot is empty when it holds no pointer and — for leaves — its
 * presence bit is clear (a deliberately stored NULL keeps the bit set).
 */
static bool xys_slot_empty(bsw_xy_node_s *n, uint8_t s_idx)
{
    bool occupied = n->slot[s_idx] != NULL;

    if (!occupied && !n->l_exp)
        occupied = xy_leaf_get(n, s_idx);
    return !occupied;
}

/*
 * Compute the iteration bounds for foreach: descend from the top
 * following the first non-empty slot (upward scan) to find the lowest
 * occupied index into xs->x, then the last non-empty slot (downward
 * scan) for the highest index into xs->x_max.  Runs under the reader
 * lock; xys_sleep may drop it between levels.  With an empty tree
 * both bounds are 0.
 */
static void xys_xrange(bsw_xy_state_s *xs, uint64_t *t_last)
{
    bsw_xarray_s *xy;
    bsw_xys_xrange_define(xs, rg);
    int i, k, step;
    bsw_xy_node_s *n;
    uint64_t x = 0;

    i = 0;
    xy = xs->xy;

    x_boundary:
    for (x = 0, step = xrg[i].step,
         n = xy_top_get(xy); n;/**/) {

        /* find the first non-empty slot in the scan direction */
        for (k = xrg[i].direct;
             k>=0 && k<BSW_XY_SLOTS &&
             xys_slot_empty(n,k);
             k += step) {

            ;/* none */
        }
        /* a node on the path always has at least one occupied slot */
        assert(k>=0 && k<BSW_XY_SLOTS);
        /* 64-bit shift: l_exp may exceed 31, where shifting the int
         * k would be undefined behavior */
        x += (uint64_t)k << n->l_exp;

        if (!n->l_exp)
            break;
        n = n->slot[k];
        xys_sleep(xs, t_last, XY_LOCKR);
    }
    * xrg[i].x = x;
    if (++i < bsw_arrays(xrg))
        goto x_boundary;

    return ;
}

/*
 * Collapse redundant root levels: while the top node is an interior
 * node whose only occupied slot is slot 0, promote that child to top
 * and free the old root.
 */
static void xys_shrink(bsw_xy_state_s *xs)
{
    bsw_xarray_s  *xy = xs->xy;
    bsw_xy_node_s *n  = xs->node;
    bsw_xy_node_s *child;

    while (!xy_cant_shrink(n)) {
        child = n->slot[0];
        child->parent = NULL;
        xy_top_set(xy, child);
        xys_free(xs, n);
        n = child;
    }
}

/*
 * After a leaf slot was cleared: free now-empty nodes bottom-up
 * starting at xs->node, then either clear the top pointer (tree fully
 * empty) or, when we stopped at the root, shrink redundant levels.
 */
static void xys_del_node(bsw_xy_state_s *xs)
{
    bsw_xarray_s *xy;
    uint8_t i;
    bsw_xy_node_s *p;
    bsw_xy_node_s *n = xs->node;

    xy = xs->xy;
    for (;;) {
        /* stop at the first ancestor that still has occupied slots */
        if (!n || n->s_use)
            break;
        p = n->parent;
        if (p) {
            i = n->n_idx;
            p->slot[i] = NULL;
            p->s_use -= 1;
        }
        xys_free(xs, n);
        n = p;

        xs->node = n;
    }
    if (!n) {
        /* last node freed: the tree is now empty */
        xy_top_set(xy, 0);
        return ;
    }
    /* only the root is a candidate for level shrinking */
    if (n->parent)
        return ;
    xys_shrink(xs);
    return ;
}

/*
 * Position the cursor at index xs->x and return the stored value, or
 * NULL when the path is absent.  Caller must hold a lock.
 */
static void * xys_get_nolock(bsw_xy_state_s *xs)
{
    bsw_xy_node_s *n;

    xs->node = NULL;
    for (n = xy_top_get(xs->xy); n; n = xys_descend(xs, n)) {
        if (!n->l_exp)
            return xys_descend(xs, n);
    }
    return NULL;
}

/*
 * Fetch the value at the cursor index into *y and report whether the
 * slot is actually vacant (leaf presence bit clear).
 */
static bool 
xys_no_value(bsw_xy_state_s *xs, void **y)
{
    *y = xys_get_nolock(xs);
    return !xys_leaf_get(xs);
}

#if 0
#endif
/*
 * Store value y at index xs->x, building the path as needed, under
 * the writer lock.  Returns y on a fresh store; if the slot already
 * held a value, the old value is returned and the slot is overwritten
 * with y.  Returns NULL when node allocation fails (error recorded
 * in xs).
 */
void * bsw_xys_add(bsw_xy_state_s *xs, void *y)
{
    void *yo;
    uint8_t i;
    bsw_xy_node_s *n;
    bsw_xarray_s *xy = xs->xy;

    xy_lock(xy, XY_LOCKW);

    bsw_xys_error_set(xs,0);
    yo = xys_create(xs);
    if ( bsw_xys_error (xs))
        goto addend;

    n = xs->node;
    i = xs->s_idx;
    n->slot[i]= y;
    /* first store at this index: set the presence bit and count it */
    if (!xy_leaf_get(n,i)) {
        xy_leaf_set(n, i);
        n->s_use += 1;
        yo = y;
    }
addend:
    xy_unlock(xy, XY_LOCKW);
    return yo;
}

/* Read the value stored at index xs->x under the reader lock. */
void * bsw_xys_get(bsw_xy_state_s *xs)
{
    bsw_xarray_s *xy = xs->xy;
    void *y;

    xy_lock(xy, XY_LOCKR);
    y = xys_get_nolock(xs);
    xy_unlock(xy, XY_LOCKR);
    return y;
}

/*
 * Remove and return the value at index xs->x under the writer lock;
 * returns NULL when nothing is stored there.  Frees nodes that became
 * empty and shrinks redundant root levels.
 */
void * bsw_xys_del(bsw_xy_state_s *xs)
{
    void *y;
    uint8_t i;
    bsw_xy_node_s *n;
    bsw_xarray_s *xy = xs->xy;

    xy_lock(xy, XY_LOCKW);
    if (xys_no_value(xs, &y))
        goto delend;
    n = xs->node;
    i = xs->s_idx;
    n->slot[i] = NULL;
    n->s_use  -= 1;
    xy_leaf_clear(n, i);
    xys_del_node(xs);   /* release the path if it became empty */
delend:
    xy_unlock(xy, XY_LOCKW);
    return y;
}

/*
 * Iterator step: return the next stored value at or after xs->x
 * (advancing xs->x past it), bounded by the occupied index range.
 * The range is computed once per state via a CAS on xs->x_ranged.
 * With xs->y_null set the loop stops after every index, so stored
 * NULL values are also reported.  Periodically drops the reader lock
 * via xys_sleep.
 */
void * bsw_xys_foreach(bsw_xy_state_s *xs)
{
    void *y;
    unsigned yn;
    uint64_t xmax;
    uint64_t t_last;
    bsw_xarray_s *xy = xs->xy;

    t_last = xys_timens();
    yn = xs->y_null;
    y = NULL;
    xy_lock(xy, XY_LOCKR);

    /* first caller computes the occupied index range */
    if (bsw_xys_xrange_not(&xs->x_ranged, 0, 1))
        xys_xrange(xs, &t_last);
    for (xmax=xs->x_max; xs->x<=xmax; /**/) {
        y = xys_get_nolock(xs);
        xs->x += 1;

        if (xys_leaf_get(xs) || yn)
            break;
        xys_sleep(xs, &t_last, XY_LOCKR);
    }

    xy_unlock(xy, XY_LOCKR);
    return y;
}

/*
 * Create or initialize an xarray.  Pass xy == NULL to have one
 * allocated (from the page cache, or the heap when `flag` is
 * BSW_XY_CACHE_NONE or the cache is exhausted).  `flag` and `ca`
 * select optional node preallocation.  Returns NULL on allocation
 * failure.
 */
bsw_xarray_s * 
bsw_xy_init(bsw_xarray_s *xy, unsigned flag, uint64_t ca)
{
    xy = xy_tryalloc(xy, flag);
    if (xy != NULL) {
        xy->lock = xy_locker;
        xy_node_cache_init(xy, flag, ca);
    }
    return xy;
}

/*
 * Release an xarray created by bsw_xy_init: cached instances return
 * their embedded node caches and themselves to the global pool,
 * heap instances are freed, caller-owned storage is left untouched.
 */
void bsw_xy_close(bsw_xarray_s *xy)
{
    if (xy_cache_get(xy)) {
        bsw_pca_close(bsw_xy_canode(xy));
        bsw_pca_close(bsw_xy_caleaf(xy));
        bsw_pca_free(&xy_ca, xy);
    } else if (xy_heap_get(xy)) {
        free(xy);
    }
}
