/**
 * bsw_event.c
 * A simple implementation of an event-driven model.
 *
 * 竹影半墙
 * 2023.09
 */
#include "bsw_hash.h"
#include "bsw_auto_os.h"
#include "bsw_auto_lock.h"
#include "bsw_auto_thread.h"
#include "bsw_auto_event_action.h"

#define BSW_EVENT_WORKD         (32)
#define BSW_EVENT_HASH_SIZE     (997)
#define BSW_EVENT_QUE_TWAIT_S   (10)
#define BSW_EVENT_LOOP_TWAIT_MS (10000)

typedef struct bsw_event_ctx  bsw_event_ctx_s;
typedef struct bsw_event_hash bsw_event_hash_s;

/* Pair of lookup tables indexing events by fd and by id,
 * both guarded by one read/write lock. */
struct bsw_event_hash {
    bsw_lock_rw_t lock; /* protects both tables below */
    bsw_hash_s *fd;     /* events keyed by file descriptor */
    bsw_hash_s *id;     /* events keyed by 64-bit event id */
};

/* Ready-event queue drained by worker threads.
 * NOTE(review): the rest of this file refers to the typedef as
 * 'bsw_evetn_queue_s' (sic) -- presumably declared in a header;
 * confirm the spelling there. */
struct bsw_event_queue {
    bsw_lock_mtx_t lock;      /* guards h_queue and the events' queued flags */
    bsw_listd_node_s h_queue; /* doubly-linked list of pending events */
    bsw_cond_t cond;          /* signaled when an event is enqueued */
};

/* Global state of the event framework (single instance: ectx). */
struct bsw_event_ctx {
    volatile bool run;  /* cleared to request shutdown of loop/workers */

    uint64_t id;        /* next event id; bumped atomically in event_hash_add */
    bsw_pageca_s ca;    /* page-cache allocator for bsw_event_s objects */

    bsw_event_hash_s hash;    /* fd/id lookup tables */
    bsw_evetn_queue_s queue;  /* ready-event queue */

    int works;                /* number of worker threads */
    bsw_thread_id_t *work;    /* points at workd[], or heap when works > BSW_EVENT_WORKD */
    bsw_thread_id_t  workd[BSW_EVENT_WORKD];
    bsw_thread_id_t  loop;    /* detection-loop thread */
};
static bsw_event_ctx_s ectx;

/* Auto-run constructor: build the event cache with a default capacity
 * of as many events as fit in 4 pages.
 * Fix: the bsw_pageunits() call was missing its closing parenthesis. */
bsw_constructor(BSW_AUTO_RUN_EVENT_CA) 
static void event_ca_constructor(void)
{
    uint32_t max;
    bsw_pageca_s *ca = &ectx.ca;

    max = bsw_pageunits(4, sizeof(bsw_event_s));
    bsw_pca_app (ca, sizeof(bsw_event_s), max);
    bsw_pca_init(ca);
    return ;
}

/* Auto-run destructor: release the event cache.
 * Fix: '&&ectx.ca' (address of a temporary address / invalid) was a
 * typo for '&ectx.ca'. */
bsw_destructor(BSW_AUTO_RUN_EVENT_CA) 
static void event_ca_destructor(void)
{
    bsw_pca_close(&ectx.ca);
    return ;
}

/* Re-create the event cache sized for 'events' entries, replacing the
 * default capacity installed by the constructor.
 * Fix: called the misspelled 'event_ca_destrucotr()'. */
static void event_ca_init(int events)
{
    bsw_pageca_s *ca = &ectx.ca;

    event_ca_destructor();
    bsw_pca_app (ca, sizeof(bsw_event_s), events);
    bsw_pca_init(ca);
}

/* Pluggable event-detection backend (installed elsewhere, e.g. an
 * epoll-based implementation registered via bsw_auto_event_action.h). */
bsw_event_action_s *bsw_event_auto_action;
/* Initialize the backend for at most 'size' monitored descriptors. */
static bsw_always_inline int event_auto_action_init(int size)
{
    return bsw_event_auto_action->init(size);
}

static bsw_always_inline void event_auto_action_close(void)
{
    if (bsw_event_auto_action->close)
        bsw_event_auto_action->close();
    return ;
}

/* Start monitoring 'fd' for 'events'; 'u64' is the opaque cookie the
 * backend hands back on readiness (the event id in this module). */
static bsw_always_inline int 
event_auto_action_add(int fd, uint32_t events, uint64_t u64)
{
    bsw_event_action_s *act = bsw_event_auto_action;

    return act->add(fd, events, u64);
}

/* Change the monitored event set for 'fd'; 'u64' is the event id cookie. */
static bsw_always_inline int 
event_auto_action_mod(int fd, uint32_t events, uint64_t u64)
{
    bsw_event_action_s *act = bsw_event_auto_action;

    return act->mod(fd, events, u64);
}

static bsw_always_inline int event_auto_action_del(int fd)
{
    return bsw_event_auto_action->del(fd);
}

static bsw_always_inline int event_auto_action_enqueue(void)
{
    return bsw_event_auto_action->enqueue(
           &ectx.queue, BSW_EVENT_LOOP_TWAIT_MS);
}

/**
 * Look an event up by key 'k' in table 'h' under the shared read lock
 * and take a reference on it; release with event_rel().
 * Returns NULL when no event matches.
 * Fix: the bsw_hash_find() statement was missing its ';'.
 */
static bsw_event_s * event_ref(bsw_hash_s *h, void *k)
{
    bsw_event_s *e;

    bsw_lock_rw_rd(&ectx.hash.lock);
    e = (bsw_event_s*)bsw_hash_find(h, k);
    if (e)
        bsw_atomic_inc(e->refcnt);
    bsw_lock_rw_un(&ectx.hash.lock);
    return e;
}

/* Drop one reference; return the event to the cache once the count
 * reaches zero. */
static bsw_inline void event_rel(bsw_event_s *e)
{
    if (!bsw_atomic_dec(e->refcnt))
        bsw_pca_free(&ectx.ca, e);
}

/* Bucket function for the fd-keyed table: key is an int fd. */
static int event_hash_cb_fd(void *k)
{
    int fd = *(int *)k;

    return fd % bsw_hash_size(ectx.hash.fd);
}

static int event_hash_cb_id(void *k)
{
    int size;
    int fd = *(int*)k;

    size = bsw_hash_size(ectx.hash.id);
    return fd % size;
}

/* Map an fd-table hash node back to its enclosing event object. */
static void * event_hash_startof_fd(bsw_list_node_s *n)
{
    bsw_event_s *e = bsw_startof(n, bsw_event_s, n_hash_fd);

    return e;
}

/* Map an id-table hash node back to its enclosing event object.
 * Fix: this was a duplicate definition named 'event_hash_startof_fd';
 * event_hash_init() wires the id table to 'event_hash_startof_id'. */
static void * event_hash_startof_id(bsw_list_node_s *n)
{
    return bsw_startof(n, bsw_event_s, n_hash_id);
}

/* fd-table match: does event 'd' own file descriptor *k? */
static int event_hash_compare_fd(void *d, void *k)
{
    const bsw_event_s *e = (const bsw_event_s *)d;
    int fd = *(int *)k;

    return e->fd == fd;
}

static int event_hash_compare_id(void *d, void *k)
{
    bsw_event_s *e = (bsw_event_s *)d;

    return e->id == *(uint64_t*)k;
}

static int event_hash_init(int size)
{
    bsw_hash_s *h;
    bsw_hash_app_s app;

    app.hash = event_hash_cb_fd;
    app.startof = event_hash_startof_fd;
    app.compare = event_hash_compare_fd;
    app.size = size;
    h = bsw_hash_init(&app);
    if (!h)
        return BSW_OOM;
    ectx.hash.fd = h;

    app.hash = event_hash_cb_id;
    app.startof = event_hash_startof_id;
    app.compare = event_hash_compare_id;
    app.size = size;
    h = bsw_hash_init(&app);
    if (!h) {
        bsw_hash_close(ectx.hash.fd);
        return BSW_OOM;
    }
    ectx.hash.id = h;
    return BSW_OK;
}

/**
 * Drain and destroy both lookup tables.  Every event sits in both
 * tables, so nodes popped from the fd table are not freed; the event
 * memory goes back to the cache when popped from the id table.
 * Fix: freed through 'bsw_pageca_free', which no other call site in
 * this module uses -- the cache API here is bsw_pca_* (alloc/free).
 */
static void event_hash_close(void)
{
    void *d;
    bsw_hash_s *fd, *id;

    fd = ectx.hash.fd;
    id = ectx.hash.id;
    while(bsw_hash_each_del(fd))
        ;/* none */
    while((d=bsw_hash_each_del(id)))
        bsw_pca_free(&ectx.ca, d);

    bsw_hash_close(fd);
    bsw_hash_close(id);
    return ;
}

/* Assign the event a fresh id and publish it in both tables, all
 * under the write lock. */
static void bsw_inline event_hash_add(bsw_event_s *e)
{
    bsw_event_hash_s *hash = &ectx.hash;

    bsw_lock_rw_wr(&hash->lock);
    e->id = bsw_atomic_fetch_add(ectx.id);
    bsw_hash_add(hash->fd, &e->n_hash_fd, &e->fd);
    bsw_hash_add(hash->id, &e->n_hash_id, &e->id);
    bsw_lock_rw_un(&hash->lock);
}

/**
 * Find an event by key 'k' in table 'h' under the shared read lock.
 * Returns the event without taking a reference (use event_ref() for
 * that).
 * Fixes: the local 'h' redeclared (and shadowed) the parameter, which
 * is a compile error; and the lookup always searched the fd table
 * regardless of which table the caller passed in.
 */
static bsw_inline 
bsw_event_s * event_hash_find(bsw_hash_s *h, void *k)
{
    void *e;
    bsw_event_hash_s *eh = &ectx.hash;

    bsw_lock_rw_rd(&eh->lock);
    e = bsw_hash_find(h, k);
    bsw_lock_rw_un(&eh->lock);
    return (bsw_event_s*)e;
}

/* Unpublish the event from both tables, then drop the reference the
 * tables held (frees the event when it was the last one). */
static void bsw_inline event_hash_del(bsw_event_s *e)
{
    bsw_event_hash_s *hash = &ectx.hash;

    bsw_lock_rw_wr(&hash->lock);
    bsw_hash_del(hash->fd, &e->fd);
    bsw_hash_del(hash->id, &e->id);
    bsw_lock_rw_un(&hash->lock);

    event_rel(e);
}

/* Prepare the ready queue: mutex, list head, monotonic-clock condvar.
 * Fix: the lock/cond initializers now receive pointers, matching every
 * other bsw_lock_mtx_* / bsw_cond_* call in this file. */
static void event_queue_init(bsw_evetn_queue_s *q)
{
    bsw_lock_mtx_init(&q->lock);
    bsw_listd_init(&q->h_queue);
    bsw_cond_init_monotonic(&q->cond);
    return ;
}

/**
 * Pop the next ready event, blocking while the queue is empty.  The
 * wait uses a BSW_EVENT_QUE_TWAIT_S timeout so the 'run' flag is
 * re-checked periodically.
 * Returns NULL when the queue is empty and the context is shutting
 * down.
 * Fix: the original unconditionally wrote e->queued and so
 * dereferenced NULL on the shutdown path.
 */
static bsw_event_s * 
event_dequeue(bsw_event_ctx_s *ctx, bsw_evetn_queue_s *q)
{
    struct timespec ts;
    bsw_event_s *e;

    bsw_lock_mtx_lc(&q->lock);

dequeue:
    bsw_listd_chief_first(e, &q->h_queue, n_queue);
    if (NULL == e && ctx->run) {
        clock_gettime(CLOCK_MONOTONIC, &ts);
        ts.tv_sec += BSW_EVENT_QUE_TWAIT_S;
        bsw_cond_timedwait(&q->cond, &q->lock, &ts);
        goto dequeue;
    }
    if (e)
        e->queued = 0; /* off the list; may be re-enqueued now */

    bsw_lock_mtx_un(&q->lock);
    return e;
}

static void * event_loop(void *arg)
{
    int ret;

    bsw_thread_name("e_loop");
    while (ctx->run) {
        ret = event_auto_action_enqueue();
        (void)ret;
    }
    return NULL;
}

/* Worker thread: pull ready events off the queue, run their handlers,
 * then drop the reference the enqueuer took. */
static void * event_work(void *arg)
{
    bsw_event_ctx_s *ctx = (bsw_event_ctx_s *)arg;
    bsw_evetn_queue_s *queue = &ctx->queue;

    bsw_thread_name("e_work");

    for (;;) {
        bsw_event_s *e;

        if (!ctx->run)
            break;
        e = event_dequeue(ctx, queue);
        if (e == NULL)
            break;
        e->handle(e->fd, e->events, e->data);
        event_rel(e);
    }
    return NULL;
}

/* Join the first 'works' worker threads and release the thread-id
 * array when it lives on the heap (ctx->works > BSW_EVENT_WORKD). */
static void event_work_close(int works, bsw_event_ctx_s *ctx)
{
    bsw_thread_id_t *ids = ctx->work;
    int i;

    for (i = 0; i < works; i++)
        bsw_thread_join(ids[i], NULL);

    if (ctx->works > BSW_EVENT_WORKD)
        free(ctx->work);
    ctx->work = NULL;
}

/**
 * Spawn 'works' worker threads.  Thread ids live in the embedded
 * ctx->workd array, or on the heap when more than BSW_EVENT_WORKD are
 * requested.  Returns BSW_OK, BSW_OOM, or BSW_SYSCALL_BAD after
 * joining any workers that did start.
 * Fixes: 'sizeof(*thrid)' and 'id + i' used undeclared identifiers
 * (both meant 'wid'); on partial failure ctx->run is now cleared
 * before joining, otherwise the started workers never leave
 * event_dequeue() and the join blocks forever.
 */
static int event_work_init(int works, bsw_event_ctx_s *ctx)
{
    bsw_thread_id_t *wid;
    int i = 0;

    ctx->works = works;
    wid = ctx->workd;
    if (works > BSW_EVENT_WORKD) {
        wid = (bsw_thread_id_t*)malloc(works * sizeof(*wid));
        if (!wid)
            return BSW_OOM;
    }
    ctx->work = wid;

    while (i < works) {
        if (bsw_thread_create(wid + i, NULL, event_work, &ectx))
            break;
        i += 1;
    }
    if (bsw_likely(i == works))
        return BSW_OK;

    ctx->run = false;           /* let the started workers drain and exit */
    event_work_close(i, ctx);
    return BSW_SYSCALL_BAD;
}

/**
 * Initialize the event framework: event cache, lookup tables, ready
 * queue, detection backend, worker threads and the detection loop.
 * Returns 0 on success, -1 on failure (partially built state is torn
 * down).
 * Fixes: 'BSW_EENT_HASH_SIZE' and 'event_auto_action()' were
 * misspellings of BSW_EVENT_HASH_SIZE / event_auto_action_init(); the
 * fail_loop path now clears ectx.run BEFORE joining the workers,
 * otherwise the join would wait on threads that never exit.
 */
int bsw_event_init(bsw_event_app_s *app)
{
    int works, events = app->events;

    if (events)
        event_ca_init(events);
    works = bsw_max(app->works, 1);
    ectx.run = true;
    event_hash_init(BSW_EVENT_HASH_SIZE);
    event_queue_init(&ectx.queue);

    if (event_auto_action_init(app->size))
        goto fail_action;
    if (event_work_init(works, &ectx))
        goto fail_work;
    if (bsw_thread_create(&ectx.loop,
        NULL, event_loop, &ectx))
        goto fail_loop;
    return 0;

fail_loop:
    ectx.run = false;           /* wake workers out of their wait loop */
    event_work_close(works, &ectx);
fail_work:
    event_auto_action_close();
fail_action:
    ectx.run = false;
    event_hash_close();
    return -1;
}

/**
 * Tear the framework down: stop the detection backend, request
 * shutdown, join the workers and the loop thread, then destroy the
 * lookup tables (freeing all remaining events).
 * NOTE(review): workers blocked in event_dequeue() only re-check 'run'
 * when the BSW_EVENT_QUE_TWAIT_S timeout fires, so shutdown may take
 * up to that long; no condvar broadcast is issued here -- confirm
 * whether that latency is acceptable.
 */
void bsw_event_close(void)
{
    event_auto_action_close();
    ectx.run = false;
    event_work_close(ectx.works, &ectx);
    bsw_thread_join(ectx.loop, NULL);
    event_hash_close();
    return ;
}

/**
 * Register 'fd' with handler 'handle' for the given event mask.
 * Returns BSW_OK, BSW_EXIST when the fd is already registered,
 * BSW_OOM, or the backend's error code (the event is rolled back).
 * Fixes: the event is now fully initialized BEFORE event_hash_add()
 * publishes it -- the original published first, letting other threads
 * observe a garbage refcnt/handle; the duplicated 'e->fd = fd' is
 * gone; e->waits is initialized (bsw_event_mod_del/_add read it, and
 * the original never set it on this path).
 */
int bsw_event_add(int fd, uint32_t events, bsw_event_handle handle, void *data)
{
    int ret;
    bsw_event_s *e;

    e = event_hash_find(ectx.hash.fd, &fd);
    if (e)
        return BSW_EXIST;
    e = bsw_pca_alloc(&ectx.ca);
    if (!e)
        return BSW_OOM;

    e->fd = fd;
    e->refcnt = 1;
    e->queued = 0;
    e->handle = handle;
    e->data = data;
    e->events = events;
    e->waits = events;  /* currently-monitored mask, kept by bsw_event_mod() */
    e->reach = 0;

    event_hash_add(e);
    if ((ret = event_auto_action_add(fd, events, e->id)))
        goto fail_add;
    return BSW_OK;

fail_add:
    event_hash_del(e);
    return ret;
}

int bsw_event_del(int fd)
{
    int ret;
    bsw_event_s *e;

    e = event_hash_find(ectx.hash.fd, &fd);
    if (!e)
        return BSW_NEXIST;
    ret = event_auto_action_del(fd);
    if (ret)
        return ret;
    event_hash_del(e);
    return BSW_OK;
}

// Events are assumed not to be accessed concurrently here.
/**
 * Change the monitored event mask for 'fd'.
 * Returns BSW_NEXIST when the fd is unknown, the backend's error code,
 * or BSW_OK.
 * Fixes: missing ';' after event_hash_find(); the backend's u64 cookie
 * is the event id (see bsw_event_add / event_ref, which looks events
 * up by id), so e->id is passed -- the original stuffed the caller's
 * data pointer into the u64 slot.
 */
int bsw_event_mod(int fd, uint32_t events, void *data)
{
    int ret;
    bsw_event_s *e;

    e = event_hash_find(ectx.hash.fd, &fd);
    if (!e)
        return BSW_NEXIST;
    ret = event_auto_action_mod(fd, events, e->id);
    if (ret)
        return ret;
    e->waits = events;
    /* NOTE(review): internal callers pass e->data back unchanged; for
     * external callers this stores the new data pointer -- confirm
     * intended semantics of the 'data' parameter. */
    e->data = data;
    return BSW_OK;
}

/**
 * Disable detection of the given event types on an fd.
 *
 * When an event is queued, detection of its type is switched off so
 * that the same kind of event cannot be reported again and handled by
 * a different worker thread at the same time.  Once the current worker
 * has fully consumed the event (and its data) it re-enables detection
 * of that type.
 */
int bsw_event_mod_del(int fd, uint32_t events, void *data)
{
    bsw_event_s *e = event_hash_find(ectx.hash.fd, &fd);
    uint32_t remain;
    int ret;

    if (e == NULL)
        return BSW_NEXIST;

    remain = e->waits & ~events;  /* keep everything except 'events' */
    ret = bsw_event_mod(e->fd, remain, e->data);
    if (ret)
        return ret;
    return BSW_OK;
}

/**
 * Re-enable detection of the given event types on an fd -- the
 * counterpart of bsw_event_mod_del(), called once a worker has fully
 * consumed an event.
 * Returns BSW_NEXIST when the fd is unknown, the backend's error code,
 * or BSW_OK.
 * Fixes: the backend's u64 cookie is the event id, not the data
 * pointer; e->waits is refreshed on success so later mod_del/mod_add
 * calls see the current mask (mirrors bsw_event_mod()).
 */
int bsw_event_mod_add(int fd, uint32_t events, void *data)
{
    int ret;
    bsw_event_s *e;

    (void)data; /* the event keeps its registered data pointer */

    e = event_hash_find(ectx.hash.fd, &fd);
    if (!e)
        return BSW_NEXIST;
    events |= e->waits;
    ret = event_auto_action_mod(fd, events, e->id);
    if (ret)
        return ret;
    e->waits = events;
    return BSW_OK;
}

/**
 * When (for example) a TCP connection's read event arrives, read-event
 * detection must be switched off so the same connection's reads are
 * not processed by multiple threads at once.
 * Note: this does not reduce read throughput -- a worker's single read
 * pass drains all pending data.
 * Events flagged 'readmulti' opt out and stay monitored.
 */
static bsw_inline int event_read_wait_disable(bsw_event_s *e)
{
    if (e->readmulti)
        return BSW_OK;

    return bsw_event_mod_del(e->fd, BSW_EVENTS_READ, e->data);
}

/**
 * Queue the event identified by 'id' at the tail of the ready queue.
 * The reference taken here is handed to the worker, which drops it
 * via event_rel() after running the handler.
 * Returns BSW_NEXIST, an error from disabling read detection, or
 * BSW_OK.
 * Fixes: the function is declared int but returned no value on two
 * paths; the reference from event_ref() leaked on the early-return
 * and already-queued paths.
 */
int bsw_event_enqueue_tail(uint32_t events,
    uint64_t id, bsw_evetn_queue_s *q)
{
    int ret;
    bsw_event_s *e;

    e = event_ref(ectx.hash.id, &id);
    if (!e)
        return BSW_NEXIST;
    ret = event_read_wait_disable(e);
    if (ret) {
        event_rel(e);
        return ret;
    }

    bsw_lock_mtx_lc(&q->lock);
    if (e->queued) {
        /* already pending: merge the events, drop the extra reference */
        e->events |= events;
        bsw_lock_mtx_un(&q->lock);
        event_rel(e);
        return BSW_OK;
    }
    e->queued = 1;
    bsw_listd_add_tail(&e->n_queue, &q->h_queue);
    bsw_lock_mtx_un(&q->lock);
    bsw_cond_signal(&q->cond);
    return BSW_OK; /* reference consumed by the worker via event_rel() */
}

/**
 * Queue the event identified by 'id' at the HEAD of the ready queue
 * (urgent dispatch).  Mirrors bsw_event_enqueue_tail().
 * Fixes: the function returned values but was declared void (now int,
 * like the tail variant -- existing callers ignoring the result are
 * unaffected); the list was linked through a nonexistent 'e->queue'
 * member instead of e->n_queue; the reference from event_ref() is
 * released on every path that does not hand the event to a worker.
 */
int bsw_event_enqueue_head(uint32_t events, uint64_t id, bsw_evetn_queue_s *q)
{
    int ret;
    bsw_event_s *e;

    e = event_ref(ectx.hash.id, &id);
    if (!e)
        return BSW_NEXIST;
    ret = event_read_wait_disable(e);
    if (ret) {
        event_rel(e);
        return ret;
    }

    bsw_lock_mtx_lc(&q->lock);
    if (e->queued) {
        /* already pending: merge the events, drop the extra reference */
        e->events |= events;
        bsw_lock_mtx_un(&q->lock);
        event_rel(e);
        return BSW_OK;
    }
    e->queued = 1;
    bsw_listd_add_head(&e->n_queue, &q->h_queue);
    bsw_lock_mtx_un(&q->lock);
    bsw_cond_signal(&q->cond);
    return BSW_OK; /* reference consumed by the worker via event_rel() */
}